file_name | prefix | suffix | middle | fim_type
---|---|---|---|---
mod.rs
|
));
Ok(storage)
}
/// Sets up an instance of `Storage`, with git turned on.
pub fn setup_luigi_with_git() -> Result<Storage<Project>> {
trace!("setup_luigi_with_git()");
let working = try!(::CONFIG.get_str("dirs/working").ok_or("Faulty config: dirs/working does not contain a value"));
let archive = try!(::CONFIG.get_str("dirs/archive").ok_or("Faulty config: dirs/archive does not contain a value"));
let templates = try!(::CONFIG.get_str("dirs/templates").ok_or("Faulty config: dirs/templates does not contain a value"));
let storage = try!(Storage::new_with_git(util::get_storage_path(), working, archive, templates));
Ok(storage)
}
pub fn simple_with_projects<F>(dir:StorageDir, search_terms:&[&str], f:F)
where F:Fn(&Project)
{
match with_projects(dir, search_terms, |p| {f(p);Ok(())}){
Ok(_) => {},
Err(e) => error!("{}",e)
}
}
/// Helper method that passes projects matching the `search_terms` to the passed closure `f`
pub fn with_projects<F>(dir:StorageDir, search_terms:&[&str], f:F) -> Result<()>
where F:Fn(&Project)->Result<()>
{
trace!("with_projects({:?})", search_terms);
let luigi = try!(setup_luigi());
let projects = try!(luigi.search_projects_any(dir, search_terms));
if projects.is_empty() {
return Err(format!("Nothing found for {:?}", search_terms).into())
}
for project in &projects{
try!(f(project));
}
Ok(())
}
pub fn csv(year:i32) -> Result<String> {
let luigi = try!(setup_luigi());
let mut projects = try!(luigi.open_projects(StorageDir::Year(year)));
projects.sort_by(|pa, pb| pa.index().unwrap_or_else(|| "zzzz".to_owned()).cmp(&pb.index().unwrap_or_else(|| "zzzz".to_owned())));
projects_to_csv(&projects)
}
/// Produces a csv string from a list of `Project`s
/// TODO: this still contains German terms
pub fn projects_to_csv(projects:&[Project]) -> Result<String>{
let mut string = String::new();
let splitter = ";";
try!(writeln!(&mut string, "{}", [ "Rnum", "Bezeichnung", "Datum", "Rechnungsdatum", "Betreuer", "Verantwortlich", "Bezahlt am", "Betrag", "Canceled"].join(splitter)));
for project in projects{
try!(writeln!(&mut string, "{}", [
project.get("InvoiceNumber").unwrap_or_else(|| String::from(r#""""#)),
project.get("Name").unwrap_or_else(|| String::from(r#""""#)),
project.get("event/dates/0/begin").unwrap_or_else(|| String::from(r#""""#)),
project.get("invoice/date").unwrap_or_else(|| String::from(r#""""#)),
project.get("Caterers").unwrap_or_else(|| String::from(r#""""#)),
project.get("Responsible").unwrap_or_else(|| String::from(r#""""#)),
project.get("invoice/payed_date").unwrap_or_else(|| String::from(r#""""#)),
project.get("Final").unwrap_or_else(|| String::from(r#""""#)),
project.canceled_string().to_owned()
].join(splitter)));
}
Ok(string)
}
/// Creates the latex files within each projects directory, either for Invoice or Offer.
#[cfg(feature="document_export")]
pub fn project_to_doc(project: &Project, template_name:&str, bill_type:&Option<BillType>, dry_run:bool, force:bool) -> Result<()> {
let template_ext = ::CONFIG.get_str("extensions/output_template").expect("Faulty default config");
let output_ext = ::CONFIG.get_str("extensions/output_file").expect("Faulty default config");
let convert_ext = ::CONFIG.get_str("convert/output_extension").expect("Faulty default config");
let trash_exts = ::CONFIG.get("convert/trash_extensions") .expect("Faulty default config")
.as_vec().expect("Faulty default config")
.into_iter()
.map(|v|v.as_str()).collect::<Vec<_>>();
let mut template_path = PathBuf::new();
template_path.push(util::get_storage_path());
template_path.push(::CONFIG.get_str("dirs/templates").expect("Faulty config: dirs/templates does not contain a value"));
template_path.push(template_name);
template_path.set_extension(template_ext);
debug!("template file={:?} exists={}", template_path, template_path.exists());
if !template_path.exists() {
return Err(format!("Template not found at {}", template_path.display()).into())
}
let convert_tool = ::CONFIG.get_str("convert/tool");
let output_folder = ::CONFIG.get_str("output_path").and_then(util::get_valid_path).expect("Faulty config \"output_path\"");
let ready_for_offer = project.is_ready_for_offer();
let ready_for_invoice = project.is_ready_for_invoice();
let project_file = project.file();
// tiny little helper
let to_local_file = |file:&Path, ext| {
let mut _tmpfile = file.to_owned();
_tmpfile.set_extension(ext);
Path::new(_tmpfile.file_name().unwrap()).to_owned()
};
use BillType::*;
let (dyn_bill_type, outfile_tex):
(Option<BillType>, Option<PathBuf>) =
match (bill_type, ready_for_offer, ready_for_invoice)
{
(&Some(Offer), Ok(_), _ ) |
(&None, Ok(_), Err(_)) => (Some(Offer), Some(project.dir().join(project.offer_file_name(output_ext).expect("this should have been caught by ready_for_offer()")))),
(&Some(Invoice), _, Ok(_)) |
(&None, _, Ok(_)) => (Some(Invoice), Some(project.dir().join(project.invoice_file_name(output_ext).expect("this should have been caught by ready_for_invoice()")))),
(&Some(Offer), Err(e), _ ) => {error!("cannot create an offer, check out: {:#?}", e);(None,None)},
(&Some(Invoice), _, Err(e)) => {error!("cannot create an invoice, check out: {:#?}", e);(None,None)},
(_, Err(e), Err(_)) => {error!("Neither an Offer nor an Invoice can be created from this project\n please check out {:#?}", e);(None,None)}
};
//debug!("{:?} -> {:?}",(bill_type, project.is_ready_for_offer(), project.is_ready_for_invoice()), (dyn_bill_type, outfile_tex));
if let (Some(outfile), Some(dyn_bill)) = (outfile_tex, dyn_bill_type) {
let filled = try!(fill_template(project, &dyn_bill, &template_path));
let pdffile = to_local_file(&outfile, convert_ext);
let target = output_folder.join(&pdffile);
// ok, so apparently we can create a tex file, so lets do it
if !force && target.exists() && try!(file_age(&target)) < try!(file_age(&project_file)){
// no wait, nothing has changed, so lets save ourselves the work
info!("nothing to be done, {} is younger than {}\n use -f if you don't agree", target.display(), project_file.display());
} else {
// \o/ we created a tex file
if dry_run{
warn!("Dry run! This does not produce any output:\n * {}\n * {}", outfile.display(), pdffile.display());
} else {
let outfileb = try!(project.write_to_file(&filled,&dyn_bill,output_ext));
debug!("{} vs\n {}", outfile.display(), outfileb.display());
util::pass_to_command(&convert_tool, &[&outfileb]);
}
// clean up expected trash files
for trash_ext in trash_exts.iter().filter_map(|x|*x){
let trash_file = to_local_file(&outfile, trash_ext);
if trash_file.exists()
|
else {
debug!("I expected there to be a {}, but there wasn't any ?", trash_file.display())
}
}
if pdffile.exists(){
debug!("now there is be a {:?} -> {:?}", pdffile, target);
try!(fs::rename(&pdffile, &target));
}
}
}
Ok(())
}
/// Creates the latex files within each projects directory, either for Invoice or Offer.
#[cfg(feature="document_export")]
pub fn projects_to_doc(dir:StorageDir, search_term:&str, template_name:&str, bill_type:&Option<BillType>, dry_run:bool, force:bool) -> Result<()> {
with_projects(dir, &[search_term], |p| project_to_doc(p, template_name, bill_type, dry_run, force) )
}
fn file_age(path:&Path) -> Result<time::Duration> {
let metadata = try!(fs::metadata(path));
let accessed = try!(metadata.accessed());
Ok(try!(accessed.elapsed()))
}
/// Testing only,
|
{
try!(fs::remove_file(&trash_file));
debug!("just deleted: {}", trash_file.display())
}
|
conditional_block
|
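The `with_projects` helper in the row above threads a closure over every project matching the search terms. A minimal usage sketch follows; it is hypothetical and assumes this crate's `Result` alias, `StorageDir::Year`, and the string-keyed `Project::get` seen in the CSV export:

```rust
// Hypothetical sketch, not part of the dataset row: prints the "Name" field
// of every project matching "myproject" in the year-2016 storage directory.
fn print_matching_names() -> Result<()> {
    with_projects(StorageDir::Year(2016), &["myproject"], |project| {
        // `get` is assumed to return Option<String>, as the CSV export above suggests.
        println!("{}", project.get("Name").unwrap_or_default());
        Ok(())
    })
}
```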
secretcache.go
|
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package cache is the in-memory secret store.
package cache
import (
"bytes"
"context"
"errors"
"sync"
"sync/atomic"
"time"
"istio.io/istio/pkg/log"
ca "istio.io/istio/security/pkg/nodeagent/caclient/interface"
"istio.io/istio/security/pkg/nodeagent/model"
"istio.io/istio/security/pkg/pki/util"
)
const (
// The size of a private key for a leaf certificate.
keySize = 2048
// max number of retries while waiting for the CSR response, to parse the root cert from it.
maxRetryNum = 5
// initial retry wait duration while waiting for the root cert to become available.
retryWaitDuration = 200 * time.Millisecond
// RootCertReqResourceName is the resource name of the discovery request for the root certificate.
RootCertReqResourceName = "ROOTCA"
)
// Options provides all of the configuration parameters for secret cache.
type Options struct {
// secret TTL.
SecretTTL time.Duration
// Secret rotation job running interval.
RotationInterval time.Duration
// Secret eviction duration.
EvictionDuration time.Duration
}
// SecretManager defines secrets management interface which is used by SDS.
type SecretManager interface {
// GenerateSecret generates a new secret and caches it.
GenerateSecret(ctx context.Context, proxyID, resourceName, token string) (*model.SecretItem, error)
// SecretExist checks whether the secret already exists.
SecretExist(proxyID, resourceName, token, version string) bool
}
// ConnKey is the key of one SDS connection.
type ConnKey struct {
ProxyID string
// ResourceName of the SDS request, taken from SDS.DiscoveryRequest.ResourceName.
// Currently it's `ROOTCA` for root cert requests and 'default' for normal key/cert requests.
ResourceName string
}
// SecretCache is the in-memory cache for secrets.
type SecretCache struct {
// secrets map is the cache for secrets.
// map key is Envoy instance ID, map value is secretItem.
secrets sync.Map
rotationTicker *time.Ticker
caClient ca.Client
// Cached secrets are removed when (time.now - secretItem.CreatedTime >= evictionDuration); this prevents the cache from growing indefinitely.
evictionDuration time.Duration
// Key rotation job running interval.
rotationInterval time.Duration
// secret TTL.
secretTTL time.Duration
// How many times the key rotation job has detected a secret change; used in unit tests.
secretChangedCount uint64
// callback function to invoke when detecting secret change.
notifyCallback func(proxyID string, resourceName string, secret *model.SecretItem) error
// Right now we always skip the check, since the key rotation job checks token expiry only when the cert has expired;
// because the token's TTL is much shorter than the cert's, the check can be skipped in normal cases.
// The flag is used in unit tests; it is a uint32 instead of a boolean because Go has no atomic
// boolean type, and atomic access is needed to avoid a race condition in unit tests.
skipTokenExpireCheck uint32
// close channel.
closing chan bool
rootCertMutex *sync.Mutex
rootCert []byte
}
// NewSecretCache creates a new secret cache.
func NewSecretCache(cl ca.Client, notifyCb func(string, string, *model.SecretItem) error, options Options) *SecretCache {
ret := &SecretCache{
caClient: cl,
closing: make(chan bool),
evictionDuration: options.EvictionDuration,
notifyCallback: notifyCb,
rootCertMutex: &sync.Mutex{},
rotationInterval: options.RotationInterval,
secretTTL: options.SecretTTL,
}
atomic.StoreUint64(&ret.secretChangedCount, 0)
atomic.StoreUint32(&ret.skipTokenExpireCheck, 1)
go ret.keyCertRotationJob()
return ret
}
// GenerateSecret generates a new secret and caches it; this function is called by SDS.StreamSecrets
// and SDS.FetchSecret. Since the credential passed from the client may change, the secret is
// regenerated every time instead of being read from the cache.
func (sc *SecretCache) GenerateSecret(ctx context.Context, proxyID, resourceName, token string) (*model.SecretItem, error) {
var ns *model.SecretItem
key := ConnKey{
ProxyID: proxyID,
ResourceName: resourceName,
}
if resourceName != RootCertReqResourceName {
// Request for normal key/cert pair.
ns, err := sc.generateSecret(ctx, token, resourceName, time.Now())
if err != nil {
log.Errorf("Failed to generate secret for proxy %q: %v", proxyID, err)
return nil, err
}
sc.secrets.Store(key, *ns)
return ns, nil
}
// If the request is for the root certificate, retry,
// since rootCert may be empty until a CSR response is returned from the CA.
if sc.rootCert == nil {
wait := retryWaitDuration
retryNum := 0
for ; retryNum < maxRetryNum; retryNum++ {
time.Sleep(wait) // sleep the doubling backoff interval, not the constant
if sc.rootCert != nil {
break
}
wait *= 2
}
}
if sc.rootCert == nil {
log.Errorf("Failed to get root cert for proxy %q", proxyID)
return nil, errors.New("faied to get root cert")
}
t := time.Now()
ns = &model.SecretItem{
ResourceName: resourceName,
RootCert: sc.rootCert,
Token: token,
CreatedTime: t,
Version: t.String(),
}
sc.secrets.Store(key, *ns)
return ns, nil
}
// SecretExist checks whether the secret already exists.
func (sc *SecretCache) SecretExist(proxyID, resourceName, token, version string) bool {
key := ConnKey{
ProxyID: proxyID,
ResourceName: resourceName,
}
val, exist := sc.secrets.Load(key)
if !exist {
return false
}
e := val.(model.SecretItem)
if e.ResourceName == resourceName && e.Token == token && e.Version == version {
return true
}
return false
}
// Close shuts down the secret cache.
func (sc *SecretCache) Close() {
sc.closing <- true
}
func (sc *SecretCache) keyCertRotationJob() {
// Wake up once in a while and refresh stale items.
sc.rotationTicker = time.NewTicker(sc.rotationInterval)
for {
select {
case <-sc.rotationTicker.C:
sc.rotate()
case <-sc.closing:
if sc.rotationTicker != nil {
sc.rotationTicker.Stop()
}
}
}
}
func (sc *SecretCache) rotate() {
log.Debug("Refresh job running")
secretMap := map[ConnKey]*model.SecretItem{}
wg := sync.WaitGroup{}
sc.secrets.Range(func(k interface{}, v interface{}) bool {
key := k.(ConnKey)
// Skip the refresh if the cached item is the root cert.
if key.ResourceName == RootCertReqResourceName {
return true
}
proxyID := key.ProxyID
now := time.Now()
e := v.(model.SecretItem)
// Remove stale secrets from the cache; this prevents it from growing indefinitely.
if now.After(e.CreatedTime.Add(sc.evictionDuration)) {
sc.secrets.Delete(key)
return true
}
// Re-generate secret if it's expired.
if sc.shouldRefresh(&e) {
wg.Add(1)
go func() {
defer wg.Done()
if sc.isTokenExpired(&e) {
log.Debugf("Token for %q expired for proxy %q", e.ResourceName, proxyID)
if sc.notifyCallback != nil {
// Send the notification to close the stream connection if both cert and token have expired.
if err := sc.notifyCallback(key.ProxyID, key.ResourceName, nil /*nil indicates close the streaming connection to proxy*/); err != nil {
log.Errorf("Failed to notify for proxy %q: %v", proxyID, err)
}
} else {
log.Warnf("secret cache notify callback isn't set")
}
return
}
// If the token is still valid, re-generate the secret and push the change to the proxy.
// Most likely this code path is unnecessary, since the cert's TTL is much longer than the token's.
// When the cert has expired, we can keep it simple by assuming the token has already expired.
ns, err := sc.generateSecret(context.Background(), e.Token, e.ResourceName, now)
if err != nil {
log.Errorf("Failed to generate secret for proxy %q: %v", proxyID, err)
return
}
secretMap[key] = ns
atomic.AddUint64(&sc.secretChangedCount, 1)
if sc.notifyCallback != nil {
if err := sc.notifyCallback(proxyID,
|
// You may obtain a copy of the License at
//
|
random_line_split
|
|
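The root-cert wait loop in this row doubles `wait` between attempts, an exponential backoff. A self-contained Rust sketch of that pattern, with hypothetical names (this is an illustration, not the Istio code):

```rust
use std::thread;
use std::time::{Duration, Instant};

// Poll `ready` with exponential backoff, mirroring the root-cert retry loop:
// each attempt sleeps the growing interval before re-checking.
fn wait_until(mut ready: impl FnMut() -> bool, max_retries: u32, initial: Duration) -> bool {
    let mut wait = initial;
    for _ in 0..max_retries {
        thread::sleep(wait);
        if ready() {
            return true;
        }
        wait *= 2; // 200ms, 400ms, 800ms, ...
    }
    false
}

fn main() {
    let start = Instant::now();
    // Pretend the "root cert" becomes available after half a second.
    let ok = wait_until(|| start.elapsed() > Duration::from_millis(500), 5, Duration::from_millis(200));
    println!("root cert available: {}", ok);
}
```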
secretcache.go
|
"istio.io/istio/security/pkg/nodeagent/model"
"istio.io/istio/security/pkg/pki/util"
)
const (
// The size of a private key for a leaf certificate.
keySize = 2048
// max number of retries while waiting for the CSR response, to parse the root cert from it.
maxRetryNum = 5
// initial retry wait duration while waiting for the root cert to become available.
retryWaitDuration = 200 * time.Millisecond
// RootCertReqResourceName is the resource name of the discovery request for the root certificate.
RootCertReqResourceName = "ROOTCA"
)
// Options provides all of the configuration parameters for secret cache.
type Options struct {
// secret TTL.
SecretTTL time.Duration
// Secret rotation job running interval.
RotationInterval time.Duration
// Secret eviction duration.
EvictionDuration time.Duration
}
// SecretManager defines secrets management interface which is used by SDS.
type SecretManager interface {
// GenerateSecret generates a new secret and caches it.
GenerateSecret(ctx context.Context, proxyID, resourceName, token string) (*model.SecretItem, error)
// SecretExist checks whether the secret already exists.
SecretExist(proxyID, resourceName, token, version string) bool
}
// ConnKey is the key of one SDS connection.
type ConnKey struct {
ProxyID string
// ResourceName of the SDS request, taken from SDS.DiscoveryRequest.ResourceName.
// Currently it's `ROOTCA` for root cert requests and 'default' for normal key/cert requests.
ResourceName string
}
// SecretCache is the in-memory cache for secrets.
type SecretCache struct {
// secrets map is the cache for secrets.
// map key is Envoy instance ID, map value is secretItem.
secrets sync.Map
rotationTicker *time.Ticker
caClient ca.Client
// Cached secrets are removed when (time.now - secretItem.CreatedTime >= evictionDuration); this prevents the cache from growing indefinitely.
evictionDuration time.Duration
// Key rotation job running interval.
rotationInterval time.Duration
// secret TTL.
secretTTL time.Duration
// How many times the key rotation job has detected a secret change; used in unit tests.
secretChangedCount uint64
// callback function to invoke when detecting secret change.
notifyCallback func(proxyID string, resourceName string, secret *model.SecretItem) error
// Right now we always skip the check, since the key rotation job checks token expiry only when the cert has expired;
// because the token's TTL is much shorter than the cert's, the check can be skipped in normal cases.
// The flag is used in unit tests; it is a uint32 instead of a boolean because Go has no atomic
// boolean type, and atomic access is needed to avoid a race condition in unit tests.
skipTokenExpireCheck uint32
// close channel.
closing chan bool
rootCertMutex *sync.Mutex
rootCert []byte
}
// NewSecretCache creates a new secret cache.
func NewSecretCache(cl ca.Client, notifyCb func(string, string, *model.SecretItem) error, options Options) *SecretCache {
ret := &SecretCache{
caClient: cl,
closing: make(chan bool),
evictionDuration: options.EvictionDuration,
notifyCallback: notifyCb,
rootCertMutex: &sync.Mutex{},
rotationInterval: options.RotationInterval,
secretTTL: options.SecretTTL,
}
atomic.StoreUint64(&ret.secretChangedCount, 0)
atomic.StoreUint32(&ret.skipTokenExpireCheck, 1)
go ret.keyCertRotationJob()
return ret
}
// GenerateSecret generates a new secret and caches it; this function is called by SDS.StreamSecrets
// and SDS.FetchSecret. Since the credential passed from the client may change, the secret is
// regenerated every time instead of being read from the cache.
func (sc *SecretCache) GenerateSecret(ctx context.Context, proxyID, resourceName, token string) (*model.SecretItem, error) {
var ns *model.SecretItem
key := ConnKey{
ProxyID: proxyID,
ResourceName: resourceName,
}
if resourceName != RootCertReqResourceName {
// Request for normal key/cert pair.
ns, err := sc.generateSecret(ctx, token, resourceName, time.Now())
if err != nil {
log.Errorf("Failed to generate secret for proxy %q: %v", proxyID, err)
return nil, err
}
sc.secrets.Store(key, *ns)
return ns, nil
}
// If the request is for the root certificate, retry,
// since rootCert may be empty until a CSR response is returned from the CA.
if sc.rootCert == nil {
wait := retryWaitDuration
retryNum := 0
for ; retryNum < maxRetryNum; retryNum++ {
time.Sleep(wait) // sleep the doubling backoff interval, not the constant
if sc.rootCert != nil {
break
}
wait *= 2
}
}
if sc.rootCert == nil {
log.Errorf("Failed to get root cert for proxy %q", proxyID)
return nil, errors.New("faied to get root cert")
}
t := time.Now()
ns = &model.SecretItem{
ResourceName: resourceName,
RootCert: sc.rootCert,
Token: token,
CreatedTime: t,
Version: t.String(),
}
sc.secrets.Store(key, *ns)
return ns, nil
}
// SecretExist checks whether the secret already exists.
func (sc *SecretCache) SecretExist(proxyID, resourceName, token, version string) bool {
key := ConnKey{
ProxyID: proxyID,
ResourceName: resourceName,
}
val, exist := sc.secrets.Load(key)
if !exist {
return false
}
e := val.(model.SecretItem)
if e.ResourceName == resourceName && e.Token == token && e.Version == version {
return true
}
return false
}
// Close shuts down the secret cache.
func (sc *SecretCache) Close() {
sc.closing <- true
}
func (sc *SecretCache) keyCertRotationJob() {
// Wake up once in a while and refresh stale items.
sc.rotationTicker = time.NewTicker(sc.rotationInterval)
for {
select {
case <-sc.rotationTicker.C:
sc.rotate()
case <-sc.closing:
if sc.rotationTicker != nil {
sc.rotationTicker.Stop()
}
}
}
}
func (sc *SecretCache) rotate() {
log.Debug("Refresh job running")
secretMap := map[ConnKey]*model.SecretItem{}
wg := sync.WaitGroup{}
sc.secrets.Range(func(k interface{}, v interface{}) bool {
key := k.(ConnKey)
// Skip the refresh if the cached item is the root cert.
if key.ResourceName == RootCertReqResourceName {
return true
}
proxyID := key.ProxyID
now := time.Now()
e := v.(model.SecretItem)
// Remove stale secrets from the cache; this prevents it from growing indefinitely.
if now.After(e.CreatedTime.Add(sc.evictionDuration)) {
sc.secrets.Delete(key)
return true
}
// Re-generate secret if it's expired.
if sc.shouldRefresh(&e) {
wg.Add(1)
go func() {
defer wg.Done()
if sc.isTokenExpired(&e) {
log.Debugf("Token for %q expired for proxy %q", e.ResourceName, proxyID)
if sc.notifyCallback != nil {
// Send the notification to close the stream connection if both cert and token have expired.
if err := sc.notifyCallback(key.ProxyID, key.ResourceName, nil /*nil indicates close the streaming connection to proxy*/); err != nil {
log.Errorf("Failed to notify for proxy %q: %v", proxyID, err)
}
} else {
log.Warnf("secret cache notify callback isn't set")
}
return
}
// If the token is still valid, re-generate the secret and push the change to the proxy.
// Most likely this code path is unnecessary, since the cert's TTL is much longer than the token's.
// When the cert has expired, we can keep it simple by assuming the token has already expired.
ns, err := sc.generateSecret(context.Background(), e.Token, e.ResourceName, now)
if err != nil {
log.Errorf("Failed to generate secret for proxy %q: %v", proxyID, err)
return
}
secretMap[key] = ns
atomic.AddUint64(&sc.secretChangedCount, 1)
if sc.notifyCallback != nil {
if err := sc.notifyCallback(proxyID, key.ResourceName, ns); err != nil {
log.Errorf("Failed to notify secret change for proxy %q: %v", proxyID, err)
}
} else {
log.Warnf("secret cache notify callback isn't set")
}
}()
}
return true
})
wg.Wait()
for key, secret := range secretMap {
sc.secrets.Store(key, *secret)
}
}
func (sc *SecretCache)
|
(ctx context.Context, token, resourceName string, t time.Time) (*model.SecretItem, error) {
options := util.CertOptions{
Host: resourceName,
RSAKeySize: keySize,
}
// Generate
|
generateSecret
|
identifier_name
|
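`SecretExist` in these rows relies on `ConnKey` being a value-comparable struct key into the cache. The same pattern sketched in Rust, where a plain `HashMap` stands in for Go's `sync.Map` (names mirror the Go code, but the sketch itself is hypothetical):

```rust
use std::collections::HashMap;

#[derive(Hash, PartialEq, Eq)]
struct ConnKey {
    proxy_id: String,
    resource_name: String,
}

struct SecretItem {
    token: String,
    version: String,
}

// Equivalent of SecretExist: the secret counts as existing only when the
// cached token and version both match the request.
fn secret_exists(cache: &HashMap<ConnKey, SecretItem>, key: &ConnKey, token: &str, version: &str) -> bool {
    cache
        .get(key)
        .map_or(false, |item| item.token == token && item.version == version)
}

fn main() {
    let mut cache = HashMap::new();
    cache.insert(
        ConnKey { proxy_id: "proxy-1".into(), resource_name: "default".into() },
        SecretItem { token: "t1".into(), version: "v1".into() },
    );
    // Struct keys compare by value, so an equal key built elsewhere finds the entry.
    let key = ConnKey { proxy_id: "proxy-1".into(), resource_name: "default".into() };
    println!("{}", secret_exists(&cache, &key, "t1", "v1")); // true
    println!("{}", secret_exists(&cache, &key, "t1", "v2")); // false
}
```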
secretcache.go
|
istio.io/istio/security/pkg/nodeagent/model"
"istio.io/istio/security/pkg/pki/util"
)
const (
// The size of a private key for a leaf certificate.
keySize = 2048
// max number of retries while waiting for the CSR response, to parse the root cert from it.
maxRetryNum = 5
// initial retry wait duration while waiting for the root cert to become available.
retryWaitDuration = 200 * time.Millisecond
// RootCertReqResourceName is the resource name of the discovery request for the root certificate.
RootCertReqResourceName = "ROOTCA"
)
// Options provides all of the configuration parameters for secret cache.
type Options struct {
// secret TTL.
SecretTTL time.Duration
// Secret rotation job running interval.
RotationInterval time.Duration
// Secret eviction duration.
EvictionDuration time.Duration
}
// SecretManager defines secrets management interface which is used by SDS.
type SecretManager interface {
// GenerateSecret generates a new secret and caches it.
GenerateSecret(ctx context.Context, proxyID, resourceName, token string) (*model.SecretItem, error)
// SecretExist checks whether the secret already exists.
SecretExist(proxyID, resourceName, token, version string) bool
}
// ConnKey is the key of one SDS connection.
type ConnKey struct {
ProxyID string
// ResourceName of the SDS request, taken from SDS.DiscoveryRequest.ResourceName.
// Currently it's `ROOTCA` for root cert requests and 'default' for normal key/cert requests.
ResourceName string
}
// SecretCache is the in-memory cache for secrets.
type SecretCache struct {
// secrets map is the cache for secrets.
// map key is Envoy instance ID, map value is secretItem.
secrets sync.Map
rotationTicker *time.Ticker
caClient ca.Client
// Cached secrets are removed when (time.now - secretItem.CreatedTime >= evictionDuration); this prevents the cache from growing indefinitely.
evictionDuration time.Duration
// Key rotation job running interval.
rotationInterval time.Duration
// secret TTL.
secretTTL time.Duration
// How many times the key rotation job has detected a secret change; used in unit tests.
secretChangedCount uint64
// callback function to invoke when detecting secret change.
notifyCallback func(proxyID string, resourceName string, secret *model.SecretItem) error
// Right now we always skip the check, since the key rotation job checks token expiry only when the cert has expired;
// because the token's TTL is much shorter than the cert's, the check can be skipped in normal cases.
// The flag is used in unit tests; it is a uint32 instead of a boolean because Go has no atomic
// boolean type, and atomic access is needed to avoid a race condition in unit tests.
skipTokenExpireCheck uint32
// close channel.
closing chan bool
rootCertMutex *sync.Mutex
rootCert []byte
}
// NewSecretCache creates a new secret cache.
func NewSecretCache(cl ca.Client, notifyCb func(string, string, *model.SecretItem) error, options Options) *SecretCache {
ret := &SecretCache{
caClient: cl,
closing: make(chan bool),
evictionDuration: options.EvictionDuration,
notifyCallback: notifyCb,
rootCertMutex: &sync.Mutex{},
rotationInterval: options.RotationInterval,
secretTTL: options.SecretTTL,
}
atomic.StoreUint64(&ret.secretChangedCount, 0)
atomic.StoreUint32(&ret.skipTokenExpireCheck, 1)
go ret.keyCertRotationJob()
return ret
}
// GenerateSecret generates a new secret and caches it; this function is called by SDS.StreamSecrets
// and SDS.FetchSecret. Since the credential passed from the client may change, the secret is
// regenerated every time instead of being read from the cache.
func (sc *SecretCache) GenerateSecret(ctx context.Context, proxyID, resourceName, token string) (*model.SecretItem, error) {
var ns *model.SecretItem
key := ConnKey{
ProxyID: proxyID,
ResourceName: resourceName,
}
if resourceName != RootCertReqResourceName {
// Request for normal key/cert pair.
ns, err := sc.generateSecret(ctx, token, resourceName, time.Now())
if err != nil {
log.Errorf("Failed to generate secret for proxy %q: %v", proxyID, err)
return nil, err
}
sc.secrets.Store(key, *ns)
return ns, nil
}
// If the request is for the root certificate, retry,
// since rootCert may be empty until a CSR response is returned from the CA.
if sc.rootCert == nil
|
if sc.rootCert == nil {
log.Errorf("Failed to get root cert for proxy %q", proxyID)
return nil, errors.New("faied to get root cert")
}
t := time.Now()
ns = &model.SecretItem{
ResourceName: resourceName,
RootCert: sc.rootCert,
Token: token,
CreatedTime: t,
Version: t.String(),
}
sc.secrets.Store(key, *ns)
return ns, nil
}
// SecretExist checks whether the secret already exists.
func (sc *SecretCache) SecretExist(proxyID, resourceName, token, version string) bool {
key := ConnKey{
ProxyID: proxyID,
ResourceName: resourceName,
}
val, exist := sc.secrets.Load(key)
if !exist {
return false
}
e := val.(model.SecretItem)
if e.ResourceName == resourceName && e.Token == token && e.Version == version {
return true
}
return false
}
// Close shuts down the secret cache.
func (sc *SecretCache) Close() {
sc.closing <- true
}
func (sc *SecretCache) keyCertRotationJob() {
// Wake up once in a while and refresh stale items.
sc.rotationTicker = time.NewTicker(sc.rotationInterval)
for {
select {
case <-sc.rotationTicker.C:
sc.rotate()
case <-sc.closing:
if sc.rotationTicker != nil {
sc.rotationTicker.Stop()
}
}
}
}
func (sc *SecretCache) rotate() {
log.Debug("Refresh job running")
secretMap := map[ConnKey]*model.SecretItem{}
wg := sync.WaitGroup{}
sc.secrets.Range(func(k interface{}, v interface{}) bool {
key := k.(ConnKey)
// Skip the refresh if the cached item is the root cert.
if key.ResourceName == RootCertReqResourceName {
return true
}
proxyID := key.ProxyID
now := time.Now()
e := v.(model.SecretItem)
// Remove stale secrets from the cache; this prevents it from growing indefinitely.
if now.After(e.CreatedTime.Add(sc.evictionDuration)) {
sc.secrets.Delete(key)
return true
}
// Re-generate secret if it's expired.
if sc.shouldRefresh(&e) {
wg.Add(1)
go func() {
defer wg.Done()
if sc.isTokenExpired(&e) {
log.Debugf("Token for %q expired for proxy %q", e.ResourceName, proxyID)
if sc.notifyCallback != nil {
// Send the notification to close the stream connection if both cert and token have expired.
if err := sc.notifyCallback(key.ProxyID, key.ResourceName, nil /*nil indicates close the streaming connection to proxy*/); err != nil {
log.Errorf("Failed to notify for proxy %q: %v", proxyID, err)
}
} else {
log.Warnf("secret cache notify callback isn't set")
}
return
}
// If the token is still valid, re-generate the secret and push the change to the proxy.
// Most likely this code path is unnecessary, since the cert's TTL is much longer than the token's.
// When the cert has expired, we can keep it simple by assuming the token has already expired.
ns, err := sc.generateSecret(context.Background(), e.Token, e.ResourceName, now)
if err != nil {
log.Errorf("Failed to generate secret for proxy %q: %v", proxyID, err)
return
}
secretMap[key] = ns
atomic.AddUint64(&sc.secretChangedCount, 1)
if sc.notifyCallback != nil {
if err := sc.notifyCallback(proxyID, key.ResourceName, ns); err != nil {
log.Errorf("Failed to notify secret change for proxy %q: %v", proxyID, err)
}
} else {
log.Warnf("secret cache notify callback isn't set")
}
}()
}
return true
})
wg.Wait()
for key, secret := range secretMap {
sc.secrets.Store(key, *secret)
}
}
func (sc *SecretCache) generateSecret(ctx context.Context, token, resourceName string, t time.Time) (*model.SecretItem, error) {
options := util.CertOptions{
Host: resourceName,
RSAKeySize: keySize,
}
// Generate
|
{
wait := retryWaitDuration
retryNum := 0
for ; retryNum < maxRetryNum; retryNum++ {
time.Sleep(wait) // sleep the doubling backoff interval, not the constant
if sc.rootCert != nil {
break
}
wait *= 2
}
}
|
conditional_block
|
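`keyCertRotationJob` is a tick-driven loop that also listens on a closing channel. A rough Rust equivalent of that select loop, approximating Go's ticker with `recv_timeout` (a hypothetical sketch, not the Istio code):

```rust
use std::sync::mpsc;
use std::thread;
use std::time::Duration;

// Run `work` every `interval` until a close signal arrives, like
// keyCertRotationJob selecting over rotationTicker.C and sc.closing.
fn rotation_job(interval: Duration, closing: mpsc::Receiver<()>, work: impl Fn()) {
    loop {
        match closing.recv_timeout(interval) {
            // A message (or a dropped sender) means Close() was called.
            Ok(()) | Err(mpsc::RecvTimeoutError::Disconnected) => return,
            // The interval elapsed with no close signal: refresh stale items.
            Err(mpsc::RecvTimeoutError::Timeout) => work(),
        }
    }
}

fn main() {
    let (close_tx, close_rx) = mpsc::channel();
    let job = thread::spawn(move || {
        rotation_job(Duration::from_millis(50), close_rx, || println!("rotate"))
    });
    thread::sleep(Duration::from_millis(175));
    close_tx.send(()).unwrap(); // like `sc.closing <- true`
    job.join().unwrap();
}
```

One difference worth noting: the Go loop stops the ticker on close but stays blocked in the select, while this sketch returns once the close signal arrives.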
secretcache.go
|
istio.io/istio/security/pkg/nodeagent/model"
"istio.io/istio/security/pkg/pki/util"
)
const (
// The size of a private key for a leaf certificate.
keySize = 2048
// max number of retries while waiting for the CSR response, to parse the root cert from it.
maxRetryNum = 5
// initial retry wait duration while waiting for the root cert to become available.
retryWaitDuration = 200 * time.Millisecond
// RootCertReqResourceName is the resource name of the discovery request for the root certificate.
RootCertReqResourceName = "ROOTCA"
)
// Options provides all of the configuration parameters for secret cache.
type Options struct {
// secret TTL.
SecretTTL time.Duration
// Secret rotation job running interval.
RotationInterval time.Duration
// Secret eviction duration.
EvictionDuration time.Duration
}
// SecretManager defines secrets management interface which is used by SDS.
type SecretManager interface {
// GenerateSecret generates a new secret and caches it.
GenerateSecret(ctx context.Context, proxyID, resourceName, token string) (*model.SecretItem, error)
// SecretExist checks whether the secret already exists.
SecretExist(proxyID, resourceName, token, version string) bool
}
// ConnKey is the key of one SDS connection.
type ConnKey struct {
ProxyID string
// ResourceName of the SDS request, taken from SDS.DiscoveryRequest.ResourceName.
// Currently it's `ROOTCA` for root cert requests and 'default' for normal key/cert requests.
ResourceName string
}
// SecretCache is the in-memory cache for secrets.
type SecretCache struct {
// secrets map is the cache for secrets.
// map key is Envoy instance ID, map value is secretItem.
secrets sync.Map
rotationTicker *time.Ticker
caClient ca.Client
// Cached secrets are removed when (time.now - secretItem.CreatedTime >= evictionDuration); this prevents the cache from growing indefinitely.
evictionDuration time.Duration
// Key rotation job running interval.
rotationInterval time.Duration
// secret TTL.
secretTTL time.Duration
// How many times the key rotation job has detected a secret change; used in unit tests.
secretChangedCount uint64
// callback function to invoke when detecting secret change.
notifyCallback func(proxyID string, resourceName string, secret *model.SecretItem) error
// Right now we always skip the check, since the key rotation job checks token expiry only when the cert has expired;
// because the token's TTL is much shorter than the cert's, the check can be skipped in normal cases.
// The flag is used in unit tests; it is a uint32 instead of a boolean because Go has no atomic
// boolean type, and atomic access is needed to avoid a race condition in unit tests.
skipTokenExpireCheck uint32
// close channel.
closing chan bool
rootCertMutex *sync.Mutex
rootCert []byte
}
// NewSecretCache creates a new secret cache.
func NewSecretCache(cl ca.Client, notifyCb func(string, string, *model.SecretItem) error, options Options) *SecretCache
|
// GenerateSecret generates a new secret and caches it; this function is called by SDS.StreamSecrets
// and SDS.FetchSecret. Since the credential passed from the client may change, the secret is
// regenerated every time instead of being read from the cache.
func (sc *SecretCache) GenerateSecret(ctx context.Context, proxyID, resourceName, token string) (*model.SecretItem, error) {
var ns *model.SecretItem
key := ConnKey{
ProxyID: proxyID,
ResourceName: resourceName,
}
if resourceName != RootCertReqResourceName {
// Request for normal key/cert pair.
ns, err := sc.generateSecret(ctx, token, resourceName, time.Now())
if err != nil {
log.Errorf("Failed to generate secret for proxy %q: %v", proxyID, err)
return nil, err
}
sc.secrets.Store(key, *ns)
return ns, nil
}
// If the request is for the root certificate, retry,
// since rootCert may be empty until a CSR response is returned from the CA.
if sc.rootCert == nil {
wait := retryWaitDuration
retryNum := 0
for ; retryNum < maxRetryNum; retryNum++ {
time.Sleep(wait) // sleep the doubling backoff interval, not the constant
if sc.rootCert != nil {
break
}
wait *= 2
}
}
if sc.rootCert == nil {
log.Errorf("Failed to get root cert for proxy %q", proxyID)
return nil, errors.New("faied to get root cert")
}
t := time.Now()
ns = &model.SecretItem{
ResourceName: resourceName,
RootCert: sc.rootCert,
Token: token,
CreatedTime: t,
Version: t.String(),
}
sc.secrets.Store(key, *ns)
return ns, nil
}
// SecretExist checks whether the secret already exists.
func (sc *SecretCache) SecretExist(proxyID, resourceName, token, version string) bool {
key := ConnKey{
ProxyID: proxyID,
ResourceName: resourceName,
}
val, exist := sc.secrets.Load(key)
if !exist {
return false
}
e := val.(model.SecretItem)
if e.ResourceName == resourceName && e.Token == token && e.Version == version {
return true
}
return false
}
// Close shuts down the secret cache.
func (sc *SecretCache) Close() {
sc.closing <- true
}
func (sc *SecretCache) keyCertRotationJob() {
// Wake up once in a while and refresh stale items.
sc.rotationTicker = time.NewTicker(sc.rotationInterval)
for {
select {
case <-sc.rotationTicker.C:
sc.rotate()
case <-sc.closing:
if sc.rotationTicker != nil {
sc.rotationTicker.Stop()
}
}
}
}
func (sc *SecretCache) rotate() {
log.Debug("Refresh job running")
secretMap := map[ConnKey]*model.SecretItem{}
wg := sync.WaitGroup{}
sc.secrets.Range(func(k interface{}, v interface{}) bool {
key := k.(ConnKey)
// Skip the refresh if the cached item is the root cert.
if key.ResourceName == RootCertReqResourceName {
return true
}
proxyID := key.ProxyID
now := time.Now()
e := v.(model.SecretItem)
// Remove stale secrets from the cache; this prevents it from growing indefinitely.
if now.After(e.CreatedTime.Add(sc.evictionDuration)) {
sc.secrets.Delete(key)
return true
}
// Re-generate secret if it's expired.
if sc.shouldRefresh(&e) {
wg.Add(1)
go func() {
defer wg.Done()
if sc.isTokenExpired(&e) {
log.Debugf("Token for %q expired for proxy %q", e.ResourceName, proxyID)
if sc.notifyCallback != nil {
// Send the notification to close the stream connection if both cert and token have expired.
if err := sc.notifyCallback(key.ProxyID, key.ResourceName, nil /*nil indicates close the streaming connection to proxy*/); err != nil {
log.Errorf("Failed to notify for proxy %q: %v", proxyID, err)
}
} else {
log.Warnf("secret cache notify callback isn't set")
}
return
}
// If the token is still valid, re-generate the secret and push the change to the proxy.
// Most likely this code path is unnecessary, since the cert's TTL is much longer than the token's.
// When the cert has expired, we can keep it simple by assuming the token has already expired.
ns, err := sc.generateSecret(context.Background(), e.Token, e.ResourceName, now)
if err != nil {
log.Errorf("Failed to generate secret for proxy %q: %v", proxyID, err)
return
}
secretMap[key] = ns
atomic.AddUint64(&sc.secretChangedCount, 1)
if sc.notifyCallback != nil {
if err := sc.notifyCallback(proxyID, key.ResourceName, ns); err != nil {
log.Errorf("Failed to notify secret change for proxy %q: %v", proxyID, err)
}
} else {
log.Warnf("secret cache notify callback isn't set")
}
}()
}
return true
})
wg.Wait()
for key, secret := range secretMap {
sc.secrets.Store(key, *secret)
}
}
func (sc *SecretCache) generateSecret(ctx context.Context, token, resourceName string, t time.Time) (*model.SecretItem, error) {
options := util.CertOptions{
Host: resourceName,
RSAKeySize: keySize,
}
// Generate
|
{
ret := &SecretCache{
caClient: cl,
closing: make(chan bool),
evictionDuration: options.EvictionDuration,
notifyCallback: notifyCb,
rootCertMutex: &sync.Mutex{},
rotationInterval: options.RotationInterval,
secretTTL: options.SecretTTL,
}
atomic.StoreUint64(&ret.secretChangedCount, 0)
atomic.StoreUint32(&ret.skipTokenExpireCheck, 1)
go ret.keyCertRotationJob()
return ret
}
|
identifier_body
|
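The cache stores `skipTokenExpireCheck` as a `uint32` manipulated through `sync/atomic` because, as the comment in the row notes, Go had no atomic boolean type. Rust ships `AtomicBool`, so the same flag needs no integer workaround; a small sketch:

```rust
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread;

fn main() {
    // Counterpart of skipTokenExpireCheck: shared, lock-free, race-free.
    let skip_token_expire_check = Arc::new(AtomicBool::new(true));

    let flag = Arc::clone(&skip_token_expire_check);
    let handle = thread::spawn(move || {
        // A test (or the rotation job) can flip the flag without a mutex.
        flag.store(false, Ordering::SeqCst);
    });
    handle.join().unwrap();

    println!("skip check: {}", skip_token_expire_check.load(Ordering::SeqCst)); // false
}
```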
main.rs
|
a < 0 {
b = -1;
}
else {
b = 0;
}
println!("b is {}", b);
}
fn f14() {
let a = 3;
let number = if a > 0 { 1 } else { -1 };
println!("number 为 {}", number);
}
fn f15() {
let mut number = 1;
while number != 4 {
println!("{}", number);
number += 1;
}
println!("EXIT");
}
fn f16() {
let a = [10, 20, 30, 40, 50];
for i in a.iter() {
println!("值为 : {}", i);
}
}
fn f17() {
let a = [10, 20, 30, 40, 50];
for i in 0..5 {
println!("a[{}] = {}", i, a[i]);
}
}
fn f18() {
let s = ['R', 'U', 'N', 'O', 'O', 'B'];
let mut i = 0;
loop {
let ch = s[i];
if ch == 'O' {
break;
}
println!("\'{}\'", ch);
i += 1;
}
}
fn f19() {
let s = ['R', 'U', 'N', 'O', 'O', 'B'];
let mut i = 0;
let location = loop {
let ch = s[i];
if ch == 'O' {
break i;
}
i += 1;
};
println!(" \'O\' 的索引为 {}", location);
}
fn f20() {
let s1 = String::from("hello");
let s2 = s1.clone();
println!("s1 = {}, s2 = {}", s1, s2);
}
fn f21() {
let s = String::from("hello");
// s comes into scope
takes_ownership(s);
// the value of s is passed into the function as an argument,
// so s can be considered moved and is invalid from here on
let x = 5;
// x comes into scope
makes_copy(x);
// the value of x is passed into the function as an argument,
// but x is a primitive type, so it remains valid
// and can still be used here, while s cannot
} // the function ends: x goes out of scope, then s; but s was already moved, so it needs no freeing
fn takes_ownership(some_string: String) {
// a String parameter some_string comes into scope
println!("{}", some_string);
} // the function ends and some_string is freed here
fn makes_copy(some_integer: i32) {
// an i32 parameter some_integer comes into scope
println!("{}", some_integer);
} // the function ends; some_integer is a primitive type, so nothing needs freeing
fn f22() {
let s1 = gives_ownership();
// gives_ownership moves its return value into s1
let s2 = String::from("hello");
// s2 comes into scope
let s3 = takes_and_gives_back(s2);
// s2 is moved in as an argument; s3 takes ownership of the return value
} // s3 goes out of scope and is freed; s2 was moved; s1 goes out of scope and is freed
fn gives_ownership() -> String {
let some_string = String::from("hello");
// some_string comes into scope
return some_string;
// some_string is moved out of the function as the return value
}
fn takes_and_gives_back(a_string: String) -> String {
// a_string comes into scope
a_string // a_string is moved out as the return value
}
fn f23() {
let s1 = String::from("hello");
let s2 = &s1;
println!("s1 is {}, s2 is {}", s1, s2);
}
fn f24() {
let s1 = String::from("hello");
let len = calculate_length(&s1);
println!("The length of '{}' is {}.", s1, len);
}
fn calculate_length(s: &String) -> usize {
s.len()
}
fn f25() {
let s1 = String::from("hello");
let s2 = s1.clone();
let s3 = s1;
println!("{}", s2);
}
fn f26() {
let s1 = String::from("hello");
let mut s2 = &s1;
let s3 = s2;
s2 = &s3; // 重新从 s3 租借所有权
println!("{}", s2);
}
fn f27() {
let s1 = String::from("run");
let s2 = &s1;
println!("{}", s2);
println!("{}", s2);
}
fn f28() {
let mut s1 = String::from("run");
// s1 is mutable
let s2 = &mut s1;
// s2 is a mutable reference
s2.push_str("oob");
println!("{}", s2);
}
fn f29() {
}
fn f30() {
let s = String::from("broadcast");
let part1 = &s[0..5];
let part2 = &s[5..9];
println!("{}={}+{}", s, part1, part2);
}
fn f31() {
let mut s = String::from("runoob");
let slice = &s[0..3];
println!("slice = {}", slice);
}
fn f32() {
let arr = [1, 3, 5, 7, 9];
let part = &arr[0..3];
for i in part.iter() {
println!("{}", i);
}
}
struct Site {
domain: String,
name: String,
nation: String,
found: u32
}
struct Color(u8, u8, u8);
struct Point2(f64, f64);
fn f33() {
struct Color(u8, u8, u8);
struct Point(f64, f64);
let black = Color(0, 0, 0);
let origin = Point(0.0, 0.0);
println!("black = ({}, {}, {})", black.0, black.1, black.2);
println!("origin = ({}, {})", origin.0, origin.1);
}
struct Rectangle2 {
width: u32,
height: u32,
}
fn f34() {
let rect1 = Rectangle { width: 30, height: 50 };
println!("rect1 is {:?}", rect1);
}
struct Rectangle3 {
width: u32,
height: u32,
}
fn f35() {
let rect1 = Rectangle { width: 30, height: 50 };
println!("rect1's area is {}", rect1.area());
}
#[derive(Debug)]
struct Rectangle {
width: u32,
height: u32,
}
impl Rectangle {
fn area(&self) -> u32 {
self.width * self.height
}
fn wider(&self, rect: &Rectangle) -> bool {
self.width > rect.width
}
}
fn f36() {
let rect1 = Rectangle { width: 30, height: 50 };
let rect2 = Rectangle { width: 40, height: 20 }
|
u32,
}
impl Rectangle {
fn create(width: u32, height: u32) -> Rectangle {
Rectangle { width, height }
}
}
fn f37() {
let rect = Rectangle::create(30, 50);
println!("{:?}", rect);
}
#[derive(Debug)]
enum Book {
Papery, Electronic
}
fn f38() {
let book = Book::Papery;
println!("{:?}", book);
}
fn f39() {
enum Book {
Papery {index: u32},
Electronic {url: String},
}
let book = Book::Papery{index: 1001};
let ebook = Book::Electronic{url: String::from("url...")};
match book {
Book::Papery { index } => {
println!("Papery book {}", index);
},
Book::Electronic { url } => {
println!("E-book {}", url);
}
}
}
fn f40() {
let opt = Option::Some("Hello");
match opt {
Option::Some(something) => {
println!("{}", something);
},
Option::None => {
println!("opt is nothing");
}
}
}
fn f41() {
let opt: Option<&str> = Option::None;
match opt {
Option::Some(something) => {
println!("{}", something);
},
Option::None
|
;
println!("{}", rect1.wider(&rect2));
}
struct Rectangle4 {
width: u32,
height:
|
identifier_body
|
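This row introduces `impl` blocks twice: once for methods (`area`, `wider`) and once for the associated function `create`. Both can live in a single `impl`; a compact, self-contained version of the Rectangle example for reference:

```rust
#[derive(Debug)]
struct Rectangle {
    width: u32,
    height: u32,
}

impl Rectangle {
    // Associated function (no &self): called as Rectangle::create(...).
    fn create(width: u32, height: u32) -> Rectangle {
        Rectangle { width, height }
    }
    // Methods: called on an instance with dot syntax.
    fn area(&self) -> u32 {
        self.width * self.height
    }
    fn wider(&self, rect: &Rectangle) -> bool {
        self.width > rect.width
    }
}

fn main() {
    let rect1 = Rectangle::create(30, 50);
    let rect2 = Rectangle::create(40, 20);
    println!("{:?}", rect1);                     // Rectangle { width: 30, height: 50 }
    println!("area = {}", rect1.area());         // 1500
    println!("wider = {}", rect1.wider(&rect2)); // false
}
```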
main.rs
|
b = -1;
}
else {
b = 0;
}
println!("b is {}", b);
}
fn f14() {
let a = 3;
let number = if a > 0 { 1 } else { -1 };
println!("number 为 {}", number);
}
fn f15() {
let mut number = 1;
while number != 4 {
println!("{}", number);
number += 1;
}
println!("EXIT");
}
fn f16() {
let a = [10, 20, 30, 40, 50];
for i in a.iter() {
println!("值为 : {}", i);
}
}
fn f17() {
let a = [10, 20, 30, 40, 50];
for i in 0..5 {
println!("a[{}] = {}", i, a[i]);
}
}
fn f18() {
let s = ['R', 'U', 'N', 'O', 'O', 'B'];
let mut i = 0;
loop {
let ch = s[i];
if ch == 'O' {
break;
}
println!("\'{}\'", ch);
i += 1;
}
}
fn f19() {
let s = ['R', 'U', 'N', 'O', 'O', 'B'];
let mut i = 0;
let location = loop {
let ch = s[i];
if ch == 'O' {
break i;
}
i += 1;
};
println!(" \'O\' 的索引为 {}", location);
}
fn f20() {
let s1 = String::from("hello");
let s2 = s1.clone();
println!("s1 = {}, s2 = {}", s1, s2);
}
fn f21() {
let s = String::from("hello");
// s comes into scope
takes_ownership(s);
// the value of s is passed into the function as an argument,
// so s can be considered moved and is invalid from here on
let x = 5;
// x comes into scope
makes_copy(x);
// the value of x is passed into the function as an argument,
// but x is a primitive type, so it remains valid
// and can still be used here, while s cannot
} // the function ends: x goes out of scope, then s; but s was already moved, so it needs no freeing
fn takes_ownership(some_string: String) {
// a String parameter some_string comes into scope
println!("{}", some_string);
} // the function ends and some_string is freed here
fn makes_copy(some_integer: i32) {
// an i32 parameter some_integer comes into scope
println!("{}", some_integer);
} // the function ends; some_integer is a primitive type, so nothing needs freeing
fn f22() {
let s1 = gives_ownership();
// gives_ownership moves its return value into s1
let s2 = String::from("hello");
// s2 comes into scope
let s3 = takes_and_gives_back(s2);
// s2 is moved in as an argument; s3 takes ownership of the return value
} // s3 goes out of scope and is freed; s2 was moved; s1 goes out of scope and is freed
fn gives_ownership() -> String {
let some_string = String::from("hello");
// some_string comes into scope
return some_string;
// some_string is moved out of the function as the return value
}
fn takes_and_gives_back(a_string: String) -> String {
// a_string comes into scope
a_string // a_string is moved out as the return value
}
fn f23() {
let s1 = String::from("hello");
let s2 = &s1;
println!("s1 is {}, s2 is {}", s1, s2);
}
fn f24() {
let s1 = String::from("hello");
let len = calculate_length(&s1);
println!("The length of '{}' is {}.", s1, len);
}
fn calculate_length(s: &String) -> usize {
s.len()
}
fn f25() {
let s1 = String::from("hello");
let s2 = s1.clone();
let s3 = s1;
println!("{}", s2);
}
fn f26() {
let s1 = String::from("hello");
let mut s2 = &s1;
let s3 = s2;
s2 = &s3; // 重新从 s3 租借所有权
println!("{}", s2);
}
fn f27() {
let s1 = String::from("run");
let s2 = &s1;
println!("{}", s2);
println!("{}", s2);
}
fn f28() {
let mut s1 = String::from("run");
// s1 is mutable
let s2 = &mut s1;
// s2 is a mutable reference
s2.push_str("oob");
println!("{}", s2);
}
fn f29() {
}
fn f30() {
let s = String::from("broadcast");
let part1 = &s[0..5];
let part2 = &s[5..9];
println!("{}={}+{}", s, part1, part2);
}
fn f31() {
let mut s = String::from("runoob");
let slice = &s[0..3];
println!("slice = {}", slice);
}
fn f32() {
let arr = [1, 3, 5, 7, 9];
let part = &arr[0..3];
for i in part.iter() {
println!("{}", i);
}
}
struct Site {
domain: String,
name: String,
nation: String,
found: u32
}
struct Color(u8, u8, u8);
struct Point2(f64, f64);
fn f33() {
struct Color(u8, u8, u8);
struct Point(f64, f64);
let black = Color(0, 0, 0);
let origin = Point(0.0, 0.0);
println!("black = ({}, {}, {})", black.0, black.1, black.2);
println!("origin = ({}, {})", origin.0, origin.1);
}
struct Rectangle2 {
width: u32,
height: u32,
}
fn f34() {
let rect1 = Rectangle { width: 30, height: 50 };
println!("rect1 is {:?}", rect1);
}
struct Rectangle3 {
width: u32,
height: u32,
}
fn f35() {
let rect1 = Rectangle { width: 30, height: 50 };
println!("rect1's area is {}", rect1.area());
}
#[derive(Debug)]
struct Rectangle {
width: u32,
height: u32,
}
impl Rectangle {
fn area(&self) -> u32 {
self.width * self.height
}
fn wider(&self, rect: &Rectangle) -> bool {
self.width > rect.width
}
}
fn f36() {
let rect1 = Rectangle { width: 30, height: 50 };
let rect2 = Rectangle { width: 40, height: 20 };
println!("{}", rect1.wider(&rect2));
}
struct Rectangle4 {
width: u32,
height: u32,
}
impl Rectangle {
fn create(width: u32, height: u32) -> Rectangle {
Rectangle { width, height }
}
}
fn f37() {
let rect = Rectangle::create(30, 50);
println!("{:?}", rect);
}
#[derive(Debug)]
enum Book {
Papery, Electronic
}
fn f38() {
let book = Book::Papery;
println!("{:?}", book);
}
fn f39() {
enum Book {
Papery {index: u32},
Electronic {url: String},
}
let book = Book::Papery{index: 1001};
let ebook = Book::Electronic{url: String::from("url...")};
match book {
Book::Papery { index } => {
println!("Papery book {}", index);
},
Book::Electronic { url } => {
println!("E-book {}", url);
}
}
}
fn f40() {
let opt = Option::Some("Hello");
match opt {
Option::Some(something) => {
println!("{}", something);
},
Option::None => {
println!("opt is nothing");
}
}
}
fn f41() {
let opt: Option<&str> = Option::None;
match opt {
Option::Some(something) => {
|
b = 1;
}
else if a < 0 {
|
conditional_block
|
|
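The two-arm `match` over `Option` in f40/f41 has an `if let` shorthand when only the `Some` arm carries logic; a minimal version:

```rust
fn main() {
    let opt: Option<&str> = None;
    // `if let` replaces the two-arm match when only one pattern is interesting.
    if let Some(something) = opt {
        println!("{}", something);
    } else {
        println!("opt is nothing");
    }
}
```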
main.rs
|
String,
name: String,
nation: String,
found: u32
}
struct Color(u8, u8, u8);
struct Point2(f64, f64);
fn f33() {
struct Color(u8, u8, u8);
struct Point(f64, f64);
let black = Color(0, 0, 0);
let origin = Point(0.0, 0.0);
println!("black = ({}, {}, {})", black.0, black.1, black.2);
println!("origin = ({}, {})", origin.0, origin.1);
}
struct Rectangle2 {
width: u32,
height: u32,
}
fn f34() {
let rect1 = Rectangle { width: 30, height: 50 };
println!("rect1 is {:?}", rect1);
}
struct Rectangle3 {
width: u32,
height: u32,
}
fn f35() {
let rect1 = Rectangle { width: 30, height: 50 };
println!("rect1's area is {}", rect1.area());
}
#[derive(Debug)]
struct Rectangle {
width: u32,
height: u32,
}
impl Rectangle {
fn area(&self) -> u32 {
self.width * self.height
}
fn wider(&self, rect: &Rectangle) -> bool {
self.width > rect.width
}
}
fn f36() {
let rect1 = Rectangle { width: 30, height: 50 };
let rect2 = Rectangle { width: 40, height: 20 };
println!("{}", rect1.wider(&rect2));
}
struct Rectangle4 {
width: u32,
height: u32,
}
impl Rectangle {
fn create(width: u32, height: u32) -> Rectangle {
Rectangle { width, height }
}
}
fn f37() {
let rect = Rectangle::create(30, 50);
println!("{:?}", rect);
}
#[derive(Debug)]
enum Book {
Papery, Electronic
}
fn f38() {
let book = Book::Papery;
println!("{:?}", book);
}
fn f39() {
enum Book {
Papery {index: u32},
Electronic {url: String},
}
let book = Book::Papery{index: 1001};
let ebook = Book::Electronic{url: String::from("url...")};
match book {
Book::Papery { index } => {
println!("Papery book {}", index);
},
Book::Electronic { url } => {
println!("E-book {}", url);
}
}
}
fn f40() {
let opt = Option::Some("Hello");
match opt {
Option::Some(something) => {
println!("{}", something);
},
Option::None => {
println!("opt is nothing");
}
}
}
fn f41() {
let opt: Option<&str> = Option::None;
match opt {
Option::Some(something) => {
println!("{}", something);
},
Option::None => {
println!("opt is nothing");
}
}
}
fn f42() {
let t = Some(64);
match t {
Some(64) => println!("Yes"),
_ => println!("No"),
}
}
fn f43() {
enum Book {
Papery(u32),
Electronic(String)
}
let book = Book::Electronic(String::from("url"));
if let Book::Papery(index) = book {
println!("Papery {}", index);
} else {
println!("Not papery book");
}
}
mod nation {
pub mod government {
pub fn govern() {}
}
mod congress {
pub fn legislate() {}
}
mod court {
fn judicial() {
super::congress::legislate();
}
}
}
fn f44() {
nation::government::govern();
}
mod back_of_house {
pub struct Breakfast {
pub toast: String,
seasonal_fruit: String,
}
impl Breakfast {
pub fn summer(toast: &str) -> Breakfast {
Breakfast {
toast: String::from(toast),
seasonal_fruit: String::from("peaches"),
}
}
}
}
pub fn eat_at_restaurant() {
let mut meal = back_of_house::Breakfast::summer("Rye");
meal.toast = String::from("Wheat");
println!("I'd like {} toast please", meal.toast);
}
fn f45() {
eat_at_restaurant()
}
mod SomeModule {
pub enum Person {
King {
name: String
},
Quene
}
}
fn f46() {
let person = SomeModule::Person::King{
name: String::from("Blue")
};
match person {
SomeModule::Person::King {name} => {
println!("{}", name);
}
_ => {}
}
}
fn max2(array: &[i32]) -> i32 {
let mut max_index = 0;
let mut i = 1;
while i < array.len() {
if array[i] > array[max_index] {
max_index = i;
}
i += 1;
}
array[max_index]
}
fn f47() {
let a = [2, 4, 6, 3, 1];
println!("max = {}", max(&a));
}
struct Point<T> {
x: T,
y: T,
}
impl<T> Point<T> {
fn x(&self) -> &T {
&self.x
}
}
fn f48() {
let p = Point { x: 1, y: 2 };
println!("p.x = {}", p.x());
}
trait Descriptive {
fn describe(&self) -> String {
String::from("[Object]")
}
}
struct Person {
name: String,
age: u8
}
impl Descriptive for Person {
fn describe(&self) -> String {
format!("{} {}", self.name, self.age)
}
}
fn f49() {
let cali = Person {
name: String::from("Cali"),
age: 24
};
println!("{}", cali.describe());
}
trait Comparable {
fn compare(&self, object: &Self) -> i8;
}
fn max<T: Comparable>(array: &[T]) -> &T {
let mut max_index = 0;
let mut i = 1;
while i < array.len() {
if array[i].compare(&array[max_index]) > 0 {
max_index = i;
}
i += 1;
}
&array[max_index]
}
impl Comparable for f64 {
fn compare(&self, object: &f64) -> i8 {
if &self > &object { 1 }
else if &self == &object { 0 }
else { -1 }
}
}
impl Comparable for i8 {
fn compare(&self, object: &i8) -> i8 {
if &self > &object { 1 }
else if &self == &object { 0 }
else { -1 }
}
}
fn f50() {
let arr = [1.0, 3.0, 5.0, 4.0, 2.0];
println!("maximum of arr is {}", max(&arr));
}
fn f51() {
let mut vector = vec![1, 2, 4, 8];
vector.push(16);
vector.push(32);
vector.push(64);
println!("{:?}", vector);
}
fn f52() {
let mut v1: Vec<i32> = vec![1, 2, 4, 8];
let mut v2: Vec<i32> = vec![16, 32, 64];
v1.append(&mut v2);
println!("{:?}", v1);
}
fn f53() {
let mut v = vec![1, 2, 4, 8];
println!("{}", match v.get(0) {
Some(value) => value.to_string(),
None => "None".to_string()
});
}
fn f54() {
let v = vec![1, 2, 4, 8];
println!("{}", v[1]);
}
fn f55() {
let v = vec![100, 32, 57];
for i in &v {
println!("{}", i);
}
}
fn f56() {
let mut v = vec![100, 32, 57];
for i in &mut v {
*i += 50;
}
}
fn f57() {
let s = String::from("hello中文");
for c in s.chars() {
println!("{}", c);
}
}
fn f58() {
let s = String::from("EN中文");
let a = s.chars().nth(2);
println!("{:?}", a);
|
}
fn f59() {
let s = String::from("EN中文");
|
random_line_split
|
|
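f57 and f58 iterate a UTF-8 `String` by `char` rather than by byte; the distinction matters because each Chinese character occupies three bytes. A standalone check:

```rust
fn main() {
    let s = String::from("EN中文");
    // chars() walks Unicode scalar values, so nth(2) is '中' even though
    // that character starts at byte offset 2 and spans 3 bytes.
    println!("{:?}", s.chars().nth(2)); // Some('中')
    println!("{}", s.len());            // 8 (bytes), while s.chars().count() is 4
}
```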
main.rs
|
a < 0 {
b = -1;
}
else {
b = 0;
}
println!("b is {}", b);
}
fn f14() {
let a = 3;
let number = if a > 0 { 1 } else { -1 };
println!("number 为 {}", number);
}
fn f15() {
let mut number = 1;
while number != 4 {
println!("{}", number);
number += 1;
}
println!("EXIT");
}
fn f16() {
let a = [10, 20, 30, 40, 50];
for i in a.iter() {
println!("值为 : {}", i);
}
}
fn f17() {
let a = [10, 20, 30, 40, 50];
for i in 0..5 {
println!("a[{}] = {}", i, a[i]);
}
}
fn f18() {
let s = ['R', 'U', 'N', 'O', 'O', 'B'];
let mut i = 0;
loop {
let ch = s[i];
if ch == 'O' {
break;
}
println!("\'{}\'", ch);
i += 1;
}
}
fn f19() {
let s = ['R', 'U', 'N', 'O', 'O', 'B'];
let mut i = 0;
let location = loop {
let ch = s[i];
if ch == 'O' {
break i;
}
i += 1;
};
println!(" \'O\' 的索引为 {}", location);
}
fn f20() {
let s1 = String::from("hello");
let s2 = s1.clone();
println!("s1 = {}, s2 = {}", s1, s2);
}
fn f21() {
let s = String::from("hello");
// s comes into scope
takes_ownership(s);
// the value of s is passed into the function as an argument,
// so s can be considered moved and is invalid from here on
let x = 5;
// x comes into scope
makes_copy(x);
// the value of x is passed into the function as an argument,
// but x is a primitive type, so it remains valid
// and can still be used here, while s cannot
} // the function ends: x goes out of scope, then s; but s was already moved, so it needs no freeing
fn takes_ownership(some_string: String) {
// a String parameter some_string comes into scope
println!("{}", some_string);
} // the function ends and some_string is freed here
fn makes_copy(some_integer: i32) {
// an i32 parameter some_integer comes into scope
println!("{}", some_integer);
} // the function ends; some_integer is a primitive type, so nothing needs freeing
fn f22() {
let s1 = gives_ownership();
// gives_ownership moves its return value into s1
let s2 = String::from("hello");
// s2 comes into scope
let s3 = takes_and_gives_back(s2);
// s2 is moved in as an argument; s3 takes ownership of the return value
} // s3 goes out of scope and is freed; s2 was moved; s1 goes out of scope and is freed
fn gives_ownership() -> St
|
let some_string = String::from("hello");
// some_string comes into scope
return some_string;
// some_string is moved out of the function as the return value
}
fn takes_and_gives_back(a_string: String) -> String {
// a_string comes into scope
a_string // a_string is moved out as the return value
}
fn f23() {
let s1 = String::from("hello");
let s2 = &s1;
println!("s1 is {}, s2 is {}", s1, s2);
}
fn f24() {
let s1 = String::from("hello");
let len = calculate_length(&s1);
println!("The length of '{}' is {}.", s1, len);
}
fn calculate_length(s: &String) -> usize {
s.len()
}
fn f25() {
let s1 = String::from("hello");
let s2 = s1.clone();
let s3 = s1;
println!("{}", s2);
}
fn f26() {
let s1 = String::from("hello");
let mut s2 = &s1;
let s3 = s2;
s2 = &s3; // 重新从 s3 租借所有权
println!("{}", s2);
}
fn f27() {
let s1 = String::from("run");
let s2 = &s1;
println!("{}", s2);
println!("{}", s2);
}
fn f28() {
let mut s1 = String::from("run");
// s1 is mutable
let s2 = &mut s1;
// s2 is a mutable reference
s2.push_str("oob");
println!("{}", s2);
}
fn f29() {
}
fn f30() {
let s = String::from("broadcast");
let part1 = &s[0..5];
let part2 = &s[5..9];
println!("{}={}+{}", s, part1, part2);
}
fn f31() {
let mut s = String::from("runoob");
let slice = &s[0..3];
println!("slice = {}", slice);
}
fn f32() {
let arr = [1, 3, 5, 7, 9];
let part = &arr[0..3];
for i in part.iter() {
println!("{}", i);
}
}
struct Site {
domain: String,
name: String,
nation: String,
found: u32
}
struct Color(u8, u8, u8);
struct Point2(f64, f64);
fn f33() {
struct Color(u8, u8, u8);
struct Point(f64, f64);
let black = Color(0, 0, 0);
let origin = Point(0.0, 0.0);
println!("black = ({}, {}, {})", black.0, black.1, black.2);
println!("origin = ({}, {})", origin.0, origin.1);
}
struct Rectangle2 {
width: u32,
height: u32,
}
fn f34() {
let rect1 = Rectangle { width: 30, height: 50 };
println!("rect1 is {:?}", rect1);
}
struct Rectangle3 {
width: u32,
height: u32,
}
fn f35() {
let rect1 = Rectangle { width: 30, height: 50 };
println!("rect1's area is {}", rect1.area());
}
#[derive(Debug)]
struct Rectangle {
width: u32,
height: u32,
}
impl Rectangle {
fn area(&self) -> u32 {
self.width * self.height
}
fn wider(&self, rect: &Rectangle) -> bool {
self.width > rect.width
}
}
fn f36() {
let rect1 = Rectangle { width: 30, height: 50 };
let rect2 = Rectangle { width: 40, height: 20 };
println!("{}", rect1.wider(&rect2));
}
struct Rectangle4 {
width: u32,
height: u32,
}
impl Rectangle {
fn create(width: u32, height: u32) -> Rectangle {
Rectangle { width, height }
}
}
fn f37() {
let rect = Rectangle::create(30, 50);
println!("{:?}", rect);
}
#[derive(Debug)]
enum Book {
Papery, Electronic
}
fn f38() {
let book = Book::Papery;
println!("{:?}", book);
}
fn f39() {
enum Book {
Papery {index: u32},
Electronic {url: String},
}
let book = Book::Papery{index: 1001};
let ebook = Book::Electronic{url: String::from("url...")};
match book {
Book::Papery { index } => {
println!("Papery book {}", index);
},
Book::Electronic { url } => {
println!("E-book {}", url);
}
}
}
fn f40() {
let opt = Option::Some("Hello");
match opt {
Option::Some(something) => {
println!("{}", something);
},
Option::None => {
println!("opt is nothing");
}
}
}
fn f41() {
let opt: Option<&str> = Option::None;
match opt {
Option::Some(something) => {
println!("{}", something);
},
Option::None =>
|
ring {
|
identifier_name
|
run_scenario.py
|
else:
dataset_folder += '/text/questions'
text_dataset = True
if text_dataset:
setup = TextNetworkSetup(text_setup, dataset_folder, args, scenario=True, scenario_setup=scenario_setup)
return setup.scenario_loader, setup.q_network
else:
setup = ImageNetworkSetup(image_setup, dataset_folder, args, scenario=True, scenario_setup=scenario_setup)
return setup.scenario_loader, setup.q_network
def add_list(list1, list2, dim=1):
if dim == 1:
for l in range(len(list2)):
list1[l] += list2[l]
elif dim == 2:
for l in range(len(list2)):
for i in range(len(list2[l])):
list1[l][i] += list2[l][i]
def divide_list(list1, iterations, dim=1):
if dim == 1:
for l in range(len(list1)):
list1[l] = float(list1[l]/iterations)
elif dim == 2:
for l in range(len(list1)):
for i in range(len(list1[l])):
list1[l][i] = float(list1[l][i]/iterations)
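# Example of how these two helpers compose (hypothetical values):
#     totals = [0.0, 0.0]
#     for run in ([1.0, 2.0], [3.0, 4.0]):
#         add_list(totals, run)      # totals == [4.0, 6.0]
#     divide_list(totals, 2)         # totals == [2.0, 3.0], the per-slot mean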
def bar_plot(lists, bar_type, name, labels, size, ylabel="", display=True):
plot_list = []
for i in range(len(lists)):
        if type(lists[i]) is not float:
plot_list.append([])
for j in range(len(lists[i])):
plot_list[i].append(abs(lists[i][j]))
else:
plot_list.append(abs(lists[i]))
fig, ax = plt.subplots()
ax.yaxis.grid(True)
colors = ["red", "green", "blue", "yellow", "magenta", "white", "grey"]
if "Percentage" in bar_type:
if scenario_type == 0:
x1 = np.arange(1, scenario_size + 1)
plt.bar(x1, plot_list[0:scenario_size], color=colors[0], label=labels[0], edgecolor="black")
for i in range(0, len(plot_list) - scenario_size):
plt.bar(scenario_size + i + 1, plot_list[scenario_size + i], color=colors[i+1],
label=labels[i+1], edgecolor="black")
elif scenario_type == 1:
x_curr = 0
for i in range(0, classes):
if i == class_choice:
x = np.arange(i + 1, i + 1 + scenario_size)
x_curr += scenario_size
y = plot_list[i : i + scenario_size]
else:
x = x_curr + 1
y = plot_list[x_curr]
x_curr += 1
plt.bar(x, y, color=colors[i], label=labels[i], edgecolor="black")
elif scenario_type == 2:
for i in range(classes):
x = np.arange(int(i*scenario_size), int((i+1)*scenario_size))
plt.bar(x, plot_list[int(i*scenario_size) : int((i+1)*scenario_size)], color=colors[i], label=labels[i],
edgecolor="black")
else:
for i in range(0, classes):
plt.bar(i + 1, plot_list[i], color=colors[i], label=labels[i], edgecolor="black")
x1 = np.arange(classes + 1, classes + scenario_size)
plt.bar(x1, plot_list[classes:], color=colors[class_choice], edgecolor="black")
if len(ylabel) > 0:
plt.ylabel(ylabel)
else:
plt.ylabel("% Label Requests")
else:
np_lists = np.array(plot_list).transpose()
x = np.arange(1, len(plot_list) + 1)
bottom_list = []
for i in range(len(np_lists)):
if len(bottom_list) == 0:
plt.bar(x, np_lists[i], color=colors[i], label=labels[i], edgecolor="black")
bottom_list = np_lists[i]
else:
plt.bar(x, np_lists[i], bottom=bottom_list, color=colors[i], label=labels[i], edgecolor="black")
bottom_list += np_lists[i]
plt.ylabel("Class Q-value")
plt.legend(loc=9)
plt.title("ReinforcementLearning Scenario")
plt.xlabel("Time step")
plt.ylim((0, 1))
if not os.path.exists(directory + name):
os.makedirs(directory + name)
plt.savefig(directory + name + bar_type + "_" + str(size) + ".png")
if display:
plt.show()
def get_pretrained_models():
result_folder = 'pretrained/'
pretrained_models = []
if os.path.exists(result_folder):
for root, dirs, files in os.walk(result_folder):
for dir in dirs:
pretrained_models.append(dir)
return pretrained_models
def get_selected_model():
    n = min(25, len(pretrained_models))  # avoid indexing past the end of the model list
print('Models:\n')
for i in range(n):
print(str(i) + ': ' + pretrained_models[i] + '\n')
selected_model = input('Select model to run scenario for [0-N]:\n')
while True:
try:
selected_model_index = int(selected_model)
if selected_model_index < len(pretrained_models):
choice = pretrained_models[selected_model_index] + '/'
model = 'lstm'
if 'ntm' in choice:
model = 'ntm'
elif 'lrua' in choice:
model = 'lrua'
return choice, model
else:
print('Selected model was not in list!')
selected_model = input('Select model to run scenario for [0-N]:\n')
        except ValueError:
print('Selected model must be an integer!')
selected_model = input('Select model to run scenario for [0-N]:\n')
def get_scenario():
scenarios = ['Meta Scenario', 'Zero Shot Scenario', 'K Shot Scenario', 'One Shot Scenario', 'All scenarios']
for i in range(len(scenarios)):
print(str(i) + ': ' + scenarios[i] + '\n')
return get_integer_input('Select scenario to run [0-N]:\n', 'scenario', len(scenarios))
def get_data_set():
for i in range(len(data_sets)):
print(str(i) + ': ' + data_sets[i] + '\n')
return get_integer_input('Select data set to test on:\n', 'data set', len(data_sets))
def get_integer_input(msg, object, limit):
selected_class = input(msg)
while True:
try:
selected_class_int = int(selected_class)
if selected_class_int < limit:
return selected_class_int
else:
print('Selected ' + object + ' doesn\'t exist!')
selected_class = input(msg)
        except ValueError:
print('Selected ' + object + ' must be an integer!')
selected_class = input(msg)
class Args:
def __init__(self, setup):
self.class_vector_size = setup['class_vector_size']
self.episode_size = setup['episode_size']
self.scenario_size = setup['scenario_size']
self.GLOVE = setup['GLOVE']
self.scenario_batch_size = setup['scenario_batch_size']
self.batch_size = setup['batch_size']
self.cuda = setup['cuda']
self.train = setup['train']
self.embedding_size = setup['embedding_size']
self.sentence_length = setup['sentence_length']
self.number_of_sentences = setup['number_of_sentences']
self.LSTM = setup['LSTM']
self.NTM = setup['NTM']
self.LRUA = setup['LRUA']
if __name__ == '__main__':
data_sets = ['OMNIGLOT', 'MNIST', 'INH', 'REUTERS', 'QA']
directory = "results/plots/"
nof_scenarios = 1
pretrained_models = get_pretrained_models()
chosen_model_to_train, model_type = get_selected_model()
scenario_type = get_scenario()
if scenario_type == 4:
nof_scenarios = 4
    classes = get_integer_input('Set number of classes to train on (cannot be more than what the model was trained on)'
                                ' [2-5]:\n', 'number of classes', 5)
class_choice = get_integer_input('Select class to test on [0-' + str(classes-1) + ']:\n', 'class', classes)
batch_size = get_integer_input('Set batch size:\n', 'batch size', 256)
scenario_size = get_integer_input('Set scenario size:\n', 'scenario', 30)
dataset = get_data_set()
|
dataset_folder = 'data'
# Collecting static image setup
image_setup = ImageModelSetup(False, 0, 0)
# Collecting static text setup
text_dataset = False
text_setup = TextModelSetup(False, 0, 0, args.embedding_size, args.sentence_length)
# Creating setup based on data-set
if dataset == 0:
dataset_folder += '/images/omniglot'
elif dataset == 1:
dataset_folder += '/images/mnist'
elif dataset == 2:
dataset_folder += '/text/headlines'
text_dataset = True
elif dataset == 3:
dataset_folder += '/text/reuters'
text_dataset = True
|
identifier_body
|
|
run_scenario.py
|
def add_list(list1, list2, dim=1):
if dim == 1:
for l in range(len(list2)):
list1[l] += list2[l]
elif dim == 2:
for l in range(len(list2)):
for i in range(len(list2[l])):
list1[l][i] += list2[l][i]
def divide_list(list1, iterations, dim=1):
if dim == 1:
for l in range(len(list1)):
list1[l] = float(list1[l]/iterations)
elif dim == 2:
for l in range(len(list1)):
for i in range(len(list1[l])):
list1[l][i] = float(list1[l][i]/iterations)
def bar_plot(lists, bar_type, name, labels, size, ylabel="", display=True):
plot_list = []
for i in range(len(lists)):
        if type(lists[i]) is not float:
plot_list.append([])
for j in range(len(lists[i])):
plot_list[i].append(abs(lists[i][j]))
else:
plot_list.append(abs(lists[i]))
fig, ax = plt.subplots()
ax.yaxis.grid(True)
colors = ["red", "green", "blue", "yellow", "magenta", "white", "grey"]
if "Percentage" in bar_type:
if scenario_type == 0:
x1 = np.arange(1, scenario_size + 1)
plt.bar(x1, plot_list[0:scenario_size], color=colors[0], label=labels[0], edgecolor="black")
for i in range(0, len(plot_list) - scenario_size):
plt.bar(scenario_size + i + 1, plot_list[scenario_size + i], color=colors[i+1],
label=labels[i+1], edgecolor="black")
elif scenario_type == 1:
x_curr = 0
for i in range(0, classes):
if i == class_choice:
x = np.arange(i + 1, i + 1 + scenario_size)
x_curr += scenario_size
y = plot_list[i : i + scenario_size]
else:
x = x_curr + 1
y = plot_list[x_curr]
x_curr += 1
plt.bar(x, y, color=colors[i], label=labels[i], edgecolor="black")
elif scenario_type == 2:
for i in range(classes):
x = np.arange(int(i*scenario_size), int((i+1)*scenario_size))
plt.bar(x, plot_list[int(i*scenario_size) : int((i+1)*scenario_size)], color=colors[i], label=labels[i],
edgecolor="black")
else:
for i in range(0, classes):
plt.bar(i + 1, plot_list[i], color=colors[i], label=labels[i], edgecolor="black")
x1 = np.arange(classes + 1, classes + scenario_size)
plt.bar(x1, plot_list[classes:], color=colors[class_choice], edgecolor="black")
if len(ylabel) > 0:
plt.ylabel(ylabel)
else:
plt.ylabel("% Label Requests")
else:
np_lists = np.array(plot_list).transpose()
x = np.arange(1, len(plot_list) + 1)
bottom_list = []
for i in range(len(np_lists)):
|
plt.ylabel("Class Q-value")
plt.legend(loc=9)
plt.title("ReinforcementLearning Scenario")
plt.xlabel("Time step")
plt.ylim((0, 1))
if not os.path.exists(directory + name):
os.makedirs(directory + name)
plt.savefig(directory + name + bar_type + "_" + str(size) + ".png")
if display:
plt.show()
def get_pretrained_models():
result_folder = 'pretrained/'
pretrained_models = []
if os.path.exists(result_folder):
for root, dirs, files in os.walk(result_folder):
for dir in dirs:
pretrained_models.append(dir)
return pretrained_models
def get_selected_model():
    n = min(25, len(pretrained_models))  # avoid indexing past the end of the model list
print('Models:\n')
for i in range(n):
print(str(i) + ': ' + pretrained_models[i] + '\n')
selected_model = input('Select model to run scenario for [0-N]:\n')
while True:
try:
selected_model_index = int(selected_model)
if selected_model_index < len(pretrained_models):
choice = pretrained_models[selected_model_index] + '/'
model = 'lstm'
if 'ntm' in choice:
model = 'ntm'
elif 'lrua' in choice:
model = 'lrua'
return choice, model
else:
print('Selected model was not in list!')
selected_model = input('Select model to run scenario for [0-N]:\n')
        except ValueError:
print('Selected model must be an integer!')
selected_model = input('Select model to run scenario for [0-N]:\n')
def get_scenario():
scenarios = ['Meta Scenario', 'Zero Shot Scenario', 'K Shot Scenario', 'One Shot Scenario', 'All scenarios']
for i in range(len(scenarios)):
print(str(i) + ': ' + scenarios[i] + '\n')
return get_integer_input('Select scenario to run [0-N]:\n', 'scenario', len(scenarios))
def get_data_set():
for i in range(len(data_sets)):
print(str(i) + ': ' + data_sets[i] + '\n')
return get_integer_input('Select data set to test on:\n', 'data set', len(data_sets))
def get_integer_input(msg, object, limit):
selected_class = input(msg)
while True:
try:
selected_class_int = int(selected_class)
if selected_class_int < limit:
return selected_class_int
else:
print('Selected ' + object + ' doesn\'t exist!')
selected_class = input(msg)
        except ValueError:
print('Selected ' + object + ' must be an integer!')
selected_class = input(msg)
class Args:
def __init__(self, setup):
self.class_vector_size = setup['class_vector_size']
self.episode_size = setup['episode_size']
self.scenario_size = setup['scenario_size']
self.GLOVE = setup['GLOVE']
self.scenario_batch_size = setup['scenario_batch_size']
self.batch_size = setup['batch_size']
self.cuda = setup['cuda']
self.train = setup['train']
self.embedding_size = setup['embedding_size']
self.sentence_length = setup['sentence_length']
self.number_of_sentences = setup['number_of_sentences']
self.LSTM = setup['LSTM']
self.NTM = setup['NTM']
self.LRUA = setup['LRUA']
if __name__ == '__main__':
data_sets = ['OMNIGLOT', 'MNIST', 'INH', 'REUTERS', 'QA']
directory = "results/plots/"
nof_scenarios = 1
pretrained_models = get_pretrained_models()
chosen_model_to_train, model_type = get_selected_model()
scenario_type = get_scenario()
if scenario_type == 4:
nof_scenarios = 4
    classes = get_integer_input('Set number of classes to train on (cannot be more than what the model was trained on)'
                                ' [2-5]:\n', 'number of classes', 5)
class_choice = get_integer_input('Select class to test on [0-' + str(classes-1) + ']:\n', 'class', classes)
batch_size = get_integer_input('Set batch size:\n', 'batch size', 256)
scenario_size = get_integer_input('Set scenario size:\n', 'scenario', 30)
dataset = get_data_set()
model_choice = [0, 0, 0]
if model_type == 'lstm':
model_choice[0] = 1
elif model_type == 'ntm':
model_choice[1] = 1
else:
model_choice[2] = 1
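    # (illustrative note) model_choice is a one-hot selector over [lstm, ntm, lrua];
    # e.g. model_type == 'ntm' yields [0, 1, 0].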
checkpoint = 'pretrained/' + chosen_model_to_train + 'best.pth.tar'
# Training or test dataset
TRAIN = True
# TEXT AND MODEL DETAILS:
EMBEDDING_SIZE = 100
SENTENCE_LENGTH = 6
NUMBER_OF_SENTENCES = 1
DICTIONARY_MAX_SIZE = 10000
argument_setup = {
'class_vector_size': 3,
'episode_size': 0,
'scenario_size': scenario_size,
'GLOVE': True,
'scenario_batch_size': batch_size,
'batch_size': batch_size,
'cuda': False,
'train': TRAIN,
'embedding_size': EMBEDDING_SIZE,
'sentence_length': SENTENCE_LENGTH,
'number_of_sentences': NUMBER_OF_SENTENCES,
'LSTM
|
if len(bottom_list) == 0:
plt.bar(x, np_lists[i], color=colors[i], label=labels[i], edgecolor="black")
bottom_list = np_lists[i]
else:
plt.bar(x, np_lists[i], bottom=bottom_list, color=colors[i], label=labels[i], edgecolor="black")
bottom_list += np_lists[i]
|
conditional_block
|
run_scenario.py
|
def add_list(list1, list2, dim=1):
if dim == 1:
for l in range(len(list2)):
list1[l] += list2[l]
elif dim == 2:
for l in range(len(list2)):
for i in range(len(list2[l])):
list1[l][i] += list2[l][i]
def divide_list(list1, iterations, dim=1):
if dim == 1:
for l in range(len(list1)):
list1[l] = float(list1[l]/iterations)
elif dim == 2:
for l in range(len(list1)):
for i in range(len(list1[l])):
list1[l][i] = float(list1[l][i]/iterations)
def bar_plot(lists, bar_type, name, labels, size, ylabel="", display=True):
plot_list = []
for i in range(len(lists)):
        if type(lists[i]) is not float:
plot_list.append([])
for j in range(len(lists[i])):
plot_list[i].append(abs(lists[i][j]))
else:
plot_list.append(abs(lists[i]))
fig, ax = plt.subplots()
ax.yaxis.grid(True)
colors = ["red", "green", "blue", "yellow", "magenta", "white", "grey"]
if "Percentage" in bar_type:
if scenario_type == 0:
x1 = np.arange(1, scenario_size + 1)
plt.bar(x1, plot_list[0:scenario_size], color=colors[0], label=labels[0], edgecolor="black")
for i in range(0, len(plot_list) - scenario_size):
plt.bar(scenario_size + i + 1, plot_list[scenario_size + i], color=colors[i+1],
label=labels[i+1], edgecolor="black")
elif scenario_type == 1:
x_curr = 0
for i in range(0, classes):
if i == class_choice:
x = np.arange(i + 1, i + 1 + scenario_size)
x_curr += scenario_size
y = plot_list[i : i + scenario_size]
else:
x = x_curr + 1
y = plot_list[x_curr]
x_curr += 1
plt.bar(x, y, color=colors[i], label=labels[i], edgecolor="black")
elif scenario_type == 2:
for i in range(classes):
x = np.arange(int(i*scenario_size), int((i+1)*scenario_size))
plt.bar(x, plot_list[int(i*scenario_size) : int((i+1)*scenario_size)], color=colors[i], label=labels[i],
edgecolor="black")
else:
for i in range(0, classes):
plt.bar(i + 1, plot_list[i], color=colors[i], label=labels[i], edgecolor="black")
x1 = np.arange(classes + 1, classes + scenario_size)
plt.bar(x1, plot_list[classes:], color=colors[class_choice], edgecolor="black")
if len(ylabel) > 0:
plt.ylabel(ylabel)
else:
plt.ylabel("% Label Requests")
else:
np_lists = np.array(plot_list).transpose()
x = np.arange(1, len(plot_list) + 1)
bottom_list = []
for i in range(len(np_lists)):
if len(bottom_list) == 0:
plt.bar(x, np_lists[i], color=colors[i], label=labels[i], edgecolor="black")
bottom_list = np_lists[i]
else:
plt.bar(x, np_lists[i], bottom=bottom_list, color=colors[i], label=labels[i], edgecolor="black")
bottom_list += np_lists[i]
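        # (illustrative note) accumulating bottom_list turns the successive plt.bar
        # calls above into a stacked chart: each class's Q-values are drawn on top
        # of the running total of the previous classes.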
plt.ylabel("Class Q-value")
plt.legend(loc=9)
plt.title("ReinforcementLearning Scenario")
plt.xlabel("Time step")
plt.ylim((0, 1))
if not os.path.exists(directory + name):
os.makedirs(directory + name)
plt.savefig(directory + name + bar_type + "_" + str(size) + ".png")
if display:
plt.show()
def get_pretrained_models():
result_folder = 'pretrained/'
pretrained_models = []
if os.path.exists(result_folder):
for root, dirs, files in os.walk(result_folder):
for dir in dirs:
pretrained_models.append(dir)
return pretrained_models
def get_selected_model():
    n = min(25, len(pretrained_models))  # avoid indexing past the end of the model list
print('Models:\n')
for i in range(n):
print(str(i) + ': ' + pretrained_models[i] + '\n')
selected_model = input('Select model to run scenario for [0-N]:\n')
while True:
try:
selected_model_index = int(selected_model)
if selected_model_index < len(pretrained_models):
choice = pretrained_models[selected_model_index] + '/'
model = 'lstm'
if 'ntm' in choice:
model = 'ntm'
elif 'lrua' in choice:
model = 'lrua'
return choice, model
else:
print('Selected model was not in list!')
selected_model = input('Select model to run scenario for [0-N]:\n')
        except ValueError:
print('Selected model must be an integer!')
selected_model = input('Select model to run scenario for [0-N]:\n')
def
|
():
scenarios = ['Meta Scenario', 'Zero Shot Scenario', 'K Shot Scenario', 'One Shot Scenario', 'All scenarios']
for i in range(len(scenarios)):
print(str(i) + ': ' + scenarios[i] + '\n')
return get_integer_input('Select scenario to run [0-N]:\n', 'scenario', len(scenarios))
def get_data_set():
for i in range(len(data_sets)):
print(str(i) + ': ' + data_sets[i] + '\n')
return get_integer_input('Select data set to test on:\n', 'data set', len(data_sets))
def get_integer_input(msg, object, limit):
selected_class = input(msg)
while True:
try:
selected_class_int = int(selected_class)
if selected_class_int < limit:
return selected_class_int
else:
print('Selected ' + object + ' doesn\'t exist!')
selected_class = input(msg)
        except ValueError:
print('Selected ' + object + ' must be an integer!')
selected_class = input(msg)
class Args:
def __init__(self, setup):
self.class_vector_size = setup['class_vector_size']
self.episode_size = setup['episode_size']
self.scenario_size = setup['scenario_size']
self.GLOVE = setup['GLOVE']
self.scenario_batch_size = setup['scenario_batch_size']
self.batch_size = setup['batch_size']
self.cuda = setup['cuda']
self.train = setup['train']
self.embedding_size = setup['embedding_size']
self.sentence_length = setup['sentence_length']
self.number_of_sentences = setup['number_of_sentences']
self.LSTM = setup['LSTM']
self.NTM = setup['NTM']
self.LRUA = setup['LRUA']
if __name__ == '__main__':
data_sets = ['OMNIGLOT', 'MNIST', 'INH', 'REUTERS', 'QA']
directory = "results/plots/"
nof_scenarios = 1
pretrained_models = get_pretrained_models()
chosen_model_to_train, model_type = get_selected_model()
scenario_type = get_scenario()
if scenario_type == 4:
nof_scenarios = 4
    classes = get_integer_input('Set number of classes to train on (cannot be more than what the model was trained on)'
                                ' [2-5]:\n', 'number of classes', 5)
class_choice = get_integer_input('Select class to test on [0-' + str(classes-1) + ']:\n', 'class', classes)
batch_size = get_integer_input('Set batch size:\n', 'batch size', 256)
scenario_size = get_integer_input('Set scenario size:\n', 'scenario', 30)
dataset = get_data_set()
model_choice = [0, 0, 0]
if model_type == 'lstm':
model_choice[0] = 1
elif model_type == 'ntm':
model_choice[1] = 1
else:
model_choice[2] = 1
checkpoint = 'pretrained/' + chosen_model_to_train + 'best.pth.tar'
# Training or test dataset
TRAIN = True
# TEXT AND MODEL DETAILS:
EMBEDDING_SIZE = 100
SENTENCE_LENGTH = 6
NUMBER_OF_SENTENCES = 1
DICTIONARY_MAX_SIZE = 10000
argument_setup = {
'class_vector_size': 3,
'episode_size': 0,
'scenario_size': scenario_size,
'GLOVE': True,
'scenario_batch_size': batch_size,
'batch_size': batch_size,
'cuda': False,
'train': TRAIN,
'embedding_size': EMBEDDING_SIZE,
'sentence_length': SENTENCE_LENGTH,
'number_of_sentences': NUMBER_OF_SENTENCES,
'LSTM
|
get_scenario
|
identifier_name
|
run_scenario.py
|
def add_list(list1, list2, dim=1):
if dim == 1:
for l in range(len(list2)):
list1[l] += list2[l]
elif dim == 2:
for l in range(len(list2)):
for i in range(len(list2[l])):
list1[l][i] += list2[l][i]
def divide_list(list1, iterations, dim=1):
if dim == 1:
for l in range(len(list1)):
list1[l] = float(list1[l]/iterations)
elif dim == 2:
for l in range(len(list1)):
for i in range(len(list1[l])):
list1[l][i] = float(list1[l][i]/iterations)
def bar_plot(lists, bar_type, name, labels, size, ylabel="", display=True):
plot_list = []
for i in range(len(lists)):
        if type(lists[i]) is not float:
plot_list.append([])
for j in range(len(lists[i])):
plot_list[i].append(abs(lists[i][j]))
else:
plot_list.append(abs(lists[i]))
fig, ax = plt.subplots()
ax.yaxis.grid(True)
colors = ["red", "green", "blue", "yellow", "magenta", "white", "grey"]
if "Percentage" in bar_type:
if scenario_type == 0:
x1 = np.arange(1, scenario_size + 1)
plt.bar(x1, plot_list[0:scenario_size], color=colors[0], label=labels[0], edgecolor="black")
for i in range(0, len(plot_list) - scenario_size):
plt.bar(scenario_size + i + 1, plot_list[scenario_size + i], color=colors[i+1],
label=labels[i+1], edgecolor="black")
elif scenario_type == 1:
x_curr = 0
for i in range(0, classes):
if i == class_choice:
x = np.arange(i + 1, i + 1 + scenario_size)
x_curr += scenario_size
y = plot_list[i : i + scenario_size]
else:
x = x_curr + 1
y = plot_list[x_curr]
x_curr += 1
plt.bar(x, y, color=colors[i], label=labels[i], edgecolor="black")
elif scenario_type == 2:
for i in range(classes):
x = np.arange(int(i*scenario_size), int((i+1)*scenario_size))
plt.bar(x, plot_list[int(i*scenario_size) : int((i+1)*scenario_size)], color=colors[i], label=labels[i],
edgecolor="black")
else:
for i in range(0, classes):
plt.bar(i + 1, plot_list[i], color=colors[i], label=labels[i], edgecolor="black")
x1 = np.arange(classes + 1, classes + scenario_size)
plt.bar(x1, plot_list[classes:], color=colors[class_choice], edgecolor="black")
if len(ylabel) > 0:
plt.ylabel(ylabel)
else:
plt.ylabel("% Label Requests")
else:
np_lists = np.array(plot_list).transpose()
x = np.arange(1, len(plot_list) + 1)
bottom_list = []
for i in range(len(np_lists)):
if len(bottom_list) == 0:
plt.bar(x, np_lists[i], color=colors[i], label=labels[i], edgecolor="black")
bottom_list = np_lists[i]
else:
plt.bar(x, np_lists[i], bottom=bottom_list, color=colors[i], label=labels[i], edgecolor="black")
bottom_list += np_lists[i]
plt.ylabel("Class Q-value")
plt.legend(loc=9)
plt.title("ReinforcementLearning Scenario")
plt.xlabel("Time step")
plt.ylim((0, 1))
if not os.path.exists(directory + name):
os.makedirs(directory + name)
plt.savefig(directory + name + bar_type + "_" + str(size) + ".png")
if display:
plt.show()
def get_pretrained_models():
result_folder = 'pretrained/'
pretrained_models = []
if os.path.exists(result_folder):
for root, dirs, files in os.walk(result_folder):
for dir in dirs:
pretrained_models.append(dir)
return pretrained_models
def get_selected_model():
    n = min(25, len(pretrained_models))  # avoid indexing past the end of the model list
print('Models:\n')
for i in range(n):
print(str(i) + ': ' + pretrained_models[i] + '\n')
selected_model = input('Select model to run scenario for [0-N]:\n')
while True:
try:
selected_model_index = int(selected_model)
if selected_model_index < len(pretrained_models):
choice = pretrained_models[selected_model_index] + '/'
model = 'lstm'
if 'ntm' in choice:
model = 'ntm'
elif 'lrua' in choice:
model = 'lrua'
return choice, model
else:
print('Selected model was not in list!')
selected_model = input('Select model to run scenario for [0-N]:\n')
        except ValueError:
print('Selected model must be an integer!')
selected_model = input('Select model to run scenario for [0-N]:\n')
def get_scenario():
scenarios = ['Meta Scenario', 'Zero Shot Scenario', 'K Shot Scenario', 'One Shot Scenario', 'All scenarios']
for i in range(len(scenarios)):
print(str(i) + ': ' + scenarios[i] + '\n')
return get_integer_input('Select scenario to run [0-N]:\n', 'scenario', len(scenarios))
def get_data_set():
for i in range(len(data_sets)):
print(str(i) + ': ' + data_sets[i] + '\n')
return get_integer_input('Select data set to test on:\n', 'data set', len(data_sets))
def get_integer_input(msg, object, limit):
selected_class = input(msg)
while True:
try:
selected_class_int = int(selected_class)
if selected_class_int < limit:
return selected_class_int
else:
print('Selected ' + object + ' doesn\'t exist!')
selected_class = input(msg)
        except ValueError:
print('Selected ' + object + ' must be an integer!')
selected_class = input(msg)
class Args:
def __init__(self, setup):
self.class_vector_size = setup['class_vector_size']
self.episode_size = setup['episode_size']
self.scenario_size = setup['scenario_size']
self.GLOVE = setup['GLOVE']
self.scenario_batch_size = setup['scenario_batch_size']
self.batch_size = setup['batch_size']
self.cuda = setup['cuda']
self.train = setup['train']
self.embedding_size = setup['embedding_size']
self.sentence_length = setup['sentence_length']
self.number_of_sentences = setup['number_of_sentences']
|
if __name__ == '__main__':
data_sets = ['OMNIGLOT', 'MNIST', 'INH', 'REUTERS', 'QA']
directory = "results/plots/"
nof_scenarios = 1
pretrained_models = get_pretrained_models()
chosen_model_to_train, model_type = get_selected_model()
scenario_type = get_scenario()
if scenario_type == 4:
nof_scenarios = 4
    classes = get_integer_input('Set number of classes to train on (cannot be more than what the model was trained on)'
                                ' [2-5]:\n', 'number of classes', 5)
class_choice = get_integer_input('Select class to test on [0-' + str(classes-1) + ']:\n', 'class', classes)
batch_size = get_integer_input('Set batch size:\n', 'batch size', 256)
scenario_size = get_integer_input('Set scenario size:\n', 'scenario', 30)
dataset = get_data_set()
model_choice = [0, 0, 0]
if model_type == 'lstm':
model_choice[0] = 1
elif model_type == 'ntm':
model_choice[1] = 1
else:
model_choice[2] = 1
checkpoint = 'pretrained/' + chosen_model_to_train + 'best.pth.tar'
# Training or test dataset
TRAIN = True
# TEXT AND MODEL DETAILS:
EMBEDDING_SIZE = 100
SENTENCE_LENGTH = 6
NUMBER_OF_SENTENCES = 1
DICTIONARY_MAX_SIZE = 10000
argument_setup = {
'class_vector_size': 3,
'episode_size': 0,
'scenario_size': scenario_size,
'GLOVE': True,
'scenario_batch_size': batch_size,
'batch_size': batch_size,
'cuda': False,
'train': TRAIN,
'embedding_size': EMBEDDING_SIZE,
'sentence_length': SENTENCE_LENGTH,
'number_of_sentences': NUMBER_OF_SENTENCES,
'LSTM
|
self.LSTM = setup['LSTM']
self.NTM = setup['NTM']
self.LRUA = setup['LRUA']
|
random_line_split
|
wakers.rs
|
Future;
use core::task::{Context, Poll};
use core::pin::Pin;
/// Used to signal to one of many waiters that the condition they're waiting on has happened.
pub(crate) struct Notifier {
notify_pending: Mutex<(bool, Option<Arc<Mutex<FutureState>>>)>,
condvar: Condvar,
}
impl Notifier {
pub(crate) fn new() -> Self {
Self {
notify_pending: Mutex::new((false, None)),
condvar: Condvar::new(),
}
}
pub(crate) fn wait(&self) {
loop {
let mut guard = self.notify_pending.lock().unwrap();
if guard.0 {
guard.0 = false;
return;
}
guard = self.condvar.wait(guard).unwrap();
let result = guard.0;
if result {
guard.0 = false;
return
}
}
}
#[cfg(any(test, feature = "std"))]
pub(crate) fn wait_timeout(&self, max_wait: Duration) -> bool {
let current_time = Instant::now();
loop {
let mut guard = self.notify_pending.lock().unwrap();
if guard.0 {
guard.0 = false;
return true;
}
guard = self.condvar.wait_timeout(guard, max_wait).unwrap().0;
// Due to spurious wakeups that can happen on `wait_timeout`, here we need to check if the
// desired wait time has actually passed, and if not then restart the loop with a reduced wait
// time. Note that this logic can be highly simplified through the use of
// `Condvar::wait_while` and `Condvar::wait_timeout_while`, if and when our MSRV is raised to
// 1.42.0.
let elapsed = current_time.elapsed();
let result = guard.0;
if result || elapsed >= max_wait {
guard.0 = false;
return result;
}
match max_wait.checked_sub(elapsed) {
None => return result,
Some(_) => continue
}
}
}
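	// A minimal sketch (assuming an MSRV of 1.42.0 or newer, which the comment
	// above notes this code does not yet have) of how the loop above could
	// collapse into a single call:
	//
	//     let guard = self.notify_pending.lock().unwrap();
	//     let (mut guard, _timeout) = self.condvar
	//         .wait_timeout_while(guard, max_wait, |pending| !pending.0)
	//         .unwrap();
	//     let notified = guard.0;
	//     guard.0 = false;
	//     notified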
/// Wake waiters, tracking that wake needs to occur even if there are currently no waiters.
pub(crate) fn notify(&self) {
let mut lock = self.notify_pending.lock().unwrap();
lock.0 = true;
if let Some(future_state) = lock.1.take() {
future_state.lock().unwrap().complete();
}
mem::drop(lock);
self.condvar.notify_all();
}
/// Gets a [`Future`] that will get woken up with any waiters
pub(crate) fn get_future(&self) -> Future {
let mut lock = self.notify_pending.lock().unwrap();
if lock.0 {
Future {
state: Arc::new(Mutex::new(FutureState {
callbacks: Vec::new(),
complete: false,
}))
}
} else if let Some(existing_state) = &lock.1 {
Future { state: Arc::clone(&existing_state) }
} else {
let state = Arc::new(Mutex::new(FutureState {
callbacks: Vec::new(),
complete: false,
}));
lock.1 = Some(Arc::clone(&state));
Future { state }
}
}
#[cfg(any(test, feature = "_test_utils"))]
pub fn notify_pending(&self) -> bool {
self.notify_pending.lock().unwrap().0
}
}
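// A minimal usage sketch (test-style, not from the original file): one thread
// blocks in `wait()` while another calls `notify()`.
//
//     let notifier = Arc::new(Notifier::new());
//     let n2 = Arc::clone(&notifier);
//     std::thread::spawn(move || n2.notify());
//     notifier.wait(); // returns once the notification is observed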
/// A callback which is called when a [`Future`] completes.
///
/// Note that this MUST NOT call back into LDK directly, it must instead schedule actions to be
/// taken later. Rust users should use the [`std::future::Future`] implementation for [`Future`]
/// instead.
///
/// Note that the [`std::future::Future`] implementation may only work for runtimes which schedule
/// futures when they receive a wake, rather than immediately executing them.
pub trait FutureCallback : Send {
/// The method which is called.
fn call(&self);
}
impl<F: Fn() + Send> FutureCallback for F {
fn call(&self) { (self)(); }
}
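// A small sketch of the blanket impl above (names are illustrative): any
// `Fn() + Send` closure can be boxed and handed to `Future::register_callback`.
//
//     let future = notifier.get_future();
//     future.register_callback(Box::new(|| println!("future complete")));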
pub(crate) struct FutureState {
callbacks: Vec<Box<dyn FutureCallback>>,
complete: bool,
}
impl FutureState {
fn complete(&mut self) {
for callback in self.callbacks.drain(..) {
callback.call();
}
self.complete = true;
}
}
/// A simple future which can complete once, and calls some callback(s) when it does so.
pub struct Future {
state: Arc<Mutex<FutureState>>,
}
impl Future {
/// Registers a callback to be called upon completion of this future. If the future has already
/// completed, the callback will be called immediately.
pub fn register_callback(&self, callback: Box<dyn FutureCallback>) {
let mut state = self.state.lock().unwrap();
if state.complete {
mem::drop(state);
callback.call();
|
}
}
mod std_future {
use core::task::Waker;
pub struct StdWaker(pub Waker);
impl super::FutureCallback for StdWaker {
fn call(&self) { self.0.wake_by_ref() }
}
}
/// (C-not exported) as Rust Futures aren't usable in language bindings.
impl<'a> StdFuture for Future {
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut state = self.state.lock().unwrap();
if state.complete {
Poll::Ready(())
} else {
let waker = cx.waker().clone();
state.callbacks.push(Box::new(std_future::StdWaker(waker)));
Poll::Pending
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use core::sync::atomic::{AtomicBool, Ordering};
use core::future::Future as FutureTrait;
use core::task::{Context, Poll, RawWaker, RawWakerVTable, Waker};
#[cfg(feature = "std")]
#[test]
fn test_wait_timeout() {
use sync::Arc;
use std::thread;
let persistence_notifier = Arc::new(Notifier::new());
let thread_notifier = Arc::clone(&persistence_notifier);
let exit_thread = Arc::new(AtomicBool::new(false));
let exit_thread_clone = exit_thread.clone();
thread::spawn(move || {
loop {
let mut lock = thread_notifier.notify_pending.lock().unwrap();
lock.0 = true;
thread_notifier.condvar.notify_all();
if exit_thread_clone.load(Ordering::SeqCst) {
break
}
}
});
// Check that we can block indefinitely until updates are available.
let _ = persistence_notifier.wait();
// Check that the Notifier will return after the given duration if updates are
// available.
loop {
if persistence_notifier.wait_timeout(Duration::from_millis(100)) {
break
}
}
exit_thread.store(true, Ordering::SeqCst);
// Check that the Notifier will return after the given duration even if no updates
// are available.
loop {
if !persistence_notifier.wait_timeout(Duration::from_millis(100)) {
break
}
}
}
#[test]
fn test_future_callbacks() {
let future = Future {
state: Arc::new(Mutex::new(FutureState {
callbacks: Vec::new(),
complete: false,
}))
};
let callback = Arc::new(AtomicBool::new(false));
let callback_ref = Arc::clone(&callback);
future.register_callback(Box::new(move || assert!(!callback_ref.fetch_or(true, Ordering::SeqCst))));
assert!(!callback.load(Ordering::SeqCst));
future.state.lock().unwrap().complete();
assert!(callback.load(Ordering::SeqCst));
future.state.lock().unwrap().complete();
}
#[test]
fn test_pre_completed_future_callbacks() {
let future = Future {
state: Arc::new(Mutex::new(FutureState {
callbacks: Vec::new(),
complete: false,
}))
};
future.state.lock().unwrap().complete();
let callback = Arc::new(AtomicBool::new(false));
let callback_ref = Arc::clone(&callback);
future.register_callback(Box::new(move || assert!(!callback_ref.fetch_or(true, Ordering::SeqCst))));
assert!(callback.load(Ordering::SeqCst));
assert!(future.state.lock().unwrap().callbacks.is_empty());
}
// Rather annoyingly, there's no safe way in Rust std to construct a Waker despite it being
    // totally possible to construct from a trait implementation (though somewhat less efficient
// compared to a raw VTable). Instead, we have to write out a lot of boilerplate to build a
// waker, which we do here with a trivial Arc<AtomicBool> data element to track woke-ness.
const WAKER_V_TABLE: RawWakerVTable = RawWakerVTable::new(waker_clone, wake, wake_by_ref, drop);
unsafe fn wake_by_ref(ptr: *const ()) { let p = ptr as *
|
} else {
state.callbacks.push(callback);
}
|
random_line_split
|
wakers.rs
|
;
use core::task::{Context, Poll};
use core::pin::Pin;
/// Used to signal to one of many waiters that the condition they're waiting on has happened.
pub(crate) struct Notifier {
notify_pending: Mutex<(bool, Option<Arc<Mutex<FutureState>>>)>,
condvar: Condvar,
}
impl Notifier {
pub(crate) fn new() -> Self {
Self {
notify_pending: Mutex::new((false, None)),
condvar: Condvar::new(),
}
}
pub(crate) fn wait(&self) {
loop {
let mut guard = self.notify_pending.lock().unwrap();
if guard.0 {
guard.0 = false;
return;
}
guard = self.condvar.wait(guard).unwrap();
let result = guard.0;
if result {
guard.0 = false;
return
}
}
}
#[cfg(any(test, feature = "std"))]
pub(crate) fn wait_timeout(&self, max_wait: Duration) -> bool {
let current_time = Instant::now();
loop {
let mut guard = self.notify_pending.lock().unwrap();
if guard.0 {
guard.0 = false;
return true;
}
guard = self.condvar.wait_timeout(guard, max_wait).unwrap().0;
// Due to spurious wakeups that can happen on `wait_timeout`, here we need to check if the
// desired wait time has actually passed, and if not then restart the loop with a reduced wait
// time. Note that this logic can be highly simplified through the use of
// `Condvar::wait_while` and `Condvar::wait_timeout_while`, if and when our MSRV is raised to
// 1.42.0.
let elapsed = current_time.elapsed();
let result = guard.0;
if result || elapsed >= max_wait {
guard.0 = false;
return result;
}
match max_wait.checked_sub(elapsed) {
None => return result,
Some(_) => continue
}
}
}
/// Wake waiters, tracking that wake needs to occur even if there are currently no waiters.
pub(crate) fn notify(&self) {
let mut lock = self.notify_pending.lock().unwrap();
lock.0 = true;
if let Some(future_state) = lock.1.take() {
future_state.lock().unwrap().complete();
}
mem::drop(lock);
self.condvar.notify_all();
}
/// Gets a [`Future`] that will get woken up with any waiters
pub(crate) fn get_future(&self) -> Future {
let mut lock = self.notify_pending.lock().unwrap();
if lock.0 {
Future {
state: Arc::new(Mutex::new(FutureState {
callbacks: Vec::new(),
complete: false,
}))
}
} else if let Some(existing_state) = &lock.1 {
Future { state: Arc::clone(&existing_state) }
} else {
let state = Arc::new(Mutex::new(FutureState {
callbacks: Vec::new(),
complete: false,
}));
lock.1 = Some(Arc::clone(&state));
Future { state }
}
}
#[cfg(any(test, feature = "_test_utils"))]
pub fn notify_pending(&self) -> bool {
self.notify_pending.lock().unwrap().0
}
}
/// A callback which is called when a [`Future`] completes.
///
/// Note that this MUST NOT call back into LDK directly, it must instead schedule actions to be
/// taken later. Rust users should use the [`std::future::Future`] implementation for [`Future`]
/// instead.
///
/// Note that the [`std::future::Future`] implementation may only work for runtimes which schedule
/// futures when they receive a wake, rather than immediately executing them.
pub trait FutureCallback : Send {
/// The method which is called.
fn call(&self);
}
impl<F: Fn() + Send> FutureCallback for F {
fn
|
(&self) { (self)(); }
}
pub(crate) struct FutureState {
callbacks: Vec<Box<dyn FutureCallback>>,
complete: bool,
}
impl FutureState {
fn complete(&mut self) {
for callback in self.callbacks.drain(..) {
callback.call();
}
self.complete = true;
}
}
/// A simple future which can complete once, and calls some callback(s) when it does so.
pub struct Future {
state: Arc<Mutex<FutureState>>,
}
impl Future {
/// Registers a callback to be called upon completion of this future. If the future has already
/// completed, the callback will be called immediately.
pub fn register_callback(&self, callback: Box<dyn FutureCallback>) {
let mut state = self.state.lock().unwrap();
if state.complete {
mem::drop(state);
callback.call();
} else {
state.callbacks.push(callback);
}
}
}
mod std_future {
use core::task::Waker;
pub struct StdWaker(pub Waker);
impl super::FutureCallback for StdWaker {
fn call(&self) { self.0.wake_by_ref() }
}
}
/// (C-not exported) as Rust Futures aren't usable in language bindings.
impl<'a> StdFuture for Future {
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut state = self.state.lock().unwrap();
if state.complete {
Poll::Ready(())
} else {
let waker = cx.waker().clone();
state.callbacks.push(Box::new(std_future::StdWaker(waker)));
Poll::Pending
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use core::sync::atomic::{AtomicBool, Ordering};
use core::future::Future as FutureTrait;
use core::task::{Context, Poll, RawWaker, RawWakerVTable, Waker};
#[cfg(feature = "std")]
#[test]
fn test_wait_timeout() {
use sync::Arc;
use std::thread;
let persistence_notifier = Arc::new(Notifier::new());
let thread_notifier = Arc::clone(&persistence_notifier);
let exit_thread = Arc::new(AtomicBool::new(false));
let exit_thread_clone = exit_thread.clone();
thread::spawn(move || {
loop {
let mut lock = thread_notifier.notify_pending.lock().unwrap();
lock.0 = true;
thread_notifier.condvar.notify_all();
if exit_thread_clone.load(Ordering::SeqCst) {
break
}
}
});
// Check that we can block indefinitely until updates are available.
let _ = persistence_notifier.wait();
// Check that the Notifier will return after the given duration if updates are
// available.
loop {
if persistence_notifier.wait_timeout(Duration::from_millis(100)) {
break
}
}
exit_thread.store(true, Ordering::SeqCst);
// Check that the Notifier will return after the given duration even if no updates
// are available.
loop {
if !persistence_notifier.wait_timeout(Duration::from_millis(100)) {
break
}
}
}
#[test]
fn test_future_callbacks() {
let future = Future {
state: Arc::new(Mutex::new(FutureState {
callbacks: Vec::new(),
complete: false,
}))
};
let callback = Arc::new(AtomicBool::new(false));
let callback_ref = Arc::clone(&callback);
future.register_callback(Box::new(move || assert!(!callback_ref.fetch_or(true, Ordering::SeqCst))));
assert!(!callback.load(Ordering::SeqCst));
future.state.lock().unwrap().complete();
assert!(callback.load(Ordering::SeqCst));
future.state.lock().unwrap().complete();
}
#[test]
fn test_pre_completed_future_callbacks() {
let future = Future {
state: Arc::new(Mutex::new(FutureState {
callbacks: Vec::new(),
complete: false,
}))
};
future.state.lock().unwrap().complete();
let callback = Arc::new(AtomicBool::new(false));
let callback_ref = Arc::clone(&callback);
future.register_callback(Box::new(move || assert!(!callback_ref.fetch_or(true, Ordering::SeqCst))));
assert!(callback.load(Ordering::SeqCst));
assert!(future.state.lock().unwrap().callbacks.is_empty());
}
// Rather annoyingly, there's no safe way in Rust std to construct a Waker despite it being
    // totally possible to construct from a trait implementation (though somewhat less efficient
// compared to a raw VTable). Instead, we have to write out a lot of boilerplate to build a
// waker, which we do here with a trivial Arc<AtomicBool> data element to track woke-ness.
const WAKER_V_TABLE: RawWakerVTable = RawWakerVTable::new(waker_clone, wake, wake_by_ref, drop);
unsafe fn wake_by_ref(ptr: *const ()) { let p = ptr as *
|
call
|
identifier_name
|
wakers.rs
|
;
use core::task::{Context, Poll};
use core::pin::Pin;
/// Used to signal to one of many waiters that the condition they're waiting on has happened.
pub(crate) struct Notifier {
notify_pending: Mutex<(bool, Option<Arc<Mutex<FutureState>>>)>,
condvar: Condvar,
}
impl Notifier {
pub(crate) fn new() -> Self {
Self {
notify_pending: Mutex::new((false, None)),
condvar: Condvar::new(),
}
}
pub(crate) fn wait(&self) {
loop {
let mut guard = self.notify_pending.lock().unwrap();
if guard.0 {
guard.0 = false;
return;
}
guard = self.condvar.wait(guard).unwrap();
let result = guard.0;
if result {
guard.0 = false;
return
}
}
}
#[cfg(any(test, feature = "std"))]
pub(crate) fn wait_timeout(&self, max_wait: Duration) -> bool {
let current_time = Instant::now();
loop {
let mut guard = self.notify_pending.lock().unwrap();
if guard.0 {
guard.0 = false;
return true;
}
guard = self.condvar.wait_timeout(guard, max_wait).unwrap().0;
// Due to spurious wakeups that can happen on `wait_timeout`, here we need to check if the
// desired wait time has actually passed, and if not then restart the loop with a reduced wait
// time. Note that this logic can be highly simplified through the use of
// `Condvar::wait_while` and `Condvar::wait_timeout_while`, if and when our MSRV is raised to
// 1.42.0.
let elapsed = current_time.elapsed();
let result = guard.0;
if result || elapsed >= max_wait {
guard.0 = false;
return result;
}
match max_wait.checked_sub(elapsed) {
None => return result,
Some(_) => continue
}
}
}
/// Wake waiters, tracking that wake needs to occur even if there are currently no waiters.
pub(crate) fn notify(&self) {
let mut lock = self.notify_pending.lock().unwrap();
lock.0 = true;
if let Some(future_state) = lock.1.take() {
future_state.lock().unwrap().complete();
}
mem::drop(lock);
self.condvar.notify_all();
}
/// Gets a [`Future`] that will get woken up with any waiters
pub(crate) fn get_future(&self) -> Future
|
#[cfg(any(test, feature = "_test_utils"))]
pub fn notify_pending(&self) -> bool {
self.notify_pending.lock().unwrap().0
}
}
/// A callback which is called when a [`Future`] completes.
///
/// Note that this MUST NOT call back into LDK directly, it must instead schedule actions to be
/// taken later. Rust users should use the [`std::future::Future`] implementation for [`Future`]
/// instead.
///
/// Note that the [`std::future::Future`] implementation may only work for runtimes which schedule
/// futures when they receive a wake, rather than immediately executing them.
pub trait FutureCallback : Send {
/// The method which is called.
fn call(&self);
}
impl<F: Fn() + Send> FutureCallback for F {
fn call(&self) { (self)(); }
}
pub(crate) struct FutureState {
callbacks: Vec<Box<dyn FutureCallback>>,
complete: bool,
}
impl FutureState {
fn complete(&mut self) {
for callback in self.callbacks.drain(..) {
callback.call();
}
self.complete = true;
}
}
/// A simple future which can complete once, and calls some callback(s) when it does so.
pub struct Future {
state: Arc<Mutex<FutureState>>,
}
impl Future {
/// Registers a callback to be called upon completion of this future. If the future has already
/// completed, the callback will be called immediately.
pub fn register_callback(&self, callback: Box<dyn FutureCallback>) {
let mut state = self.state.lock().unwrap();
if state.complete {
mem::drop(state);
callback.call();
} else {
state.callbacks.push(callback);
}
}
}
mod std_future {
use core::task::Waker;
pub struct StdWaker(pub Waker);
impl super::FutureCallback for StdWaker {
fn call(&self) { self.0.wake_by_ref() }
}
}
/// (C-not exported) as Rust Futures aren't usable in language bindings.
impl<'a> StdFuture for Future {
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut state = self.state.lock().unwrap();
if state.complete {
Poll::Ready(())
} else {
let waker = cx.waker().clone();
state.callbacks.push(Box::new(std_future::StdWaker(waker)));
Poll::Pending
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use core::sync::atomic::{AtomicBool, Ordering};
use core::future::Future as FutureTrait;
use core::task::{Context, Poll, RawWaker, RawWakerVTable, Waker};
#[cfg(feature = "std")]
#[test]
fn test_wait_timeout() {
use sync::Arc;
use std::thread;
let persistence_notifier = Arc::new(Notifier::new());
let thread_notifier = Arc::clone(&persistence_notifier);
let exit_thread = Arc::new(AtomicBool::new(false));
let exit_thread_clone = exit_thread.clone();
thread::spawn(move || {
loop {
let mut lock = thread_notifier.notify_pending.lock().unwrap();
lock.0 = true;
thread_notifier.condvar.notify_all();
if exit_thread_clone.load(Ordering::SeqCst) {
break
}
}
});
// Check that we can block indefinitely until updates are available.
let _ = persistence_notifier.wait();
// Check that the Notifier will return after the given duration if updates are
// available.
loop {
if persistence_notifier.wait_timeout(Duration::from_millis(100)) {
break
}
}
exit_thread.store(true, Ordering::SeqCst);
// Check that the Notifier will return after the given duration even if no updates
// are available.
loop {
if !persistence_notifier.wait_timeout(Duration::from_millis(100)) {
break
}
}
}
#[test]
fn test_future_callbacks() {
let future = Future {
state: Arc::new(Mutex::new(FutureState {
callbacks: Vec::new(),
complete: false,
}))
};
let callback = Arc::new(AtomicBool::new(false));
let callback_ref = Arc::clone(&callback);
future.register_callback(Box::new(move || assert!(!callback_ref.fetch_or(true, Ordering::SeqCst))));
assert!(!callback.load(Ordering::SeqCst));
future.state.lock().unwrap().complete();
assert!(callback.load(Ordering::SeqCst));
future.state.lock().unwrap().complete();
}
#[test]
fn test_pre_completed_future_callbacks() {
let future = Future {
state: Arc::new(Mutex::new(FutureState {
callbacks: Vec::new(),
complete: false,
}))
};
future.state.lock().unwrap().complete();
let callback = Arc::new(AtomicBool::new(false));
let callback_ref = Arc::clone(&callback);
future.register_callback(Box::new(move || assert!(!callback_ref.fetch_or(true, Ordering::SeqCst))));
assert!(callback.load(Ordering::SeqCst));
assert!(future.state.lock().unwrap().callbacks.is_empty());
}
// Rather annoyingly, there's no safe way in Rust std to construct a Waker despite it being
    // totally possible to construct from a trait implementation (though somewhat less efficient
// compared to a raw VTable). Instead, we have to write out a lot of boilerplate to build a
// waker, which we do here with a trivial Arc<AtomicBool> data element to track woke-ness.
const WAKER_V_TABLE: RawWakerVTable = RawWakerVTable::new(waker_clone, wake, wake_by_ref, drop);
unsafe fn wake_by_ref(ptr: *const ()) { let p = ptr as
|
{
let mut lock = self.notify_pending.lock().unwrap();
if lock.0 {
Future {
state: Arc::new(Mutex::new(FutureState {
callbacks: Vec::new(),
complete: false,
}))
}
} else if let Some(existing_state) = &lock.1 {
Future { state: Arc::clone(&existing_state) }
} else {
let state = Arc::new(Mutex::new(FutureState {
callbacks: Vec::new(),
complete: false,
}));
lock.1 = Some(Arc::clone(&state));
Future { state }
}
}
|
identifier_body
|
svm_digits.py
|
dataArr, classLabels):
X = mat(dataArr)
labelMat = mat(classLabels).T
m, n =shape(X)
w = zeros((n, 1))
for i in range(m):
w += multiply(alphas[i] * labelMat[i], X[i,:].T)
return w
class optStruct:
def __init__(self, dataMatIn, classLabels, C, toler, kTup):
self.X = dataMatIn
self.labelMat = classLabels
self.C = C
self.tol = toler
self.m = shape(dataMatIn)[0]
self.alphas = mat(zeros((self.m, 1)))
self.b = 0
self.eCache = mat(zeros((self.m, 2)))
self.K = mat(zeros((self.m, self.m)))
for i in range(self.m):
self.K[:,i] = kernelTrans(self.X, self.X[i, :], kTup)
def calcEK(oS, k):
    '''
    Compute the error.
    :param oS: data structure
    :param k: index of sample k
    :return: EK - the error for sample k
    '''
    # fXK = float(multiply(oS.alphas, oS.labelMat).T *\
    # (oS.X * oS.X[k, :].T)) + oS.b
    fXk = float(multiply(oS.alphas, oS.labelMat).T * oS.K[:,k] + oS.b) # predicted score for sample k (before taking the sign)
    EK = fXk - float(oS.labelMat[k]) # difference between the prediction for sample k and its true label
    return EK # return the (scalar) error for sample k
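# (illustrative note) calcEK evaluates the SMO decision function through the
# precomputed kernel column K[:, k]:
#     f(x_k) = sum_i alpha_i * y_i * K(x_i, x_k) + b,   E_k = f(x_k) - y_k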
def selectJ(i, oS, Ei):
    '''
    Inner-loop heuristic, method 2.
    :param i: index of sample i
    :param oS: data structure
    :param Ei: error for sample i
    :return: j, maxK - index of the selected sample j (or maxK)
             Ej - error for sample j
    '''
    maxK = -1
    maxDeltaE = 0
    Ej = 0 # initialization
    oS.eCache[i] = [1, Ei] # update the error cache using Ei
    validEcacheList = nonzero(oS.eCache[:, 0].A)[0] # indices of samples whose cached error is non-zero
    if (len(validEcacheList)) > 1: # there are non-zero errors
        for k in validEcacheList: # iterate to find the largest error gap
            if k == i: # skip i itself; computing it would waste time
                continue
            Ek = calcEK(oS, k) # compute Ek
            deltaE = abs(Ei - Ek) # absolute difference |Ei - Ek|
            if (deltaE > maxDeltaE): # compare against the current maximum difference
                maxK = k # if the difference is larger, keep the index of the new maximum
                maxDeltaE = deltaE
                Ej = Ek
        return maxK, Ej # return maxK, Ej
    else: # all cached errors are zero
        j = selectJrand(i, oS.m) # pick the index of alpha_j at random
        Ej = calcEK(oS, j) # compute the corresponding error Ej
        return j, Ej # return index j and error Ej
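# (illustrative note) the heuristic above chooses j to maximize |Ei - Ek|,
# which maximizes the size of the joint (alpha_i, alpha_j) update step and
# tends to speed up SMO convergence.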
def updataEK(oS, k):
    '''
    Compute the error Ek and update the error cache.
    :param oS: data structure
    :param k: index of sample k
    :return: None
    '''
Ek = calcEK(oS, k)
oS.eCache[k] = [1, Ek]
def innerL(i, oS):
    '''
    The optimization step of the SMO algorithm.
    :param i: index of sample i
    :param oS: data structure
    :return: 1 - some pair of alpha values changed
             0 - no pair of alpha values changed, or the change was too small
    '''
    Ei = calcEK(oS, i) # compute the error
    # optimize alpha, allowing some tolerance
    if ((oS.labelMat[i] * Ei < - oS.tol) and (oS.alphas[i] < oS.C)) or \
            ((oS.labelMat[i] * Ei > oS.tol) and (oS.alphas[i] > 0)):
        j, Ej = selectJ(i, oS, Ei) # select alpha_j via inner-loop heuristic 2 and compute Ej
        alphaIold = oS.alphas[i].copy() # save the old alpha values via deep copies (new variables)
        alphaJold = oS.alphas[j].copy()
        # step 2: compute the bounds L and H
if (oS.labelMat[i] != oS.labelMat[j]):
L = max(0, oS.alphas[j] - oS.alphas[i])
H = min(oS.C, oS.C + oS.alphas[j] - oS.alphas[i])
else:
L = max(0, oS.alphas[j] + oS.alphas[i] - oS.C)
H = min(oS.C, oS.alphas[j] + oS.alphas[i])
if L == H:
print('L == H')
return 0
# eta = 2.0 * oS.X[i,:] * oS.X[j,:].T -oS.X[i,:] * oS.X[i,:].T -oS.X[j,:] * oS.X[j,:].T
        # step 3: compute eta
eta = 2.0 * oS.K[i,j] - oS.K[i,i] - oS.K[j,j]
if eta >= 0:
print('eta >= 0')
return 0
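        # (illustrative note) the update below is the standard second-order SMO step:
        #     alpha_j_new = alpha_j_old - y_j * (E_i - E_j) / eta,
        # where eta = 2*K(i,j) - K(i,i) - K(j,j) is negative for a strictly
        # positive-definite kernel, hence the guard above.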
        # step 4: update alpha_j (the unclipped solution)
        oS.alphas[j] -= oS.labelMat[j] * (Ei - Ej) / eta
        # step 5: clip alpha_j
        oS.alphas[j] = clipAlpha(oS.alphas[j], H, L)
        # update Ej in the error cache
updataEK(oS, j)
if (abs(oS.alphas[j] - alphaJold) < 0.00001):
print('j not moving enough')
return 0
        # step 6: update alpha_i
        oS.alphas[i] += oS.labelMat[j] * oS.labelMat[i] *\
                        (alphaJold - oS.alphas[j])
        # update Ei in the error cache
updataEK(oS, i)
# b1 = oS.b - Ei - oS.labelMat[i] * (oS.alphas[i] - alphaIold) *\
# oS.X[i,:] * oS.X[j,:].T - oS.labelMat[j] *\
# (oS.alphas[j,:] - alphaJold) * oS.X[i,:] * oS.X[j,:].T
        # step 7: compute b_1 and b_2
b1 = oS.b - Ei - oS.labelMat[i] * (oS.alphas[i] - alphaIold) * oS.K[i,i] -\
oS.labelMat[j] * (oS.alphas[j] - alphaJold) * oS.K[i,j]
# b2 = oS.b - Ej - oS.labelMat[i] * (oS.alphas[i] - alphaIold) *\
# oS.X[i,:] * oS.X[i,:].T - oS.labelMat[j] *\
# (oS.alphas[j] - alphaJold) * oS.X[j,:] * oS.X[j,:].T
b2 = oS.b - Ej - oS.labelMat[i] * (oS.alphas[i] - alphaIold) * oS.K[i,j] -\
oS.labelMat[j] * (oS.alphas[j] - alphaJold) * oS.K[j
|
]
dataMatrix = mat(dataMat)
labelMatrix = mat(labelMat).T
for i in range(m):
index = 0
if (alphas[i] > 0) and (labelMatrix[i] > 0):
index = i
break
b1 = zeros((1,1))
b = labelMatrix[index, :]
for i in range(m):
b1 += alphas[i] * labelMatrix[i] * dataMatrix[i,:] * dataMatrix[index,:].T
b -= b1
return b
def calcWs(alphas,
|
identifier_body
|
|
svm_digits.py
|
    the error for sample k
    '''
    # fXK = float(multiply(oS.alphas, oS.labelMat).T *\
    # (oS.X * oS.X[k, :].T)) + oS.b
    fXk = float(multiply(oS.alphas, oS.labelMat).T * oS.K[:,k] + oS.b) # predicted score for sample k (before taking the sign)
    EK = fXk - float(oS.labelMat[k]) # difference between the prediction for sample k and its true label
    return EK # return the (scalar) error for sample k
def selectJ(i, oS, Ei):
    '''
    Inner-loop heuristic, method 2.
    :param i: index of sample i
    :param oS: data structure
    :param Ei: error for sample i
    :return: j, maxK - index of the selected sample j (or maxK)
             Ej - error for sample j
    '''
    maxK = -1
    maxDeltaE = 0
    Ej = 0 # initialization
    oS.eCache[i] = [1, Ei] # update the error cache using Ei
    validEcacheList = nonzero(oS.eCache[:, 0].A)[0] # indices of samples whose cached error is non-zero
    if (len(validEcacheList)) > 1: # there are non-zero errors
        for k in validEcacheList: # iterate to find the largest error gap
            if k == i: # skip i itself; computing it would waste time
                continue
            Ek = calcEK(oS, k) # compute Ek
            deltaE = abs(Ei - Ek) # absolute difference |Ei - Ek|
            if (deltaE > maxDeltaE): # compare against the current maximum difference
                maxK = k # if the difference is larger, keep the index of the new maximum
                maxDeltaE = deltaE
                Ej = Ek
        return maxK, Ej # return maxK, Ej
    else: # all cached errors are zero
        j = selectJrand(i, oS.m) # pick the index of alpha_j at random
        Ej = calcEK(oS, j) # compute the corresponding error Ej
        return j, Ej # return index j and error Ej
def updataEK(oS, k):
    '''
    Compute the error Ek and update the error cache.
    :param oS: data structure
    :param k: index of sample k
    :return: None
    '''
Ek = calcEK(oS, k)
oS.eCache[k] = [1, Ek]
def innerL(i, oS):
    '''
    The optimization step of the SMO algorithm.
    :param i: index of sample i
    :param oS: data structure
    :return: 1 - some pair of alpha values changed
             0 - no pair of alpha values changed, or the change was too small
    '''
    Ei = calcEK(oS, i) # compute the error
    # optimize alpha, allowing some tolerance
    if ((oS.labelMat[i] * Ei < - oS.tol) and (oS.alphas[i] < oS.C)) or \
            ((oS.labelMat[i] * Ei > oS.tol) and (oS.alphas[i] > 0)):
        j, Ej = selectJ(i, oS, Ei) # select alpha_j via inner-loop heuristic 2 and compute Ej
        alphaIold = oS.alphas[i].copy() # save the old alpha values via deep copies (new variables)
        alphaJold = oS.alphas[j].copy()
        # step 2: compute the bounds L and H
if (oS.labelMat[i] != oS.labelMat[j]):
L = max(0, oS.alphas[j] - oS.alphas[i])
H = min(oS.C, oS.C + oS.alphas[j] - oS.alphas[i])
else:
L = max(0, oS.alphas[j] + oS.alphas[i] - oS.C)
H = min(oS.C, oS.alphas[j] + oS.alphas[i])
if L == H:
print('L == H')
return 0
# eta = 2.0 * oS.X[i,:] * oS.X[j,:].T -oS.X[i,:] * oS.X[i,:].T -oS.X[j,:] * oS.X[j,:].T
        # step 3: compute eta
eta = 2.0 * oS.K[i,j] - oS.K[i,i] - oS.K[j,j]
if eta >= 0:
print('eta >= 0')
return 0
        # step 4: update alpha_j (the unclipped solution)
        oS.alphas[j] -= oS.labelMat[j] * (Ei - Ej) / eta
        # step 5: clip alpha_j
        oS.alphas[j] = clipAlpha(oS.alphas[j], H, L)
        # update Ej in the error cache
updataEK(oS, j)
if (abs(oS.alphas[j] - alphaJold) < 0.00001):
print('j not moving enough')
return 0
        # step 6: update alpha_i
        oS.alphas[i] += oS.labelMat[j] * oS.labelMat[i] *\
                        (alphaJold - oS.alphas[j])
        # update Ei in the error cache
updataEK(oS, i)
# b1 = oS.b - Ei - oS.labelMat[i] * (oS.alphas[i] - alphaIold) *\
# oS.X[i,:] * oS.X[j,:].T - oS.labelMat[j] *\
# (oS.alphas[j,:] - alphaJold) * oS.X[i,:] * oS.X[j,:].T
        # step 7: compute b_1 and b_2
b1 = oS.b - Ei - oS.labelMat[i] * (oS.alphas[i] - alphaIold) * oS.K[i,i] -\
oS.labelMat[j] * (oS.alphas[j] - alphaJold) * oS.K[i,j]
# b2 = oS.b - Ej - oS.labelMat[i] * (oS.alphas[i] - alphaIold) *\
# oS.X[i,:] * oS.X[i,:].T - oS.labelMat[j] *\
# (oS.alphas[j] - alphaJold) * oS.X[j,:] * oS.X[j,:].T
|
phas[i] - alphaIold) * oS.K[i,j] -\
oS.labelMat[j] * (oS.alphas[j] - alphaJold) * oS.K[j,j]
        # step 8: update b from b_1 and b_2
        if (0 < oS.alphas[i]) and (oS.C > oS.alphas[i]):
            oS.b = b1
        elif (0 < oS.alphas[j]) and (oS.C > oS.alphas[j]):
            oS.b = b2
        else: # alpha[i] and alpha[j] are 0 or C, so every b between b_1 and b_2 satisfies the KKT conditions; conventionally use their midpoint for oS.b
oS.b = (b1 + b2) / 2.0
return 1
else:
return 0
def smoP(dataMatIn, classLabels, C, toler, maxIter, kTup = ('lin', 0)):
    '''
    :param dataMatIn: data set
    :param classLabels: labels of the data set
    :param C: slack-variable penalty
    :param toler: tolerance used in the KKT checks
    :param maxIter: maximum number of iterations
    :param kTup: tuple describing the kernel
    :return: the bias b and the Lagrange multipliers alpha
    '''
    oS = optStruct(mat(dataMatIn), mat(classLabels).transpose(), C, toler, kTup) # build the data structure
    iter = 0
    entireSet = True
    alphaPairsChanged = 0 # no alpha pair has changed yet
    while (iter < maxIter) and ((alphaPairsChanged > 0) or (entireSet)):
        alphaPairsChanged = 0
        if entireSet: # full pass over the whole set
            for i in range(oS.m):
                alphaPairsChanged += innerL(i, oS
|
b2 = oS.b - Ej - oS.labelMat[i] * (oS.al
|
conditional_block
|
svm_digits.py
|
break
b1 = zeros((1,1))
b = labelMatrix[index, :]
for i in range(m):
b1 += alphas[i] * labelMatrix[i] * dataMatrix[i,:] * dataMatrix[index,:].T
b -= b1
return b
def calcWs(alphas, dataArr, classLabels):
X = mat(dataArr)
labelMat = mat(classLabels).T
    m, n = shape(X)
w = zeros((n, 1))
for i in range(m):
w += multiply(alphas[i] * labelMat[i], X[i,:].T)
return w
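A minimal usage sketch, not part of the original file: it assumes smoP returns (b, alphas) as its docstring states, that the module does a star import from numpy (the bare mat/zeros/shape calls above suggest this), and that dataArr/labelArr are hypothetical lists of feature rows and +1/-1 labels. Note the weight vector from calcWs is only meaningful for the linear kernel; with 'rbf', predict through the kernel expansion instead.
def demo_linear_classify(dataArr, labelArr):
    # Illustrative sketch only: train with the linear kernel, then classify every training sample.
    b, alphas = smoP(dataArr, labelArr, 200, 0.0001, 10000, ('lin', 0))
    w = calcWs(alphas, dataArr, labelArr)
    # linear decision rule: sign(x . w + b)
    return [float(sign(mat(dataArr)[i] * mat(w) + b)) for i in range(len(dataArr))]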
class optStruct:
def __init__(self, dataMatIn, classLabels, C, toler, kTup):
self.X = dataMatIn
self.labelMat = classLabels
self.C = C
self.tol = toler
self.m = shape(dataMatIn)[0]
        self.alphas = mat(zeros((self.m, 1))) # Lagrange multipliers, one per sample
        self.b = 0 # bias
        self.eCache = mat(zeros((self.m, 2))) # error cache: column 0 = valid flag, column 1 = cached error
        self.K = mat(zeros((self.m, self.m))) # precomputed kernel matrix
        for i in range(self.m):
            self.K[:,i] = kernelTrans(self.X, self.X[i, :], kTup) # column i = kernel of every sample against sample i
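kernelTrans is called above but not included in this excerpt. A minimal sketch consistent with these call sites (linear or Gaussian RBF, with kTup = ('lin', 0) or ('rbf', sigma)) could look like the following; treat it as an assumption, not the original implementation:
def kernelTrans(X, A, kTup):
    # Sketch only: maps every row of X against the single sample A, returning an m x 1 column.
    m, n = shape(X)
    K = mat(zeros((m, 1)))
    if kTup[0] == 'lin':                  # linear kernel: K[j] = X[j] . A
        K = X * A.T
    elif kTup[0] == 'rbf':                # Gaussian RBF with width kTup[1]
        for j in range(m):
            deltaRow = X[j, :] - A
            K[j] = deltaRow * deltaRow.T  # squared Euclidean distance
        K = exp(K / (-1 * kTup[1] ** 2))  # elementwise exponential
    else:
        raise NameError('Kernel not recognized: %s' % kTup[0])
    return K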
def calcEK(oS, k):
    '''
    Compute the error
    :param oS: data structure
    :param k: index of sample k
    :return: EK - the error for sample k
    '''
    # fXK = float(multiply(oS.alphas, oS.labelMat).T *\
    #            (oS.X * oS.X[k, :].T)) + oS.b
    fXk = float(multiply(oS.alphas, oS.labelMat).T * oS.K[:,k] + oS.b) # raw prediction for sample k (before thresholding)
    EK = fXk - float(oS.labelMat[k]) # difference between the prediction and the true label of sample k
    return EK # return the (scalar) error of sample k
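In formula form, calcEK evaluates the current decision function and its residual on sample $k$:

$$f(x_k) = \sum_{i=1}^{m} \alpha_i y_i\, K(x_i, x_k) + b, \qquad E_k = f(x_k) - y_k.$$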
def selectJ(i, oS, Ei):
    '''
    Inner-loop heuristic no. 2
    :param i: index of sample i
    :param oS: data structure
    :param Ei: error of sample i
    :return: j, maxK - index of the chosen sample (j or maxK)
             Ej - error of sample j
    '''
    maxK = -1
    maxDeltaE = 0
    Ej = 0 # initialization
    oS.eCache[i] = [1, Ei] # update the error cache with Ei
    validEcacheList = nonzero(oS.eCache[:, 0].A)[0] # indices of samples whose cached error is valid (non-zero flag)
    if len(validEcacheList) > 1: # there are valid cached errors
        for k in validEcacheList: # scan for the largest error gap
            if k == i: # skip i itself, no point recomputing it
                continue
            Ek = calcEK(oS, k) # compute Ek
            deltaE = abs(Ei - Ek) # absolute difference |Ei - Ek|
            if (deltaE > maxDeltaE): # keep the largest difference seen so far
                maxK = k # remember the index giving the largest step
                maxDeltaE = deltaE
                Ej = Ek
        return maxK, Ej # return maxK, Ej
    else: # no valid cached errors yet (e.g. the first pass)
        j = selectJrand(i, oS.m) # pick the index of alpha_j at random
        Ej = calcEK(oS, j) # compute the corresponding error Ej
        return j, Ej # return index j and error Ej
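A quick worked illustration of the heuristic (hypothetical numbers): with Ei = 0.8 and cached errors E1 = -0.5, E4 = 0.6 and E7 = -0.9, selectJ returns k = 7, because |0.8 - (-0.9)| = 1.7 is the largest gap; maximizing |Ei - Ek| picks the alpha_j that permits the biggest step per iteration and tends to speed up convergence.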
def updataEK(oS, k):
    '''
    Compute the error Ek and update the error cache
    ("updata" is a typo for "update", kept as-is to match the call sites)
    :param oS: data structure
    :param k: index of sample k
    :return: None
    '''
    Ek = calcEK(oS, k)
    oS.eCache[k] = [1, Ek]
def innerL(i, oS):
    '''
    The optimization step of the SMO algorithm
    :param i: index of sample i
    :param oS: data structure
    :return: 1 - some pair of alphas changed
             0 - no pair of alphas changed, or the change was too small
    '''
    Ei = calcEK(oS, i) # compute the error
    # optimize alpha within the given tolerance
    if ((oS.labelMat[i] * Ei < - oS.tol) and (oS.alphas[i] < oS.C)) or \
            ((oS.labelMat[i] * Ei > oS.tol) and (oS.alphas[i] > 0)):
        j, Ej = selectJ(i, oS, Ei) # choose alpha_j with inner-loop heuristic no. 2 and compute Ej
        alphaIold = oS.alphas[i].copy() # save the alphas before the update; .copy() creates new objects
        alphaJold = oS.alphas[j].copy()
        # step 2: compute the bounds L and H
        if (oS.labelMat[i] != oS.labelMat[j]):
            L = max(0, oS.alphas[j] - oS.alphas[i])
            H = min(oS.C, oS.C + oS.alphas[j] - oS.alphas[i])
        else:
            L = max(0, oS.alphas[j] + oS.alphas[i] - oS.C)
            H = min(oS.C, oS.alphas[j] + oS.alphas[i])
        if L == H:
            print('L == H')
            return 0
        # eta = 2.0 * oS.X[i,:] * oS.X[j,:].T -oS.X[i,:] * oS.X[i,:].T -oS.X[j,:] * oS.X[j,:].T
        # step 3: compute eta
        eta = 2.0 * oS.K[i,j] - oS.K[i,i] - oS.K[j,j]
        if eta >= 0:
            print('eta >= 0')
            return 0
        # step 4: update alpha_j (this is the unclipped solution)
        oS.alphas[j] -= oS.labelMat[j] * (Ei - Ej) / eta
        # step 5: clip alpha_j
        oS.alphas[j] = clipAlpha(oS.alphas[j], H, L)
        # write Ej back into the error cache
        updataEK(oS, j)
        if (abs(oS.alphas[j] - alphaJold) < 0.00001):
            print('j not moving enough')
            return 0
        # step 6: update alpha_i
        oS.alphas[i] += oS.labelMat[j] * oS.labelMat[i] *\
                        (alphaJold - oS.alphas[j])
        # write Ei back into the error cache
        updataEK(oS, i)
        # b1 = oS.b - Ei - oS.labelMat[i] * (oS.alphas[i] - alphaIold) *\
        #      oS.X[i,:] * oS.X[j,:].T - oS.labelMat[j] *\
        #      (oS.alphas[j,:] - alphaJold) * oS.X[i,:] * oS.X[j,:].T
        # step 7: update b_1 and b_2
        b1 = oS.b - Ei - oS.labelMat[i] * (oS.alphas[i] - alphaIold) * oS.K[i,i] -\
             oS.labelMat[j] * (oS.alphas[j] - alphaJold) * oS.K[i,j]
        # b2 = oS.b - Ej - oS.labelMat[i] * (oS.alphas[i] - alphaIold) *\
        #      oS.X[i,:] * oS.X[i,:].T - oS.labelMat[j] *\
        #      (oS.alphas[j] - alphaJold) * oS.X[j,:] * oS.X[j,:].T
        b2 = oS.b - Ej - oS.labelMat[i] * (oS.alphas[i] - alphaIold) * oS.K[i,j] -\
             oS.labelMat[j] * (oS.alphas[j] - alphaJold) * oS.K[j,j]
        # step 8: update b from b_1 and b_2
        if (0 < oS
|
index = 0
if (alphas[i] > 0) and (labelMatrix[i] > 0):
index = i
|
random_line_split
|
|
svm_digits.py
|
j > H:
aj = H
elif L > aj:
aj = L
return aj
def calcB(dataMat, labelMat, alphas):
m = shape(dataMat)[0]
dataMatrix = mat(dataMat)
labelMatrix = mat(labelMat).T
    index = 0
    for i in range(m):
        if (alphas[i] > 0) and (labelMatrix[i] > 0): # find a support vector with a positive label
            index = i
            break
b1 = zeros((1,1))
b = labelMatrix[index, :]
for i in range(m):
b1 += alphas[i] * labelMatrix[i] * dataMatrix[i,:] * dataMatrix[index,:].T
b -= b1
return b
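calcB picks the first support vector with a positive label ($\alpha_s > 0$, $y_s > 0$) and, using the fact that a non-bound support vector lies exactly on the margin ($f(x_s) = y_s$), solves for the bias:

$$b = y_s - \sum_{i=1}^{m} \alpha_i y_i\, x_i^{\top} x_s.$$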
def calcWs(alphas, dataArr, classLabels):
X = mat(dataArr)
labelMat = mat(classLabels).T
    m, n = shape(X)
w = zeros((n, 1))
for i in range(m):
w += multiply(alphas[i] * labelMat[i], X[i,:].T)
return w
class optStruct:
def __init__(self, dataMatIn, classLabels, C, toler, kTup):
self.X = dataMatIn
self.labelMat = classLabels
self.C = C
self.tol = toler
self.m = shape(dataMatIn)[0]
        self.alphas = mat(zeros((self.m, 1))) # Lagrange multipliers, one per sample
        self.b = 0 # bias
        self.eCache = mat(zeros((self.m, 2))) # error cache: column 0 = valid flag, column 1 = cached error
        self.K = mat(zeros((self.m, self.m))) # precomputed kernel matrix
        for i in range(self.m):
            self.K[:,i] = kernelTrans(self.X, self.X[i, :], kTup) # column i = kernel of every sample against sample i
def calcEK(oS, k):
    '''
    Compute the error
    :param oS: data structure
    :param k: index of sample k
    :return: EK - the error for sample k
    '''
    # fXK = float(multiply(oS.alphas, oS.labelMat).T *\
    #            (oS.X * oS.X[k, :].T)) + oS.b
    fXk = float(multiply(oS.alphas, oS.labelMat).T * oS.K[:,k] + oS.b) # raw prediction for sample k (before thresholding)
    EK = fXk - float(oS.labelMat[k]) # difference between the prediction and the true label of sample k
    return EK # return the (scalar) error of sample k
def selectJ(i, oS, Ei):
    '''
    Inner-loop heuristic no. 2
    :param i: index of sample i
    :param oS: data structure
    :param Ei: error of sample i
    :return: j, maxK - index of the chosen sample (j or maxK)
             Ej - error of sample j
    '''
    maxK = -1
    maxDeltaE = 0
    Ej = 0 # initialization
    oS.eCache[i] = [1, Ei] # update the error cache with Ei
    validEcacheList = nonzero(oS.eCache[:, 0].A)[0] # indices of samples whose cached error is valid (non-zero flag)
    if len(validEcacheList) > 1: # there are valid cached errors
        for k in validEcacheList: # scan for the largest error gap
            if k == i: # skip i itself, no point recomputing it
                continue
            Ek = calcEK(oS, k) # compute Ek
            deltaE = abs(Ei - Ek) # absolute difference |Ei - Ek|
            if (deltaE > maxDeltaE): # keep the largest difference seen so far
                maxK = k # remember the index giving the largest step
                maxDeltaE = deltaE
                Ej = Ek
        return maxK, Ej # return maxK, Ej
    else: # no valid cached errors yet (e.g. the first pass)
        j = selectJrand(i, oS.m) # pick the index of alpha_j at random
        Ej = calcEK(oS, j) # compute the corresponding error Ej
        return j, Ej # return index j and error Ej
def updataEK(oS, k):
    '''
    Compute the error Ek and update the error cache
    ("updata" is a typo for "update", kept as-is to match the call sites)
    :param oS: data structure
    :param k: index of sample k
    :return: None
    '''
    Ek = calcEK(oS, k)
    oS.eCache[k] = [1, Ek]
def innerL(i, oS):
    '''
    The optimization step of the SMO algorithm
    :param i: index of sample i
    :param oS: data structure
    :return: 1 - some pair of alphas changed
             0 - no pair of alphas changed, or the change was too small
    '''
    Ei = calcEK(oS, i) # compute the error
    # optimize alpha within the given tolerance
    if ((oS.labelMat[i] * Ei < - oS.tol) and (oS.alphas[i] < oS.C)) or \
            ((oS.labelMat[i] * Ei > oS.tol) and (oS.alphas[i] > 0)):
        j, Ej = selectJ(i, oS, Ei) # choose alpha_j with inner-loop heuristic no. 2 and compute Ej
        alphaIold = oS.alphas[i].copy() # save the alphas before the update; .copy() creates new objects
        alphaJold = oS.alphas[j].copy()
        # step 2: compute the bounds L and H
        if (oS.labelMat[i] != oS.labelMat[j]):
            L = max(0, oS.alphas[j] - oS.alphas[i])
            H = min(oS.C, oS.C + oS.alphas[j] - oS.alphas[i])
        else:
            L = max(0, oS.alphas[j] + oS.alphas[i] - oS.C)
            H = min(oS.C, oS.alphas[j] + oS.alphas[i])
        if L == H:
            print('L == H')
            return 0
        # eta = 2.0 * oS.X[i,:] * oS.X[j,:].T -oS.X[i,:] * oS.X[i,:].T -oS.X[j,:] * oS.X[j,:].T
        # step 3: compute eta
        eta = 2.0 * oS.K[i,j] - oS.K[i,i] - oS.K[j,j]
        if eta >= 0:
            print('eta >= 0')
            return 0
        # step 4: update alpha_j (this is the unclipped solution)
        oS.alphas[j] -= oS.labelMat[j] * (Ei - Ej) / eta
        # step 5: clip alpha_j
        oS.alphas[j] = clipAlpha(oS.alphas[j], H, L)
        # write Ej back into the error cache
        updataEK(oS, j)
        if (abs(oS.alphas[j] - alphaJold) < 0.00001):
            print('j not moving enough')
            return 0
        # step 6: update alpha_i
        oS.alphas[i] += oS.labelMat[j] * oS.labelMat[i] *\
                        (alphaJold - oS.alphas[j])
        # write Ei back into the error cache
        updataEK(oS, i)
        # b1 = oS.b - Ei - oS.labelMat[i] * (oS.alphas[i] - alphaIold) *\
        #      oS.X[i,:] * oS.X[j,:].T - oS.labelMat[j] *\
        #      (oS.alphas[j,:] - alphaJold) * oS.X[i,:] * oS.X[j,:].T
        # step 7: update b_1 and b_2
        b1 = oS.b - Ei - oS.labelMat[i] * (oS.alphas[i] - alphaIold) * oS.K[i,i] -\
             oS.labelMat[j] * (oS.alphas[j] - alphaJold) * oS.K[i,j]
        # b2 = oS.b - Ej - oS.labelMat[i] * (oS.alphas[i] - alphaIold) *\
        #      oS.X[i,:] * oS.X[i,:].T - oS.labelMat[j] *\
        #      (oS.alphas[j] - alphaJold) * oS.X[j,:] * oS.X[j,:].T
b2 = oS.b - Ej - oS.label
|
if a
|
identifier_name
|
|
boilerplate.js
|
function () {
// Open external link in new windows
$('a[href^="http://"]').filter(function () {
return this.hostname && this.hostname !== location.hostname;
}).attr('target', '_blank');
// build an animated footer
$('#animated').each(function () {
$(this).hover(function () {
$(this).stop().animate({
opacity: 0.9
}, 400);
}, function () {
$(this).stop().animate({
opacity: 0.0
}, 200);
});
});
// scroll to top on request
if ($("a#totop").length) Limit.scrollToTop("a#totop");
// setup content boxes
if ($(".content-box").length) {
$(".content-box .head").css({
"cursor": "s-resize"
});
$(".content-box .body").slideUp();
// Give the header in content-box a different cursor
$(".content-box .head").toggle(
function () {
var that = this;
$(this).parent().find('.body').slideDown('slow', function () {
$(that).find('.caption').toggleClass("content-box-closed");
}); // Toggle the content
}, function () {
var that = this;
$(this).parent().find('.body').slideUp('slow', function () {
$(that).find('.caption').toggleClass("content-box-closed");
}); // Toggle the content
});
}
// custom tooltips to replace the default browser tooltips for <a title=""> <div title=""> and <span title="">
//$("a[title], div[title], span[title]").tipTip();
// if($('.dropdown-toggle').length){
// $('.dropdown-toggle').dropdown();
// }
if ($(".collapse").length) {
//$(".collapse").collapse();
}
}
if ($('#list-grid-toggle').length) {
$('.list-toggle-btn').click(function (e) {
if ($(e.currentTarget).hasClass("list-toggle-btn-up")) {
$(e.currentTarget).toggleClass("list-toggle-btn-on list-toggle-btn-up");
$('.grid-toggle-btn').toggleClass("grid-toggle-btn-on grid-toggle-btn-up");
$('#list-grid-toggle').addClass("videolist-heng");
}
e.preventDefault();
});
$('.grid-toggle-btn').click(function (e) {
if ($(e.currentTarget).hasClass("grid-toggle-btn-up")) {
$('.list-toggle-btn').toggleClass("list-toggle-btn-on list-toggle-btn-up");
$(e.currentTarget).toggleClass("grid-toggle-btn-on grid-toggle-btn-up");
$('#list-grid-toggle').removeClass("videolist-heng");
}
e.preventDefault();
});
}
$(function () {
$('.toggle-info').slideUp('fast');
$('.toggle-info-btn').toggle(
function () {
var that = this;
$("#" + $(this).attr('rel')).slideDown(
'slow', function () {
$(that).children().html('隐藏信息');
}
);
},
function () {
var that = this;
$("#" + $(this).attr('rel')).slideUp(
'fast', function () {
$(that).children().html('显示信息');
}
);
}
)
});
(function (doc) {
var addEvent = 'addEventListener',
type = 'gesturestart',
qsa = 'querySelectorAll',
scales = [1, 1],
meta = qsa in doc ? doc[qsa]('meta[name=viewport]') : [];
function fix() {
meta.content = 'width=device-width,minimum-scale=' + scales[0] + ',maximum-scale=' + scales[1];
doc.removeEventListener(type, fix, true);
}
if ((meta = meta[meta.length - 1]) && addEvent in doc) {
fix();
scales = [0.25, 1.6];
doc[addEvent](type, fix, true);
}
}(document));
function is_ie() {
return (/MSIE/g).test(window.navigator.userAgent);
}
//Plugin start
(function ($) {
var methods =
{
init: function (options) {
return this.each(function () {
var _this = $(this);
_this.data('marquee', options);
var _li = $('>li', _this);
_this.wrap('<div class="slide_container"></div>')
.height(_this.height())
.hover(function () { if ($(this).data('marquee').stop) { $(this).stop(true, false); } },
function () { if ($(this).data('marquee').stop) { $(this).marquee('slide'); } })
.parent()
.css({ position: 'relative', overflow: 'hidden', 'height': $('>li', _this).height() })
.find('>ul')
.css({ width: screen.width * 2, position: 'absolute' });
for (var i = 0; i < Math.ceil((screen.width * 3) / _this.width()) ; ++i) {
_this.append(_li.clone());
}
_this.marquee('slide');
});
},
slide: function () {
var $this = this;
$this.animate({ 'left': $('>li', $this).width() * -1 },
$this.data('marquee').duration,
'swing',
function () {
$this.css('left', 0).append($('>li:first', $this));
$this.delay($this.data('marquee').delay).marquee('slide');
}
);
}
};
$.fn.marquee = function (m) {
var settings = {
'delay': 2000,
'duration': 900,
'stop': true
};
if (typeof m === 'object' || !m) {
if (m) {
$.extend(settings, m);
}
return methods.init.apply(this, [settings]);
}
else {
return methods[m].apply(this);
}
};
}
)(jQuery);
//Plugin end
//call
$(document).ready(
function () { $('.slide').marquee({ delay: 3000 }); }
);
$(window).on('load resize', function () {
//if (getInternetExplorerVersion() != 8.0) {
// var height = $(document).height() - $('#header').height() - $('#footer').height();
// $('.app-sidebar').height(height - 32);
//}
});
function getInternetExplorerVersion()
// Returns the version of Internet Explorer or a -1
|
var rv = -1; // Return value assumes failure.
if (navigator.appName == 'Microsoft Internet Explorer') {
var ua = navigator.userAgent;
var re = new RegExp("MSIE ([0-9]{1,}[.0-9]{0,})");
if (re.exec(ua) != null)
rv = parseFloat(RegExp.$1);
}
return rv;
}
$(function () {
if ($("#nav-category").length) {
$(".nav-list:first a").click(function (e) {
if ($(e.currentTarget).attr("data-target") !== undefined) {
                //fix panels that start expanded (with the "in" class)
$(".nav-list:first .nav-list .in").collapse("toggle");
$(".nav-list:first .nav-list .collapse").filter(".in").collapse("hide");
}
})
}
});
$(document).ready(
function () {
if ($(".flash_messages").children().filter("p").html() !== "message-here") {
$(".flash_messages").fadeIn();
}
}
);
window.flash = function flash(str) {
if($(".flash_messages")){
var temp = $(".flash_messages").first().clone(true);
$(temp).children().filter("p").html(str);
$(".flash_messages").last().before($(temp));
$(temp).fadeIn();
setTimeout(function () {
$(temp).fadeOut();
}, 10000);
}
}
// window.alert = function alert(str) {
// flash(str);
// }
/*!
* bootstrap-calendar plugin
* Original author: @ahmontero
* Licensed under the MIT license
*
* jQuery lightweight plugin boilerplate
* Original author: @ajpiano
* Further changes, comments: @addyosmani
* Licensed under the MIT license
*/
// the semi-colon before the function invocation is a safety
// net against concatenated scripts and/or other plugins
// that are not closed properly.
;(function ($, window, document, undefined) {
(function () {
var cache = {};
this.tmpl = function tmpl(str, data) {
// Figure out if we're getting a template, or if we need to
// load the template - and be sure to cache the result.
var fn = !/\W/.test(str) ?
cache[str] = cache[str] ||
tmpl(document.getElementById(str).innerHTML) :
// Generate a reusable function that will serve as a template
// generator (and which will be cached).
/*jshint -W054 */
new Function("obj",
"var p=[],print=function(){p.push.apply(p,arguments);};" +
// Introduce the data as local variables using with(){}
"with(obj){p.push('" +
// Convert the template into pure JavaScript
str
.replace(/[\r\t\n]/g, " ")
.split("<%").join("\t")
.replace(/((^
|
// (indicating the use of another browser).
{
|
random_line_split
|
boilerplate.js
|
curTop = document.body.scrollTop;
ad.DirV = true;
}
ad.style.left = curLeft + (ad.DirH ? 1 : -1) + "px";
ad.style.top = curTop + (ad.DirV ? 1 : -1) + "px";
}
}
}
/*---------------------------------------------------------------------
Template Name: lim.it
Version: 1.0
Release Date: July 12, 2010
File: lim.it.js
Updated: 2010-07-12
Copyright (c) 2010 Chanry Ian - http://wcweb.us
-----------------------------------------------------------------------
WARNING! DO NOT EDIT THIS FILE UNLESS YOU KNOW WHAT YOU ARE DOING!
---------------------------------------------------------------------*/
// global use
// sometimes you can preload some images
// imageObj = new Image();
// imgs = ["/static/style/img/toggle.gif", "/static/style/img/nyro/ajaxLoader.gif", "/static/style/img/nyro/prev.gif", "/static/style/img/nyro/next.gif"];
// for (i = 0; i <= imgs.length; i++) imageObj.src = imgs[i];
// lim object setup
if (!Limit) var Limit = {};
Limit.scrollToTop = function (e) {
$(e).hide().removeAttr("href");
    if ($(window).scrollTop() !== 0) {
$(e).fadeIn("slow")
}
var scrollDiv = $(e);
$(window).scroll(function () {
        if ($(window).scrollTop() === 0) {
$(scrollDiv).fadeOut("slow")
} else {
$(scrollDiv).fadeIn("slow")
}
});
$(e).click(function () {
$("html, body").animate({
scrollTop: 0
}, "slow")
})
}
Limit.setup = function () {
// Open external link in new windows
$('a[href^="http://"]').filter(function () {
return this.hostname && this.hostname !== location.hostname;
}).attr('target', '_blank');
// build an animated footer
$('#animated').each(function () {
$(this).hover(function () {
$(this).stop().animate({
opacity: 0.9
}, 400);
}, function () {
$(this).stop().animate({
opacity: 0.0
}, 200);
});
});
// scroll to top on request
if ($("a#totop").length) Limit.scrollToTop("a#totop");
// setup content boxes
if ($(".content-box").length) {
$(".content-box .head").css({
"cursor": "s-resize"
});
$(".content-box .body").slideUp();
// Give the header in content-box a different cursor
$(".content-box .head").toggle(
function () {
var that = this;
$(this).parent().find('.body').slideDown('slow', function () {
$(that).find('.caption').toggleClass("content-box-closed");
}); // Toggle the content
}, function () {
var that = this;
$(this).parent().find('.body').slideUp('slow', function () {
$(that).find('.caption').toggleClass("content-box-closed");
}); // Toggle the content
});
}
// custom tooltips to replace the default browser tooltips for <a title=""> <div title=""> and <span title="">
//$("a[title], div[title], span[title]").tipTip();
// if($('.dropdown-toggle').length){
// $('.dropdown-toggle').dropdown();
// }
if ($(".collapse").length) {
//$(".collapse").collapse();
}
}
if ($('#list-grid-toggle').length) {
$('.list-toggle-btn').click(function (e) {
if ($(e.currentTarget).hasClass("list-toggle-btn-up")) {
$(e.currentTarget).toggleClass("list-toggle-btn-on list-toggle-btn-up");
$('.grid-toggle-btn').toggleClass("grid-toggle-btn-on grid-toggle-btn-up");
$('#list-grid-toggle').addClass("videolist-heng");
}
e.preventDefault();
});
$('.grid-toggle-btn').click(function (e) {
if ($(e.currentTarget).hasClass("grid-toggle-btn-up")) {
$('.list-toggle-btn').toggleClass("list-toggle-btn-on list-toggle-btn-up");
$(e.currentTarget).toggleClass("grid-toggle-btn-on grid-toggle-btn-up");
$('#list-grid-toggle').removeClass("videolist-heng");
}
e.preventDefault();
});
}
$(function () {
$('.toggle-info').slideUp('fast');
$('.toggle-info-btn').toggle(
function () {
var that = this;
$("#" + $(this).attr('rel')).slideDown(
'slow', function () {
$(that).children().html('隐藏信息');
}
);
},
function () {
var that = this;
$("#" + $(this).attr('rel')).slideUp(
'fast', function () {
$(that).children().html('显示信息');
}
);
}
)
});
(function (doc) {
var addEvent = 'addEventListener',
type = 'gesturestart',
qsa = 'querySelectorAll',
scales = [1, 1],
meta = qsa in doc ? doc[qsa]('meta[name=viewport]') : [];
function fix() {
meta.content = 'width=device-width,minimum-scale=' + scales[0] + ',maximum-scale=' + scales[1];
doc.removeEventListener(type, fix, true);
}
if ((meta = meta[meta.length - 1]) && addEvent in doc) {
fix();
scales = [0.25, 1.6];
doc[addEvent](type, fix, true);
}
}(document));
function is_ie() {
return (/MSIE/g).test(window.navigator.userAgent);
}
//Plugin start
(function ($) {
var methods =
{
init: function (options) {
return this.each(function () {
var _this = $(this);
_this.data('marquee', options);
var _li = $('>li', _this);
_this.wrap('<div class="slide_container"></div>')
.height(_this.height())
.hover(function () { if ($(this).data('marquee').stop) { $(this).stop(true, false); } },
function () { if ($(this).data('marquee').stop) { $(this).marquee('slide'); } })
.parent()
.css({ position: 'relative', overflow: 'hidden', 'height': $('>li', _this).height() })
.find('>ul')
.css({ width: screen.width * 2, position: 'absolute' });
for (var i = 0; i < Math.ceil((screen.width * 3) / _this.width()) ; ++i) {
_this.append(_li.clone());
}
_this.marquee('slide');
});
},
slide: function () {
var $this = this;
$this.animate({ 'left': $('>li', $this).width() * -1 },
$this.data('marquee').duration,
'swing',
function () {
$this.css('left', 0).append($('>li:first', $this));
$this.delay($this.data('marquee').delay).marquee('slide');
}
);
}
};
$.fn.marquee = function (m) {
var settings = {
'delay': 2000,
'duration': 900,
'stop': true
};
if (typeof m === 'object' || !m) {
if (m) {
$.extend(settings, m);
}
return methods.init.apply(this, [settings]);
}
else {
return methods[m].apply(this);
}
};
}
)(jQuery);
//Plugin end
//call
$(document).ready(
function () { $('.slide').marquee({ delay: 3000 }); }
);
$(window).on('load resize', function () {
//if (getInternetExplorerVersion() != 8.0) {
// var height = $(document).height() - $('#header').height() - $('#footer').height();
// $('.app-sidebar').height(height - 32);
//}
});
function getInternetExplorerVersion()
// Returns the version of Internet Explorer or a -1
// (indicating the use of another browser).
{
var rv = -1; // Return value assumes failure.
if (navigator.appName == 'Microsoft Internet Explorer') {
var ua = navigator.userAgent;
var re = new RegExp("MSIE ([
|
{
var curLeft = parseInt(ad.style.left);
var curTop = parseInt(ad.style.top);
if(ad.offsetWidth + curLeft > document.body.clientWidth + document.body.scrollLeft - 1)
{
curLeft = document.body.scrollLeft + document.body.clientWidth - ad.offsetWidth;
ad.DirH = false;
}
if(ad.offsetHeight + curTop > document.body.clientHeight + document.body.scrollTop - 1)
{
curTop = document.body.scrollTop + document.body.clientHeight - ad.offsetHeight;
ad.DirV = false;
}
if(curLeft < document.body.scrollLeft)
{
curLeft = document.body.scrollLeft;
ad.DirH = true;
}
if(curTop < document.body.scrollTop)
{
|
conditional_block
|
|
boilerplate.js
|
() {
// Open external link in new windows
$('a[href^="http://"]').filter(function () {
return this.hostname && this.hostname !== location.hostname;
}).attr('target', '_blank');
// build an animated footer
$('#animated').each(function () {
$(this).hover(function () {
$(this).stop().animate({
opacity: 0.9
}, 400);
}, function () {
$(this).stop().animate({
opacity: 0.0
}, 200);
});
});
// scroll to top on request
if ($("a#totop").length) Limit.scrollToTop("a#totop");
// setup content boxes
if ($(".content-box").length) {
$(".content-box .head").css({
"cursor": "s-resize"
});
$(".content-box .body").slideUp();
// Give the header in content-box a different cursor
$(".content-box .head").toggle(
function () {
var that = this;
$(this).parent().find('.body').slideDown('slow', function () {
$(that).find('.caption').toggleClass("content-box-closed");
}); // Toggle the content
}, function () {
var that = this;
$(this).parent().find('.body').slideUp('slow', function () {
$(that).find('.caption').toggleClass("content-box-closed");
}); // Toggle the content
});
}
// custom tooltips to replace the default browser tooltips for <a title=""> <div title=""> and <span title="">
//$("a[title], div[title], span[title]").tipTip();
// if($('.dropdown-toggle').length){
// $('.dropdown-toggle').dropdown();
// }
if ($(".collapse").length) {
//$(".collapse").collapse();
}
}
if ($('#list-grid-toggle').length) {
$('.list-toggle-btn').click(function (e) {
if ($(e.currentTarget).hasClass("list-toggle-btn-up")) {
$(e.currentTarget).toggleClass("list-toggle-btn-on list-toggle-btn-up");
$('.grid-toggle-btn').toggleClass("grid-toggle-btn-on grid-toggle-btn-up");
$('#list-grid-toggle').addClass("videolist-heng");
}
e.preventDefault();
});
$('.grid-toggle-btn').click(function (e) {
if ($(e.currentTarget).hasClass("grid-toggle-btn-up")) {
$('.list-toggle-btn').toggleClass("list-toggle-btn-on list-toggle-btn-up");
$(e.currentTarget).toggleClass("grid-toggle-btn-on grid-toggle-btn-up");
$('#list-grid-toggle').removeClass("videolist-heng");
}
e.preventDefault();
});
}
$(function () {
$('.toggle-info').slideUp('fast');
$('.toggle-info-btn').toggle(
function () {
var that = this;
$("#" + $(this).attr('rel')).slideDown(
'slow', function () {
$(that).children().html('隐藏信息');
}
);
},
function () {
var that = this;
$("#" + $(this).attr('rel')).slideUp(
'fast', function () {
$(that).children().html('显示信息');
}
);
}
)
});
(function (doc) {
var addEvent = 'addEventListener',
type = 'gesturestart',
qsa = 'querySelectorAll',
scales = [1, 1],
meta = qsa in doc ? doc[qsa]('meta[name=viewport]') : [];
function fix() {
meta.content = 'width=device-width,minimum-scale=' + scales[0] + ',maximum-scale=' + scales[1];
doc.removeEventListener(type, fix, true);
}
if ((meta = meta[meta.length - 1]) && addEvent in doc) {
fix();
scales = [0.25, 1.6];
doc[addEvent](type, fix, true);
}
}(document));
function is_ie() {
return (/MSIE/g).test(window.navigator.userAgent);
}
//Plugin start
(function ($) {
var methods =
{
init: function (options) {
return this.each(function () {
var _this = $(this);
_this.data('marquee', options);
var _li = $('>li', _this);
_this.wrap('<div class="slide_container"></div>')
.height(_this.height())
.hover(function () { if ($(this).data('marquee').stop) { $(this).stop(true, false); } },
function () { if ($(this).data('marquee').stop) { $(this).marquee('slide'); } })
.parent()
.css({ position: 'relative', overflow: 'hidden', 'height': $('>li', _this).height() })
.find('>ul')
.css({ width: screen.width * 2, position: 'absolute' });
for (var i = 0; i < Math.ceil((screen.width * 3) / _this.width()) ; ++i) {
_this.append(_li.clone());
}
_this.marquee('slide');
});
},
slide: function () {
var $this = this;
$this.animate({ 'left': $('>li', $this).width() * -1 },
$this.data('marquee').duration,
'swing',
function () {
$this.css('left', 0).append($('>li:first', $this));
$this.delay($this.data('marquee').delay).marquee('slide');
}
);
}
};
$.fn.marquee = function (m) {
var settings = {
'delay': 2000,
'duration': 900,
'stop': true
};
if (typeof m === 'object' || !m) {
if (m) {
$.extend(settings, m);
}
return methods.init.apply(this, [settings]);
}
else {
return methods[m].apply(this);
}
};
}
)(jQuery);
//Plugin end
//call
$(document).ready(
function () { $('.slide').marquee({ delay: 3000 }); }
);
$(window).on('load resize', function () {
//if (getInternetExplorerVersion() != 8.0) {
// var height = $(document).height() - $('#header').height() - $('#footer').height();
// $('.app-sidebar').height(height - 32);
//}
});
function getInternetExplo
|
s the version of Internet Explorer or a -1
// (indicating the use of another browser).
{
var rv = -1; // Return value assumes failure.
if (navigator.appName == 'Microsoft Internet Explorer') {
var ua = navigator.userAgent;
var re = new RegExp("MSIE ([0-9]{1,}[.0-9]{0,})");
if (re.exec(ua) != null)
rv = parseFloat(RegExp.$1);
}
return rv;
}
$(function () {
if ($("#nav-category").length) {
$(".nav-list:first a").click(function (e) {
if ($(e.currentTarget).attr("data-target") !== undefined) {
                //fix panels that start expanded (with the "in" class)
$(".nav-list:first .nav-list .in").collapse("toggle");
$(".nav-list:first .nav-list .collapse").filter(".in").collapse("hide");
}
})
}
});
$(document).ready(
function () {
if ($(".flash_messages").children().filter("p").html() !== "message-here") {
$(".flash_messages").fadeIn();
}
}
);
window.flash = function flash(str) {
if($(".flash_messages")){
var temp = $(".flash_messages").first().clone(true);
$(temp).children().filter("p").html(str);
$(".flash_messages").last().before($(temp));
$(temp).fadeIn();
setTimeout(function () {
$(temp).fadeOut();
}, 10000);
}
}
// window.alert = function alert(str) {
// flash(str);
// }
/*!
* bootstrap-calendar plugin
* Original author: @ahmontero
* Licensed under the MIT license
*
* jQuery lightweight plugin boilerplate
* Original author: @ajpiano
* Further changes, comments: @addyosmani
* Licensed under the MIT license
*/
// the semi-colon before the function invocation is a safety
// net against concatenated scripts and/or other plugins
// that are not closed properly.
;(function ($, window, document, undefined) {
(function () {
var cache = {};
this.tmpl = function tmpl(str, data) {
// Figure out if we're getting a template, or if we need to
// load the template - and be sure to cache the result.
var fn = !/\W/.test(str) ?
cache[str] = cache[str] ||
tmpl(document.getElementById(str).innerHTML) :
// Generate a reusable function that will serve as a template
// generator (and which will be cached).
/*jshint -W054 */
new Function("obj",
"var p=[],print=function(){p.push.apply(p,arguments);};" +
// Introduce the data as local variables using with(){}
"with(obj){p.push('" +
// Convert the template into pure JavaScript
str
.replace(/[\r\t\n]/g, " ")
.split("<%").join("\t")
.replace
|
rerVersion()
// Return
|
identifier_name
|
boilerplate.js
|
quee('slide');
}
);
}
};
$.fn.marquee = function (m) {
var settings = {
'delay': 2000,
'duration': 900,
'stop': true
};
if (typeof m === 'object' || !m) {
if (m) {
$.extend(settings, m);
}
return methods.init.apply(this, [settings]);
}
else {
return methods[m].apply(this);
}
};
}
)(jQuery);
//Plugin end
//call
$(document).ready(
function () { $('.slide').marquee({ delay: 3000 }); }
);
$(window).on('load resize', function () {
//if (getInternetExplorerVersion() != 8.0) {
// var height = $(document).height() - $('#header').height() - $('#footer').height();
// $('.app-sidebar').height(height - 32);
//}
});
function getInternetExplorerVersion()
// Returns the version of Internet Explorer or a -1
// (indicating the use of another browser).
{
var rv = -1; // Return value assumes failure.
if (navigator.appName == 'Microsoft Internet Explorer') {
var ua = navigator.userAgent;
var re = new RegExp("MSIE ([0-9]{1,}[.0-9]{0,})");
if (re.exec(ua) != null)
rv = parseFloat(RegExp.$1);
}
return rv;
}
$(function () {
if ($("#nav-category").length) {
$(".nav-list:first a").click(function (e) {
if ($(e.currentTarget).attr("data-target") !== undefined) {
                //fix panels that start expanded (with the "in" class)
$(".nav-list:first .nav-list .in").collapse("toggle");
$(".nav-list:first .nav-list .collapse").filter(".in").collapse("hide");
}
})
}
});
$(document).ready(
function () {
if ($(".flash_messages").children().filter("p").html() !== "message-here") {
$(".flash_messages").fadeIn();
}
}
);
window.flash = function flash(str) {
if($(".flash_messages")){
var temp = $(".flash_messages").first().clone(true);
$(temp).children().filter("p").html(str);
$(".flash_messages").last().before($(temp));
$(temp).fadeIn();
setTimeout(function () {
$(temp).fadeOut();
}, 10000);
}
}
// window.alert = function alert(str) {
// flash(str);
// }
/*!
* bootstrap-calendar plugin
* Original author: @ahmontero
* Licensed under the MIT license
*
* jQuery lightweight plugin boilerplate
* Original author: @ajpiano
* Further changes, comments: @addyosmani
* Licensed under the MIT license
*/
// the semi-colon before the function invocation is a safety
// net against concatenated scripts and/or other plugins
// that are not closed properly.
;(function ($, window, document, undefined) {
(function () {
var cache = {};
this.tmpl = function tmpl(str, data) {
// Figure out if we're getting a template, or if we need to
// load the template - and be sure to cache the result.
var fn = !/\W/.test(str) ?
cache[str] = cache[str] ||
tmpl(document.getElementById(str).innerHTML) :
// Generate a reusable function that will serve as a template
// generator (and which will be cached).
/*jshint -W054 */
new Function("obj",
"var p=[],print=function(){p.push.apply(p,arguments);};" +
// Introduce the data as local variables using with(){}
"with(obj){p.push('" +
// Convert the template into pure JavaScript
str
.replace(/[\r\t\n]/g, " ")
.split("<%").join("\t")
.replace(/((^|%>)[^\t]*)'/g, "$1\r")
.replace(/\t=(.*?)%>/g, "',$1,'")
.split("\t").join("');")
.split("%>").join("p.push('")
.split("\r").join("\\'") + "');}return p.join('');");
// Provide some basic currying to the user
return data ? fn(data) : fn;
};
})();
var pluginName = 'LiveScheduler',
defaults = {
weekStart: 1,
msg_days: ["Su", "Mo", "Tu", "We", "Th", "Fr", "Sa"],
msg_months: ["January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"],
msg_today: 'Today',
msg_events_today: 'Events Today', // when today click.
url: "",
currentday:null,
events: null
},// .aweek.btn-group>operator+date_id
timeline_template = tmpl(
'<div class=" timeline ">' +
'<div class="operator"><ul class="nav nav-pills">' +
'<li class="fast pre">' +
'<a href="#" class="">上一周</a>' +
'</li>' +
'<li class="pre">' +
'<a href="#" class=""><</a>' +
'</li>' +
'<li class="next">' +
'<a href="#" class="">></a>' +
'</li>' +
'<li class="fast next">' +
'<a href="#" class="">下一周</a>' +
'</li></ul></div></div>'),
days_template = tmpl(
'<ul class="nav nav-tabs" id="days_tabs">'+
'<% for (var i = 0, length = seven_day.length; i < length; i ++) { %>' +
'<li class="select_day select_by_<%= seven_day[i].format("D") %>" >' +
'<a href="#d<%= seven_day[i].format("D") %>" data-toggle="tab"><%= seven_day[i].format("dddd") %><br/> <%= seven_day[i].format("MM-DD") %></a>' +
'</li>' +
'<% } %>' +
'</ul>'+
'</ul>' ),
events_list_template = '<div class="tab-content" id="accordion_event_table" >' +
'<% for (var ix = 0, lengthx = seven_day_events.length; ix < lengthx; ix ++) { %>' +
'<div class="tab-pane" id="d<%= r_days[ix].format("D") %>">' +
'<% if (seven_day_events[ix].length>0){ %>' +
'<table class="table live_table">'+
'<thead><tr><th>课程名称</th><th>学科</th><th>授课时间</th><th>授课地点</th><th>状态</th><th>操作</th></tr></thead><tbody>' +
'<% for (var jx = 0 ,sde=seven_day_events[ix][0], lengthy = seven_day_events[ix][0].LiveInfo.length; jx < lengthy; jx ++) { %>' +
' <tr class="event">' +
'<td class="event_title">' +
'<% if (sde.LiveInfo[jx].url ){%><a href="' + '<%= sde.LiveInfo[jx].url%>"> <%= sde.LiveInfo[jx].title %></a> <%}else{%>' +
'<%= sde.LiveInfo[jx].title %>'+
'<% }%>' +
'<td class="event_course"><%= sde.LiveInfo[jx].course %></td>' +
'</td>'+
'<td class="event_time"><%= sde.LiveInfo[jx].time%></td>' +
'<td class="event_classroom"><%= sde.LiveInfo[jx].classroom%></td>' +//' <% if( sde.LiveInfo[jx].status !=null ){ %>'+
' <td style="background-color:<%= sde.LiveInfo[jx].color %>;">' +
' <%= sde.LiveInfo[jx].status %>' +
' </td>' +
'<td><a href="<%= sde.LiveInfo[jx].url ? sde.LiveInfo[jx].url : "#" %>"><%= sde.LiveInfo[jx].url_title %></a></td>' +//'<% } %>' +
' </tr>' +
'<% } %>' +
'</tbody></table>' +//'no data'+
'<% } else { %>'+'当天没有直播信息!'+'<% }%>'+
'</div>'+ //end if
'<% } %>',// end for
daysInMonth = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31],
today = new Date();
// The actual plugin constructor
function Plugin(element, options) {
this.element = $(element);
this.options = $.extend({}, defaults
|
, options);
this._defaults = defaults;
this._name = pluginName;
if(element) this.init();
}
Plugin.prototype.init = function () {
// Place initialization logic
|
identifier_body
|
|
heat_exchanger.py
|
Calculate
class HeatExchangerWindow(QMainWindow, Ui_MainWindow):
"""
    Class handling the communication between the user interface and the backend.
    The user interface itself lives in the heat_exchanger_ui module, from which it is imported.
"""
def __init__(self):
super().__init__()
self.setupUi(self)
self.input_tube = UserInputMedium('Tube', 'Trubkovy prostor')
        self.input_shell = UserInputShell('Shell', 'Mezitrubkovy prostor') # temporary, for testing only
        #self.input_shell = UserInputMedium('Shell', 'Mezitrubkovy prostor') # in the final version
self.input_rest = UserInputRest()
self.add_medium_inputs(self.verticalLayoutTube, self.input_tube)
self.add_medium_inputs(self.verticalLayoutShell, self.input_shell)
self.add_main_inputs(self.verticalLayoutRest, self.input_rest)
self.prepare_table()
self.buttonRun.pressed.connect(lambda: self.run_simulation())
def add_medium_inputs(self, parent_layout, input) -> None:
"""
        Generate the input fields for the individual media in the user interface.
"""
self.add_main_inputs(parent_layout, input)
title_layout = QHBoxLayout()
for parameter in input.medium:
title = QLabel(parameter['name'])
title.setFont(QFont("Times", 12, QFont.Bold))
title.setToolTip(parameter['hint'])
title_layout.addWidget(title)
parent_layout.addLayout(title_layout)
for i in range(1):
new_layout = QHBoxLayout()
for parameter in input.medium:
name = input.name + parameter['name'] + str(i)
setattr(self, name, QLineEdit())
getattr(self, name).setFont(QFont("Times", 12))
getattr(self, name).setText(str(parameter['default_value']))
#getattr(self, name).setDisabled(True)
new_layout.addWidget(getattr(self, name))
parent_layout.addLayout(new_layout)
def add_main_inputs(self, parent_layout, input) -> None:
"""
        Add the main input fields to the user interface.
"""
title = QLabel(input.title)
title.setAlignment(Qt.AlignCenter)
title.setFont(QFont("Times", 16, QFont.Bold))
parent_layout.addWidget(title)
for element in input.parameters:
new_layout = QHBoxLayout()
label_text = "{} [{}]:".format(element['name'], element['unit'])
label = QLabel(label_text)
label.setFont(QFont("Times", 12))
label.setToolTip(element['hint'])
name = input.name + element['name']
setattr(self, name, QLineEdit())
getattr(self, name).setText(str(element['default_value']))
getattr(self, name).setFont(QFont("Times", 12))
if element['name'] == 'T2': getattr(self, name).setDisabled(True)
new_layout.addWidget(label)
new_layout.addWidget(getattr(self, name))
parent_layout.addLayout(new_layout)
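Note the naming convention used here: every QLineEdit is attached to the window as a dynamic attribute named input.name + parameter name (plus a row index for the media fields), e.g. 'TubeT2' or 'ShellT2'; run_simulation later reads the widgets back with getattr using the same names.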
def get_medium_inputs(self, input) -> dict:
"""
        Collect the user's inputs describing the medium.
"""
values = self.get_main_inputs(input)
medium = []
for i in range(1):
part_medium = [0, 0]
j = 0
for element in input.medium:
name = input.name + element['name'] + str(i)
value = getattr(self, name).text().replace(',', '.')
if value == '':
break
                try:
                    parse_value = element['parse_function'](value) * element['to_SI']
                except ValueError:
                    self.show_error_dialog_to_user('Hodnota {} ma spatny format. Pro "medium" zadejte retezec a pro "procenta" cislo!'.format(name))
                    continue # skip this field: parse_value would be undefined after a failed parse
                part_medium[j] = parse_value
                j += 1
if value != '': medium.append(part_medium)
values['Medium'] = medium
return values
def get_main_inputs(self, input) -> dict:
"""
        Collect the main inputs from the user.
"""
values = {}
for element in input.parameters:
try:
name = input.name + element['name']
value = getattr(self, name).text().replace(",", ".")
parse_value = element['parse_function'](value) * element['to_SI']
if parse_value < 0: raise Exception('Hodnota {} je mensi nez nula. Zadejte kladne cislo.'.format(name))
values[element['name']] = parse_value
except ValueError:
self.show_error_dialog_to_user('Nezadali jste cislo u hodnoty {}!'.format(name))
except Exception as error:
self.show_error_dialog_to_user(error.args[0])
return values
def run_simulation(self):
"""
        Runs when the button is pressed.
        Solves the balance equation (i.e. computes the outlet temperatures)
        and then evaluates the individual heat exchangers.
"""
print('RUNNING')
self.table.clearContents()
self.table.setRowCount(0)
medium_tube = self.get_medium_inputs(self.input_tube)
medium_shell = self.get_medium_inputs(self.input_shell)
rest = self.get_main_inputs(self.input_rest)
try:
calculate = Calculate(medium_tube, medium_shell, rest)
getattr(self, 'TubeT2').setText(str(round(calculate.tube.t2, 2)))
getattr(self, 'ShellT2').setText(str(round(calculate.shell.t2, 2)))
vysledky = calculate.calculate_all()
except Exception as error:
self.show_error_dialog_to_user(error.args[0])
else:
print('Pozadavky na vymenik splnilo {} vymeniku.'.format(len(vysledky)))
self.show_output(vysledky)
self.show_graph(vysledky)
print('DONE!')
def show_error_dialog_to_user(self, error_message: str) -> None:
"""
Di
|
are_table(self):
"""
        Prepare the table columns.
"""
i = 0
for item in ['DN[-]', 'd_out[mm]', 'tl_trub[mm]', 'roztec_trub[mm]', 'delka[mm]', 'roztec_prep[mm]', 'vyska_prep[mm]']:
self.table.insertColumn(i)
self.table.setHorizontalHeaderItem(i, QTableWidgetItem(item))
i += 1
for item in ['tl_prep[mm]','pocet_prep[-]', 'pocet_trub[-]', 'TP[m/s]', 'MZP[m/s]', 'vykon [W]',
'tlak_ztraty[Pa]', 'hmotnost[kg]']:
self.table.insertColumn(i)
self.table.setHorizontalHeaderItem(i, QTableWidgetItem(item))
i += 1
def show_output(self, outputs):
"""
        List the heat exchangers that met the requirements.
"""
i = 0
self.table.setSortingEnabled(True)
for output in outputs:
self.table.insertRow(i)
j = 0
for x in output[0]:
item = QTableWidgetItem()
if x == 'shell':
item.setData(0, output[0][x]['DN'])
else:
item.setData(0, output[0][x]*1000)
item.setFlags(Qt.ItemFlags(1))
self.table.setItem(i, j, item)
j += 1
for y in output[1]:
item = QTableWidgetItem()
if y == 'tl_prep':
item.setData(0, output[1][y]*1000)
else:
item.setData(0, output[1][y])
item.setFlags(Qt.ItemFlags(1))
self.table.setItem(i, j, item)
j += 1
i += 1
def show_graph(self, outputs):
graph_inputs = {
'x' : [],
'y' : [],
's' : [],
'c' : [],
}
for output in outputs:
graph_inputs['x'].append(output[1]['hmotnost'])
graph_inputs['y'].append(output[1]['tlak_ztraty'])
graph_inputs['s'].append(output[0]['length']*10)
if output[0]['tl'] == 0.000889:
graph_inputs['c'].append('gold')
elif output[0]['tl'] == 0.001244:
graph_inputs['c'].append('orange')
elif output[0]['
|
splays a separate dialog (alert) informing user of something bad, like
invalid user input or simulation errors.
Args:
error_message ... what should be shown to the user
"""
print(error_message)
msg = QMessageBox()
msg.setIcon(QMessageBox.Critical)
msg.setText("Error")
msg.setWindowTitle("Error")
msg.setInformativeText(error_message)
msg.exec_()
def prep
|
identifier_body
|
heat_exchanger.py
|
Calculate
class HeatExchangerWindow(QMainWindow, Ui_MainWindow):
"""
    Class handling the communication between the user interface and the backend.
    The user interface itself lives in the heat_exchanger_ui module, from which it is imported.
"""
def __init__(self):
super().__init__()
self.setupUi(self)
self.input_tube = UserInputMedium('Tube', 'Trubkovy prostor')
        self.input_shell = UserInputShell('Shell', 'Mezitrubkovy prostor') # temporary, for testing only
        #self.input_shell = UserInputMedium('Shell', 'Mezitrubkovy prostor') # in the final version
self.input_rest = UserInputRest()
self.add_medium_inputs(self.verticalLayoutTube, self.input_tube)
self.add_medium_inputs(self.verticalLayoutShell, self.input_shell)
self.add_main_inputs(self.verticalLayoutRest, self.input_rest)
self.prepare_table()
self.buttonRun.pressed.connect(lambda: self.run_simulation())
def add_medium_inputs(self, parent_layout, input) -> None:
"""
        Generate the input fields for the individual media in the user interface.
"""
self.add_main_inputs(parent_layout, input)
title_layout = QHBoxLayout()
for parameter in input.medium:
title = QLabel(parameter['name'])
title.setFont(QFont("Times", 12, QFont.Bold))
title.setToolTip(parameter['hint'])
title_layout.addWidget(title)
parent_layout.addLayout(title_layout)
for i in range(1):
new_layout = QHBoxLayout()
for parameter in input.medium:
name = input.name + parameter['name'] + str(i)
setattr(self, name, QLineEdit())
getattr(self, name).setFont(QFont("Times", 12))
getattr(self, name).setText(str(parameter['default_value']))
#getattr(self, name).setDisabled(True)
new_layout.addWidget(getattr(self, name))
parent_layout.addLayout(new_layout)
def add_main_inputs(self, parent_layout, input) -> None:
"""
        Add the main input fields to the user interface.
"""
title = QLabel(input.title)
title.setAlignment(Qt.AlignCenter)
title.setFont(QFont("Times", 16, QFont.Bold))
parent_layout.addWidget(title)
for element in input.parameters:
new_layout = QHBoxLayout()
label_text = "{} [{}]:".format(element['name'], element['unit'])
label = QLabel(label_text)
label.setFont(QFont("Times", 12))
label.setToolTip(element['hint'])
name = input.name + element['name']
setattr(self, name, QLineEdit())
getattr(self, name).setText(str(element['default_value']))
getattr(self, name).setFont(QFont("Times", 12))
if element['name'] == 'T2': getattr(self, name).setDisabled(True)
new_layout.addWidget(label)
new_layout.addWidget(getattr(self, name))
parent_layout.addLayout(new_layout)
def get_medium_inputs(self, input) -> dict:
"""
        Collect the user's inputs describing the medium.
"""
values = self.get_main_inputs(input)
medium = []
for i in range(1):
part_medium = [0, 0]
j = 0
for element in input.medium:
name = input.name + element['name'] + str(i)
value = getattr(self, name).text().replace(',', '.')
if value == '':
break
                try:
                    parse_value = element['parse_function'](value) * element['to_SI']
                except ValueError:
                    self.show_error_dialog_to_user('Hodnota {} ma spatny format. Pro "medium" zadejte retezec a pro "procenta" cislo!'.format(name))
                    continue # skip this field: parse_value would be undefined after a failed parse
                part_medium[j] = parse_value
                j += 1
if value != '': medium.append(
|
s['Medium'] = medium
return values
def get_main_inputs(self, input) -> dict:
"""
        Collect the main inputs from the user.
"""
values = {}
for element in input.parameters:
try:
name = input.name + element['name']
value = getattr(self, name).text().replace(",", ".")
parse_value = element['parse_function'](value) * element['to_SI']
if parse_value < 0: raise Exception('Hodnota {} je mensi nez nula. Zadejte kladne cislo.'.format(name))
values[element['name']] = parse_value
except ValueError:
self.show_error_dialog_to_user('Nezadali jste cislo u hodnoty {}!'.format(name))
except Exception as error:
self.show_error_dialog_to_user(error.args[0])
return values
def run_simulation(self):
"""
        Runs when the button is pressed.
        Solves the balance equation (i.e. computes the outlet temperatures)
        and then evaluates the individual heat exchangers.
"""
print('RUNNING')
self.table.clearContents()
self.table.setRowCount(0)
medium_tube = self.get_medium_inputs(self.input_tube)
medium_shell = self.get_medium_inputs(self.input_shell)
rest = self.get_main_inputs(self.input_rest)
try:
calculate = Calculate(medium_tube, medium_shell, rest)
getattr(self, 'TubeT2').setText(str(round(calculate.tube.t2, 2)))
getattr(self, 'ShellT2').setText(str(round(calculate.shell.t2, 2)))
vysledky = calculate.calculate_all()
except Exception as error:
self.show_error_dialog_to_user(error.args[0])
else:
print('Pozadavky na vymenik splnilo {} vymeniku.'.format(len(vysledky)))
self.show_output(vysledky)
self.show_graph(vysledky)
print('DONE!')
def show_error_dialog_to_user(self, error_message: str) -> None:
"""
Displays a separate dialog (alert) informing user of something bad, like
invalid user input or simulation errors.
Args:
error_message ... what should be shown to the user
"""
print(error_message)
msg = QMessageBox()
msg.setIcon(QMessageBox.Critical)
msg.setText("Error")
msg.setWindowTitle("Error")
msg.setInformativeText(error_message)
msg.exec_()
def prepare_table(self):
"""
        Prepare the table columns.
"""
i = 0
for item in ['DN[-]', 'd_out[mm]', 'tl_trub[mm]', 'roztec_trub[mm]', 'delka[mm]', 'roztec_prep[mm]', 'vyska_prep[mm]']:
self.table.insertColumn(i)
self.table.setHorizontalHeaderItem(i, QTableWidgetItem(item))
i += 1
for item in ['tl_prep[mm]','pocet_prep[-]', 'pocet_trub[-]', 'TP[m/s]', 'MZP[m/s]', 'vykon [W]',
'tlak_ztraty[Pa]', 'hmotnost[kg]']:
self.table.insertColumn(i)
self.table.setHorizontalHeaderItem(i, QTableWidgetItem(item))
i += 1
def show_output(self, outputs):
"""
        List the heat exchangers that met the requirements.
"""
i = 0
self.table.setSortingEnabled(True)
for output in outputs:
self.table.insertRow(i)
j = 0
for x in output[0]:
item = QTableWidgetItem()
if x == 'shell':
item.setData(0, output[0][x]['DN'])
else:
item.setData(0, output[0][x]*1000)
item.setFlags(Qt.ItemFlags(1))
self.table.setItem(i, j, item)
j += 1
for y in output[1]:
item = QTableWidgetItem()
if y == 'tl_prep':
item.setData(0, output[1][y]*1000)
else:
item.setData(0, output[1][y])
item.setFlags(Qt.ItemFlags(1))
self.table.setItem(i, j, item)
j += 1
i += 1
def show_graph(self, outputs):
graph_inputs = {
'x' : [],
'y' : [],
's' : [],
'c' : [],
}
for output in outputs:
graph_inputs['x'].append(output[1]['hmotnost'])
graph_inputs['y'].append(output[1]['tlak_ztraty'])
graph_inputs['s'].append(output[0]['length']*10)
if output[0]['tl'] == 0.000889:
graph_inputs['c'].append('gold')
elif output[0]['tl'] == 0.001244:
graph_inputs['c'].append('orange')
elif output[0]['tl
|
part_medium)
value
|
conditional_block
|
heat_exchanger.py
|
import Calculate
class HeatExchangerWindow(QMainWindow, Ui_MainWindow):
"""
    Class handling the communication between the user interface and the backend.
    The user interface itself lives in the heat_exchanger_ui module, from which it is imported.
"""
def __init__(self):
super().__init__()
self.setupUi(self)
self.input_tube = UserInputMedium('Tube', 'Trubkovy prostor')
        self.input_shell = UserInputShell('Shell', 'Mezitrubkovy prostor') # temporary, for testing only
        #self.input_shell = UserInputMedium('Shell', 'Mezitrubkovy prostor') # in the final version
self.input_rest = UserInputRest()
self.add_medium_inputs(self.verticalLayoutTube, self.input_tube)
self.add_medium_inputs(self.verticalLayoutShell, self.input_shell)
self.add_main_inputs(self.verticalLayoutRest, self.input_rest)
self.prepare_table()
self.buttonRun.pressed.connect(lambda: self.run_simulation())
def add_medium_inputs(self, parent_layout, input) -> None:
"""
        Generate the input fields for the individual media in the user interface.
"""
self.add_main_inputs(parent_layout, input)
title_layout = QHBoxLayout()
for parameter in input.medium:
title = QLabel(parameter['name'])
title.setFont(QFont("Times", 12, QFont.Bold))
title.setToolTip(parameter['hint'])
title_layout.addWidget(title)
parent_layout.addLayout(title_layout)
for i in range(1):
new_layout = QHBoxLayout()
for parameter in input.medium:
name = input.name + parameter['name'] + str(i)
setattr(self, name, QLineEdit())
getattr(self, name).setFont(QFont("Times", 12))
getattr(self, name).setText(str(parameter['default_value']))
#getattr(self, name).setDisabled(True)
new_layout.addWidget(getattr(self, name))
parent_layout.addLayout(new_layout)
def add_main_inputs(self, parent_layout, input) -> None:
"""
        Add the main input fields to the user interface.
"""
title = QLabel(input.title)
title.setAlignment(Qt.AlignCenter)
title.setFont(QFont("Times", 16, QFont.Bold))
parent_layout.addWidget(title)
for element in input.parameters:
new_layout = QHBoxLayout()
label_text = "{} [{}]:".format(element['name'], element['unit'])
label = QLabel(label_text)
label.setFont(QFont("Times", 12))
label.setToolTip(element['hint'])
name = input.name + element['name']
setattr(self, name, QLineEdit())
getattr(self, name).setText(str(element['default_value']))
getattr(self, name).setFont(QFont("Times", 12))
if element['name'] == 'T2': getattr(self, name).setDisabled(True)
new_layout.addWidget(label)
new_layout.addWidget(getattr(self, name))
parent_layout.addLayout(new_layout)
def get_medium_inputs(self, input) -> dict:
"""
        Collect the user's inputs describing the medium.
"""
values = self.get_main_inputs(input)
medium = []
for i in range(1):
part_medium = [0, 0]
j = 0
for element in input.medium:
name = input.name + element['name'] + str(i)
value = getattr(self, name).text().replace(',', '.')
if value == '':
break
                try:
                    parse_value = element['parse_function'](value) * element['to_SI']
                except ValueError:
                    self.show_error_dialog_to_user('Hodnota {} ma spatny format. Pro "medium" zadejte retezec a pro "procenta" cislo!'.format(name))
                    continue # skip this field: parse_value would be undefined after a failed parse
                part_medium[j] = parse_value
                j += 1
if value != '': medium.append(part_medium)
values['Medium'] = medium
return values
def get_main_inputs(self, input) -> dict:
"""
        Collect the main inputs from the user.
"""
values = {}
for element in input.parameters:
try:
name = input.name + element['name']
value = getattr(self, name).text().replace(",", ".")
parse_value = element['parse_function'](value) * element['to_SI']
if parse_value < 0: raise Exception('Hodnota {} je mensi nez nula. Zadejte kladne cislo.'.format(name))
values[element['name']] = parse_value
except ValueError:
self.show_error_dialog_to_user('Nezadali jste cislo u hodnoty {}!'.format(name))
except Exception as error:
self.show_error_dialog_to_user(error.args[0])
return values
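For orientation, get_main_inputs expects each element of input.parameters to be a dict with the keys read above. A hypothetical spec, for illustration only (the real ones live in UserInputMedium/UserInputShell/UserInputRest, which are not part of this excerpt):
example_parameter = {
    'name': 'T1',                  # label text and suffix of the widget attribute name
    'unit': 'degC',                # shown in the "name [unit]:" label
    'hint': 'inlet temperature',   # tooltip
    'default_value': 80.0,         # pre-filled into the QLineEdit
    'parse_function': float,       # converts the field text
    'to_SI': 1.0,                  # multiplier into SI units
}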
def run_simulation(self):
"""
        Runs when the button is pressed.
        Solves the balance equation (i.e. computes the outlet temperatures)
        and then evaluates the individual heat exchangers.
"""
print('RUNNING')
self.table.clearContents()
self.table.setRowCount(0)
medium_tube = self.get_medium_inputs(self.input_tube)
medium_shell = self.get_medium_inputs(self.input_shell)
rest = self.get_main_inputs(self.input_rest)
try:
calculate = Calculate(medium_tube, medium_shell, rest)
getattr(self, 'TubeT2').setText(str(round(calculate.tube.t2, 2)))
getattr(self, 'ShellT2').setText(str(round(calculate.shell.t2, 2)))
vysledky = calculate.calculate_all()
except Exception as error:
self.show_error_dialog_to_user(error.args[0])
else:
print('Pozadavky na vymenik splnilo {} vymeniku.'.format(len(vysledky)))
self.show_output(vysledky)
self.show_graph(vysledky)
print('DONE!')
def show_error_dialog_to_user(self, error_message: str) -> None:
"""
Displays a separate dialog (alert) informing user of something bad, like
invalid user input or simulation errors.
Args:
error_message ... what should be shown to the user
"""
print(error_message)
msg = QMessageBox()
msg.setIcon(QMessageBox.Critical)
msg.setText("Error")
msg.setWindowTitle("Error")
msg.setInformativeText(error_message)
msg.exec_()
def prepare_table(self):
"""
        Prepare the table columns.
"""
i = 0
for item in ['DN[-]', 'd_out[mm]', 'tl_trub[mm]', 'roztec_trub[mm]', 'delka[mm]', 'roztec_prep[mm]', 'vyska_prep[mm]']:
self.table.insertColumn(i)
self.table.setHorizontalHeaderItem(i, QTableWidgetItem(item))
i += 1
for item in ['tl_prep[mm]','pocet_prep[-]', 'pocet_trub[-]', 'TP[m/s]', 'MZP[m/s]', 'vykon [W]',
'tlak_ztraty[Pa]', 'hmotnost[kg]']:
self.table.insertColumn(i)
self.table.setHorizontalHeaderItem(i, QTableWidgetItem(item))
i += 1
def show_output(self, outputs):
"""
        List the heat exchangers that met the requirements.
"""
i = 0
self.table.setSortingEnabled(True)
for output in outputs:
self.table.insertRow(i)
j = 0
for x in output[0]:
item = QTableWidgetItem()
if x == 'shell':
item.setData(0, output[0][x]['DN'])
else:
item.setData(0, output[0][x]*1000)
item.setFlags(Qt.ItemFlags(1))
self.table.setItem(i, j, item)
j += 1
for y in output[1]:
item = QTableWidgetItem()
if y == 'tl_prep':
item.setData(0, output[1][y]*1000)
else:
|
item.setData(0, output[1][y])
item.setFlags(Qt.ItemFlags(1))
self.table.setItem(i, j, item)
j += 1
i += 1
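From the reads in show_output and show_graph, each entry of the results list appears to be a pair of dicts: geometry (lengths in metres, except the nested shell DN) and computed results. A hypothetical entry, for illustration only:
example_result = (
    {'shell': {'DN': 200}, 'd_out': 0.025, 'tl': 0.000889, 'length': 2.5},   # geometry [m]
    {'tl_prep': 0.003, 'pocet_prep': 8, 'pocet_trub': 60, 'TP': 1.2,
     'MZP': 0.8, 'vykon': 250000.0, 'tlak_ztraty': 4200.0, 'hmotnost': 120.5},
)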
def show_graph(self, outputs):
graph_inputs = {
'x' : [],
'y' : [],
's' : [],
'c' : [],
}
for output in outputs:
graph_inputs['x'].append(output[1]['hmotnost'])
graph_inputs['y'].append(output[1]['tlak_ztraty'])
graph_inputs['s'].append(output[0]['length']*10)
if output[0]['tl'] == 0.000889:
graph_inputs['c'].append('gold')
elif output[0]['tl'] == 0.001244:
graph_inputs['c'].append('orange')
elif output[0]['tl']
|
random_line_split
|
|
heat_exchanger.py
|
Calculate
class HeatExchangerWindow(QMainWindow, Ui_MainWindow):
"""
    Class handling the communication between the user interface and the backend.
    The user interface itself lives in the heat_exchanger_ui module, from which it is imported.
"""
def __init__(self):
super().__init__()
self.setupUi(self)
self.input_tube = UserInputMedium('Tube', 'Trubkovy prostor')
        self.input_shell = UserInputShell('Shell', 'Mezitrubkovy prostor') # temporary, for testing only
        #self.input_shell = UserInputMedium('Shell', 'Mezitrubkovy prostor') # in the final version
self.input_rest = UserInputRest()
self.add_medium_inputs(self.verticalLayoutTube, self.input_tube)
self.add_medium_inputs(self.verticalLayoutShell, self.input_shell)
self.add_main_inputs(self.verticalLayoutRest, self.input_rest)
self.prepare_table()
self.buttonRun.pressed.connect(lambda: self.run_simulation())
def add_medium_inputs(self, parent_layout, input) -> None:
"""
        Generate the input fields for the individual media in the user interface.
"""
self.add_main_inputs(parent_layout, input)
title_layout = QHBoxLayout()
for parameter in input.medium:
title = QLabel(parameter['name'])
title.setFont(QFont("Times", 12, QFont.Bold))
title.setToolTip(parameter['hint'])
title_layout.addWidget(title)
parent_layout.addLayout(title_layout)
for i in range(1):
new_layout = QHBoxLayout()
for parameter in input.medium:
name = input.name + parameter['name'] + str(i)
setattr(self, name, QLineEdit())
getattr(self, name).setFont(QFont("Times", 12))
getattr(self, name).setText(str(parameter['default_value']))
#getattr(self, name).setDisabled(True)
new_layout.addWidget(getattr(self, name))
parent_layout.addLayout(new_layout)
def add_main_input
|
layout, input) -> None:
"""
        Add the main input fields to the user interface.
"""
title = QLabel(input.title)
title.setAlignment(Qt.AlignCenter)
title.setFont(QFont("Times", 16, QFont.Bold))
parent_layout.addWidget(title)
for element in input.parameters:
new_layout = QHBoxLayout()
label_text = "{} [{}]:".format(element['name'], element['unit'])
label = QLabel(label_text)
label.setFont(QFont("Times", 12))
label.setToolTip(element['hint'])
name = input.name + element['name']
setattr(self, name, QLineEdit())
getattr(self, name).setText(str(element['default_value']))
getattr(self, name).setFont(QFont("Times", 12))
if element['name'] == 'T2': getattr(self, name).setDisabled(True)
new_layout.addWidget(label)
new_layout.addWidget(getattr(self, name))
parent_layout.addLayout(new_layout)
def get_medium_inputs(self, input) -> dict:
"""
Reads the user's inputs describing a medium.
"""
values = self.get_main_inputs(input)
medium = []
for i in range(1):
part_medium = [0, 0]
j = 0
for element in input.medium:
name = input.name + element['name'] + str(i)
value = getattr(self, name).text().replace(',', '.')
if value == '':
break
try:
    parse_value = element['parse_function'](value) * element['to_SI']
except ValueError:
    self.show_error_dialog_to_user('Value {} has a wrong format. Enter a string for "medium" and a number for "percent"!'.format(name))
    value = ''  # treat this row as empty so the partial data is not appended below
    break
part_medium[j] = parse_value
j += 1
if value != '': medium.append(part_medium)
values['Medium'] = medium
return values
def get_main_inputs(self, input) -> dict:
"""
Reads the main inputs from the user.
"""
values = {}
for element in input.parameters:
try:
name = input.name + element['name']
value = getattr(self, name).text().replace(",", ".")
parse_value = element['parse_function'](value) * element['to_SI']
if parse_value < 0: raise Exception('Value {} is less than zero. Enter a positive number.'.format(name))
values[element['name']] = parse_value
except ValueError:
self.show_error_dialog_to_user('You did not enter a number for value {}!'.format(name))
except Exception as error:
self.show_error_dialog_to_user(error.args[0])
return values
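    # A hypothetical parameter spec of the shape add_main_inputs/get_main_inputs
    # expect; the real definitions live in the UserInput* classes, which are not
    # part of this fragment. Keys mirror the lookups above; values are illustrative.
    EXAMPLE_PARAMETER = {
        'name': 'T1',                 # widget name suffix and result key
        'unit': 'K',                  # appended to the label text
        'hint': 'Inlet temperature',  # shown as a tooltip
        'default_value': 293.15,      # pre-filled into the QLineEdit
        'parse_function': float,      # converts the text field to a number
        'to_SI': 1.0,                 # multiplicative unit-conversion factor
    }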
def run_simulation(self):
"""
Runs when the button is pressed.
Solves the balance equation (computes the outlet temperatures),
then evaluates the individual heat exchangers.
"""
print('RUNNING')
self.table.clearContents()
self.table.setRowCount(0)
medium_tube = self.get_medium_inputs(self.input_tube)
medium_shell = self.get_medium_inputs(self.input_shell)
rest = self.get_main_inputs(self.input_rest)
try:
calculate = Calculate(medium_tube, medium_shell, rest)
getattr(self, 'TubeT2').setText(str(round(calculate.tube.t2, 2)))
getattr(self, 'ShellT2').setText(str(round(calculate.shell.t2, 2)))
vysledky = calculate.calculate_all()
except Exception as error:
self.show_error_dialog_to_user(error.args[0])
else:
print('{} heat exchangers met the requirements.'.format(len(vysledky)))
self.show_output(vysledky)
self.show_graph(vysledky)
print('DONE!')
def show_error_dialog_to_user(self, error_message: str) -> None:
"""
Displays a separate dialog (alert) informing user of something bad, like
invalid user input or simulation errors.
Args:
error_message ... what should be shown to the user
"""
print(error_message)
msg = QMessageBox()
msg.setIcon(QMessageBox.Critical)
msg.setText("Error")
msg.setWindowTitle("Error")
msg.setInformativeText(error_message)
msg.exec_()
def prepare_table(self):
"""
Prepares the table columns.
"""
i = 0
for item in ['DN[-]', 'd_out[mm]', 'tl_trub[mm]', 'roztec_trub[mm]', 'delka[mm]', 'roztec_prep[mm]', 'vyska_prep[mm]']:
self.table.insertColumn(i)
self.table.setHorizontalHeaderItem(i, QTableWidgetItem(item))
i += 1
for item in ['tl_prep[mm]','pocet_prep[-]', 'pocet_trub[-]', 'TP[m/s]', 'MZP[m/s]', 'vykon [W]',
'tlak_ztraty[Pa]', 'hmotnost[kg]']:
self.table.insertColumn(i)
self.table.setHorizontalHeaderItem(i, QTableWidgetItem(item))
i += 1
def show_output(self, outputs):
"""
Lists the heat exchangers that met the requirements.
"""
i = 0
self.table.setSortingEnabled(False)  # sorting while rows are inserted would scramble the row indices
for output in outputs:
self.table.insertRow(i)
j = 0
for x in output[0]:
item = QTableWidgetItem()
if x == 'shell':
item.setData(0, output[0][x]['DN'])
else:
item.setData(0, output[0][x]*1000)
item.setFlags(Qt.ItemFlags(1))
self.table.setItem(i, j, item)
j += 1
for y in output[1]:
item = QTableWidgetItem()
if y == 'tl_prep':
item.setData(0, output[1][y]*1000)
else:
item.setData(0, output[1][y])
item.setFlags(Qt.ItemFlags(1))
self.table.setItem(i, j, item)
j += 1
    i += 1
self.table.setSortingEnabled(True)
def show_graph(self, outputs):
graph_inputs = {
'x' : [],
'y' : [],
's' : [],
'c' : [],
}
for output in outputs:
graph_inputs['x'].append(output[1]['hmotnost'])
graph_inputs['y'].append(output[1]['tlak_ztraty'])
graph_inputs['s'].append(output[0]['length']*10)
if output[0]['tl'] == 0.000889:
graph_inputs['c'].append('gold')
elif output[0]['tl'] == 0.001244:
graph_inputs['c'].append('orange')
else:
    graph_inputs['c'].append('red')  # fallback colour for other wall thicknesses (assumed)
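# show_graph only assembles graph_inputs; a minimal rendering sketch, assuming
# matplotlib is available. The function name below is illustrative, not from the
# original file: mass on x, pressure loss on y, marker size tracks tube length,
# colour tracks wall thickness.
import matplotlib.pyplot as plt

def render_graph(graph_inputs):
    plt.scatter(graph_inputs['x'], graph_inputs['y'], s=graph_inputs['s'], c=graph_inputs['c'])
    plt.xlabel('hmotnost [kg]')
    plt.ylabel('tlak_ztraty [Pa]')
    plt.show()

# A minimal entry point for the window itself, assuming PyQt5 provides the widgets
# used above and heat_exchanger_ui provides Ui_MainWindow.
if __name__ == '__main__':
    import sys
    from PyQt5.QtWidgets import QApplication
    app = QApplication(sys.argv)
    window = HeatExchangerWindow()
    window.show()
    sys.exit(app.exec_())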
gamestate.rs
impl std::fmt::Display for GameState {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        let mut board_string = String::new();
        for y in 0..self.board.height {
            if y % 2 == 1 {
                board_string.push_str(" ");
            }
            for x in 0..self.board.width {
                let tile_string = match self.board.get_tile_id(x, y) {
                    Some(id) => {
                        match self.players.values().find(|player|
                            player.penguins.iter().any(|penguin| penguin.tile_id == Some(id)))
                        {
                            Some(player) => {
                                format!("P{}", player.player_id.0)
                            },
                            None => format!("{:2}", id.0),
                        }
                    },
                    None => " x".to_string(),
};
board_string.push_str(&tile_string);
board_string.push_str(" ");
}
board_string.push_str("\n");
}
writeln!(f, "{}", board_string)?;
// Write each player, their score, and their penguin positions
for (player_id, player) in self.players.iter() {
let current_player_str = if self.current_turn == *player_id { "<- current turn" } else { "" };
let penguins = util::map_slice(&player.penguins, |penguin| {
match penguin.tile_id {
Some(id) => format!("penguin on tile {}", id.0),
None => "unplaced".to_string(),
}
}).join(", ");
writeln!(f, "Player {} - {:?} - score: {} - penguins: [{}] {}",
player_id.0, player.color, player.score, penguins, current_player_str)?;
}
writeln!(f, "")
}
}
impl GameState {
/// Create a new GameState with the given board and player_count. Generates new
/// player ids for the number of players given.
/// This will panic if player_count is < MIN_PLAYERS_PER_GAME or > MAX_PLAYERS_PER_GAME.
pub fn new(board: Board, player_count: usize) -> GameState {
GameState::with_players(board, (0..player_count).map(PlayerId).collect())
}
/// Create a new GameState with the given board and turn_order, with the player count equal
/// to the number of players in turn_order.
/// This will panic if turn_order.len() is < MIN_PLAYERS_PER_GAME or > MAX_PLAYERS_PER_GAME.
pub fn with_players(board: Board, turn_order: Vec<PlayerId>) -> GameState {
// Each player receives 6 - N penguins, where N is the number of players
let penguins_per_player = PENGUIN_FACTOR - turn_order.len();
let players: BTreeMap<_, _> = turn_order.iter().zip(PlayerColor::iter()).map(|(id, color)| {
(*id, Player::new(*id, color, penguins_per_player))
}).collect();
let current_turn = turn_order[0];
GameState {
board,
players,
turn_order,
current_turn,
winning_players: None,
}
}
/// Creates a new gamestate with a board with a given number of rows and columns,
/// the given number of players, and no holes.
pub fn with_default_board(rows: u32, columns: u32, players: usize) -> GameState {
let board = Board::with_no_holes(rows, columns, 3);
GameState::new(board, players)
}
/// Places an unplaced avatar on a position on the board, and advances the turn.
/// Returns Some(()) on success, or None if the player makes an invalid placement.
/// An invalid placement is one of:
/// 1. Placement on an invalid position (either out of bounds or a hole)
/// 2. Placement when the players' avatars are already placed
/// 3. Placement of a penguin that doesn't belong to the current player
pub fn place_avatar_for_player(&mut self, player: PlayerId, tile: TileId) -> Option<()> {
self.place_avatar_without_changing_turn(player, tile)?;
self.advance_turn();
Some(())
}
/// Place a player's avatar but don't change whose turn it is.
/// This is useful to more easily place avatars in bulk during testing.
pub fn place_avatar_without_changing_turn(&mut self, player: PlayerId, tile: TileId) -> Option<()> {
let occupied_tiles = self.get_occupied_tiles();
if occupied_tiles.contains(&tile) {
None
} else {
let player = self.players.get_mut(&player)?;
player.place_penguin(tile, &self.board)
}
}
/// Places an unplaced avatar on the given placement on the board, and advances the turn.
/// Returns Some(()) on success, or None if the player makes an invalid placement.
/// An invalid placement is one of:
/// 1. Placement on an invalid position (either out of bounds or a hole)
/// 2. Placement when the players' avatars are already placed
///
/// This function will choose which penguin to place for the current player, so it is
/// impossible for the player to place a penguin that is not theirs.
pub fn place_avatar_for_current_player(&mut self, placement: Placement) -> Option<()> {
self.place_avatar_for_player(self.current_turn, placement.tile_id)
}
/// Moves a placed avatar from one position to another on the board,
/// removes the tile that penguin was on, and advances the turn.
/// Returns Some(()) on success, or None if the player makes an invalid move.
/// An invalid move is one of:
/// 1. Move to an invalid position (either out of bounds or hole)
/// 2. Move when the current avatar has yet to be placed
/// 3. Move to a tile that is not accessible within a straight line
/// of the current tile, with no holes in between.
/// 4. Move of a penguin that doesn't belong to the player
pub fn move_avatar_for_player_without_changing_turn(&mut self, player: PlayerId, penguin_start_tile: TileId, destination: TileId) -> Option<()> {
let occupied = &self.get_occupied_tiles();
let player = self.players.get_mut(&player)?;
player.move_penguin(penguin_start_tile, destination, &self.board, occupied)?;
player.score += self.board.remove_tile(penguin_start_tile);
Some(())
}
/// Helper function which moves an avatar for the player whose turn it currently is.
pub fn move_avatar_for_current_player(&mut self, move_: Move) -> Option<()> {
self.move_avatar_for_player_without_changing_turn(self.current_turn, move_.from, move_.to)?;
self.advance_turn();
Some(())
}
/// Retrieve a tile by its ID. Will return None if the id
/// does not reference any existing tile. This can happen
/// if the tile was removed and has become a hole in the board.
pub fn get_tile(&self, tile_id: TileId) -> Option<&Tile> {
self.board.tiles.get(&tile_id)
}
/// Gets the color of the player whose penguin is on a certain tile
/// Returns None if there is no penguin on that tile
pub fn get_color_on_tile(&self, tile_id: TileId) -> Option<PlayerColor> {
self.players.iter().find_map(|(_, player)| {
let is_penguin_on_tile = player.penguins.iter().any(|penguin| penguin.tile_id == Some(tile_id));
if is_penguin_on_tile {
Some(player.color)
} else {
None
}
})
}
/// Returns true if any player has a penguin they can move,
/// false if not (and the game is thus over)
pub fn can_any_player_move_penguin(&self) -> bool {
let occupied_tiles = self.get_occupied_tiles();
self.players.iter().any(|(_, player)| player.can_move_a_penguin(&self.board, &occupied_tiles))
}
/// Returns true if the given player can move a penguin
pub fn can_player_move(&self, player: PlayerId) -> bool {
self.players.get(&player).map_or(false, |player|
player.can_move_a_penguin(&self.board, &self.get_occupied_tiles()))
}
/// Returns the set of tiles on this gamestate's board which have a penguin on them
pub fn get_occupied_tiles(&self) -> HashSet<TileId> {
self.players.iter()
.flat_map(|(_, player)| player.penguins.iter().filter_map(|penguin| penguin.tile_id))
.collect()
}
/// Gets all valid moves for the current GameState,
/// meaning only move the current player can make
pub fn get_valid_moves(&self) -> Vec<Move> {
let occupied_tiles = self.get_occupied_tiles();
let penguins_to_move = &self.current_player().penguins;
penguins_to_move.iter().flat_map(|penguin| {
// penguins in Games are placed, so should always be Some
let starting_tile_id = penguin.tile_id.expect("A penguin was not placed!");
let starting_tile = self.get_tile(starting_tile_id).expect("A penguin is placed on a hole");
starting_tile.all_reachable_tiles(&self.board, &occupied_tiles)
.into_iter()
                .map(move |destination| Move::new(starting_tile_id, destination.tile_id))
        }).collect()
    }
    /// Get a penguin at a position, None if no penguin at that position
    #[allow(dead_code)]
    pub fn find_penguin_at_position(&self, posn: BoardPosn) -> Option<&Penguin> {
let tile = self.board.get_tile_id(posn.x, posn.y)?;
self.players.iter().find_map(|(_, player)| {
player.find_penguin(tile)
})
}
/// Search for the penguin at the given TileId and return it if possible.
/// Returns None if no penguin at that location was found.
pub fn find_penguin(&self, tile: TileId) -> Option<&Penguin> {
self.players.iter().find_map(|(_, player)| {
player.find_penguin(tile)
})
}
/// Returns the player whose turn it currently is
pub fn current_player(&self) -> &Player {
self.players.get(&self.current_turn).unwrap()
}
/// Is this game over? We define a game to be "over" if either
/// some players have won, or there are no players left in the game.
pub fn is_game_over(&self) -> bool {
self.winning_players.is_some() || self.players.is_empty()
}
#[allow(dead_code)]
pub fn get_player_by_color_mut(&mut self, color: PlayerColor) -> Option<&mut Player> {
self.players.iter_mut()
.find(|(_, player)| player.color == color)
.map(|(_, player)| player)
}
/// Advance the turn of this game to the next player's turn
/// Will mutate this game's current_turn field.
///
/// Note that this will skip the turn of any player who cannot
/// move any penguins. It is an invalid game state for the current
/// turn to be a player who cannot move any penguins.
pub fn advance_turn(&mut self) {
self.advance_turn_index();
for _ in 0 .. self.players.len() {
if !self.current_player().has_unplaced_penguins() && self.get_valid_moves().is_empty() {
self.advance_turn_index()
} else {
return;
}
}
// No players have any moves left, find the winning players by those with the maximum score
self.winning_players = Some(util::all_max_by_key(self.players.iter(), |(_, player)| player.score)
.map(|(id, _)| *id).collect());
}
/// Sets the turn of this game to the next player in order
fn advance_turn_index(&mut self) {
if !self.turn_order.is_empty() {
let current_turn_index = self.turn_order.iter().position(|id| id == &self.current_turn).unwrap();
let next_turn_index = (current_turn_index + 1) % self.turn_order.len();
self.current_turn = self.turn_order[next_turn_index];
}
}
/// Sets the turn of the game to the previous player's turn, used when removing a player.
fn previous_turn_index(&mut self) {
let current_turn_index = self.turn_order.iter()
.position(|id| id == &self.current_turn).unwrap();
let prev_turn_index = if current_turn_index == 0 {
self.turn_order.len().saturating_sub(1)
} else {
(current_turn_index - 1) % self.turn_order.len()
};
self.current_turn = self.turn_order[prev_turn_index];
}
pub fn player_score(&self, player_id: PlayerId) -> usize {
self.players[&player_id].score
}
/// Returns true if all penguins have a concrete position on the board.
/// If this is false then we are still in the PlacePenguins phase of the game.
pub fn all_penguins_are_placed(&self) -> bool {
self.players.iter().all(|(_, player)| !player.has_unplaced_penguins())
}
/// Removes a player and its penguins from this game
pub fn remove_player(&mut self, player_id: PlayerId) {
if !self.is_game_over() {
let should_advance_turn = self.current_turn == player_id;
// Prepare to advance the current turn past the to-be-removed player
if should_advance_turn {
self.previous_turn_index();
}
self.players.remove(&player_id);
self.turn_order.retain(|id| *id != player_id);
// Now actually advance the turn after the player is removed to properly
// handle the case where we skip the turns of possibly multiple players
// whose penguins are all stuck.
if should_advance_turn {
self.advance_turn();
}
}
}
}
#[cfg(test)]
pub mod tests {
use super::*;
use crate::common::boardposn::BoardPosn;
#[test]
fn test_new() {
let board = Board::with_no_holes(3, 3, 3);
let gamestate = GameState::new(board, 4); // create game with 4 players
assert_eq!(gamestate.players.len(), 4);
// should have 6-n penguins per player
assert!(gamestate.players.iter().all(|(_, player)| player.penguins.len() == 2));
// does turn_order contain each of the players' ids exactly once?
assert_eq!(gamestate.turn_order.len(), gamestate.players.len());
assert!(gamestate.players.iter().all(|(id, _)| gamestate.turn_order.contains(id)), "{:?},\nturns={:?}", gamestate.players, gamestate.turn_order);
assert!(gamestate.winning_players.is_none()); // no winners yet
}
#[test]
fn test_can_any_player_move_penguin() {
// Can no players move when there's a penguin on the board, but holes blocking it in all directions?
let holes = util::map_slice(&[(1, 1), (1, 0), (0, 1)], |pos| BoardPosn::from(*pos));
let board_with_holes = Board::with_holes(2, 2, holes, 1);
let mut gamestate = GameState::new(board_with_holes, 4);
let player_id = *gamestate.players.iter().nth(0).unwrap().0;
assert!(!gamestate.can_any_player_move_penguin());
gamestate.place_avatar_without_changing_turn(player_id, TileId(0));
assert!(!gamestate.can_any_player_move_penguin());
// Can a player move when they have a penguin on the board with no holes blocking it?
let board = Board::with_no_holes(3, 3, 3);
let mut gamestate = GameState::new(board, 4);
let player_id = *gamestate.players.iter().nth(0).unwrap().0;
assert!(!gamestate.can_any_player_move_penguin());
gamestate.place_avatar_without_changing_turn(player_id, TileId(0));
assert!(gamestate.can_any_player_move_penguin());
// Can no players move when all penguins are blocked by holes or other penguins?
// 0(hole) 2(penguin)
// 1(penguin) 3(hole)
let holes = util::map_slice(&[(1, 1), (0, 0)], |pos| BoardPosn::from(*pos));
let board_with_holes = Board::with_holes(2, 2, holes, 1);
let mut gamestate = GameState::new(board_with_holes, 4);
let player_id = *gamestate.players.iter().nth(0).unwrap().0;
assert!(!gamestate.can_any_player_move_penguin());
gamestate.place_avatar_without_changing_turn(player_id, TileId(1));
assert!(gamestate.can_any_player_move_penguin()); // no penguin at 2, so can move
gamestate.place_avatar_without_changing_turn(player_id, TileId(2));
assert!(!gamestate.can_any_player_move_penguin()); // penguin at 2, so cannot move
}
#[test]
fn test_place_avatar() {
let mut gamestate = GameState::with_default_board(3, 3, 2);
gamestate.board.remove_tile(TileId(5));
let player_id = *gamestate.players.iter().nth(0).unwrap().0;
// Player places a penguin at a valid spot
assert_eq!(gamestate.place_avatar_without_changing_turn(player_id, TileId(4)), Some(()));
// Player tried to place a penguin at an invalid location
assert_eq!(gamestate.place_avatar_without_changing_turn(player_id, TileId(10)), None);
// Player tried to place a penguin at a hole
assert_eq!(gamestate.place_avatar_without_changing_turn(player_id, TileId(5)), None);
}
#[test]
fn test_move_avatar() {
let mut gamestate = GameState::with_default_board(3, 3, 2);
let player_id = *gamestate.players.iter().nth(0).unwrap().0;
// Reachable tiles from 0 are [0, 2, 1, 5]
let tile_0 = TileId(0);
let reachable_tile = TileId(5);
let unreachable_tile = TileId(3);
// Move failed: penguin not yet placed
assert_eq!(gamestate.move_avatar_for_player_without_changing_turn(player_id, tile_0, reachable_tile), None);
gamestate.place_avatar_without_changing_turn(player_id, tile_0);
        // Move failed: tile not reachable from tile 0
        assert_eq!(gamestate.move_avatar_for_player_without_changing_turn(player_id, tile_0, unreachable_tile), None);
    }
}
example.py
# Imports inferred from the usage in this script; gridworld and helperFunctions
# are the project's own modules.
import copy
import numpy as np
import matplotlib.pyplot as plt
import gridworld
import helperFunctions

agent = gridworld.GameEnv()
agent.step(0)
print("I moved up")
agent.step(1)
print("I moved down")
agent.step(2)
print("I moved left")
agent.step(3)
print("I moved right")
########################################################################################################################
#                       Part 1: Play the game once to see an untrained agent at work                                  #
########################################################################################################################
agent.reset()
world = agent.render_world()
plt.ioff()
plt.tight_layout()
plt.axis("off")
plt.imshow(world, interpolation="nearest")
plt.draw()
plt.title("Maze layout", fontsize=24)
plt.tight_layout()
plt.show()
plt.close()
agent.reset()
agent.close_world_display()
print("Let the game begin...")
# generate all states
all_states = []
for x in range(agent.sizeX):
    for y in range(agent.sizeY):
        all_states.append((x, y))
# Q is a dictionary that contains the rewards for all four actions that can be performed in any given square of Gridworld.
# Initialize Q and keep track of how many times Q[s] has been updated
Q = {}
update_counts_sa = {}
for s in all_states:
    update_counts_sa[s] = {}
    Q[s] = {}
    for a in agent.action_space:
        update_counts_sa[s][a] = 1.0
        Q[s][a] = 0.0
gamma = 0.9 # discount factor
alpha_W = 0.1 # learning rate
t = 1.0 # count time
########################################################################################################################
# To start the algorithm we need any action, so we pick one randomly until we find a valid action which we perform
found_initial_move = False
current_action = None
current_state = agent.get_state()
while not found_initial_move:
current_action = helperFunctions.random_action(None, agent.action_space, eps=1)
found_initial_move = agent.is_possible_action(current_action)
# loop until done (i.e. solved the maze or gave up)
done = False
while not done:
# perform current step and get the next state, the reward/penalty for the move, and whether the agent is done (solved or gave up)
next_state, reward, done = agent.step(current_action, False)
# get the best currently known action for the state we are in now
next_action = helperFunctions.get_best_action(Q[current_state])[0]
# randomize action to allow for exploration. As time progresses, make random actions less likely.
next_action = helperFunctions.random_action(next_action, agent.action_space, eps=0.4/t)
# Update Q
alpha = alpha_W/update_counts_sa[current_state][current_action]
update_counts_sa[current_state][current_action] += 0.005
Q[current_state][current_action] = Q[current_state][current_action] + alpha*(reward + gamma*Q[next_state][next_action] - Q[current_state][current_action])
# update current state, current action, and start over
current_state = next_state
current_action = next_action
t += 0.001
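# The update performed in the loop above is the SARSA rule:
#     Q(s, a) <- Q(s, a) + alpha * (r + gamma * Q(s', a') - Q(s, a))
# where (s', a') is the next state-action pair actually chosen, alpha shrinks as a
# state-action pair is visited more often, and gamma discounts future rewards.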
########################################################################################################################
# Part 2: Show the exploration route taken by the untrained worker #
########################################################################################################################
# show exploration route
result = ""
if not agent.gave_up and not agent.fell:
result = "I solved gridworld in " + str(agent.steps) + " steps."
elif not agent.gave_up and agent.fell:
result = "I fell into a pit after " + str(agent.steps) + " steps."
else:
result = "Sorry, I had to give up after " + str(agent.max_steps) + " steps."
# Animate the steps of the first game
print("Watch my exploration route... (close the plot window to contine)")
helperFunctions.animate_steps(agent, "Gridworld exploration untrained worker", result)
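# helperFunctions is not included in this fragment; judging from the call sites,
# the two helpers behave like the sketches below (assumptions, not the originals).
import random

def _sketch_get_best_action(action_values):
    # returns (best_action, best_value) for a dict mapping action -> Q-value
    best = max(action_values, key=action_values.get)
    return best, action_values[best]

def _sketch_random_action(suggested_action, action_space, eps=0.1):
    # epsilon-greedy: with probability eps replace the suggestion with a random action
    if random.random() < eps:
        return random.choice(action_space)
    return suggested_action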
########################################################################################################################
#                Part 3: Play the game several thousand times to learn the best solution strategy                     #
########################################################################################################################
print("Now let me train for a while, I enjoyed the game so much!")
agent.reset()
plt.close('all')
# The code is essentially identical to the one used above, but now carried out for several thousand episodes
training_episodes = 6001
snapshot_interval = 1000
t = 1
q_snapshot = []
for i in range(training_episodes):
if i % 1000 == 0:
print("I'm playing game " + str(i) + " / " + str(training_episodes))
if i % snapshot_interval == 0:
q_snapshot.append(copy.deepcopy(Q))
if i % 1000 == 0:
t += 0.01
agent.reset()
found_initial_move = False
current_action = None
current_state = agent.get_state()
while not found_initial_move:
current_action = helperFunctions.random_action(None, agent.action_space, eps=1)
found_initial_move = agent.is_possible_action(current_action)
done = False
# loop until done (i.e. solved the maze or gave up)
while not done:
# perform current step and get the next state, the reward/penalty for the move, and whether the agent is done (solved or gave up)
next_state, reward, done = agent.step(current_action, False)
# get the best currently known action for the state we are in now
next_action = helperFunctions.get_best_action(Q[current_state])[0]
# randomize action to allow for exploration. As time progresses, make random actions less likely.
next_action = helperFunctions.random_action(next_action, agent.action_space, eps=0.4/t)
# Update Q
# alpha = alpha_W / update_counts_sa[current_state][current_action]
alpha = 0.2
update_counts_sa[current_state][current_action] += 0.005
Q[current_state][current_action] = Q[current_state][current_action] + alpha * (reward + gamma * Q[next_state][next_action] - Q[current_state][current_action])
# update current state, current action, and start over
current_state = next_state
current_action = next_action
# update one last time
Q[current_state][current_action] = Q[current_state][current_action] + alpha * (reward + gamma * Q[next_state][next_action] - Q[current_state][current_action])
print("Ok, I am done practicing.")
agent.reset()
plt.close('all')
########################################################################################################################
# Show snapshots of what the agent has learned #
########################################################################################################################
episode = 0
obj_pos = []
for o in agent.objects:
if o.name == 'worker':
continue
obj_pos.append(tuple((o.x, o.y)))
for Q in q_snapshot:
# plot the world
world = np.full((5+2, 5+2), -1.)  # grid plus a one-cell border; -1 renders black via set_under
arrow_dict = {}
# find the best action and map the colors according to certainty
for pos, vals in Q.items(): # iterate over grid positions
directions, q = list(vals.keys()), list(vals.values())
best, worst = max(q), min(q)
shift = -worst
# find best direction for arrow, skip for objects
if pos not in obj_pos:
mpos = 0
for mpos in range(4):
if q[mpos] == best:
break
arrow_dict[(pos[0]+1, pos[1]+1)] = directions[mpos]
# scale everything to be in the interval [0,1]
if (q[0]+q[1]+q[2]+q[3]+4*shift) != 0:
color = (best + shift)/(q[0]+q[1]+q[2]+q[3]+4*shift)
else:
color = 0.00001
world[1+pos[1]][1+pos[0]] = color
for o in agent.objects:
if o.name == 'worker':
continue
world[1+o.y][1+o.x] = 2
plt.ioff()
plt.axis("off")
masked_cmap = plt.get_cmap('rainbow')
masked_cmap.set_under(color='black')
masked_cmap.set_over(color='white')
im = plt.imshow(world, interpolation="nearest", cmap=masked_cmap, vmin=0, vmax=1)
im.set_array(world)
colbar = plt.colorbar()
colbar.ax.tick_params(labelsize=24)
# add arrows: [0, 1, 2, 3] = up, down, left, right
for pos, arr_dir in arrow_dict.items():
if all(v == 0 for v in Q[(pos[0]-1, pos[1]-1)].values()): # field never visited
continue
if arr_dir == 0:
xstart, ystart = pos[0], pos[1] + 0.25
deltax, deltay = 0, -0.5
if arr_dir == 1:
xstart, ystart = pos[0], pos[1] - 0.25
deltax, deltay = 0, 0.5
if arr_dir == 2:
xstart, ystart = pos[0] + 0.25, pos[1]
deltax, deltay = -0.5, 0
if arr_dir == 3:
xstart, ystart = pos[0] - 0.25, pos[1]
deltax, deltay = 0.5, 0
plt.arrow(xstart, ystart, deltax, deltay, shape='full', width=0.1, length_includes_head=True, head_width=.35, color='black')
plt.draw()
plt.title("After " + str(episode) + " episodes", fontsize=24)
plt.tight_layout()
plt.savefig("./" + str(episode) + ".pdf", bbox_inches='tight')
episode += snapshot_interval
plt.close()
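# With shift = -worst, the colour expression above normalises the spread of the
# four action values:
#     color = (q_max - q_min) / sum_i (q_i - q_min)
# which lies in [1/3, 1] whenever the denominator is non-zero; degenerate cells
# fall back to the small constant 0.00001.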
########################################################################################################################
# Part 4: Show the exploration route taken by the trained worker #
########################################################################################################################
# Navigate the maze using the best steps as learned by the agent
current_state = agent.get_state()
done = False
while not done:
current_action = helperFunctions.get_best_action(Q[current_state])[0]
current_state, reward, done = agent.step(current_action, False)
result = ""
if not agent.gave_up:
result = "I can now solve Gridworld in " + str(agent.steps) + " steps."
else:
    result = "Sorry, I still had to give up after " + str(agent.max_steps) + " steps."
# show the learned route (assumed to mirror the Part 2 animation call)
helperFunctions.animate_steps(agent, "Gridworld route trained worker", result)
zbump_nonmorphing.py
def printer(x,y,z,ax):
    ax.plot(x,y,z, label = 'Engine',color = 'blue')

def engine(cg,r,length,cg_shift,shrink,ax):
    #cg = [-2.28,6.5,-1.86]
    #r = 1.5
    #length = 2.4
    #cg_shift = 2.3
    circles_front = 10
    circles_back = 5
    every__deg = 15
    #shrink = 100 # Ellipse z-direction shrink factor
    for i in range(0,circles_front+1):
        step = cg_shift/circles_front *i
        (x_const,yes,zes) = circ_points(r,cg[0]+step,cg[1],cg[2],every__deg)
        printer(x_const,yes,zes,ax)
    for i in range(0,circles_back+1):
        step = (length-cg_shift)/circles_back *i
        (x_const,yes,zes) = circ_points(r,cg[0]-step,cg[1],cg[2],every__deg)
        printer(x_const,yes,zes,ax)
    for xelem in [-8.4,-9.6,-10.8,-12]:
(x_const,yels,zpos,zneg,lipz) = ellipse_points(r,xelem,cg[1],cg[2],100,shrink)
printer(x_const,yels,zpos,ax)
printer(x_const,yels,zneg,ax)
#print('\nyes is: ',yes)
#print('\nzes is: ',zes)
return (ax,yes,zes,zpos,lipz)
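# circ_points and ellipse_points are defined elsewhere in the project; from the call
# sites above, circ_points traces a circle of radius r around (cy, cz) at constant x,
# sampled every every__deg degrees. A hypothetical sketch (not the original):
import numpy as np

def _sketch_circ_points(r, x, cy, cz, every_deg):
    theta = np.radians(np.arange(0, 360 + every_deg, every_deg))
    x_const = np.full_like(theta, x)
    return (x_const, cy + r*np.cos(theta), cz + r*np.sin(theta))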
def og_gc_printer(x,y,z,ax):
for i in range(len(x)):
if i in range(4,67):
ax.plot(x[i],y[i],z[i], label = 'GC',color = 'red')
def new_gc_printer(x,y,z,ax):
for i in range(len(x)):
# if i in range(0,99):
#if i < 67:
#if i < 114:
#if i > 44:
#if i in [0,10,21,32,35,39,43,44,49,54,59,66,67,70,74,77,84,94,104,113,118,128,138,148,155,164,170,177,186,195,205]:
#if i in [118,128,138,148,155,164,170,177,186,195,205]:
#ax.plot(x[i],y[i],z[i], label = 'NEW',color = 'red')
#else:
ax.plot(x[i],y[i],z[i], label = 'NEW',color = 'green')
def index_finder(gcpts,cgy):
    """Return the index of the entry of gcpts closest to cgy (assumes gcpts varies monotonically toward cgy)."""
    index = 0
for i in range(len(gcpts)-1):
#if ( (gcpts[i+1] > cgy) and (gcpts[i] < cgy) ):
if ( (abs(gcpts[i+1]-cgy)) < (abs(gcpts[i]-cgy)) ):
index = (i+1)
return index
def zgc_bumpNadd(xgc,ygc,zgc,circy,circz,elipz,cg,length,shift,r,shrink,lipz,t,srB,srO,srF,gcnumbeh,gcnumfro,ax):
front = []
over = []
behind = []
alone = []
#print(len(ygc))
#gcnumfro = 20
#gcnumbeh = 30
# for loop through each individual guide_curve
for i in range(len(ygc)):
# Determine whether gc is a FRONT, OVER, OR BEHIND
for j in range(len(ygc[0])):
if ((min(circy)<= ygc[i][j] <= max(circy)) and (i < (len(ygc)/3) ) and ((zgc[i][j]) >= (max(elipz)) ) ):
elip_i = i # index of the first gc to have BOTH endpoints 'below' elipz or zmax
for i in range(len(ygc)):
for j in range(len(ygc[0])):
if ((min(circy)<= ygc[i][j] <= max(circy)) and (cg[0]+shift >= xgc[i][j]) and (zgc[i][j] < cg[2]) ):
front_i = i # the last index in over
#print('the elip is: ',elip_i)
#print('\nthe front_i is: ',front_i)
for i in range(len(ygc)):
if (i < elip_i):
alone.append(i)
elif (elip_i <= i < (front_i-gcnumbeh)):
behind.append(i)
elif ((front_i-gcnumbeh) <= i <= front_i+2):
over.append(i)
elif (front_i+2 < i <= (front_i+2+gcnumfro)):
front.append(i)
elif ((front_i+2+gcnumfro) < i):
alone.append(i)
#print('\ny min and max: ',min(circy),max(circy))
#print('yeup: ', cg[0]-length+shift, cg[0]+shift)
#print('front: ',front)
#print('over: ',over)
#print('behind: ',behind)
#print('alone: ',alone)
# for loop through each gc again to make corresponding adjustments
#t = .35 # max material thickness beyond nominal radius of ducted fan
countfro = 0
countbeh = 0
#-------------------Front Longitudinal Cubic---------------------#
endi = index_finder(ygc[(min(front)+gcnumfro)],cg[1])
x0f = abs(xgc[(min(front)+gcnumfro)][endi] - (cg[0]+shift))
z0f = abs(zgc[(min(front)+gcnumfro)][endi] - (cg[2]-r-t))
xf = x0f/z0f
zf = z0f/z0f
#print(x0f)
#print(z0f)
#print(xf)
#print(zf)
cub_front = zbum.cubic_solver(xf,zf,0,0)
#-------------------Over Longitudinal Cubic----------------------#
begini = index_finder(ygc[(max(over)-gcnumbeh)],cg[1])
x0b = abs( (cg[0]+shift) - xgc[(max(over)-gcnumbeh)][begini] )
z0b = abs( (cg[2]+r+t) - zgc[(max(over)-gcnumbeh)][begini] )
xb = x0b/z0b
zb = z0b/z0b
#print(x0b)
#print(z0b)
#print(xb)
#print(zb)
cub_behind = zbum.cubic_solver(xb,zb,-.15,0.15)
#-----------------------z bumping section------------------------#
for k in range(len(ygc)):
if k in behind: # down bump for BEHIND gc's
#### Preliminary calcs for intervals for spanwise smoothing
oldif_s = 10
oldif_e = 10
for l in range(len(ygc[k])):
nedif_s = abs(ygc[k][l] - (min(circy)-srB[0]))
nedif_e = abs(ygc[k][l] - (max(circy)+srB[1]))
if nedif_s < oldif_s:
start_b = l
oldif_s = nedif_s
if nedif_e < oldif_e:
end_b = l
oldif_e = nedif_e
#### Preliminary calcs for longitudinal smoothing
(cub1,cub2,iy2,iy3) = zbum.interval_mkr_elip(r,shrink,lipz,cg[1],ygc[k][start_b],zgc[k][start_b],ygc[k][start_b+1],zgc[k][start_b+1],ygc[k][end_b-1],zgc[k][end_b-1],ygc[k][end_b],zgc[k][end_b],min(elipz),max(elipz))
#### File through a second time to perform the appropriate adjustments
for l in range(len(ygc[k])):
zgc[k][l] = zbum.zbump_elip(ygc[k][l],zgc[k][l],cg[1],r,shrink,lipz,cub1,cub2,ygc[k][start_b],iy2,iy3,ygc[k][end_b])
if k in over: # UP bump for OVER gc's
#### Preliminary calcs for intervals for spanwise smoothing
oldif_s = 10
oldif_e = 10
for l in range(len(ygc[k])):
nedif_s = abs(ygc[k][l] - (min(circy)-srO[0]))
nedif_e = abs(ygc[k][l] - (max(circy)+srO[1]))
if nedif_s < oldif_s:
start_o = l
oldif_s = nedif_s
if nedif_e < oldif_e:
end_o = l
oldif_e = nedif_e
#### Preliminary calcs for longitudinal smoothing
index = index_finder(ygc[k],cg[1])
exam = ( (cg[0]+shift) - xgc[k][index] ) /z0b
frac = zbum.frac_finder(exam,cub_behind)
(cub1,cub2,iy2,iy3,newr) = zbum.interval_mkr(r,t,circz,cg[1],cg[2
zbump_nonmorphing.py
|
(x,y,z,ax):
ax.plot(x,y,z, label = 'Engine',color = 'blue')
def engine(cg,r,length,cg_shift,shrink,ax):
#cg = [-2.28,6.5,-1.86]
#r = 1.5
#length = 2.4
#cg_shift = 2.3
circles_front = 10
circles_back = 5
every__deg = 15
#shrink = 100 # Ellipse z-direction shrink factor
for i in range(0,circles_front+1):
step = cg_shift/circles_front *i
(x_const,yes,zes) = circ_points(r,cg[0]+step,cg[1],cg[2],every__deg)
printer(x_const,yes,zes,ax)
for i in range(0,circles_back+1):
step = (length-cg_shift)/circles_back *i
(x_const,yes,zes) = circ_points(r,cg[0]-step,cg[1],cg[2],every__deg)
printer(x_const,yes,zes,ax)
for xelem in [-8.4,-9.6,-10.8,-12]:
(x_const,yels,zpos,zneg,lipz) = ellipse_points(r,xelem,cg[1],cg[2],100,shrink)
printer(x_const,yels,zpos,ax)
printer(x_const,yels,zneg,ax)
#print('\nyes is: ',yes)
#print('\nzes is: ',zes)
return (ax,yes,zes,zpos,lipz)
def og_gc_printer(x,y,z,ax):
for i in range(len(x)):
if i in range(4,67):
ax.plot(x[i],y[i],z[i], label = 'GC',color = 'red')
def new_gc_printer(x,y,z,ax):
for i in range(len(x)):
# if i in range(0,99):
#if i < 67:
#if i < 114:
#if i > 44:
#if i in [0,10,21,32,35,39,43,44,49,54,59,66,67,70,74,77,84,94,104,113,118,128,138,148,155,164,170,177,186,195,205]:
#if i in [118,128,138,148,155,164,170,177,186,195,205]:
#ax.plot(x[i],y[i],z[i], label = 'NEW',color = 'red')
#else:
ax.plot(x[i],y[i],z[i], label = 'NEW',color = 'green')
def index_finder(gcpts,cgy):
index = 0
for i in range(len(gcpts)-1):
#if ( (gcpts[i+1] > cgy) and (gcpts[i] < cgy) ):
if ( (abs(gcpts[i+1]-cgy)) < (abs(gcpts[i]-cgy)) ):
index = (i+1)
return index
def zgc_bumpNadd(xgc,ygc,zgc,circy,circz,elipz,cg,length,shift,r,shrink,lipz,t,srB,srO,srF,gcnumbeh,gcnumfro,ax):
front = []
over = []
behind = []
alone = []
#print(len(ygc))
#gcnumfro = 20
#gcnumbeh = 30
# for loop through each individual guide_curve
for i in range(len(ygc)):
# Determine whether gc is a FRONT, OVER, OR BEHIND
for j in range(len(ygc[0])):
if ((min(circy)<= ygc[i][j] <= max(circy)) and (i < (len(ygc)/3) ) and ((zgc[i][j]) >= (max(elipz)) ) ):
elip_i = i # index of the first gc to have BOTH endpoints 'below' elipz or zmax
for i in range(len(ygc)):
for j in range(len(ygc[0])):
if ((min(circy)<= ygc[i][j] <= max(circy)) and (cg[0]+shift >= xgc[i][j]) and (zgc[i][j] < cg[2]) ):
front_i = i # the last index in over
#print('the elip is: ',elip_i)
#print('\nthe front_i is: ',front_i)
for i in range(len(ygc)):
if (i < elip_i):
alone.append(i)
elif (elip_i <= i < (front_i-gcnumbeh)):
behind.append(i)
elif ((front_i-gcnumbeh) <= i <= front_i+2):
over.append(i)
elif (front_i+2 < i <= (front_i+2+gcnumfro)):
front.append(i)
elif ((front_i+2+gcnumfro) < i):
alone.append(i)
#print('\ny min and max: ',min(circy),max(circy))
#print('yeup: ', cg[0]-length+shift, cg[0]+shift)
#print('front: ',front)
#print('over: ',over)
#print('behind: ',behind)
#print('alone: ',alone)
# for loop through each gc again to make corresponding adjustments
#t = .35 # max material thickness beyond nominal radius of ducted fan
countfro = 0
countbeh = 0
#-------------------Front Longitudinal Cubic---------------------#
endi = index_finder(ygc[(min(front)+gcnumfro)],cg[1])
x0f = abs(xgc[(min(front)+gcnumfro)][endi] - (cg[0]+shift))
z0f = abs(zgc[(min(front)+gcnumfro)][endi] - (cg[2]-r-t))
xf = x0f/z0f
zf = z0f/z0f
#print(x0f)
#print(z0f)
#print(xf)
#print(zf)
cub_front = zbum.cubic_solver(xf,zf,0,0)
#-------------------Over Longitudinal Cubic----------------------#
begini = index_finder(ygc[(max(over)-gcnumbeh)],cg[1])
x0b = abs( (cg[0]+shift) - xgc[(max(over)-gcnumbeh)][begini] )
z0b = abs( (cg[2]+r+t) - zgc[(max(over)-gcnumbeh)][begini] )
xb = x0b/z0b
zb = z0b/z0b
#print(x0b)
#print(z0b)
#print(xb)
#print(zb)
cub_behind = zbum.cubic_solver(xb,zb,-.15,0.15)
#-----------------------z bumping section------------------------#
for k in range(len(ygc)):
if k in behind: # down bump for BEHIND gc's
#### Preliminary calcs for intervals for spanwise smoothing
oldif_s = 10
oldif_e = 10
for l in range(len(ygc[k])):
nedif_s = abs(ygc[k][l] - (min(circy)-srB[0]))
nedif_e = abs(ygc[k][l] - (max(circy)+srB[1]))
if nedif_s < oldif_s:
start_b = l
oldif_s = nedif_s
if nedif_e < oldif_e:
end_b = l
oldif_e = nedif_e
#### Preliminary calcs for longitudinal smoothing
(cub1,cub2,iy2,iy3) = zbum.interval_mkr_elip(r,shrink,lipz,cg[1],ygc[k][start_b],zgc[k][start_b],ygc[k][start_b+1],zgc[k][start_b+1],ygc[k][end_b-1],zgc[k][end_b-1],ygc[k][end_b],zgc[k][end_b],min(elipz),max(elipz))
#### File through a second time to perform the appropriate adjustments
for l in range(len(ygc[k])):
zgc[k][l] = zbum.zbump_elip(ygc[k][l],zgc[k][l],cg[1],r,shrink,lipz,cub1,c
|
printer
|
identifier_name
|
|
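The longitudinal-cubic setup above normalizes the x and z offsets by z0 before handing them to zbum.cubic_solver(x, z, s0, s1). The zbum module is not included in this extract, so the sketch below is only one plausible reading of that call -- a cubic through (0, 0) and (x, z) with prescribed end slopes -- and not the module's actual implementation:

# Hedged sketch: fit z(t) = a*t^3 + b*t^2 + c*t + d through (0, 0) and
# (x, z), with slope s0 at t = 0 and slope s1 at t = x. The boundary
# conditions are an assumption about what cubic_solver does.
import numpy as np

def cubic_solver(x, z, s0, s1):
    A = np.array([
        [0.0,      0.0,   0.0, 1.0],  # z(0)  = 0
        [x**3,     x**2,  x,   1.0],  # z(x)  = z
        [0.0,      0.0,   1.0, 0.0],  # z'(0) = s0
        [3 * x**2, 2 * x, 1.0, 0.0],  # z'(x) = s1
    ])
    return np.linalg.solve(A, np.array([0.0, z, s0, s1]))

print(cubic_solver(1.8, 1.0, 0.0, 0.0))  # xf, zf as normalized above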
zbump_nonmorphing.py
|
.4,-9.6,-10.8,-12]:
(x_const,yels,zpos,zneg,lipz) = ellipse_points(r,xelem,cg[1],cg[2],100,shrink)
printer(x_const,yels,zpos,ax)
printer(x_const,yels,zneg,ax)
#print('\nyes is: ',yes)
#print('\nzes is: ',zes)
return (ax,yes,zes,zpos,lipz)
def og_gc_printer(x,y,z,ax):
for i in range(len(x)):
if i in range(4,67):
ax.plot(x[i],y[i],z[i], label = 'GC',color = 'red')
def new_gc_printer(x,y,z,ax):
for i in range(len(x)):
# if i in range(0,99):
#if i < 67:
#if i < 114:
#if i > 44:
#if i in [0,10,21,32,35,39,43,44,49,54,59,66,67,70,74,77,84,94,104,113,118,128,138,148,155,164,170,177,186,195,205]:
#if i in [118,128,138,148,155,164,170,177,186,195,205]:
#ax.plot(x[i],y[i],z[i], label = 'NEW',color = 'red')
#else:
ax.plot(x[i],y[i],z[i], label = 'NEW',color = 'green')
def index_finder(gcpts,cgy):
index = 0
for i in range(len(gcpts)-1):
#if ( (gcpts[i+1] > cgy) and (gcpts[i] < cgy) ):
|
return index
def zgc_bumpNadd(xgc,ygc,zgc,circy,circz,elipz,cg,length,shift,r,shrink,lipz,t,srB,srO,srF,gcnumbeh,gcnumfro,ax):
front = []
over = []
behind = []
alone = []
#print(len(ygc))
#gcnumfro = 20
#gcnumbeh = 30
# for loop through each individual guide_curve
for i in range(len(ygc)):
# Determine whether gc is a FRONT, OVER, OR BEHIND
for j in range(len(ygc[0])):
if ((min(circy)<= ygc[i][j] <= max(circy)) and (i < (len(ygc)/3) ) and ((zgc[i][j]) >= (max(elipz)) ) ):
elip_i = i # index of the first gc to have BOTH endpoints 'below' elipz or zmax
for i in range(len(ygc)):
for j in range(len(ygc[0])):
if ((min(circy)<= ygc[i][j] <= max(circy)) and (cg[0]+shift >= xgc[i][j]) and (zgc[i][j] < cg[2]) ):
front_i = i # the last index in over
#print('the elip is: ',elip_i)
#print('\nthe front_i is: ',front_i)
for i in range(len(ygc)):
if (i < elip_i):
alone.append(i)
elif (elip_i <= i < (front_i-gcnumbeh)):
behind.append(i)
elif ((front_i-gcnumbeh) <= i <= front_i+2):
over.append(i)
elif (front_i+2 < i <= (front_i+2+gcnumfro)):
front.append(i)
elif ((front_i+2+gcnumfro) < i):
alone.append(i)
#print('\ny min and max: ',min(circy),max(circy))
#print('yeup: ', cg[0]-length+shift, cg[0]+shift)
#print('front: ',front)
#print('over: ',over)
#print('behind: ',behind)
#print('alone: ',alone)
# for loop through each gc again to make corresponding adjustments
#t = .35 # max material thickness beyond nominal radius of ducted fan
countfro = 0
countbeh = 0
#-------------------Front Longitudinal Cubic---------------------#
endi = index_finder(ygc[(min(front)+gcnumfro)],cg[1])
x0f = abs(xgc[(min(front)+gcnumfro)][endi] - (cg[0]+shift))
z0f = abs(zgc[(min(front)+gcnumfro)][endi] - (cg[2]-r-t))
xf = x0f/z0f
zf = z0f/z0f
#print(x0f)
#print(z0f)
#print(xf)
#print(zf)
cub_front = zbum.cubic_solver(xf,zf,0,0)
#-------------------Over Longitudinal Cubic----------------------#
begini = index_finder(ygc[(max(over)-gcnumbeh)],cg[1])
x0b = abs( (cg[0]+shift) - xgc[(max(over)-gcnumbeh)][begini] )
z0b = abs( (cg[2]+r+t) - zgc[(max(over)-gcnumbeh)][begini] )
xb = x0b/z0b
zb = z0b/z0b
#print(x0b)
#print(z0b)
#print(xb)
#print(zb)
cub_behind = zbum.cubic_solver(xb,zb,-.15,0.15)
#-----------------------z bumping section------------------------#
for k in range(len(ygc)):
if k in behind: # down bump for BEHIND gc's
#### Preliminary calcs for intervals for spanwise smoothing
oldif_s = 10
oldif_e = 10
for l in range(len(ygc[k])):
nedif_s = abs(ygc[k][l] - (min(circy)-srB[0]))
nedif_e = abs(ygc[k][l] - (max(circy)+srB[1]))
if nedif_s < oldif_s:
start_b = l
oldif_s = nedif_s
if nedif_e < oldif_e:
end_b = l
oldif_e = nedif_e
#### Preliminary calcs for longitudinal smoothing
(cub1,cub2,iy2,iy3) = zbum.interval_mkr_elip(r,shrink,lipz,cg[1],ygc[k][start_b],zgc[k][start_b],ygc[k][start_b+1],zgc[k][start_b+1],ygc[k][end_b-1],zgc[k][end_b-1],ygc[k][end_b],zgc[k][end_b],min(elipz),max(elipz))
#### File through a second time to perform the appropriate adjustments
for l in range(len(ygc[k])):
zgc[k][l] = zbum.zbump_elip(ygc[k][l],zgc[k][l],cg[1],r,shrink,lipz,cub1,cub2,ygc[k][start_b],iy2,iy3,ygc[k][end_b])
if k in over: # UP bump for OVER gc's
#### Preliminary calcs for intervals for spanwise smoothing
oldif_s = 10
oldif_e = 10
for l in range(len(ygc[k])):
nedif_s = abs(ygc[k][l] - (min(circy)-srO[0]))
nedif_e = abs(ygc[k][l] - (max(circy)+srO[1]))
if nedif_s < oldif_s:
start_o = l
oldif_s = nedif_s
if nedif_e < oldif_e:
end_o = l
oldif_e = nedif_e
#### Preliminary calcs for longitudinal smoothing
index = index_finder(ygc[k],cg[1])
exam = ( (cg[0]+shift) - xgc[k][index] ) /z0b
frac = zbum.frac_finder(exam,cub_behind)
(cub1,cub2,iy2,iy3,newr) = zbum.interval_mkr(r,t,circz,cg[1],cg[
|
if ( (abs(gcpts[i+1]-cgy)) < (abs(gcpts[i]-cgy)) ):
index = (i+1)
|
conditional_block
|
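zgc_bumpNadd sorts guide curves into FRONT, OVER, BEHIND, and ALONE bands around the two scan results elip_i and front_i, widened by gcnumbeh and gcnumfro. The same bucketing, isolated with invented bounds so it runs standalone:

# Bucketing logic from zgc_bumpNadd; the numeric arguments below are
# made up purely to exercise the branches.
def classify(n_gc, elip_i, front_i, gcnumbeh, gcnumfro):
    front, over, behind, alone = [], [], [], []
    for i in range(n_gc):
        if i < elip_i:
            alone.append(i)
        elif elip_i <= i < front_i - gcnumbeh:
            behind.append(i)
        elif front_i - gcnumbeh <= i <= front_i + 2:
            over.append(i)
        elif front_i + 2 < i <= front_i + 2 + gcnumfro:
            front.append(i)
        else:  # everything aft of the front band is untouched
            alone.append(i)
    return front, over, behind, alone

print(classify(40, 4, 25, 6, 5))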
zbump_nonmorphing.py
|
def og_gc_printer(x,y,z,ax):
for i in range(len(x)):
if i in range(4,67):
ax.plot(x[i],y[i],z[i], label = 'GC',color = 'red')
def new_gc_printer(x,y,z,ax):
for i in range(len(x)):
# if i in range(0,99):
#if i < 67:
#if i < 114:
#if i > 44:
#if i in [0,10,21,32,35,39,43,44,49,54,59,66,67,70,74,77,84,94,104,113,118,128,138,148,155,164,170,177,186,195,205]:
#if i in [118,128,138,148,155,164,170,177,186,195,205]:
#ax.plot(x[i],y[i],z[i], label = 'NEW',color = 'red')
#else:
ax.plot(x[i],y[i],z[i], label = 'NEW',color = 'green')
def index_finder(gcpts,cgy):
index = 0
for i in range(len(gcpts)-1):
#if ( (gcpts[i+1] > cgy) and (gcpts[i] < cgy) ):
if ( (abs(gcpts[i+1]-cgy)) < (abs(gcpts[i]-cgy)) ):
index = (i+1)
return index
def zgc_bumpNadd(xgc,ygc,zgc,circy,circz,elipz,cg,length,shift,r,shrink,lipz,t,srB,srO,srF,gcnumbeh,gcnumfro,ax):
front = []
over = []
behind = []
alone = []
#print(len(ygc))
#gcnumfro = 20
#gcnumbeh = 30
# for loop through each individual guide_curve
for i in range(len(ygc)):
# Determine whether gc is a FRONT, OVER, OR BEHIND
for j in range(len(ygc[0])):
if ((min(circy)<= ygc[i][j] <= max(circy)) and (i < (len(ygc)/3) ) and ((zgc[i][j]) >= (max(elipz)) ) ):
elip_i = i # index of the first gc to have BOTH endpoints 'below' elipz or zmax
for i in range(len(ygc)):
for j in range(len(ygc[0])):
if ((min(circy)<= ygc[i][j] <= max(circy)) and (cg[0]+shift >= xgc[i][j]) and (zgc[i][j] < cg[2]) ):
front_i = i # the last index in over
#print('the elip is: ',elip_i)
#print('\nthe front_i is: ',front_i)
for i in range(len(ygc)):
if (i < elip_i):
alone.append(i)
elif (elip_i <= i < (front_i-gcnumbeh)):
behind.append(i)
elif ((front_i-gcnumbeh) <= i <= front_i+2):
over.append(i)
elif (front_i+2 < i <= (front_i+2+gcnumfro)):
front.append(i)
elif ((front_i+2+gcnumfro) < i):
alone.append(i)
#print('\ny min and max: ',min(circy),max(circy))
#print('yeup: ', cg[0]-length+shift, cg[0]+shift)
#print('front: ',front)
#print('over: ',over)
#print('behind: ',behind)
#print('alone: ',alone)
# for loop through each gc again to make corresponding adjustments
#t = .35 # max material thickness beyond nominal radius of ducted fan
countfro = 0
countbeh = 0
#-------------------Front Longitudinal Cubic---------------------#
endi = index_finder(ygc[(min(front)+gcnumfro)],cg[1])
x0f = abs(xgc[(min(front)+gcnumfro)][endi] - (cg[0]+shift))
z0f = abs(zgc[(min(front)+gcnumfro)][endi] - (cg[2]-r-t))
xf = x0f/z0f
zf = z0f/z0f
#print(x0f)
#print(z0f)
#print(xf)
#print(zf)
cub_front = zbum.cubic_solver(xf,zf,0,0)
#-------------------Over Longitudinal Cubic----------------------#
begini = index_finder(ygc[(max(over)-gcnumbeh)],cg[1])
x0b = abs( (cg[0]+shift) - xgc[(max(over)-gcnumbeh)][begini] )
z0b = abs( (cg[2]+r+t) - zgc[(max(over)-gcnumbeh)][begini] )
xb = x0b/z0b
zb = z0b/z0b
#print(x0b)
#print(z0b)
#print(xb)
#print(zb)
cub_behind = zbum.cubic_solver(xb,zb,-.15,0.15)
#-----------------------z bumping section------------------------#
for k in range(len(ygc)):
if k in behind: # down bump for BEHIND gc's
#### Preliminary calcs for intervals for spanwise smoothing
oldif_s = 10
oldif_e = 10
for l in range(len(ygc[k])):
nedif_s = abs(ygc[k][l] - (min(circy)-srB[0]))
nedif_e = abs(ygc[k][l] - (max(circy)+srB[1]))
if nedif_s < oldif_s:
start_b = l
oldif_s = nedif_s
if nedif_e < oldif_e:
end_b = l
oldif_e = nedif_e
#### Preliminary calcs for longitudinal smoothing
(cub1,cub2,iy2,iy3) = zbum.interval_mkr_elip(r,shrink,lipz,cg[1],ygc[k][start_b],zgc[k][start_b],ygc[k][start_b+1],zgc[k][start_b+1],ygc[k][end_b-1],zgc[k][end_b-1],ygc[k][end_b],zgc[k][end_b],min(elipz),max(elipz))
#### File through a second time to perform the appropriate adjustments
for l in range(len(ygc[k])):
zgc[k][l] = zbum.zbump_elip(ygc[k][l],zgc[k][l],cg[1],r,shrink,lipz,cub1,cub2,ygc[k][start_b],iy2,iy3,ygc[k][end_b])
if k in over: # UP bump for OVER gc's
#### Preliminary calcs for intervals for spanwise smoothing
oldif_s = 10
oldif_e = 10
for l in range(len(ygc[k])):
nedif_s = abs(ygc
|
circles_front = 10
circles_back = 5
every__deg = 15
#shrink = 100 # Ellipse z-direction shrink factor
for i in range(0,circles_front+1):
step = cg_shift/circles_front *i
(x_const,yes,zes) = circ_points(r,cg[0]+step,cg[1],cg[2],every__deg)
printer(x_const,yes,zes,ax)
for i in range(0,circles_back+1):
step = (length-cg_shift)/circles_back *i
(x_const,yes,zes) = circ_points(r,cg[0]-step,cg[1],cg[2],every__deg)
printer(x_const,yes,zes,ax)
for xelem in [-8.4,-9.6,-10.8,-12]:
(x_const,yels,zpos,zneg,lipz) = ellipse_points(r,xelem,cg[1],cg[2],100,shrink)
printer(x_const,yels,zpos,ax)
printer(x_const,yels,zneg,ax)
#print('\nyes is: ',yes)
#print('\nzes is: ',zes)
return (ax,yes,zes,zpos,lipz)
|
identifier_body
|
|
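engine() above samples circles (circ_points) and shrunken ellipses (ellipse_points) at successive x stations to outline the nacelle. Neither helper appears in this extract, so the following is an assumed reading of circ_points -- a circle of radius r in the y-z plane at a fixed x station, sampled every deg degrees -- kept purely for illustration:

# Hypothetical circ_points: the real helper is not shown above.
import math

def circ_points(r, x, cy, cz, deg):
    ys, zs = [], []
    for a in range(0, 360 + deg, deg):
        ys.append(cy + r * math.cos(math.radians(a)))
        zs.append(cz + r * math.sin(math.radians(a)))
    return x, ys, zs

x, ys, zs = circ_points(1.5, -2.28, 6.5, -1.86, 15)
print(len(ys))  # 25 samples around the duct at this station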
osm-pbf-analyst.js
|
(filename, options) {
const _options = Object.assign({}, DEFAULT_OPTIONS, options);
const { highWaterMark, uiEnabled, uiUpdateInterval, uiColors } = _options;
const memory = newMemoryObject(uiEnabled);
const { internal, file, block, primitive } = memory;
const fileReadStream = fs.createReadStream(filename, { highWaterMark });
const instance = new EventEmitter();
// Pause stream so it is manually started with `start` method
fileReadStream.pause();
/**
* Tests whether a specified number of additional bytes can be read from the buffer.
* Used to check that the buffer holds a complete OSM file structure before processing.
* @method canRead
* @param size {Number} of bytes to attempt to read, starting from `pointer` offset
* @return {Boolean}
*/
const canRead = (size) => internal.pointer + size <= internal.buffer.length;
/**
* Reads 4 bytes as a 32bit int from `pointer` offset
* @method readInt32
* @return {Int32|Number}
*/
const readInt32 = () => {
const value = internal.buffer.readInt32BE(internal.pointer);
internal.pointer += 4;
return value;
};
/**
* Reads a specified amount of bytes as a Buffer from `pointer` offset
* @method readBuffer
* @param size {Number} of bytes to read
* @return {Buffer}
*/
const readBuffer = (size) => {
const value = internal.buffer.slice(internal.pointer, internal.pointer + size);
internal.pointer += size;
return value;
};
/**
* Removes elements preceding the `pointer` in buffer.
* 1. This is to free memory that is no longer needed. Reading
* the buffer with `readInt32` & `readBuffer` moves the pointer up
* past bytes that have been read; those bytes are then discarded.
* 2. Resets `pointer` back to zero.
* @method clipBuffer
*/
const clipBuffer = () => {
const newLength = internal.buffer.length - internal.pointer;
const newBuffer = Buffer.alloc(newLength); // Buffer.alloc replaces the deprecated new Buffer()
for (var i = 0; i < newLength; i++) {
newBuffer[i] = internal.buffer[i + internal.pointer];
}
internal.buffer = newBuffer;
internal.pointer = 0;
};
/**
* Attempt to read an OSM file block.
* @method readFile
* @return {Boolean} `true` if a file structure was successfully read
*/
const readFile = () => {
if (canRead(4)) {
const headerSize = readInt32();
if (canRead(headerSize)) {
const header = FileFormat.BlobHeader.decode(readBuffer(headerSize));
if (canRead(header.datasize)) {
const blob = FileFormat.Blob.decode(readBuffer(header.datasize));
const { type } = header;
// So far there are only two types: `OSMHeader` & `OSMData`
instance.emit(type, blob);
clipBuffer();
return true;
}
}
}
return false;
};
/**
* Setup File Stream Events
*/
const onFileStreamClose = () => {
file.opened = false;
file.closed = true;
instance.emit('end');
};
const onFileStreamData = (data) => {
file.bytesRead += data.length;
// Merge this chunk at the end of the `buffer`
internal.buffer = Buffer.concat([internal.buffer, data], internal.buffer.length + data.length);
// Reset pointer, so reading starts at the beginning of the internal.buffer.
// OPTIMIZE It is possible to not reset the pointer and instead continue if the `readFile`
// method can store its state.
internal.pointer = 0;
// Read all fully buffered file blocks
while (readFile());
// Count chunks received
file.chunkCount += 1;
};
const onFileStreamEnd = () => fileReadStream.close();
const onFileStreamError = (error) => instance.emit('error', error);
const onFileStreamOpen = () => {
file.opened = true;
file.closed = false;
instance.emit('open');
fileReadStream.on('data', onFileStreamData);
};
/**
* Parser Internal Events
*/
const onOSMHeader = (blob) => {
const { raw, raw_size, zlib_data, lzma_data, OBSOLETE_bzip2_data } = blob;
if (raw) {
instance.emit('Header', BlockFormat.HeaderBlock.decode(raw)); // match the 'Header' event emitted in the zlib branch
} else if (zlib_data) {
const inflated = zlib.inflateSync(zlib_data.buffer.slice(zlib_data.offset));
const header = BlockFormat.HeaderBlock.decode(inflated);
instance.emit('Header', header);
} else if (lzma_data) {
// Proposed Compression; not required
} else if (OBSOLETE_bzip2_data) {
// Obsolete Compression; always ignore
}
};
const onOSMData = (blob) => {
const { raw, raw_size, zlib_data, lzma_data, OBSOLETE_bzip2_data } = blob;
let primitiveBlock = null;
if (raw) {
primitiveBlock = BlockFormat.PrimitiveBlock.decode(raw);
} else if (zlib_data) {
const inflated = zlib.inflateSync(zlib_data.buffer.slice(zlib_data.offset));
primitiveBlock = BlockFormat.PrimitiveBlock.decode(inflated);
} else if (lzma_data) {
// Proposed Compression; not required
} else if (OBSOLETE_bzip2_data) {
// Obsolete Compression; always ignore
}
if (!primitiveBlock) return instance;
const { primitivegroup, lat_offset, lon_offset, granularity, date_granularity, stringtable } = primitiveBlock;
const firstPrimitive = primitivegroup[0];
const { nodes, dense, ways, relations, changesets } = firstPrimitive;
// Each file block has to apply a fix to the lat/lon and timestamps on each node
// http://wiki.openstreetmap.org/wiki/PBF_Format#Definition_of_OSMData_fileblock
const fixLatitude = (lat) => lat.multiply(granularity).add(lat_offset).toNumber() * .000000001; // explicit Long -> Number
const fixLongitude = (lon) => lon.multiply(granularity).add(lon_offset).toNumber() * .000000001;
const fixTimestamp = (timestamp) => timestamp.multiply(date_granularity).divide(1000);
const utf8StringTable = stringtable.s.map(x => x.toUTF8());
const toolbox = { fixLatitude, fixLongitude, fixTimestamp, utf8StringTable, primitiveBlock };
// A primitive group can only have one valid property
// A parser can ignore unsupported types
if (nodes.length) {
instance.emit('Nodes', nodes, toolbox);
} else if (dense) {
instance.emit('DenseNode', dense, toolbox);
} else if (ways.length) {
instance.emit('Ways', ways, primitiveBlock.primitivegroup, primitiveBlock);
} else if (relations.length) {
instance.emit('Relations', relations, primitiveBlock.primitivegroup, primitiveBlock);
} else if (changesets.length) {
instance.emit('ChangeSets', changesets, primitiveBlock.primitivegroup, primitiveBlock);
}
return instance;
};
const onDenseNode = (dense, toolbox) => {
const { id, denseinfo, lat, lon, keys_vals } = dense;
const { primitiveBlock, utf8StringTable } = toolbox;
const { stringtable } = primitiveBlock;
const keyValsLength = dense.keys_vals.length;
const errCheckMin = Math.min(id.length, lat.length, lon.length, denseinfo.version.length);
const errCheckMax = Math.max(id.length, lat.length, lon.length, denseinfo.version.length);
const length = errCheckMin;
if (errCheckMin !== errCheckMax) throw new Error('Corrupt DenseNode');
if (length !== 8000) console.warn(new Error('DenseNode Abnormal Length:' + length).stack);
// Dense Nodes are delta encoded, in other words, the data is in
// the `difference` between elements.
let found_tag = false;
let lastID = new Long(0);
let lastLat = new Long(0);
let lastLon = new Long(0);
let changeset = new Long(0);
let timestamp = new Long(0);
let uid = 0;
let user_sid = 0;
let version = 0;
let username = '';
for (let i = 0; i < length; i++) {
const node = { tags: {} }; // declare per-iteration instead of leaking a global
lastID = lastID.add(dense.id[i]);
lastLat = lastLat.add(dense.lat[i]);
lastLon = lastLon.add(dense.lon[i]);
user_sid += dense.denseinfo.user_sid[i];
uid += dense.denseinfo.uid[i];
timestamp = timestamp.add(dense.denseinfo.timestamp[i]);
changeset = changeset.add(dense.denseinfo.changeset[i]);
// Current node's data
node.changeset = changeset;
node.id =
|
OsmPbfAnalyst
|
identifier_name
|
|
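As the comment in onDenseNode notes, DenseNode arrays are delta encoded: each stored value is the difference from the previous element, which is why lastID, lastLat, and lastLon accumulate as the loop runs. A minimal Python decode with invented deltas shows the same accumulation:

# Delta decoding: recover absolute values from stored differences.
def decode_deltas(deltas):
    out, acc = [], 0
    for d in deltas:
        acc += d
        out.append(acc)
    return out

print(decode_deltas([100, 1, 1, 2]))  # -> [100, 101, 102, 104]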
osm-pbf-analyst.js
|
/**
* Reads 4 bytes as a 32bit int from `pointer` offset
* @method readInt32
* @return {Int32|Number}
*/
const readInt32 = () => {
const value = internal.buffer.readInt32BE(internal.pointer);
internal.pointer += 4;
return value;
};
/**
* Reads a specified amount of bytes as a Buffer from `pointer` offset
* @method readBuffer
* @param size {Number} of bytes to read
* @return {Buffer}
*/
const readBuffer = (size) => {
const value = internal.buffer.slice(internal.pointer, internal.pointer + size);
internal.pointer += size;
return value;
};
/**
* Removes elements preceding the `pointer` in buffer.
* 1. This is to free memory that is no longer needed. Reading
* the buffer with `readInt32` & `readBuffer` moves the pointer up
* past bytes that have been read; those bytes are then discarded.
* 2. Resets `pointer` back to zero.
* @method clipBuffer
*/
const clipBuffer = () => {
const newLength = internal.buffer.length - internal.pointer;
const newBuffer = Buffer.alloc(newLength); // Buffer.alloc replaces the deprecated new Buffer()
for (var i = 0; i < newLength; i++) {
newBuffer[i] = internal.buffer[i + internal.pointer];
}
internal.buffer = newBuffer;
internal.pointer = 0;
};
/**
* Attempt to read an OSM file block.
* @method readFile
* @return {Boolean} `true` if a file structure was successfully read
*/
const readFile = () => {
if (canRead(4)) {
const headerSize = readInt32();
if (canRead(headerSize)) {
const header = FileFormat.BlobHeader.decode(readBuffer(headerSize));
if (canRead(header.datasize)) {
const blob = FileFormat.Blob.decode(readBuffer(header.datasize));
const { type } = header;
// So far there are only two types: `OSMHeader` & `OSMData`
instance.emit(type, blob);
clipBuffer();
return true;
}
}
}
return false;
};
/**
* Setup File Stream Events
*/
const onFileStreamClose = () => {
file.opened = false;
file.closed = true;
instance.emit('end');
};
const onFileStreamData = (data) => {
file.bytesRead += data.length;
// Merge this chunk at the end of the `buffer`
internal.buffer = Buffer.concat([internal.buffer, data], internal.buffer.length + data.length);
// Reset pointer, so reading starts at the beginning of the internal.buffer.
// OPTIMIZE It is possible to not reset the pointer and instead continue if the `readFile`
// method can store its state.
internal.pointer = 0;
// Read all fully buffered file blocks
while (readFile());
// Count chunks received
file.chunkCount += 1;
};
const onFileStreamEnd = () => fileReadStream.close();
const onFileStreamError = (error) => instance.emit('error', error);
const onFileStreamOpen = () => {
file.opened = true;
file.closed = false;
instance.emit('open');
fileReadStream.on('data', onFileStreamData);
};
/**
* Parser Internal Events
*/
const onOSMHeader = (blob) => {
const { raw, raw_size, zlib_data, lzma_data, OBSOLETE_bzip2_data } = blob;
if (raw) {
instance.emit('Header', BlockFormat.HeaderBlock.decode(raw)); // match the 'Header' event emitted in the zlib branch
} else if (zlib_data) {
const inflated = zlib.inflateSync(zlib_data.buffer.slice(zlib_data.offset));
const header = BlockFormat.HeaderBlock.decode(inflated);
instance.emit('Header', header);
} else if (lzma_data) {
// Proposed Compression; not required
} else if (OBSOLETE_bzip2_data) {
// Obsolete Compression; always ignore
}
};
const onOSMData = (blob) => {
const { raw, raw_size, zlib_data, lzma_data, OBSOLETE_bzip2_data } = blob;
let primitiveBlock = null;
if (raw) {
primitiveBlock = BlockFormat.PrimitiveBlock.decode(raw);
} else if (zlib_data) {
const inflated = zlib.inflateSync(zlib_data.buffer.slice(zlib_data.offset));
primitiveBlock = BlockFormat.PrimitiveBlock.decode(inflated);
} else if (lzma_data) {
// Proposed Compression; not required
} else if (OBSOLETE_bzip2_data) {
// Obsolete Compression; always ignore
}
if (!primitiveBlock) return instance;
const { primitivegroup, lat_offset, lon_offset, granularity, date_granularity, stringtable } = primitiveBlock;
const firstPrimitive = primitivegroup[0];
const { nodes, dense, ways, relations, changesets } = firstPrimitive;
// Each file block has to apply a fix to the lat/lon and timestamps on each node
// http://wiki.openstreetmap.org/wiki/PBF_Format#Definition_of_OSMData_fileblock
const fixLatitude = (lat) => lat.multiply(granularity).add(lat_offset).toNumber() * .000000001; // explicit Long -> Number
const fixLongitude = (lon) => lon.multiply(granularity).add(lon_offset).toNumber() * .000000001;
const fixTimestamp = (timestamp) => timestamp.multiply(date_granularity).divide(1000);
const utf8StringTable = stringtable.s.map(x => x.toUTF8());
const toolbox = { fixLatitude, fixLongitude, fixTimestamp, utf8StringTable, primitiveBlock };
// A primitive group can only have one valid property
// A parser can ignore unsupported types
if (nodes.length) {
instance.emit('Nodes', nodes, toolbox);
} else if (dense) {
instance.emit('DenseNode', dense, toolbox);
} else if (ways.length) {
instance.emit('Ways', ways, primitiveBlock.primitivegroup, primitiveBlock);
} else if (relations.length) {
instance.emit('Relations', relations, primitiveBlock.primitivegroup, primitiveBlock);
} else if (changesets.length) {
instance.emit('ChangeSets', changesets, primitiveBlock.primitivegroup, primitiveBlock);
}
return instance;
};
const onDenseNode = (dense, toolbox) => {
const { id, denseinfo, lat, lon, keys_vals } = dense;
const { primitiveBlock, utf8StringTable } = toolbox;
const { stringtable } = primitiveBlock;
const keyValsLength = dense.keys_vals.length;
const errCheckMin = Math.min(id.length, lat.length, lon.length, denseinfo.version.length);
const errCheckMax = Math.max(id.length, lat.length, lon.length, denseinfo.version.length);
const length = errCheckMin;
if (errCheckMin !== errCheckMax) throw new Error('Corrupt DenseNode');
if (length !== 8000) console.warn(new Error('DenseNode Abnormal Length:' + length).stack);
// Dense Nodes are delta encoded, in other words, the data is in
// the `difference` between elements.
let found_tag = false;
let lastID = new Long(0);
let lastLat = new Long(0);
let lastLon = new Long(0);
let changeset = new Long(0);
let timestamp = new Long(0);
let uid = 0;
let user_sid = 0;
let version = 0;
let username = '';
for (let i = 0; i < length; i++) {
const node = { tags: {} }; // declare per-iteration instead of leaking a global
lastID = lastID.add(dense.id[i]);
lastLat = lastLat.add(dense.lat[i]);
lastLon = lastLon.add(dense.lon[i]);
user_sid += dense.denseinfo.user_sid[i];
uid += dense.denseinfo.uid[i];
timestamp = timestamp.add(dense.denseinfo.timestamp[i]);
changeset = changeset.add(dense.denseinfo.changeset[i]);
// Current node's data
node.changeset = changeset;
node.id = lastID;
node.version = dense.denseinfo
|
{
const _options = Object.assign({}, DEFAULT_OPTIONS, options);
const { highWaterMark, uiEnabled, uiUpdateInterval, uiColors } = _options;
const memory = newMemoryObject(uiEnabled);
const { internal, file, block, primitive } = memory;
const fileReadStream = fs.createReadStream(filename, { highWaterMark });
const instance = new EventEmitter();
// Pause stream so it is manually started with `start` method
fileReadStream.pause();
/**
* Tests whether a specified number of additional bytes can be read from the buffer.
* Used to check that the buffer holds a complete OSM file structure before processing.
* @method canRead
* @param size {Number} of bytes to attempt to read, starting from `pointer` offset
* @return {Boolean}
*/
const canRead = (size) => internal.pointer + size <= internal.buffer.length;
|
identifier_body
|
|
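readFile() above walks a length-prefixed framing: a 4-byte big-endian integer gives the BlobHeader size, and the decoded header's datasize field gives the Blob size. The sketch below demonstrates only the framing arithmetic, with the protobuf decoding stubbed out:

# Length-prefixed framing, per the readFile() flow above. Real code
# would decode `header` as a BlobHeader and then read header.datasize
# further bytes as the Blob.
import struct

def frames(buf):
    ptr = 0
    while ptr + 4 <= len(buf):
        (header_size,) = struct.unpack_from(">i", buf, ptr)
        ptr += 4
        if ptr + header_size > len(buf):
            break  # incomplete frame: wait for more stream data
        yield buf[ptr:ptr + header_size]
        ptr += header_size

demo = struct.pack(">i", 3) + b"abc"
print(list(frames(demo)))  # [b'abc']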
osm-pbf-analyst.js
|
=> internal.pointer + size <= internal.buffer.length;
/**
* Reads 4 bytes as a 32bit int from `pointer` offset
* @method readInt32
* @return {Int32|Number}
*/
const readInt32 = () => {
const value = internal.buffer.readInt32BE(internal.pointer);
internal.pointer += 4;
return value;
};
/**
* Reads a specified amount of bytes as a Buffer from `pointer` offset
* @method readBuffer
* @param size {Number} of bytes to read
* @return {Buffer}
*/
const readBuffer = (size) => {
const value = internal.buffer.slice(internal.pointer, internal.pointer + size);
internal.pointer += size;
return value;
};
/**
* Removes elements preceding the `pointer` in buffer.
* 1. This is to free memory that is no longer needed. Reading
* the buffer with `readInt32` & `readBuffer` moves the pointer up
* past bytes that have been read; those bytes are then discarded.
* 2. Resets `pointer` back to zero.
* @method clipBuffer
*/
const clipBuffer = () => {
const newLength = internal.buffer.length - internal.pointer;
const newBuffer = Buffer.alloc(newLength); // Buffer.alloc replaces the deprecated new Buffer()
for (var i = 0; i < newLength; i++) {
newBuffer[i] = internal.buffer[i + internal.pointer];
}
internal.buffer = newBuffer;
internal.pointer = 0;
};
/**
* Attempt to read an OSM file block.
* @method readFile
* @return {Boolean} `true` if a file structure was successfully read
*/
const readFile = () => {
if (canRead(4)) {
const headerSize = readInt32();
if (canRead(headerSize)) {
const header = FileFormat.BlobHeader.decode(readBuffer(headerSize));
if (canRead(header.datasize)) {
const blob = FileFormat.Blob.decode(readBuffer(header.datasize));
const { type } = header;
// So far there are only two types: `OSMHeader` & `OSMData`
instance.emit(type, blob);
clipBuffer();
return true;
}
}
}
return false;
};
/**
* Setup File Stream Events
*/
const onFileStreamClose = () => {
file.opened = false;
file.closed = true;
instance.emit('end');
};
const onFileStreamData = (data) => {
file.bytesRead += data.length;
// Merge this chunk at the end of the `buffer`
internal.buffer = Buffer.concat([internal.buffer, data], internal.buffer.length + data.length);
// Reset pointer, so reading starts at the beginning of the internal.buffer.
// OPTIMIZE It is possible to not reset the pointer and instead continue if the `readFile`
// method can store its state.
internal.pointer = 0;
// Read all fully buffered file blocks
while (readFile());
// Count chunks received
file.chunkCount += 1;
};
const onFileStreamEnd = () => fileReadStream.close();
const onFileStreamError = (error) => instance.emit('error', error);
const onFileStreamOpen = () => {
file.opened = true;
file.closed = false;
instance.emit('open');
fileReadStream.on('data', onFileStreamData);
};
/**
* Parser Internal Events
*/
const onOSMHeader = (blob) => {
const { raw, raw_size, zlib_data, lzma_data, OBSOLETE_bzip2_data } = blob;
if (raw) {
instance.emit('Header', BlockFormat.HeaderBlock.decode(raw)); // match the 'Header' event emitted in the zlib branch
} else if (zlib_data) {
const inflated = zlib.inflateSync(zlib_data.buffer.slice(zlib_data.offset));
const header = BlockFormat.HeaderBlock.decode(inflated);
instance.emit('Header', header);
} else if (lzma_data) {
// Proposed Compression; not required
} else if (OBSOLETE_bzip2_data) {
// Obsolete Compression; always ignore
}
};
const onOSMData = (blob) => {
const { raw, raw_size, zlib_data, lzma_data, OBSOLETE_bzip2_data } = blob;
let primitiveBlock = null;
if (raw) {
primitiveBlock = BlockFormat.PrimitiveBlock.decode(raw);
} else if (zlib_data) {
const inflated = zlib.inflateSync(zlib_data.buffer.slice(zlib_data.offset));
primitiveBlock = BlockFormat.PrimitiveBlock.decode(inflated);
} else if (lzma_data) {
// Proposed Compression; not required
} else if (OBSOLETE_bzip2_data) {
// Obsolete Compression; always ignore
}
if (!primitiveBlock) return instance;
const { primitivegroup, lat_offset, lon_offset, granularity, date_granularity, stringtable } = primitiveBlock;
|
// Each file block has to apply a fix to the lat/lon and timestamps on each node
// http://wiki.openstreetmap.org/wiki/PBF_Format#Definition_of_OSMData_fileblock
const fixLatitude = (lat) => lat.multiply(granularity).add(lat_offset).toNumber() * .000000001; // explicit Long -> Number
const fixLongitude = (lon) => lon.multiply(granularity).add(lon_offset).toNumber() * .000000001;
const fixTimestamp = (timestamp) => timestamp.multiply(date_granularity).divide(1000);
const utf8StringTable = stringtable.s.map(x => x.toUTF8());
const toolbox = { fixLatitude, fixLongitude, fixTimestamp, utf8StringTable, primitiveBlock };
// A primitive group can only have one valid property
// A parser can ignore unsupported types
if (nodes.length) {
instance.emit('Nodes', nodes, toolbox);
} else if (dense) {
instance.emit('DenseNode', dense, toolbox);
} else if (ways.length) {
instance.emit('Ways', ways, primitiveBlock.primitivegroup, primitiveBlock);
} else if (relations.length) {
instance.emit('Relations', relations, primitiveBlock.primitivegroup, primitiveBlock);
} else if (changesets.length) {
instance.emit('ChangeSets', changesets, primitiveBlock.primitivegroup, primitiveBlock);
}
return instance;
};
const onDenseNode = (dense, toolbox) => {
const { id, denseinfo, lat, lon, keys_vals } = dense;
const { primitiveBlock, utf8StringTable } = toolbox;
const { stringtable } = primitiveBlock;
const keyValsLength = dense.keys_vals.length;
const errCheckMin = Math.min(id.length, lat.length, lon.length, denseinfo.version.length);
const errCheckMax = Math.max(id.length, lat.length, lon.length, denseinfo.version.length);
const length = errCheckMin;
if (errCheckMin !== errCheckMax) throw new Error('Corrupt DenseNode');
if (length !== 8000) console.warn(new Error('DenseNode Abnormal Length:' + length).stack);
// Dense Nodes are delta encoded, in other words, the data is in
// the `difference` between elements.
let found_tag = false;
let lastID = new Long(0);
let lastLat = new Long(0);
let lastLon = new Long(0);
let changeset = new Long(0);
let timestamp = new Long(0);
let uid = 0;
let user_sid = 0;
let version = 0;
let username = '';
for (let i = 0; i < length; i++) {
const node = { tags: {} }; // declare per-iteration instead of leaking a global
lastID = lastID.add(dense.id[i]);
lastLat = lastLat.add(dense.lat[i]);
lastLon = lastLon.add(dense.lon[i]);
user_sid += dense.denseinfo.user_sid[i];
uid += dense.denseinfo.uid[i];
timestamp = timestamp.add(dense.denseinfo.timestamp[i]);
changeset = changeset.add(dense.denseinfo.changeset[i]);
// Current node's data
node.changeset = changeset;
node.id = lastID;
node.version = dense.denseinfo.version[i];
node.lat = toolbox.fixLatitude(lastLat);
node.lon = toolbox.fixLongitude(lastLon);
node.timestamp = toolbox.fixTimestamp(timestamp);
node.username = stringtable.s[user_sid].toUTF8();
// Get tags for node
for (var k = 0; k < keyValsLength; k++) {
const keyId = keys_vals[k];
k++;
if (!keyId) break;
const valueId = keys_vals[k];
const key = utf8StringTable[keyId];
const value = utf8StringTable[valueId];
node.tags[key] = value;
}
instance.emit('Node', node);
}
};
/**
* Parser Flow Commands
*/
const start = () => {
if (!internal.started) {
internal.started = true;
internal.paused = false;
fileReadStream.resume
|
const firstPrimitive = primitivegroup[0];
const { nodes, dense, ways, relations, changesets } = firstPrimitive;
|
random_line_split
|
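The tag loop in onDenseNode reads keys_vals as pairs of string-table indices, with a zero terminating each node's tag list. A standalone decode with an invented string table illustrates the encoding:

# keys_vals layout: key_id, val_id, key_id, val_id, ..., 0 per node.
def decode_tags(keys_vals, table):
    nodes, tags, i = [], {}, 0
    while i < len(keys_vals):
        key_id = keys_vals[i]
        i += 1
        if key_id == 0:  # zero closes the current node's tag list
            nodes.append(tags)
            tags = {}
            continue
        val_id = keys_vals[i]
        i += 1
        tags[table[key_id]] = table[val_id]
    return nodes

table = ["", "highway", "residential", "name", "Elm Street"]
print(decode_tags([1, 2, 3, 4, 0, 0], table))
# -> [{'highway': 'residential', 'name': 'Elm Street'}, {}]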
main.rs
|
-history dtolnay/syn dtolnay/quote
star-history serde-rs/serde
",
);
static MISSING_TOKEN: &str = "\
Error: GitHub auth token is not set up.
(Expected config file: {{path}})
Run `gh auth login` to store a GitHub login token. The `gh` CLI
can be installed from <https://cli.github.com>.
If you prefer not to use the `gh` CLI, you can instead provide
a token to star-history through the GITHUB_TOKEN environment
variable. Head to <https://github.com/settings/tokens> and click
\"Generate new token (classic)\". The default public access
permission is sufficient -- you can leave all the checkboxes
empty. Save the generated token somewhere like ~/.githubtoken
and use `export GITHUB_TOKEN=$(cat ~/.githubtoken)`.
";
#[derive(Error, Debug)]
enum Error {
#[error("Error from GitHub api: {0}")]
GitHub(String),
#[error("failed to decode response body")]
DecodeResponse(#[source] serde_json::Error),
#[error("no such user: {0}")]
NoSuchUser(String),
#[error("no such repository: {0}/{1}")]
NoSuchRepo(String, String),
#[error(transparent)]
GhToken(#[from] gh_token::Error),
#[error(transparent)]
Reqwest(#[from] reqwest::Error),
#[error(transparent)]
Io(#[from] io::Error),
}
type Result<T, E = Error> = std::result::Result<T, E>;
#[derive(Eq, Clone)]
enum Series {
Owner(String),
Repo(String, String),
}
impl Display for Series {
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
match self {
Series::Owner(owner) => formatter.write_str(owner)?,
Series::Repo(owner, repo) => {
formatter.write_str(owner)?;
formatter.write_str("/")?;
formatter.write_str(repo)?;
}
}
Ok(())
}
}
impl Ord for Series {
fn cmp(&self, other: &Self) -> Ordering {
match (self, other) {
(Series::Owner(lowner), Series::Owner(rowner)) => {
lowner.to_lowercase().cmp(&rowner.to_lowercase())
}
(Series::Repo(lowner, lrepo), Series::Repo(rowner, rrepo)) => {
(lowner.to_lowercase(), lrepo.to_lowercase())
.cmp(&(rowner.to_lowercase(), rrepo.to_lowercase()))
}
(Series::Owner(_), Series::Repo(..)) => Ordering::Less,
(Series::Repo(..), Series::Owner(_)) => Ordering::Greater,
}
}
}
impl PartialOrd for Series {
fn partial_cmp(&self, other: &Series) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl PartialEq for Series {
fn eq(&self, other: &Series) -> bool {
self.cmp(other) == Ordering::Equal
}
}
#[derive(Serialize, Deserialize, Debug)]
#[serde(transparent)]
struct Cursor(Option<String>);
impl Display for Cursor {
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
match &self.0 {
Some(cursor) => {
formatter.write_str("\"")?;
formatter.write_str(cursor)?;
formatter.write_str("\"")?;
}
None => formatter.write_str("null")?,
}
Ok(())
}
}
struct Work {
series: Series,
cursor: Cursor,
}
#[derive(Serialize)]
struct
|
{
query: String,
}
#[derive(Deserialize, Debug)]
struct Response {
message: Option<String>,
#[serde(default, deserialize_with = "deserialize_data")]
data: VecDeque<Data>,
#[serde(default)]
errors: Vec<Message>,
}
#[derive(Deserialize, Debug)]
struct Message {
message: String,
}
#[derive(Debug)]
enum Data {
Owner(Option<Owner>),
Repo(Option<Repo>),
}
#[derive(Deserialize, Debug)]
struct Owner {
login: String,
repositories: Repositories,
}
#[derive(Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
struct Repositories {
page_info: PageInfo,
nodes: Vec<Repo>,
}
#[derive(Deserialize, Debug)]
struct Repo {
name: String,
owner: Account,
stargazers: Option<Stargazers>,
}
#[derive(Deserialize, Ord, PartialOrd, Eq, PartialEq, Clone, Default, Debug)]
struct Account {
login: String,
}
#[derive(Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
struct Stargazers {
page_info: PageInfo,
#[serde(deserialize_with = "non_nulls")]
edges: Vec<Star>,
}
#[derive(Deserialize, Ord, PartialOrd, Eq, PartialEq, Clone, Debug)]
struct Star {
#[serde(rename = "starredAt")]
time: DateTime<Utc>,
node: Account,
}
#[derive(Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
struct PageInfo {
has_next_page: bool,
end_cursor: Cursor,
}
fn deserialize_data<'de, D>(deserializer: D) -> Result<VecDeque<Data>, D::Error>
where
D: Deserializer<'de>,
{
struct ResponseVisitor;
impl<'de> Visitor<'de> for ResponseVisitor {
type Value = VecDeque<Data>;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("Map<String, Data>")
}
fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error>
where
A: MapAccess<'de>,
{
let mut data = VecDeque::new();
while let Some(key) = map.next_key::<String>()? {
if key.starts_with("owner") {
let owner = map.next_value::<Option<Owner>>()?;
data.push_back(Data::Owner(owner));
} else if key.starts_with("repo") {
let repo = map.next_value::<Option<Repo>>()?;
data.push_back(Data::Repo(repo));
} else {
map.next_value::<IgnoredAny>()?;
}
}
Ok(data)
}
fn visit_unit<E>(self) -> Result<Self::Value, E>
where
E: de::Error,
{
Ok(VecDeque::new())
}
}
deserializer.deserialize_any(ResponseVisitor)
}
fn non_nulls<'de, D, T>(deserializer: D) -> Result<Vec<T>, D::Error>
where
D: Deserializer<'de>,
T: Deserialize<'de>,
{
struct NonNullsVisitor<T>(PhantomData<fn() -> T>);
impl<'de, T> Visitor<'de> for NonNullsVisitor<T>
where
T: Deserialize<'de>,
{
type Value = Vec<T>;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("array")
}
fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
where
A: SeqAccess<'de>,
{
let mut vec = Vec::new();
while let Some(next) = seq.next_element::<Option<T>>()? {
vec.extend(next);
}
Ok(vec)
}
}
let visitor = NonNullsVisitor(PhantomData);
deserializer.deserialize_seq(visitor)
}
fn main() {
let ref mut log = Log::new();
if let Err(err) = try_main(log) {
log.error(err);
process::exit(1);
}
}
fn try_main(log: &mut Log) -> Result<()> {
let mut args = Vec::new();
for arg in env::args().skip(1) {
if arg == "--help" {
print!("{}", HELP);
process::exit(0);
} else if arg == "--version" {
println!("{}", VERSION);
process::exit(0);
}
let mut parts = arg.splitn(2, '/');
let owner = parts.next().unwrap();
match parts.next() {
Some(repo) => {
let owner = owner.to_owned();
let repo = repo.to_owned();
args.push(Series::Repo(owner, repo));
}
None => {
let owner = owner.strip_prefix('@').unwrap_or(owner).to_owned();
args.push(Series::Owner(owner));
}
}
}
let github_token = match gh_token::get() {
Ok(token) => token,
Err(gh_token::Error::NotConfigured(path)) => {
let path_lossy = path.to_string_lossy();
let message = MISSING_TOKEN.replace("{{path}}", &path_lossy);
eprint!("{}", message);
process::exit(1);
}
Err(error) => return Err(Error::GhToken(error)),
};
let authorization = format!("bearer {}", github_token.trim());
if args.is_empty() {
eprint!("{}", HELP);
process::exit(1);
}
let mut work = Vec::new();
let mut stars = Map::new();
for series in &args {
stars.insert(series.clone(), Set::new());
work.push(Work {
series: series.clone(),
cursor: Cursor(None),
});
}
let client = Client::new();
while !work.is_empty() {
let batch_size = cmp::min(work.len(), 50);
|
Request
|
identifier_name
|
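try_main batches up to 50 series per GraphQL request, naming each sub-query with a per-index alias (owner0, repo1, ...) so deserialize_data can match response keys back to work items. A hedged Python sketch of that assembly; query_owner and query_repo are not shown in this extract, so the field selections below are abbreviated stand-ins, not the tool's real query:

# Sketch of aliased batch-query assembly; "..." marks elided fields.
def query_owner(i, owner, cursor):
    return f'owner{i}: user(login: "{owner}") {{ repositories(after: {cursor}) {{ ... }} }}\n'

def query_repo(i, owner, repo, cursor):
    return f'repo{i}: repository(owner: "{owner}", name: "{repo}") {{ stargazers(after: {cursor}) {{ ... }} }}\n'

batch = [("dtolnay", None), ("serde-rs", "serde")]
query = "{\n"
for i, item in enumerate(batch):
    if item[1] is None:
        query += query_owner(i, item[0], "null")
    else:
        query += query_repo(i, item[0], item[1], "null")
query += "}\n"
print(query)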
main.rs
|
formatter.write_str("null")?,
}
Ok(())
}
}
struct Work {
series: Series,
cursor: Cursor,
}
#[derive(Serialize)]
struct Request {
query: String,
}
#[derive(Deserialize, Debug)]
struct Response {
message: Option<String>,
#[serde(default, deserialize_with = "deserialize_data")]
data: VecDeque<Data>,
#[serde(default)]
errors: Vec<Message>,
}
#[derive(Deserialize, Debug)]
struct Message {
message: String,
}
#[derive(Debug)]
enum Data {
Owner(Option<Owner>),
Repo(Option<Repo>),
}
#[derive(Deserialize, Debug)]
struct Owner {
login: String,
repositories: Repositories,
}
#[derive(Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
struct Repositories {
page_info: PageInfo,
nodes: Vec<Repo>,
}
#[derive(Deserialize, Debug)]
struct Repo {
name: String,
owner: Account,
stargazers: Option<Stargazers>,
}
#[derive(Deserialize, Ord, PartialOrd, Eq, PartialEq, Clone, Default, Debug)]
struct Account {
login: String,
}
#[derive(Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
struct Stargazers {
page_info: PageInfo,
#[serde(deserialize_with = "non_nulls")]
edges: Vec<Star>,
}
#[derive(Deserialize, Ord, PartialOrd, Eq, PartialEq, Clone, Debug)]
struct Star {
#[serde(rename = "starredAt")]
time: DateTime<Utc>,
node: Account,
}
#[derive(Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
struct PageInfo {
has_next_page: bool,
end_cursor: Cursor,
}
fn deserialize_data<'de, D>(deserializer: D) -> Result<VecDeque<Data>, D::Error>
where
D: Deserializer<'de>,
{
struct ResponseVisitor;
impl<'de> Visitor<'de> for ResponseVisitor {
type Value = VecDeque<Data>;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("Map<String, Data>")
}
fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error>
where
A: MapAccess<'de>,
{
let mut data = VecDeque::new();
while let Some(key) = map.next_key::<String>()? {
if key.starts_with("owner") {
let owner = map.next_value::<Option<Owner>>()?;
data.push_back(Data::Owner(owner));
} else if key.starts_with("repo") {
let repo = map.next_value::<Option<Repo>>()?;
data.push_back(Data::Repo(repo));
} else {
map.next_value::<IgnoredAny>()?;
}
}
Ok(data)
}
fn visit_unit<E>(self) -> Result<Self::Value, E>
where
E: de::Error,
{
Ok(VecDeque::new())
}
}
deserializer.deserialize_any(ResponseVisitor)
}
fn non_nulls<'de, D, T>(deserializer: D) -> Result<Vec<T>, D::Error>
where
D: Deserializer<'de>,
T: Deserialize<'de>,
{
struct NonNullsVisitor<T>(PhantomData<fn() -> T>);
impl<'de, T> Visitor<'de> for NonNullsVisitor<T>
where
T: Deserialize<'de>,
{
type Value = Vec<T>;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("array")
}
fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
where
A: SeqAccess<'de>,
{
let mut vec = Vec::new();
while let Some(next) = seq.next_element::<Option<T>>()? {
vec.extend(next);
}
Ok(vec)
}
}
let visitor = NonNullsVisitor(PhantomData);
deserializer.deserialize_seq(visitor)
}
fn main() {
let ref mut log = Log::new();
if let Err(err) = try_main(log) {
log.error(err);
process::exit(1);
}
}
fn try_main(log: &mut Log) -> Result<()> {
let mut args = Vec::new();
for arg in env::args().skip(1) {
if arg == "--help" {
print!("{}", HELP);
process::exit(0);
} else if arg == "--version" {
println!("{}", VERSION);
process::exit(0);
}
let mut parts = arg.splitn(2, '/');
let owner = parts.next().unwrap();
match parts.next() {
Some(repo) => {
let owner = owner.to_owned();
let repo = repo.to_owned();
args.push(Series::Repo(owner, repo));
}
None => {
let owner = owner.strip_prefix('@').unwrap_or(owner).to_owned();
args.push(Series::Owner(owner));
}
}
}
let github_token = match gh_token::get() {
Ok(token) => token,
Err(gh_token::Error::NotConfigured(path)) => {
let path_lossy = path.to_string_lossy();
let message = MISSING_TOKEN.replace("{{path}}", &path_lossy);
eprint!("{}", message);
process::exit(1);
}
Err(error) => return Err(Error::GhToken(error)),
};
let authorization = format!("bearer {}", github_token.trim());
if args.is_empty() {
eprint!("{}", HELP);
process::exit(1);
}
let mut work = Vec::new();
let mut stars = Map::new();
for series in &args {
stars.insert(series.clone(), Set::new());
work.push(Work {
series: series.clone(),
cursor: Cursor(None),
});
}
let client = Client::new();
while !work.is_empty() {
let batch_size = cmp::min(work.len(), 50);
let defer = work.split_off(batch_size);
let batch = mem::replace(&mut work, defer);
let mut query = String::new();
query += "{\n";
for (i, work) in batch.iter().enumerate() {
let cursor = &work.cursor;
query += &match &work.series {
Series::Owner(owner) => query_owner(i, owner, cursor),
Series::Repo(owner, repo) => query_repo(i, owner, repo, cursor),
};
}
query += "}\n";
let json = client
.post("https://api.github.com/graphql")
.header(USER_AGENT, "dtolnay/star-history")
.header(AUTHORIZATION, &authorization)
.json(&Request { query })
.send()?
.text()?;
let response: Response = serde_json::from_str(&json).map_err(Error::DecodeResponse)?;
if let Some(message) = response.message {
return Err(Error::GitHub(message));
}
for err in response.errors {
log.error(Error::GitHub(err.message));
}
let mut data = response.data;
let mut queue = batch.into_iter();
while let Some(node) = data.pop_front() {
let id = queue.next();
match node {
Data::Owner(None) | Data::Repo(None) => match id.unwrap().series {
Series::Owner(owner) => return Err(Error::NoSuchUser(owner)),
Series::Repo(owner, repo) => return Err(Error::NoSuchRepo(owner, repo)),
},
Data::Owner(Some(node)) => {
let owner = node.login;
for repo in node.repositories.nodes {
data.push_back(Data::Repo(Some(repo)));
}
if node.repositories.page_info.has_next_page {
work.push(Work {
series: Series::Owner(owner),
cursor: node.repositories.page_info.end_cursor,
});
}
}
Data::Repo(Some(node)) => {
let owner = node.owner.login;
let repo = node.name;
if let Some(stargazers) = node.stargazers {
let series = Series::Owner(owner.clone());
let owner_stars = stars.entry(series).or_default();
for star in &stargazers.edges {
owner_stars.insert(star.clone());
}
let series = Series::Repo(owner.clone(), repo.clone());
let repo_stars = stars.entry(series).or_default();
for star in &stargazers.edges {
repo_stars.insert(star.clone());
}
if stargazers.page_info.has_next_page {
work.push(Work {
series: Series::Repo(owner, repo),
cursor: stargazers.page_info.end_cursor,
});
}
} else {
work.push(Work {
series: Series::Repo(owner, repo),
cursor: Cursor(None),
});
}
}
}
}
log.tick();
}
let now = Utc::now();
for set in stars.values_mut() {
if let Some(first) = set.iter().next() {
let first_time = first.time;
set.insert(Star {
time: first_time - Duration::seconds(1),
node: Default::default(),
});
}
match set.iter().next_back() {
Some(last) if last.time >= now => {}
_ =>
|
{
set.insert(Star {
time: now,
node: Default::default(),
});
}
|
conditional_block
|
|
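The while !work.is_empty() loop implements cursor pagination: any series whose page reports has_next_page goes back onto the queue with its end_cursor, so the next batch resumes where the previous one stopped. The shape of that loop, reduced to runnable Python with a stubbed fetch:

# fetch() stands in for the GraphQL round trip above.
def drain(work, fetch):
    stars = {}
    while work:
        batch, work = work[:50], work[50:]
        for series, cursor in batch:
            page = fetch(series, cursor)
            stars.setdefault(series, []).extend(page["stars"])
            if page["has_next_page"]:
                work.append((series, page["end_cursor"]))
    return stars

pages = {
    None: {"stars": [1, 2], "has_next_page": True, "end_cursor": "c1"},
    "c1": {"stars": [3], "has_next_page": False, "end_cursor": None},
}
print(drain([("serde-rs/serde", None)], lambda s, c: pages[c]))
# -> {'serde-rs/serde': [1, 2, 3]}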
main.rs
|
-history dtolnay/syn dtolnay/quote
star-history serde-rs/serde
",
);
static MISSING_TOKEN: &str = "\
Error: GitHub auth token is not set up.
(Expected config file: {{path}})
Run `gh auth login` to store a GitHub login token. The `gh` CLI
can be installed from <https://cli.github.com>.
If you prefer not to use the `gh` CLI, you can instead provide
a token to star-history through the GITHUB_TOKEN environment
variable. Head to <https://github.com/settings/tokens> and click
\"Generate new token (classic)\". The default public access
permission is sufficient -- you can leave all the checkboxes
empty. Save the generated token somewhere like ~/.githubtoken
and use `export GITHUB_TOKEN=$(cat ~/.githubtoken)`.
";
#[derive(Error, Debug)]
enum Error {
#[error("Error from GitHub api: {0}")]
GitHub(String),
#[error("failed to decode response body")]
DecodeResponse(#[source] serde_json::Error),
#[error("no such user: {0}")]
NoSuchUser(String),
#[error("no such repository: {0}/{1}")]
NoSuchRepo(String, String),
#[error(transparent)]
GhToken(#[from] gh_token::Error),
#[error(transparent)]
Reqwest(#[from] reqwest::Error),
#[error(transparent)]
Io(#[from] io::Error),
}
type Result<T, E = Error> = std::result::Result<T, E>;
#[derive(Eq, Clone)]
enum Series {
Owner(String),
Repo(String, String),
}
impl Display for Series {
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
match self {
Series::Owner(owner) => formatter.write_str(owner)?,
Series::Repo(owner, repo) => {
formatter.write_str(owner)?;
formatter.write_str("/")?;
formatter.write_str(repo)?;
}
}
Ok(())
}
}
impl Ord for Series {
fn cmp(&self, other: &Self) -> Ordering {
match (self, other) {
(Series::Owner(lowner), Series::Owner(rowner)) => {
lowner.to_lowercase().cmp(&rowner.to_lowercase())
}
(Series::Repo(lowner, lrepo), Series::Repo(rowner, rrepo)) => {
(lowner.to_lowercase(), lrepo.to_lowercase())
.cmp(&(rowner.to_lowercase(), rrepo.to_lowercase()))
}
(Series::Owner(_), Series::Repo(..)) => Ordering::Less,
(Series::Repo(..), Series::Owner(_)) => Ordering::Greater,
}
}
}
impl PartialOrd for Series {
fn partial_cmp(&self, other: &Series) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl PartialEq for Series {
fn eq(&self, other: &Series) -> bool {
self.cmp(other) == Ordering::Equal
}
}
#[derive(Serialize, Deserialize, Debug)]
#[serde(transparent)]
struct Cursor(Option<String>);
impl Display for Cursor {
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
match &self.0 {
Some(cursor) => {
formatter.write_str("\"")?;
formatter.write_str(cursor)?;
formatter.write_str("\"")?;
}
None => formatter.write_str("null")?,
}
Ok(())
}
}
struct Work {
series: Series,
cursor: Cursor,
}
#[derive(Serialize)]
struct Request {
query: String,
}
#[derive(Deserialize, Debug)]
struct Response {
message: Option<String>,
#[serde(default, deserialize_with = "deserialize_data")]
data: VecDeque<Data>,
#[serde(default)]
errors: Vec<Message>,
}
#[derive(Deserialize, Debug)]
struct Message {
message: String,
}
#[derive(Debug)]
enum Data {
Owner(Option<Owner>),
Repo(Option<Repo>),
}
#[derive(Deserialize, Debug)]
struct Owner {
login: String,
repositories: Repositories,
}
#[derive(Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
struct Repositories {
page_info: PageInfo,
nodes: Vec<Repo>,
}
#[derive(Deserialize, Debug)]
struct Repo {
name: String,
owner: Account,
stargazers: Option<Stargazers>,
}
#[derive(Deserialize, Ord, PartialOrd, Eq, PartialEq, Clone, Default, Debug)]
struct Account {
login: String,
}
#[derive(Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
struct Stargazers {
page_info: PageInfo,
#[serde(deserialize_with = "non_nulls")]
edges: Vec<Star>,
}
#[derive(Deserialize, Ord, PartialOrd, Eq, PartialEq, Clone, Debug)]
struct Star {
#[serde(rename = "starredAt")]
time: DateTime<Utc>,
node: Account,
}
#[derive(Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
struct PageInfo {
has_next_page: bool,
end_cursor: Cursor,
}
fn deserialize_data<'de, D>(deserializer: D) -> Result<VecDeque<Data>, D::Error>
where
D: Deserializer<'de>,
{
struct ResponseVisitor;
impl<'de> Visitor<'de> for ResponseVisitor {
type Value = VecDeque<Data>;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("Map<String, Data>")
}
fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error>
where
A: MapAccess<'de>,
{
let mut data = VecDeque::new();
while let Some(key) = map.next_key::<String>()? {
if key.starts_with("owner") {
let owner = map.next_value::<Option<Owner>>()?;
data.push_back(Data::Owner(owner));
} else if key.starts_with("repo") {
let repo = map.next_value::<Option<Repo>>()?;
data.push_back(Data::Repo(repo));
} else {
map.next_value::<IgnoredAny>()?;
}
}
Ok(data)
}
fn visit_unit<E>(self) -> Result<Self::Value, E>
where
|
E: de::Error,
{
Ok(VecDeque::new())
}
}
deserializer.deserialize_any(ResponseVisitor)
}
fn non_nulls<'de, D, T>(deserializer: D) -> Result<Vec<T>, D::Error>
where
D: Deserializer<'de>,
T: Deserialize<'de>,
{
struct NonNullsVisitor<T>(PhantomData<fn() -> T>);
impl<'de, T> Visitor<'de> for NonNullsVisitor<T>
where
T: Deserialize<'de>,
{
type Value = Vec<T>;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("array")
}
fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
where
A: SeqAccess<'de>,
{
let mut vec = Vec::new();
while let Some(next) = seq.next_element::<Option<T>>()? {
vec.extend(next);
}
Ok(vec)
}
}
let visitor = NonNullsVisitor(PhantomData);
deserializer.deserialize_seq(visitor)
}
fn main() {
let ref mut log = Log::new();
if let Err(err) = try_main(log) {
log.error(err);
process::exit(1);
}
}
fn try_main(log: &mut Log) -> Result<()> {
let mut args = Vec::new();
for arg in env::args().skip(1) {
if arg == "--help" {
print!("{}", HELP);
process::exit(0);
} else if arg == "--version" {
println!("{}", VERSION);
process::exit(0);
}
let mut parts = arg.splitn(2, '/');
let owner = parts.next().unwrap();
match parts.next() {
Some(repo) => {
let owner = owner.to_owned();
let repo = repo.to_owned();
args.push(Series::Repo(owner, repo));
}
None => {
let owner = owner.strip_prefix('@').unwrap_or(owner).to_owned();
args.push(Series::Owner(owner));
}
}
}
let github_token = match gh_token::get() {
Ok(token) => token,
Err(gh_token::Error::NotConfigured(path)) => {
let path_lossy = path.to_string_lossy();
let message = MISSING_TOKEN.replace("{{path}}", &path_lossy);
eprint!("{}", message);
process::exit(1);
}
Err(error) => return Err(Error::GhToken(error)),
};
let authorization = format!("bearer {}", github_token.trim());
if args.is_empty() {
eprint!("{}", HELP);
process::exit(1);
}
let mut work = Vec::new();
let mut stars = Map::new();
for series in &args {
stars.insert(series.clone(), Set::new());
work.push(Work {
series: series.clone(),
cursor: Cursor(None),
});
}
let client = Client::new();
while !work.is_empty() {
let batch_size = cmp::min(work.len(), 50);
let defer
|
random_line_split
|
|
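After the work queue drains, each star set is padded with two synthetic points: one a second before the first real star, so the plotted curve starts from zero, and one at the current time, so every series extends to the present. The same padding in plain Python datetimes:

# Sentinel padding, mirroring the Duration::seconds(1) / Utc::now() logic.
from datetime import datetime, timedelta, timezone

def pad(times, now):
    if times:
        times.insert(0, times[0] - timedelta(seconds=1))
    if not times or times[-1] < now:
        times.append(now)
    return times

now = datetime(2024, 1, 1, tzinfo=timezone.utc)
print(pad([datetime(2023, 6, 1, tzinfo=timezone.utc)], now))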
main.rs
|
-history dtolnay/syn dtolnay/quote
star-history serde-rs/serde
",
);
static MISSING_TOKEN: &str = "\
Error: GitHub auth token is not set up.
(Expected config file: {{path}})
Run `gh auth login` to store a GitHub login token. The `gh` CLI
can be installed from <https://cli.github.com>.
If you prefer not to use the `gh` CLI, you can instead provide
a token to star-history through the GITHUB_TOKEN environment
variable. Head to <https://github.com/settings/tokens> and click
\"Generate new token (classic)\". The default public access
permission is sufficient -- you can leave all the checkboxes
empty. Save the generated token somewhere like ~/.githubtoken
and use `export GITHUB_TOKEN=$(cat ~/.githubtoken)`.
";
#[derive(Error, Debug)]
enum Error {
#[error("Error from GitHub api: {0}")]
GitHub(String),
#[error("failed to decode response body")]
DecodeResponse(#[source] serde_json::Error),
#[error("no such user: {0}")]
NoSuchUser(String),
#[error("no such repository: {0}/{1}")]
NoSuchRepo(String, String),
#[error(transparent)]
GhToken(#[from] gh_token::Error),
#[error(transparent)]
Reqwest(#[from] reqwest::Error),
#[error(transparent)]
Io(#[from] io::Error),
}
type Result<T, E = Error> = std::result::Result<T, E>;
#[derive(Eq, Clone)]
enum Series {
Owner(String),
Repo(String, String),
}
impl Display for Series {
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
match self {
Series::Owner(owner) => formatter.write_str(owner)?,
Series::Repo(owner, repo) => {
formatter.write_str(owner)?;
formatter.write_str("/")?;
formatter.write_str(repo)?;
}
}
Ok(())
}
}
impl Ord for Series {
fn cmp(&self, other: &Self) -> Ordering {
match (self, other) {
(Series::Owner(lowner), Series::Owner(rowner)) => {
lowner.to_lowercase().cmp(&rowner.to_lowercase())
}
(Series::Repo(lowner, lrepo), Series::Repo(rowner, rrepo)) => {
(lowner.to_lowercase(), lrepo.to_lowercase())
.cmp(&(rowner.to_lowercase(), rrepo.to_lowercase()))
}
(Series::Owner(_), Series::Repo(..)) => Ordering::Less,
(Series::Repo(..), Series::Owner(_)) => Ordering::Greater,
}
}
}
impl PartialOrd for Series {
fn partial_cmp(&self, other: &Series) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl PartialEq for Series {
fn eq(&self, other: &Series) -> bool {
self.cmp(other) == Ordering::Equal
}
}
#[derive(Serialize, Deserialize, Debug)]
#[serde(transparent)]
struct Cursor(Option<String>);
impl Display for Cursor {
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result
|
}
struct Work {
series: Series,
cursor: Cursor,
}
#[derive(Serialize)]
struct Request {
query: String,
}
#[derive(Deserialize, Debug)]
struct Response {
message: Option<String>,
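// "data" may be null or missing when the request fails; deserialize_data maps both cases to an empty queue.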
#[serde(default, deserialize_with = "deserialize_data")]
data: VecDeque<Data>,
#[serde(default)]
errors: Vec<Message>,
}
#[derive(Deserialize, Debug)]
struct Message {
message: String,
}
#[derive(Debug)]
enum Data {
Owner(Option<Owner>),
Repo(Option<Repo>),
}
#[derive(Deserialize, Debug)]
struct Owner {
login: String,
repositories: Repositories,
}
#[derive(Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
struct Repositories {
page_info: PageInfo,
nodes: Vec<Repo>,
}
#[derive(Deserialize, Debug)]
struct Repo {
name: String,
owner: Account,
stargazers: Option<Stargazers>,
}
#[derive(Deserialize, Ord, PartialOrd, Eq, PartialEq, Clone, Default, Debug)]
struct Account {
login: String,
}
#[derive(Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
struct Stargazers {
page_info: PageInfo,
#[serde(deserialize_with = "non_nulls")]
edges: Vec<Star>,
}
#[derive(Deserialize, Ord, PartialOrd, Eq, PartialEq, Clone, Debug)]
struct Star {
#[serde(rename = "starredAt")]
time: DateTime<Utc>,
node: Account,
}
#[derive(Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
struct PageInfo {
has_next_page: bool,
end_cursor: Cursor,
}
fn deserialize_data<'de, D>(deserializer: D) -> Result<VecDeque<Data>, D::Error>
where
D: Deserializer<'de>,
{
struct ResponseVisitor;
impl<'de> Visitor<'de> for ResponseVisitor {
type Value = VecDeque<Data>;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("Map<String, Data>")
}
fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error>
where
A: MapAccess<'de>,
{
let mut data = VecDeque::new();
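// Keys are response aliases: only those beginning with "owner" or "repo" carry data; anything else is skipped.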
while let Some(key) = map.next_key::<String>()? {
if key.starts_with("owner") {
let owner = map.next_value::<Option<Owner>>()?;
data.push_back(Data::Owner(owner));
} else if key.starts_with("repo") {
let repo = map.next_value::<Option<Repo>>()?;
data.push_back(Data::Repo(repo));
} else {
map.next_value::<IgnoredAny>()?;
}
}
Ok(data)
}
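// A literal "data": null in the response deserializes as unit; treat it as empty.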
fn visit_unit<E>(self) -> Result<Self::Value, E>
where
E: de::Error,
{
Ok(VecDeque::new())
}
}
deserializer.deserialize_any(ResponseVisitor)
}
fn non_nulls<'de, D, T>(deserializer: D) -> Result<Vec<T>, D::Error>
where
D: Deserializer<'de>,
T: Deserialize<'de>,
{
struct NonNullsVisitor<T>(PhantomData<fn() -> T>);
impl<'de, T> Visitor<'de> for NonNullsVisitor<T>
where
T: Deserialize<'de>,
{
type Value = Vec<T>;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("array")
}
fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
where
A: SeqAccess<'de>,
{
let mut vec = Vec::new();
while let Some(next) = seq.next_element::<Option<T>>()? {
vec.extend(next);
}
Ok(vec)
}
}
let visitor = NonNullsVisitor(PhantomData);
deserializer.deserialize_seq(visitor)
}
fn main() {
let log = &mut Log::new();
if let Err(err) = try_main(log) {
log.error(err);
process::exit(1);
}
}
fn try_main(log: &mut Log) -> Result<()> {
let mut args = Vec::new();
for arg in env::args().skip(1) {
if arg == "--help" {
print!("{}", HELP);
process::exit(0);
} else if arg == "--version" {
println!("{}", VERSION);
process::exit(0);
}
let mut parts = arg.splitn(2, '/');
let owner = parts.next().unwrap();
match parts.next() {
Some(repo) => {
let owner = owner.to_owned();
let repo = repo.to_owned();
args.push(Series::Repo(owner, repo));
}
None => {
let owner = owner.strip_prefix('@').unwrap_or(owner).to_owned();
args.push(Series::Owner(owner));
}
}
}
let github_token = match gh_token::get() {
Ok(token) => token,
Err(gh_token::Error::NotConfigured(path)) => {
let path_lossy = path.to_string_lossy();
let message = MISSING_TOKEN.replace("{{path}}", &path_lossy);
eprint!("{}", message);
process::exit(1);
}
Err(error) => return Err(Error::GhToken(error)),
};
let authorization = format!("bearer {}", github_token.trim());
if args.is_empty() {
eprint!("{}", HELP);
process::exit(1);
}
let mut work = Vec::new();
let mut stars = Map::new();
for series in &args {
stars.insert(series.clone(), Set::new());
work.push(Work {
series: series.clone(),
cursor: Cursor(None),
});
}
let client = Client::new();
while !work.is_empty() {
let batch_size = cmp::min(work.len(), 50);
|
{
match &self.0 {
Some(cursor) => {
formatter.write_str("\"")?;
formatter.write_str(cursor)?;
formatter.write_str("\"")?;
}
None => formatter.write_str("null")?,
}
Ok(())
}
|
identifier_body
|
lib.rs
|
!("OUT_DIR"), "/dump.json"));
serde_json::from_slice::<LootTableSet>(BYTES)
.expect("invalid loot table dump")
.0
.into_iter()
.map(|(k, v)| (k, LootTable(v)))
.collect()
});
/// Returns the loot table with the given ID, if it exists.
/// IDs are the same as those used in MC data packs. For example,
/// the loot table for stone blocks has ID "blocks/stone."
pub fn loot_table(id: &str) -> Option<&'static LootTable> {
STORE.get(id)
}
/// Condition context used to determine whether loot table conditions are satisfied.
#[derive(Debug, Default)]
pub struct Conditions {
/// The item used to break a block
pub item: Option<ItemStack>,
}
/// Opaque wrapper over `model::LootTable`.
#[derive(Debug)]
pub struct LootTable(model::LootTable);
/// Error returned when a loot table sample fails.
#[derive(Debug, Error)]
pub enum SampleError {
#[error("invalid item identifier {0}")]
InvalidItem(String),
#[error("missing loot table {0}")]
MissingLootTable(String),
/// Should be handled gracefully.
#[error("dynamic loot table {0:?}")]
IsDynamic(DynamicKind),
}
/// Indicates that the yielded item should
/// be computed based on a block entity.
#[derive(Debug)]
pub enum DynamicKind {
/// Drop e.g. contents of chest
Contents,
/// Drop the block itself (e.g. player head, banner)
This,
}
impl LootTable {
/// Samples a value from the table.
///
/// The returned set of item stacks is the result of sampling,
/// i.e. the loot which should be yielded. May return multiple
/// item stacks. No guarantees are made about the ordering
/// of the returned vector or the uniqueness of its entries.
pub fn sample(
&self,
rng: &mut impl Rng,
conditions: &Conditions,
) -> Result<SmallVec<[ItemStack; 2]>, SampleError> {
let mut results = SmallVec::new();
let pools = &self.0.pools;
// Algorithm: sample each pool.
// For each pool, roll `rolls` times; each roll picks one entry
// weighted by `Entry.weight` and yields its results.
for pool in pools {
sample_pool(pool, rng, &mut results, conditions)?;
}
Ok(results)
}
}
fn sample_pool(
pool: &Pool,
rng: &mut impl Rng,
results: &mut SmallVec<[ItemStack; 2]>,
conditions: &Conditions,
) -> Result<(), SampleError> {
// `rolls` times, choose an entry at random based on weighting
// and yield its results.
// Only select from entries with their conditions satisfied
let entries = pool
.entries
.iter()
.filter(|entry| satisfies_conditions(entry.conditions.iter(), conditions, rng))
.collect::<SmallVec<[&Entry; 4]>>();
let weight_sum = entries.iter().map(|entry| entry.weight).sum::<u32>();
for _ in 0..pool.rolls.sample(rng) {
// We choose an integer at random from [0, weight_sum) and
// determine which entry has a cumulative weight matching
// the result. This algorithm is O(n) computationally, but this is unlikely
// to matter in practice, because loot tables rarely
// have more than one or two entries per pool.
let n = rng.gen_range(0, weight_sum);
let mut cumulative_weight = 0;
let entry = entries
.iter()
.find(|entry| {
if n >= cumulative_weight && n < cumulative_weight + entry.weight {
true
} else {
cumulative_weight += entry.weight;
false
}
})
.expect("entry finding algorithm incorrect");
sample_entry(entry, rng, results, conditions)?;
}
// apply functions to results
results
.iter_mut()
.try_for_each(|item| apply_functions(pool.functions.iter(), item, rng, conditions))?;
Ok(())
}
fn sample_entry(
entry: &Entry,
rng: &mut impl Rng,
results: &mut SmallVec<[ItemStack; 2]>,
conditions: &Conditions,
) -> Result<(), SampleError> {
let mut single;
let mut none = iter::empty();
let mut sampled;
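// These locals outlive the match so that each arm can return
// `&mut dyn Iterator` borrowing whichever one it filled in.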
let items: &mut dyn Iterator<Item = ItemStack> = match &entry.kind {
EntryKind::Empty => &mut none,
EntryKind::Item => {
let item = Item::from_identifier(&entry.name)
.ok_or_else(|| SampleError::InvalidItem(entry.name.to_string()))?;
single = iter::once(ItemStack::new(item, 1));
&mut single
}
EntryKind::Tag => &mut none, // TODO
EntryKind::LootTable => {
let table = loot_table(&entry.name)
.ok_or_else(|| SampleError::MissingLootTable(entry.name.to_string()))?;
sampled = table.sample(rng, conditions)?.into_iter();
&mut sampled
}
EntryKind::Group => {
// Return an iterator over the child entries
let mut temp = SmallVec::new();
let entries = entry
.children
.iter()
.filter(|entry| satisfies_conditions(entry.conditions.iter(), conditions, rng))
.collect::<SmallVec<[&Entry; 4]>>();
entries
.into_iter()
.try_for_each(|entry| sample_entry(entry, rng, &mut temp, conditions))?;
sampled = temp.into_iter();
&mut sampled
}
EntryKind::Alternatives => {
// Only sample first entry whose conditions are satisfied, if any
let mut temp = SmallVec::new();
if let Some(entry) = entry
.children
.iter()
.find(|entry| satisfies_conditions(entry.conditions.iter(), conditions, rng))
{
sample_entry(entry, rng, &mut temp, conditions)?;
}
sampled = temp.into_iter();
&mut sampled
}
EntryKind::Sequence => {
// Apply all entries until one does not satisfy conditions
let mut temp = SmallVec::new();
let entries = entry
.children
.iter()
.map(|entry| {
if satisfies_conditions(entry.conditions.iter(), conditions, rng) {
Some(entry)
} else {
None
}
})
.while_some()
.collect::<SmallVec<[&Entry; 4]>>();
entries
.into_iter()
.try_for_each(|entry| sample_entry(entry, rng, &mut temp, conditions))?;
sampled = temp.into_iter();
&mut sampled
}
EntryKind::Dynamic => {
let kind = if entry.name == "contents" || entry.name == "minecraft:contents" {
DynamicKind::Contents
} else {
DynamicKind::This
};
return Err(SampleError::IsDynamic(kind));
}
};
results.extend(
items
.map(|mut item| {
apply_functions(entry.functions.iter(), &mut item, rng, conditions)?;
Ok(item)
})
.filter_map(|item: Result<ItemStack, SampleError>| item.ok()),
);
Ok(())
}
fn apply_functions<'a>(
functions: impl Iterator<Item = &'a Function>,
item: &mut ItemStack,
rng: &mut impl Rng,
conditions: &Conditions,
) -> Result<(), SampleError> {
let functions = functions
.filter(|f| satisfies_conditions(f.conditions.iter(), conditions, rng))
.collect::<SmallVec<[&Function; 4]>>();
for function in functions {
match &function.kind {
FunctionKind::SetCount { count } => {
let count = count.sample(rng);
item.amount = count as u8;
}
FunctionKind::Unknown => (),
}
}
Ok(())
}
fn satisfies_conditions<'a>(
mut conditions: impl Iterator<Item = &'a Condition>,
input: &Conditions,
rng: &mut impl Rng,
) -> bool {
conditions.all(|condition| match condition {
Condition::MatchTool { predicate } => {
if let Some(item) = &predicate.item {
match &input.item {
Some(stack) => {
if stack.ty.identifier() != item {
return false;
}
}
None => return false,
}
}
// enchantments are not yet supported
if !predicate.enchantments.is_empty() {
return false;
}
true
}
Condition::RandomChance { chance } => {
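// Clamp the probability into [0.0, 1.0]; `gen_bool` panics outside that range.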
let chance = chance.max(0.0).min(1.0);
rng.gen_bool(chance)
}
Condition::Unknown => true,
})
}
#[cfg(test)]
mod tests {
use super::*;
use rand::rngs::mock::StepRng;
#[test]
fn store_deserializes_successfully() {
Lazy::force(&STORE);
}
#[test]
fn sample_dirt()
|
{
let table = loot_table("blocks/dirt").expect("missing loot table for dirt block");
let mut rng = StepRng::new(0, 1);
let items = table.sample(&mut rng, &Conditions::default()).unwrap();
assert_eq!(items.as_slice(), &[ItemStack::new(Item::Dirt, 1)]);
}
|
identifier_body
|
|
lib.rs
|
smallvec::SmallVec;
use std::iter;
use thiserror::Error;
/// The global loot table store, initialized at runtime from
/// the embedded loot table dump. (Generated by the build script)
static STORE: Lazy<AHashMap<InlinableString, LootTable>> = Lazy::new(|| {
static BYTES: &[u8] = include_bytes!(concat!(env!("OUT_DIR"), "/dump.json"));
serde_json::from_slice::<LootTableSet>(BYTES)
.expect("invalid loot table dump")
.0
.into_iter()
.map(|(k, v)| (k, LootTable(v)))
.collect()
});
/// Returns the loot table with the given ID, if it exists.
/// IDs are the same as those used in MC data packs. For example,
/// the loot table for stone blocks has ID "blocks/stone."
pub fn loot_table(id: &str) -> Option<&'static LootTable> {
STORE.get(id)
}
/// Condition context used to determine whether loot table conditions are satisfied.
#[derive(Debug, Default)]
pub struct Conditions {
/// The item used to break a block
pub item: Option<ItemStack>,
}
/// Opaque wrapper over `model::LootTable`.
#[derive(Debug)]
pub struct LootTable(model::LootTable);
/// Error returned when a loot table sample fails.
#[derive(Debug, Error)]
pub enum SampleError {
#[error("invalid item identifier {0}")]
InvalidItem(String),
#[error("missing loot table {0}")]
MissingLootTable(String),
/// Should be handled gracefully.
#[error("dynamic loot table {0:?}")]
IsDynamic(DynamicKind),
}
/// Indicates that the yielded item should
/// be computed based on a block entity.
#[derive(Debug)]
pub enum DynamicKind {
/// Drop e.g. contents of chest
Contents,
/// Drop the block itself (e.g. player head, banner)
This,
}
impl LootTable {
/// Samples a value from the table.
///
/// The returned set of item stacks is the result of sampling,
/// i.e. the loot which should be yielded. May return multiple
/// item stacks. No guarantees are made about the ordering
/// of the returned vector or the uniqueness of its entries.
pub fn sample(
&self,
rng: &mut impl Rng,
conditions: &Conditions,
) -> Result<SmallVec<[ItemStack; 2]>, SampleError> {
let mut results = SmallVec::new();
let pools = &self.0.pools;
// Algorithm: sample each pool.
// For each pool, roll `rolls` times; each roll picks one entry
// weighted by `Entry.weight` and yields its results.
for pool in pools {
sample_pool(pool, rng, &mut results, conditions)?;
}
Ok(results)
}
}
fn sample_pool(
pool: &Pool,
rng: &mut impl Rng,
results: &mut SmallVec<[ItemStack; 2]>,
conditions: &Conditions,
) -> Result<(), SampleError> {
// `rolls` times, choose an entry at random based on weighting
// and yield its results.
// Only select from entries with their conditions satisfied
let entries = pool
.entries
.iter()
.filter(|entry| satisfies_conditions(entry.conditions.iter(), conditions, rng))
.collect::<SmallVec<[&Entry; 4]>>();
let weight_sum = entries.iter().map(|entry| entry.weight).sum::<u32>();
for _ in 0..pool.rolls.sample(rng) {
// We choose an integer at random from [0, weight_sum) and
// determine which entry has a cumulative weight matching
// the result. This algorithm is O(n) computationally, but this is unlikely
// to matter in practice, because loot tables rarely
// have more than one or two entries per pool.
let n = rng.gen_range(0, weight_sum);
let mut cumulative_weight = 0;
let entry = entries
.iter()
.find(|entry| {
if n >= cumulative_weight && n < cumulative_weight + entry.weight {
true
} else
|
})
.expect("entry finding algorithm incorrect");
sample_entry(entry, rng, results, conditions)?;
}
// apply functions to results
results
.iter_mut()
.try_for_each(|item| apply_functions(pool.functions.iter(), item, rng, conditions))?;
Ok(())
}
fn sample_entry(
entry: &Entry,
rng: &mut impl Rng,
results: &mut SmallVec<[ItemStack; 2]>,
conditions: &Conditions,
) -> Result<(), SampleError> {
let mut single;
let mut none = iter::empty();
let mut sampled;
let items: &mut dyn Iterator<Item = ItemStack> = match &entry.kind {
EntryKind::Empty => &mut none,
EntryKind::Item => {
let item = Item::from_identifier(&entry.name)
.ok_or_else(|| SampleError::InvalidItem(entry.name.to_string()))?;
single = iter::once(ItemStack::new(item, 1));
&mut single
}
EntryKind::Tag => &mut none, // TODO
EntryKind::LootTable => {
let table = loot_table(&entry.name)
.ok_or_else(|| SampleError::MissingLootTable(entry.name.to_string()))?;
sampled = table.sample(rng, conditions)?.into_iter();
&mut sampled
}
EntryKind::Group => {
// Return an iterator over the child entries
let mut temp = SmallVec::new();
let entries = entry
.children
.iter()
.filter(|entry| satisfies_conditions(entry.conditions.iter(), conditions, rng))
.collect::<SmallVec<[&Entry; 4]>>();
entries
.into_iter()
.try_for_each(|entry| sample_entry(entry, rng, &mut temp, conditions))?;
sampled = temp.into_iter();
&mut sampled
}
EntryKind::Alternatives => {
// Only sample first entry whose conditions are satisfied, if any
let mut temp = SmallVec::new();
if let Some(entry) = entry
.children
.iter()
.find(|entry| satisfies_conditions(entry.conditions.iter(), conditions, rng))
{
sample_entry(entry, rng, &mut temp, conditions)?;
}
sampled = temp.into_iter();
&mut sampled
}
EntryKind::Sequence => {
// Apply all entries until one does not satisfy conditions
let mut temp = SmallVec::new();
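// `while_some()` cuts the iterator at the first entry whose conditions fail, so later entries are skipped.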
let entries = entry
.children
.iter()
.map(|entry| {
if satisfies_conditions(entry.conditions.iter(), conditions, rng) {
Some(entry)
} else {
None
}
})
.while_some()
.collect::<SmallVec<[&Entry; 4]>>();
entries
.into_iter()
.try_for_each(|entry| sample_entry(entry, rng, &mut temp, conditions))?;
sampled = temp.into_iter();
&mut sampled
}
EntryKind::Dynamic => {
let kind = if entry.name == "contents" || entry.name == "minecraft:contents" {
DynamicKind::Contents
} else {
DynamicKind::This
};
return Err(SampleError::IsDynamic(kind));
}
};
results.extend(
items
.map(|mut item| {
apply_functions(entry.functions.iter(), &mut item, rng, conditions)?;
Ok(item)
})
.filter_map(|item: Result<ItemStack, SampleError>| item.ok()),
);
Ok(())
}
fn apply_functions<'a>(
functions: impl Iterator<Item = &'a Function>,
item: &mut ItemStack,
rng: &mut impl Rng,
conditions: &Conditions,
) -> Result<(), SampleError> {
let functions = functions
.filter(|f| satisfies_conditions(f.conditions.iter(), conditions, rng))
.collect::<SmallVec<[&Function; 4]>>();
for function in functions {
match &function.kind {
FunctionKind::SetCount { count } => {
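// Overwrite the stack size with a freshly sampled count, truncated to u8.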
let count = count.sample(rng);
item.amount = count as u8;
}
FunctionKind::Unknown => (),
}
}
Ok(())
}
fn satisfies_conditions<'a>(
mut conditions: impl Iterator<Item = &'a Condition>,
input: &Conditions,
rng: &mut impl Rng,
) -> bool {
conditions.all(|condition| match condition {
Condition::MatchTool { predicate } => {
if let Some(item) = &predicate.item {
match &input.item {
Some(stack) => {
if stack.ty.identifier() != item {
return false;
}
}
None => return false,
}
}
// enchantments are not yet supported
if !predicate.enchantments.is_empty() {
return false;
}
true
}
Condition::RandomChance { chance } => {
let chance = chance.max(0.0).min(1.0);
rng.gen_bool(chance)
}
Condition::Unknown => true,
})
}
#[cfg(test)]
mod tests {
use super::*;
use rand::rngs::mock::StepRng;
#[test]
fn store_deserializes_successfully() {
Lazy::force(&STORE);
}
#[test]
fn
|
{
cumulative_weight += entry.weight;
false
}
|
conditional_block
|
lib.rs
|
use smallvec::SmallVec;
use std::iter;
use thiserror::Error;
/// The global loot table store, initialized at runtime from
/// the embedded loot table dump. (Generated by the build script)
static STORE: Lazy<AHashMap<InlinableString, LootTable>> = Lazy::new(|| {
static BYTES: &[u8] = include_bytes!(concat!(env!("OUT_DIR"), "/dump.json"));
serde_json::from_slice::<LootTableSet>(BYTES)
.expect("invalid loot table dump")
.0
.into_iter()
.map(|(k, v)| (k, LootTable(v)))
.collect()
});
/// Returns the loot table with the given ID, if it exists.
/// IDs are the same as those used in MC data packs. For example,
/// the loot table for stone blocks has ID "blocks/stone."
pub fn loot_table(id: &str) -> Option<&'static LootTable> {
STORE.get(id)
}
/// Condition context used to determine whether loot table conditions are satisfied.
#[derive(Debug, Default)]
pub struct Conditions {
/// The item used to break a block
pub item: Option<ItemStack>,
}
/// Opaque wrapper over `model::LootTable`.
#[derive(Debug)]
pub struct LootTable(model::LootTable);
/// Error returned when a loot table sample fails.
#[derive(Debug, Error)]
pub enum SampleError {
#[error("invalid item identifier {0}")]
InvalidItem(String),
#[error("missing loot table {0}")]
MissingLootTable(String),
/// Should be handled gracefully.
#[error("dynamic loot table {0:?}")]
IsDynamic(DynamicKind),
}
/// Indicates that the yielded item should
/// be computed based on a block entity.
#[derive(Debug)]
pub enum DynamicKind {
/// Drop e.g. contents of chest
Contents,
/// Drop the block itself (e.g. player head, banner)
This,
}
impl LootTable {
/// Samples a value from the table.
///
/// The returned set of item stacks is the result of sampling,
/// i.e. the loot which should be yielded. May return multiple
/// item stacks. No guarantees are made about the ordering
/// of the returned vector or the uniqueness of its entries.
pub fn sample(
&self,
rng: &mut impl Rng,
conditions: &Conditions,
) -> Result<SmallVec<[ItemStack; 2]>, SampleError> {
let mut results = SmallVec::new();
let pools = &self.0.pools;
// Algorithm: sample each pool.
// For each pool, roll `rolls` times; each roll picks one entry
// weighted by `Entry.weight` and yields its results.
for pool in pools {
sample_pool(pool, rng, &mut results, conditions)?;
}
Ok(results)
}
}
fn sample_pool(
pool: &Pool,
rng: &mut impl Rng,
results: &mut SmallVec<[ItemStack; 2]>,
conditions: &Conditions,
) -> Result<(), SampleError> {
// `rolls` times, choose an entry at random based on weighting
// and yield its results.
// Only select from entries with their conditions satisfied
let entries = pool
.entries
.iter()
.filter(|entry| satisfies_conditions(entry.conditions.iter(), conditions, rng))
.collect::<SmallVec<[&Entry; 4]>>();
let weight_sum = entries.iter().map(|entry| entry.weight).sum::<u32>();
|
// the result. This algorithm is O(n) computationally, but this is unlikely
// to matter in practice, because loot tables rarely
// have more than one or two entries per pool.
let n = rng.gen_range(0, weight_sum);
let mut cumulative_weight = 0;
let entry = entries
.iter()
.find(|entry| {
if n >= cumulative_weight && n < cumulative_weight + entry.weight {
true
} else {
cumulative_weight += entry.weight;
false
}
})
.expect("entry finding algorithm incorrect");
sample_entry(entry, rng, results, conditions)?;
}
// apply functions to results
results
.iter_mut()
.try_for_each(|item| apply_functions(pool.functions.iter(), item, rng, conditions))?;
Ok(())
}
fn sample_entry(
entry: &Entry,
rng: &mut impl Rng,
results: &mut SmallVec<[ItemStack; 2]>,
conditions: &Conditions,
) -> Result<(), SampleError> {
let mut single;
let mut none = iter::empty();
let mut sampled;
let items: &mut dyn Iterator<Item = ItemStack> = match &entry.kind {
EntryKind::Empty => &mut none,
EntryKind::Item => {
let item = Item::from_identifier(&entry.name)
.ok_or_else(|| SampleError::InvalidItem(entry.name.to_string()))?;
single = iter::once(ItemStack::new(item, 1));
&mut single
}
EntryKind::Tag => &mut none, // TODO
EntryKind::LootTable => {
let table = loot_table(&entry.name)
.ok_or_else(|| SampleError::MissingLootTable(entry.name.to_string()))?;
sampled = table.sample(rng, conditions)?.into_iter();
&mut sampled
}
EntryKind::Group => {
// Return an iterator over the child entries
let mut temp = SmallVec::new();
let entries = entry
.children
.iter()
.filter(|entry| satisfies_conditions(entry.conditions.iter(), conditions, rng))
.collect::<SmallVec<[&Entry; 4]>>();
entries
.into_iter()
.try_for_each(|entry| sample_entry(entry, rng, &mut temp, conditions))?;
sampled = temp.into_iter();
&mut sampled
}
EntryKind::Alternatives => {
// Only sample first entry whose conditions are satisfied, if any
let mut temp = SmallVec::new();
if let Some(entry) = entry
.children
.iter()
.find(|entry| satisfies_conditions(entry.conditions.iter(), conditions, rng))
{
sample_entry(entry, rng, &mut temp, conditions)?;
}
sampled = temp.into_iter();
&mut sampled
}
EntryKind::Sequence => {
// Apply all entries until one does not satisfy conditions
let mut temp = SmallVec::new();
let entries = entry
.children
.iter()
.map(|entry| {
if satisfies_conditions(entry.conditions.iter(), conditions, rng) {
Some(entry)
} else {
None
}
})
.while_some()
.collect::<SmallVec<[&Entry; 4]>>();
entries
.into_iter()
.try_for_each(|entry| sample_entry(entry, rng, &mut temp, conditions))?;
sampled = temp.into_iter();
&mut sampled
}
EntryKind::Dynamic => {
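// Dynamic drops depend on block entity state that is not available here,
// so report which kind was requested and let the caller resolve it.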
let kind = if entry.name == "contents" || entry.name == "minecraft:contents" {
DynamicKind::Contents
} else {
DynamicKind::This
};
return Err(SampleError::IsDynamic(kind));
}
};
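// Per-item errors from apply_functions are discarded by the filter_map below rather than aborting the whole entry.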
results.extend(
items
.map(|mut item| {
apply_functions(entry.functions.iter(), &mut item, rng, conditions)?;
Ok(item)
})
.filter_map(|item: Result<ItemStack, SampleError>| item.ok()),
);
Ok(())
}
fn apply_functions<'a>(
functions: impl Iterator<Item = &'a Function>,
item: &mut ItemStack,
rng: &mut impl Rng,
conditions: &Conditions,
) -> Result<(), SampleError> {
let functions = functions
.filter(|f| satisfies_conditions(f.conditions.iter(), conditions, rng))
.collect::<SmallVec<[&Function; 4]>>();
for function in functions {
match &function.kind {
FunctionKind::SetCount { count } => {
let count = count.sample(rng);
item.amount = count as u8;
}
FunctionKind::Unknown => (),
}
}
Ok(())
}
fn satisfies_conditions<'a>(
mut conditions: impl Iterator<Item = &'a Condition>,
input: &Conditions,
rng: &mut impl Rng,
) -> bool {
conditions.all(|condition| match condition {
Condition::MatchTool { predicate } => {
if let Some(item) = &predicate.item {
match &input.item {
Some(stack) => {
if stack.ty.identifier() != item {
return false;
}
}
None => return false,
}
}
// enchantments are not yet supported
if !predicate.enchantments.is_empty() {
return false;
}
true
}
Condition::RandomChance { chance } => {
let chance = chance.max(0.0).min(1.0);
rng.gen_bool(chance)
}
Condition::Unknown => true,
})
}
#[cfg(test)]
mod tests {
use super::*;
use rand::rngs::mock::StepRng;
#[test]
fn store_deserializes_successfully() {
Lazy::force(&STORE);
}
#[test]
fn sample_d
|
for _ in 0..pool.rolls.sample(rng) {
// We choose an integer at random from [0, weight_sum) and
// determine which entry has a cumulative weight matching
|
random_line_split
|
lib.rs
|
vec::SmallVec;
use std::iter;
use thiserror::Error;
/// The global loot table store, initialized at runtime from
/// the embedded loot table dump. (Generated by the build script)
static STORE: Lazy<AHashMap<InlinableString, LootTable>> = Lazy::new(|| {
static BYTES: &[u8] = include_bytes!(concat!(env!("OUT_DIR"), "/dump.json"));
serde_json::from_slice::<LootTableSet>(BYTES)
.expect("invalid loot table dump")
.0
.into_iter()
.map(|(k, v)| (k, LootTable(v)))
.collect()
});
/// Returns the loot table with the given ID, if it exists.
/// IDs are the same as those used in MC data packs. For example,
/// the loot table for stone blocks has ID "blocks/stone."
pub fn loot_table(id: &str) -> Option<&'static LootTable> {
STORE.get(id)
}
/// Condition context used to determine whether loot table conditions are satisfied.
#[derive(Debug, Default)]
pub struct Conditions {
/// The item used to break a block
pub item: Option<ItemStack>,
}
/// Opaque wrapper over `model::LootTable`.
#[derive(Debug)]
pub struct LootTable(model::LootTable);
/// Error returned when a loot table sample fails.
#[derive(Debug, Error)]
pub enum SampleError {
#[error("invalid item identifier {0}")]
InvalidItem(String),
#[error("missing loot table {0}")]
MissingLootTable(String),
/// Should be handled gracefully.
#[error("dynamic loot table {0:?}")]
IsDynamic(DynamicKind),
}
/// Indicates that the yielded item should
/// be computed based on a block entity.
#[derive(Debug)]
pub enum DynamicKind {
/// Drop e.g. contents of chest
Contents,
/// Drop the block itself (e.g. player head, banner)
This,
}
impl LootTable {
/// Samples a value from the table.
///
/// The returned set of item stacks is the result of sampling,
/// i.e. the loot which should be yielded. May return multiple
/// item stacks. No guarantees are made about the ordering
/// of the returned vector or the uniqueness of its entries.
pub fn sample(
&self,
rng: &mut impl Rng,
conditions: &Conditions,
) -> Result<SmallVec<[ItemStack; 2]>, SampleError> {
let mut results = SmallVec::new();
let pools = &self.0.pools;
// Algorithm: sample each pool.
// For each pool, roll `rolls` times; each roll picks one entry
// weighted by `Entry.weight` and yields its results.
for pool in pools {
sample_pool(pool, rng, &mut results, conditions)?;
}
Ok(results)
}
}
fn sample_pool(
pool: &Pool,
rng: &mut impl Rng,
results: &mut SmallVec<[ItemStack; 2]>,
conditions: &Conditions,
) -> Result<(), SampleError> {
// `rolls` times, choose an entry at random based on weighting
// and yield its results.
// Only select from entries with their conditions satisfied
let entries = pool
.entries
.iter()
.filter(|entry| satisfies_conditions(entry.conditions.iter(), conditions, rng))
.collect::<SmallVec<[&Entry; 4]>>();
let weight_sum = entries.iter().map(|entry| entry.weight).sum::<u32>();
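// Note: `gen_range(0, weight_sum)` panics on an empty range, so this assumes
// at least one satisfied entry with positive weight whenever a roll occurs.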
for _ in 0..pool.rolls.sample(rng) {
// We choose an integer at random from [0, weight_sum) and
// determine which entry has a cumulative weight matching
// the result. This algorithm is O(n) computationally, but this is unlikely
// to matter in practice, because loot tables rarely
// have more than one or two entries per pool.
let n = rng.gen_range(0, weight_sum);
let mut cumulative_weight = 0;
let entry = entries
.iter()
.find(|entry| {
if n >= cumulative_weight && n < cumulative_weight + entry.weight {
true
} else {
cumulative_weight += entry.weight;
false
}
})
.expect("entry finding algorithm incorrect");
sample_entry(entry, rng, results, conditions)?;
}
// apply functions to results
results
.iter_mut()
.try_for_each(|item| apply_functions(pool.functions.iter(), item, rng, conditions))?;
Ok(())
}
fn sample_entry(
entry: &Entry,
rng: &mut impl Rng,
results: &mut SmallVec<[ItemStack; 2]>,
conditions: &Conditions,
) -> Result<(), SampleError> {
let mut single;
let mut none = iter::empty();
let mut sampled;
let items: &mut dyn Iterator<Item = ItemStack> = match &entry.kind {
EntryKind::Empty => &mut none,
EntryKind::Item => {
let item = Item::from_identifier(&entry.name)
.ok_or_else(|| SampleError::InvalidItem(entry.name.to_string()))?;
single = iter::once(ItemStack::new(item, 1));
&mut single
}
EntryKind::Tag => &mut none, // TODO
EntryKind::LootTable => {
let table = loot_table(&entry.name)
.ok_or_else(|| SampleError::MissingLootTable(entry.name.to_string()))?;
sampled = table.sample(rng, conditions)?.into_iter();
&mut sampled
}
EntryKind::Group => {
// Return an iterator over the child entries
let mut temp = SmallVec::new();
let entries = entry
.children
.iter()
.filter(|entry| satisfies_conditions(entry.conditions.iter(), conditions, rng))
.collect::<SmallVec<[&Entry; 4]>>();
entries
.into_iter()
.try_for_each(|entry| sample_entry(entry, rng, &mut temp, conditions))?;
sampled = temp.into_iter();
&mut sampled
}
EntryKind::Alternatives => {
// Only sample first entry whose conditions are satisfied, if any
let mut temp = SmallVec::new();
if let Some(entry) = entry
.children
.iter()
.find(|entry| satisfies_conditions(entry.conditions.iter(), conditions, rng))
{
sample_entry(entry, rng, &mut temp, conditions)?;
}
sampled = temp.into_iter();
&mut sampled
}
EntryKind::Sequence => {
// Apply all entries until one does not satisfy conditions
let mut temp = SmallVec::new();
let entries = entry
.children
.iter()
.map(|entry| {
if satisfies_conditions(entry.conditions.iter(), conditions, rng) {
Some(entry)
} else {
None
}
})
.while_some()
.collect::<SmallVec<[&Entry; 4]>>();
entries
.into_iter()
.try_for_each(|entry| sample_entry(entry, rng, &mut temp, conditions))?;
sampled = temp.into_iter();
&mut sampled
}
EntryKind::Dynamic => {
let kind = if entry.name == "contents" || entry.name == "minecraft:contents" {
DynamicKind::Contents
} else {
DynamicKind::This
};
return Err(SampleError::IsDynamic(kind));
}
};
results.extend(
items
.map(|mut item| {
apply_functions(entry.functions.iter(), &mut item, rng, conditions)?;
Ok(item)
})
.filter_map(|item: Result<ItemStack, SampleError>| item.ok()),
);
Ok(())
}
fn apply_functions<'a>(
functions: impl Iterator<Item = &'a Function>,
item: &mut ItemStack,
rng: &mut impl Rng,
conditions: &Conditions,
) -> Result<(), SampleError> {
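// Each function carries its own conditions; unsatisfied functions are filtered out before being applied.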
let functions = functions
.filter(|f| satisfies_conditions(f.conditions.iter(), conditions, rng))
.collect::<SmallVec<[&Function; 4]>>();
for function in functions {
match &function.kind {
FunctionKind::SetCount { count } => {
let count = count.sample(rng);
item.amount = count as u8;
}
FunctionKind::Unknown => (),
}
}
Ok(())
}
fn
|
<'a>(
mut conditions: impl Iterator<Item = &'a Condition>,
input: &Conditions,
rng: &mut impl Rng,
) -> bool {
conditions.all(|condition| match condition {
Condition::MatchTool { predicate } => {
if let Some(item) = &predicate.item {
match &input.item {
Some(stack) => {
if stack.ty.identifier() != item {
return false;
}
}
None => return false,
}
}
// enchantments are not yet supported
if !predicate.enchantments.is_empty() {
return false;
}
true
}
Condition::RandomChance { chance } => {
let chance = chance.max(0.0).min(1.0);
rng.gen_bool(chance)
}
Condition::Unknown => true,
})
}
#[cfg(test)]
mod tests {
use super::*;
use rand::rngs::mock::StepRng;
#[test]
fn store_deserializes_successfully() {
Lazy::force(&STORE);
}
#[test]
fn
|
satisfies_conditions
|
identifier_name
|
memory_index.rs
|
claims: BTreeMap<Property, ID>,
}
impl Permanode {
fn index_claim(&mut self, claim: &Dict, permanode_id: &ID, claim_id: &ID) {
// We require the claim to have the sort key
let sort_value: &Property = match claim.get(self.sort.field()) {
Some(ref prop) => prop,
None => {
debug!("Claim {} is invalid for permanode {}: \
missing sort key",
claim_id, permanode_id);
return;
}
};
// Currently, no validation is done; every claim is accepted
// In the future, we'd have ways of checking a claim, such as public
// key signatures (permanode has key, claim has signature)
self.claims.insert(sort_value.clone(), claim_id.clone());
match self.nodetype {
PermanodeType::Set => {
// Keep the whole set of values
// TODO: handle set deletion claims
}
PermanodeType::Single => {
// Keep one value, the latest by sorting order
if self.claims.len() > 1 {
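// Swap the map out, keep only the entry that sorts last (ascending)
// or first (descending), and reinsert just that one.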
let mut map = BTreeMap::new();
swap(&mut self.claims, &mut map);
let mut map = map.into_iter();
let (k, v) = match self.sort {
Sort::Ascending(_) => map.next_back().unwrap(),
Sort::Descending(_) => map.next().unwrap(),
};
self.claims.insert(k, v);
}
}
}
}
}
fn insert_into_multimap<K: Clone + Eq + ::std::hash::Hash,
V: Eq + ::std::hash::Hash>(
multimap: &mut HashMap<K, HashSet<V>>,
key: &K,
value: V)
{
if let Some(set) = multimap.get_mut(key) {
set.insert(value);
return;
}
let mut set = HashSet::new();
set.insert(value);
|
/// Directory where objects are stored on disk.
path: PathBuf,
/// All objects, indexed by their ID.
objects: HashMap<ID, Object>,
/// Back references: value is all references pointing to the key.
backlinks: HashMap<ID, HashSet<(Backkey, ID)>>,
/// All claim objects, whether they are valid for permanode or not.
claims: HashMap<ID, HashSet<ID>>,
/// All permanodes, with valid associated claims.
permanodes: HashMap<ID, Permanode>,
root: ID,
log: Option<ID>,
policy: Box<dyn Policy>,
}
impl MemoryIndex {
/// Reads all the objects from a directory into memory.
pub fn open<P: AsRef<Path>>(path: P, root: ID)
-> errors::Result<MemoryIndex>
{
let path = path.as_ref();
let mut index = MemoryIndex {
path: path.to_path_buf(),
objects: HashMap::new(),
backlinks: HashMap::new(),
claims: HashMap::new(),
permanodes: HashMap::new(),
root: root.clone(),
log: None,
policy: Box::new(KeepPolicy::new()),
};
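// Objects are stored two levels deep: the first four characters of the hash name the subdirectory (see write_object).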
let dirlist = path.read_dir()
.map_err(|e| ("Error listing objects directory", e))?;
for first in dirlist {
let first = first
.map_err(|e| ("Error listing objects directory", e))?;
let dirlist = first.path().read_dir()
.map_err(|e| ("Error listing objects subdirectory", e))?;
for second in dirlist {
let second = second
.map_err(|e| ("Error listing objects subdirectory", e))?;
let filename = second.path();
// Read object
let fp = File::open(filename)
.map_err(|e| ("Error opening object", e))?;
let object = match serialize::deserialize(fp) {
Err(e) => {
let mut path: PathBuf = first.file_name().into();
path.push(second.file_name());
error!("Error deserializing object: {:?}", path);
return Err(("Error deserializing object", e).into());
}
Ok(o) => o,
};
index.insert_object_in_index(object);
}
}
// Parse root config
index.log = {
let config = index.get_object(&root)?
.ok_or(Error::CorruptedStore("Missing root object"))?;
let config = match config.data {
ObjectData::Dict(ref dict) => dict,
_ => return Err(Error::CorruptedStore(
"Root object is not a dict")),
};
match config.get("log") {
Some(&Property::Reference(ref id)) => {
let log_obj = index.get_object(id)?
.ok_or(Error::CorruptedStore("Missing log object"))?;
match log_obj.data {
ObjectData::Dict(_) => {
debug!("Activated log: {}", id);
}
_ => {
return Err(Error::CorruptedStore(
"Log is not a permanode"));
}
}
Some(id.clone())
}
Some(_) => return Err(Error::CorruptedStore(
"Log is not a reference")),
None => None,
}
};
Ok(index)
}
pub fn create<'a, P: AsRef<Path>, I: Iterator<Item=&'a Object>>(
path: P, objects: I)
-> io::Result<()>
{
for object in objects {
MemoryIndex::write_object(path.as_ref(), object)?;
}
Ok(())
}
fn write_object(dir: &Path, object: &Object) -> io::Result<()> {
let hashstr = object.id.str();
let mut path = dir.join(&hashstr[..4]);
if !path.exists() {
fs::create_dir(&path)?;
}
path.push(&hashstr[4..]);
let mut fp = OpenOptions::new()
.write(true)
.create_new(true)
.open(&path)?;
serialize::serialize(&mut fp, object)
}
/// Utility to insert a new object in the store.
///
/// Insert the object, indexing the back references, and parsing the object
/// to handle permanodes.
fn insert_object_in_index(&mut self, object: Object) {
assert!(!self.objects.contains_key(&object.id));
{
// Record reverse references
// This is run on all values of type reference on the object,
// whether it is a list or a dict
let mut insert = |target: &ID, key: Backkey, source: ID| {
if log_enabled!(Level::Debug) {
match key {
Backkey::Key(ref k) => {
debug!("Reference {} -> {} ({})",
source, target, k);
}
Backkey::Index(i) => {
debug!("Reference {} -> {} ({})",
source, target, i);
}
}
}
// Add backlink
insert_into_multimap(&mut self.backlinks,
target, (key, source));
};
// Go over the object, calling insert() above on all its values of
// type reference
match object.data {
ObjectData::Dict(ref dict) => {
for (k, v) in dict {
if let Property::Reference(ref id) = *v {
insert(id,
Backkey::Key(k.clone()),
object.id.clone());
}
}
}
ObjectData::List(ref list) => {
for (k, v) in list.into_iter().enumerate() {
if let Property::Reference(ref id) = *v {
insert(id,
Backkey::Index(k),
object.id.clone());
}
}
}
}
}
// Check for special objects
if let ObjectData::Dict(ref dict) = object.data {
match dict.get("dhstore_kind") {
Some(&Property::String(ref kind)) => match kind as &str {
"permanode" => {
info!("Found permanode: {}", object.id);
self.index_permanode(&object);
}
"claim" => {
info!("Found claim: {}", object.id);
self.index_claim(&object);
}
kind => debug!("Found unknown kind {:?}", kind),
},
Some(_) => {
info!("Object has dhstore_kind with non-string value");
}
None => {}
}
}
// Now inserts the object
self.objects.insert(object.id.clone(), object);
}
fn index_permanode(&mut self, permanode: &Object) {
// Validate the permanode
let id = &permanode.id;
let permanode = match permanode.data {
ObjectData::Dict(ref d) => d,
ObjectData::List(_) => {
panic!("Invalid permanode {}: not a dict", id);
}
};
match permanode.get("random") {
Some(&Property::String(ref s)) => {
if s.len() != HASH_STR_SIZE {
warn!("Invalid permanode {}: invalid random size {}",
id, s.len());
return;
}
}
_ => {
warn!("Invalid permanode {}: missing random", id);
return;
}
}
let sort = match permanode.get("sort") {
Some(&Property::String(ref s)) => match s.parse() {
Ok(f) =>
|
multimap.insert(key.clone(), set);
}
/// The in-memory index, which loads all objects from disk on startup.
pub struct MemoryIndex {
|
random_line_split
|
memory_index.rs
|
claims: BTreeMap<Property, ID>,
}
impl Permanode {
fn index_claim(&mut self, claim: &Dict, permanode_id: &ID, claim_id: &ID) {
// We require the claim to have the sort key
let sort_value: &Property = match claim.get(self.sort.field()) {
Some(ref prop) => prop,
None => {
debug!("Claim {} is invalid for permanode {}: \
missing sort key",
claim_id, permanode_id);
return;
}
};
// Currently, no validation is done; every claim is accepted
// In the future, we'd have ways of checking a claim, such as public
// key signatures (permanode has key, claim has signature)
self.claims.insert(sort_value.clone(), claim_id.clone());
match self.nodetype {
PermanodeType::Set => {
// Keep the whole set of values
// TODO: handle set deletion claims
}
PermanodeType::Single => {
// Keep one value, the latest by sorting order
if self.claims.len() > 1 {
let mut map = BTreeMap::new();
swap(&mut self.claims, &mut map);
let mut map = map.into_iter();
let (k, v) = match self.sort {
Sort::Ascending(_) => map.next_back().unwrap(),
Sort::Descending(_) => map.next().unwrap(),
};
self.claims.insert(k, v);
}
}
}
}
}
fn
|
<K: Clone + Eq + ::std::hash::Hash,
V: Eq + ::std::hash::Hash>(
multimap: &mut HashMap<K, HashSet<V>>,
key: &K,
value: V)
{
if let Some(set) = multimap.get_mut(key) {
set.insert(value);
return;
}
let mut set = HashSet::new();
set.insert(value);
multimap.insert(key.clone(), set);
}
/// The in-memory index, which loads all objects from disk on startup.
pub struct MemoryIndex {
/// Directory where objects are stored on disk.
path: PathBuf,
/// All objects, indexed by their ID.
objects: HashMap<ID, Object>,
/// Back references: value is all references pointing to the key.
backlinks: HashMap<ID, HashSet<(Backkey, ID)>>,
/// All claim objects, whether they are valid for permanode or not.
claims: HashMap<ID, HashSet<ID>>,
/// All permanodes, with valid associated claims.
permanodes: HashMap<ID, Permanode>,
root: ID,
log: Option<ID>,
policy: Box<dyn Policy>,
}
impl MemoryIndex {
/// Reads all the objects from a directory into memory.
pub fn open<P: AsRef<Path>>(path: P, root: ID)
-> errors::Result<MemoryIndex>
{
let path = path.as_ref();
let mut index = MemoryIndex {
path: path.to_path_buf(),
objects: HashMap::new(),
backlinks: HashMap::new(),
claims: HashMap::new(),
permanodes: HashMap::new(),
root: root.clone(),
log: None,
policy: Box::new(KeepPolicy::new()),
};
let dirlist = path.read_dir()
.map_err(|e| ("Error listing objects directory", e))?;
for first in dirlist {
let first = first
.map_err(|e| ("Error listing objects directory", e))?;
let dirlist = first.path().read_dir()
.map_err(|e| ("Error listing objects subdirectory", e))?;
for second in dirlist {
let second = second
.map_err(|e| ("Error listing objects subdirectory", e))?;
let filename = second.path();
// Read object
let fp = File::open(filename)
.map_err(|e| ("Error opening object", e))?;
let object = match serialize::deserialize(fp) {
Err(e) => {
let mut path: PathBuf = first.file_name().into();
path.push(second.file_name());
error!("Error deserializing object: {:?}", path);
return Err(("Error deserializing object", e).into());
}
Ok(o) => o,
};
index.insert_object_in_index(object);
}
}
// Parse root config
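// The root object must be a dict; its optional "log" entry must reference a dict (a permanode).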
index.log = {
let config = index.get_object(&root)?
.ok_or(Error::CorruptedStore("Missing root object"))?;
let config = match config.data {
ObjectData::Dict(ref dict) => dict,
_ => return Err(Error::CorruptedStore(
"Root object is not a dict")),
};
match config.get("log") {
Some(&Property::Reference(ref id)) => {
let log_obj = index.get_object(id)?
.ok_or(Error::CorruptedStore("Missing log object"))?;
match log_obj.data {
ObjectData::Dict(_) => {
debug!("Activated log: {}", id);
}
_ => {
return Err(Error::CorruptedStore(
"Log is not a permanode"));
}
}
Some(id.clone())
}
Some(_) => return Err(Error::CorruptedStore(
"Log is not a reference")),
None => None,
}
};
Ok(index)
}
pub fn create<'a, P: AsRef<Path>, I: Iterator<Item=&'a Object>>(
path: P, objects: I)
-> io::Result<()>
{
for object in objects {
MemoryIndex::write_object(path.as_ref(), object)?;
}
Ok(())
}
fn write_object(dir: &Path, object: &Object) -> io::Result<()> {
let hashstr = object.id.str();
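// Shard objects by the first four characters of their hash, keeping any single directory small.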
let mut path = dir.join(&hashstr[..4]);
if !path.exists() {
fs::create_dir(&path)?;
}
path.push(&hashstr[4..]);
let mut fp = OpenOptions::new()
.write(true)
.create_new(true)
.open(&path)?;
serialize::serialize(&mut fp, object)
}
/// Utility to insert a new object in the store.
///
/// Insert the object, indexing the back references, and parsing the object
/// to handle permanodes.
fn insert_object_in_index(&mut self, object: Object) {
assert!(!self.objects.contains_key(&object.id));
{
// Record reverse references
// This is run on all values of type reference on the object,
// whether it is a list or a dict
let mut insert = |target: &ID, key: Backkey, source: ID| {
if log_enabled!(Level::Debug) {
match key {
Backkey::Key(ref k) => {
debug!("Reference {} -> {} ({})",
source, target, k);
}
Backkey::Index(i) => {
debug!("Reference {} -> {} ({})",
source, target, i);
}
}
}
// Add backlink
insert_into_multimap(&mut self.backlinks,
target, (key, source));
};
// Go over the object, calling insert() above on all its values of
// type reference
match object.data {
ObjectData::Dict(ref dict) => {
for (k, v) in dict {
if let Property::Reference(ref id) = *v {
insert(id,
Backkey::Key(k.clone()),
object.id.clone());
}
}
}
ObjectData::List(ref list) => {
for (k, v) in list.into_iter().enumerate() {
if let Property::Reference(ref id) = *v {
insert(id,
Backkey::Index(k),
object.id.clone());
}
}
}
}
}
// Check for special objects
if let ObjectData::Dict(ref dict) = object.data {
match dict.get("dhstore_kind") {
Some(&Property::String(ref kind)) => match kind as &str {
"permanode" => {
info!("Found permanode: {}", object.id);
self.index_permanode(&object);
}
"claim" => {
info!("Found claim: {}", object.id);
self.index_claim(&object);
}
kind => debug!("Found unknown kind {:?}", kind),
},
Some(_) => {
info!("Object has dhstore_kind with non-string value");
}
None => {}
}
}
// Now inserts the object
self.objects.insert(object.id.clone(), object);
}
fn index_permanode(&mut self, permanode: &Object) {
// Validate the permanode
let id = &permanode.id;
let permanode = match permanode.data {
ObjectData::Dict(ref d) => d,
ObjectData::List(_) => {
panic!("Invalid permanode {}: not a dict", id);
}
};
match permanode.get("random") {
Some(&Property::String(ref s)) => {
if s.len() != HASH_STR_SIZE {
warn!("Invalid permanode {}: invalid random size {}",
id, s.len());
return;
}
}
_ => {
warn!("Invalid permanode {}: missing random", id);
return;
}
}
let sort = match permanode.get("sort") {
Some(&Property::String(ref s)) => match s.parse() {
Ok(f)
|
insert_into_multimap
|
identifier_name
|
memory_index.rs
|
claims: BTreeMap<Property, ID>,
}
impl Permanode {
fn index_claim(&mut self, claim: &Dict, permanode_id: &ID, claim_id: &ID) {
// We require the claim to have the sort key
let sort_value: &Property = match claim.get(self.sort.field()) {
Some(ref prop) => prop,
None => {
debug!("Claim {} is invalid for permanode {}: \
missing sort key",
claim_id, permanode_id);
return;
}
};
// Currently, no validation is done; every claim is accepted
// In the future, we'd have ways of checking a claim, such as public
// key signatures (permanode has key, claim has signature)
self.claims.insert(sort_value.clone(), claim_id.clone());
match self.nodetype {
PermanodeType::Set => {
// Keep the whole set of values
// TODO: handle set deletion claims
}
PermanodeType::Single => {
// Keep one value, the latest by sorting order
if self.claims.len() > 1
|
}
}
}
}
fn insert_into_multimap<K: Clone + Eq + ::std::hash::Hash,
V: Eq + ::std::hash::Hash>(
multimap: &mut HashMap<K, HashSet<V>>,
key: &K,
value: V)
{
if let Some(set) = multimap.get_mut(key) {
set.insert(value);
return;
}
let mut set = HashSet::new();
set.insert(value);
multimap.insert(key.clone(), set);
}
/// The in-memory index, which loads all objects from disk on startup.
pub struct MemoryIndex {
/// Directory where objects are stored on disk.
path: PathBuf,
/// All objects, indexed by their ID.
objects: HashMap<ID, Object>,
/// Back references: value is all references pointing to the key.
backlinks: HashMap<ID, HashSet<(Backkey, ID)>>,
/// All claim objects, whether they are valid for permanode or not.
claims: HashMap<ID, HashSet<ID>>,
/// All permanodes, with valid associated claims.
permanodes: HashMap<ID, Permanode>,
root: ID,
log: Option<ID>,
policy: Box<dyn Policy>,
}
impl MemoryIndex {
/// Reads all the objects from a directory into memory.
pub fn open<P: AsRef<Path>>(path: P, root: ID)
-> errors::Result<MemoryIndex>
{
let path = path.as_ref();
let mut index = MemoryIndex {
path: path.to_path_buf(),
objects: HashMap::new(),
backlinks: HashMap::new(),
claims: HashMap::new(),
permanodes: HashMap::new(),
root: root.clone(),
log: None,
policy: Box::new(KeepPolicy::new()),
};
let dirlist = path.read_dir()
.map_err(|e| ("Error listing objects directory", e))?;
for first in dirlist {
let first = first
.map_err(|e| ("Error listing objects directory", e))?;
let dirlist = first.path().read_dir()
.map_err(|e| ("Error listing objects subdirectory", e))?;
for second in dirlist {
let second = second
.map_err(|e| ("Error listing objects subdirectory", e))?;
let filename = second.path();
// Read object
let fp = File::open(filename)
.map_err(|e| ("Error opening object", e))?;
let object = match serialize::deserialize(fp) {
Err(e) => {
let mut path: PathBuf = first.file_name().into();
path.push(second.file_name());
error!("Error deserializing object: {:?}", path);
return Err(("Error deserializing object", e).into());
}
Ok(o) => o,
};
index.insert_object_in_index(object);
}
}
// Parse root config
index.log = {
let config = index.get_object(&root)?
.ok_or(Error::CorruptedStore("Missing root object"))?;
let config = match config.data {
ObjectData::Dict(ref dict) => dict,
_ => return Err(Error::CorruptedStore(
"Root object is not a dict")),
};
match config.get("log") {
Some(&Property::Reference(ref id)) => {
let log_obj = index.get_object(id)?
.ok_or(Error::CorruptedStore("Missing log object"))?;
match log_obj.data {
ObjectData::Dict(_) => {
debug!("Activated log: {}", id);
}
_ => {
return Err(Error::CorruptedStore(
"Log is not a permanode"));
}
}
Some(id.clone())
}
Some(_) => return Err(Error::CorruptedStore(
"Log is not a reference")),
None => None,
}
};
Ok(index)
}
pub fn create<'a, P: AsRef<Path>, I: Iterator<Item=&'a Object>>(
path: P, objects: I)
-> io::Result<()>
{
for object in objects {
MemoryIndex::write_object(path.as_ref(), object)?;
}
Ok(())
}
fn write_object(dir: &Path, object: &Object) -> io::Result<()> {
let hashstr = object.id.str();
let mut path = dir.join(&hashstr[..4]);
if !path.exists() {
fs::create_dir(&path)?;
}
path.push(&hashstr[4..]);
let mut fp = OpenOptions::new()
.write(true)
.create_new(true)
.open(&path)?;
serialize::serialize(&mut fp, object)
}
/// Utility to insert a new object in the store.
///
/// Insert the object, indexing the back references, and parsing the object
/// to handle permanodes.
fn insert_object_in_index(&mut self, object: Object) {
assert!(!self.objects.contains_key(&object.id));
{
// Record reverse references
// This is run on all values of type reference on the object,
// whether it is a list or a dict
let mut insert = |target: &ID, key: Backkey, source: ID| {
if log_enabled!(Level::Debug) {
match key {
Backkey::Key(ref k) => {
debug!("Reference {} -> {} ({})",
source, target, k);
}
Backkey::Index(i) => {
debug!("Reference {} -> {} ({})",
source, target, i);
}
}
}
// Add backlink
insert_into_multimap(&mut self.backlinks,
target, (key, source));
};
// Go over the object, calling insert() above on all its values of
// type reference
match object.data {
ObjectData::Dict(ref dict) => {
for (k, v) in dict {
if let Property::Reference(ref id) = *v {
insert(id,
Backkey::Key(k.clone()),
object.id.clone());
}
}
}
ObjectData::List(ref list) => {
for (k, v) in list.into_iter().enumerate() {
if let Property::Reference(ref id) = *v {
insert(id,
Backkey::Index(k),
object.id.clone());
}
}
}
}
}
// Check for special objects
if let ObjectData::Dict(ref dict) = object.data {
match dict.get("dhstore_kind") {
Some(&Property::String(ref kind)) => match kind as &str {
"permanode" => {
info!("Found permanode: {}", object.id);
self.index_permanode(&object);
}
"claim" => {
info!("Found claim: {}", object.id);
self.index_claim(&object);
}
kind => debug!("Found unknown kind {:?}", kind),
},
Some(_) => {
info!("Object has dhstore_kind with non-string value");
}
None => {}
}
}
// Now inserts the object
self.objects.insert(object.id.clone(), object);
}
fn index_permanode(&mut self, permanode: &Object) {
// Validate the permanode
let id = &permanode.id;
let permanode = match permanode.data {
ObjectData::Dict(ref d) => d,
ObjectData::List(_) => {
panic!("Invalid permanode {}: not a dict", id);
}
};
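// A valid permanode carries a "random" string of exactly HASH_STR_SIZE characters.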
match permanode.get("random") {
Some(&Property::String(ref s)) => {
if s.len() != HASH_STR_SIZE {
warn!("Invalid permanode {}: invalid random size {}",
id, s.len());
return;
}
}
_ => {
warn!("Invalid permanode {}: missing random", id);
return;
}
}
let sort = match permanode.get("sort") {
Some(&Property::String(ref s)) => match s.parse() {
Ok(f
|
{
let mut map = BTreeMap::new();
swap(&mut self.claims, &mut map);
let mut map = map.into_iter();
let (k, v) = match self.sort {
Sort::Ascending(_) => map.next_back().unwrap(),
Sort::Descending(_) => map.next().unwrap(),
};
self.claims.insert(k, v);
}
|
conditional_block
|
policy.go
|
nil {
return err
}
}
for j := range policyDoc.Ingress {
for i := range policyDoc.Ingress[j].Rules {
rule := &policyDoc.Ingress[j].Rules[i]
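// Normalize the protocol name to upper case so later comparisons are case-insensitive.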
rule.Protocol = strings.ToUpper(rule.Protocol)
}
for i := range policyDoc.Ingress[j].Peers {
endpoint := &policyDoc.Ingress[j].Peers[i]
err = policy.augmentEndpoint(endpoint)
if err != nil {
return err
}
}
}
return nil
}
// distributePolicy distributes policy to all agents.
// TODO how should error handling work here really?
func (policy *PolicySvc) distributePolicy(policyDoc *common.Policy) error {
hosts, err := policy.client.ListHosts()
if err != nil {
return err
}
errStr := make([]string, 0)
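// Collect per-host errors and keep going, so one unreachable agent does not block the rest.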
for _, host := range hosts {
// TODO make scheme configurable
url := fmt.Sprintf("http://%s:%d/policies", host.Ip, host.AgentPort)
log.Printf("Sending policy %s to agent at %s", policyDoc.Name, url)
result := make(map[string]interface{})
err = policy.client.Post(url, policyDoc, &result)
log.Printf("Agent at %s returned %v", host.Ip, result)
if err != nil {
errStr = append(errStr, fmt.Sprintf("Error applying policy %d to host %s: %v. ", policyDoc.ID, host.Ip, err))
}
}
if len(errStr) > 0 {
return common.NewError500(errStr)
}
return nil
}
func (policy *PolicySvc) getPolicy(input interface{}, ctx common.RestContext) (interface{}, error) {
idStr := ctx.PathVariables["policyID"]
id, err := strconv.ParseUint(idStr, 10, 64)
if err != nil {
return nil, common.NewError404("policy", idStr)
}
policyDoc, err := policy.store.getPolicy(id, false)
log.Printf("Found policy for ID %d: %s (%v)", id, policyDoc, err)
return policyDoc, err
}
func (policy *PolicySvc) deletePolicyHandler(input interface{}, ctx common.RestContext) (interface{}, error) {
idStr := strings.TrimSpace(ctx.PathVariables["policyID"])
if idStr == "" {
if input == nil {
return nil, common.NewError400("Request must either be to /policies/{policyID} or have a body.")
}
policyDoc := input.(*common.Policy)
err := policyDoc.Validate()
if err != nil {
return nil, err
}
log.Printf("IN deletePolicyHandler with %v", policyDoc)
id, err := policy.store.lookupPolicy(policyDoc.ExternalID)
if err != nil {
// TODO
// Important! This should really be done in policy agent.
// Only done here as temporary measure.
externalId := makeId(policyDoc.AppliedTo, policyDoc.Name)
log.Printf("Constructing internal policy name = %s", externalId)
policyDoc.ExternalID = externalId
id, err = policy.store.lookupPolicy(policyDoc.ExternalID)
}
log.Printf("Found %d / %v (%T) from external ID %s", id, err, err, policyDoc.ExternalID)
if err != nil {
return nil, err
}
return policy.deletePolicy(id)
} else {
if input != nil {
return nil, common.NewError400("Request must either be to /policies/{policyID} or have a body.")
}
id, err := strconv.ParseUint(idStr, 10, 64)
if err != nil {
return nil, common.NewError404("policy", idStr)
}
return policy.deletePolicy(id)
}
}
// deletePolicy deletes a policy using the following algorithm:
// 1. Mark the policy as "deleted" (inactive) in the backend store.
// 2. Ask every agent to delete the policy, collecting per-host errors.
// 3. Once all agents succeed, remove the policy from the store.
func (policy *PolicySvc) deletePolicy(id uint64) (interface{}, error) {
// TODO do we need this to be transactional or not ... case can be made for either.
err := policy.store.inactivatePolicy(id)
if err != nil {
return nil, err
}
policyDoc, err := policy.store.getPolicy(id, true)
log.Printf("Found policy for ID %d: %s (%v)", id, policyDoc, err)
if err != nil {
return nil, err
}
hosts, err := policy.client.ListHosts()
if err != nil {
return nil, err
}
if policyDoc.ExternalID == "" {
// TODO
// Important! This should really be done in policy agent.
// Only done here as temporary measure.
externalId := makeId(policyDoc.AppliedTo, policyDoc.Name)
log.Printf("Constructing internal policy name = %s", externalId)
policyDoc.ExternalID = externalId
}
errStr := make([]string, 0)
for _, host := range hosts {
// TODO make schema configurable
url := fmt.Sprintf("http://%s:%d/policies", host.Ip, host.AgentPort)
result := make(map[string]interface{})
err = policy.client.Delete(url, policyDoc, result)
log.Printf("Agent at %s returned %v", host.Ip, result)
if err != nil {
errStr = append(errStr, fmt.Sprintf("Error deleting policy %d (%s) from host %s: %v. ", id, policyDoc.Name, host.Ip, err))
}
}
if len(errStr) > 0 {
return nil, common.NewError500(errStr)
}
err = policy.store.deletePolicy(id)
if err != nil {
return nil, err
}
policyDoc.Datacenter = nil
return policyDoc, nil
}
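// --- Illustrative aside (not part of the original policy.go) ---------------
// distributePolicy and deletePolicy above share a fan-out pattern: attempt
// every host, collect the per-host failures, and fail the whole call only
// after all hosts have been tried. A minimal self-contained sketch of that
// pattern (names illustrative):
package main

import (
	"fmt"
	"strings"
)

// notifyAll applies notify to every host and aggregates the failures.
func notifyAll(hosts []string, notify func(host string) error) error {
	errs := make([]string, 0)
	for _, h := range hosts {
		if err := notify(h); err != nil {
			errs = append(errs, fmt.Sprintf("host %s: %v", h, err))
		}
	}
	if len(errs) > 0 {
		return fmt.Errorf("partial failure: %s", strings.Join(errs, "; "))
	}
	return nil
}

func main() {
	hosts := []string{"10.0.0.1", "10.0.0.2"}
	err := notifyAll(hosts, func(h string) error {
		if h == "10.0.0.2" {
			return fmt.Errorf("connection refused") // simulated agent failure
		}
		return nil
	})
	fmt.Println(err) // partial failure: host 10.0.0.2: connection refused
}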
// listPolicies lists all policies.
func (policy *PolicySvc) listPolicies(input interface{}, ctx common.RestContext) (interface{}, error) {
policies, err := policy.store.listPolicies()
if err != nil {
return nil, err
}
for i, _ := range policies {
policies[i].Datacenter = nil
}
return policies, nil
}
// findPolicyByName returns the first policy found with the given
// policy name. Unlike policy IDs, policy names are not unique.
func (policy *PolicySvc) findPolicyByName(input interface{}, ctx common.RestContext) (interface{}, error) {
nameStr := ctx.PathVariables["policyName"]
log.Printf("In findPolicy(%s)\n", nameStr)
if nameStr == "" {
return nil, common.NewError500(fmt.Sprintf("Expected policy name, got %s", nameStr))
}
policyDoc, err := policy.store.findPolicyByName(nameStr)
if err != nil {
return nil, err
}
policyDoc.Datacenter = nil
return policyDoc, nil
}
// addPolicy stores the new policy and sends it to all agents.
func (policy *PolicySvc) addPolicy(input interface{}, ctx common.RestContext) (interface{}, error) {
policyDoc := input.(*common.Policy)
log.Printf("addPolicy(): Request for a new policy to be added: %s", policyDoc.Name)
err := policyDoc.Validate()
if err != nil {
log.Printf("addPolicy(): Error validating: %v", err)
return nil, err
}
log.Printf("addPolicy(): Request for a new policy to be added: %v", policyDoc)
err = policy.augmentPolicy(policyDoc)
if err != nil {
log.Printf("addPolicy(): Error augmenting: %v", err)
return nil, err
}
// Save it
err = policy.store.addPolicy(policyDoc)
if err != nil {
log.Printf("addPolicy(): Error storing: %v", err)
return nil, err
}
log.Printf("addPolicy(): Stored policy %s", policyDoc.Name)
err = policy.distributePolicy(policyDoc)
if err != nil {
log.Printf("addPolicy(): Error distributing: %v", err)
return nil, err
}
policyDoc.Datacenter = nil
return policyDoc, nil
}
// Name provides the name of this service.
func (policy *PolicySvc) Name() string {
return "policy"
}
// SetConfig implements the SetConfig function of the Service interface.
// Returns an error if it cannot connect to the data store.
func (policy *PolicySvc) SetConfig(config common.ServiceConfig) error {
// TODO this is a copy-paste of topology service, to refactor
log.Println(config)
policy.config = config
// storeConfig := config.ServiceSpecific["store"].(map[string]interface{})
log.Printf("Policy port: %d", config.Common.Api.Port)
policy.store = policyStore{}
storeConfig := config.ServiceSpecific["store"].(map[string]interface{})
policy.store.ServiceStore = &policy.store
return policy.store.SetConfig(storeConfig)
}
func (policy *PolicySvc) CreateSchema(overwrite bool) error {
return policy.store.CreateSchema(overwrite)
}
func (policy *PolicySvc) Initialize(client *common.RestClient) error
|
{
log.Println("Entering policy.Initialize()")
err := policy.store.Connect()
if err != nil {
return err
}
policy.client = client
return nil
}
|
identifier_body
|
|
policy.go
|
},
common.Route{
Method: "GET",
Pattern: findPath + policiesPath + "/{policyName}",
Handler: policy.findPolicyByName,
},
}
return routes
}
// augmentEndpoint augments the endpoint provided with appropriate information
// by looking it up in the appropriate service.
func (policy *PolicySvc) augmentEndpoint(endpoint *common.Endpoint) error {
tenantSvcUrl, err := policy.client.GetServiceUrl("tenant")
if err != nil {
return err
}
if endpoint.Peer == common.Wildcard {
// If a wildcard is specified, there is nothing to augment
return nil
}
log.Printf("Policy: Augmenting %#v", endpoint)
// Code below tries to resolve tenant name into tenant_network_id if possible.
//
// TODO this will have to be changed once we implement
// https://paninetworks.kanbanize.com/ctrl_board/3/cards/319/details
ten := &tenant.Tenant{}
if endpoint.TenantNetworkID == nil {
if endpoint.TenantID != 0 {
tenantIDToUse := strconv.FormatUint(endpoint.TenantID, 10)
tenantsUrl := fmt.Sprintf("%s/tenants/%s", tenantSvcUrl, tenantIDToUse)
log.Printf("Policy: Looking tenant up at %s", tenantsUrl)
err = policy.client.Get(tenantsUrl, ten)
if err != nil
|
endpoint.TenantNetworkID = &ten.NetworkID
} else if endpoint.TenantExternalID != "" || endpoint.TenantName != "" {
if endpoint.TenantExternalID != "" {
ten.ExternalID = endpoint.TenantExternalID
}
if endpoint.TenantName != "" {
ten.Name = endpoint.TenantName
}
err = policy.client.Find(ten, common.FindLast)
if err != nil {
return err
}
endpoint.TenantNetworkID = &ten.NetworkID
}
}
if endpoint.SegmentNetworkID == nil {
if ten == nil && (endpoint.SegmentID != 0 || endpoint.SegmentExternalID != "" || endpoint.SegmentName != "") {
return common.NewError400("No tenant information specified, cannot look up segment.")
}
segment := &tenant.Segment{}
if endpoint.SegmentID != 0 {
segmentIDToUse := strconv.FormatUint(endpoint.SegmentID, 10)
segmentsUrl := fmt.Sprintf("%s/tenants/%d/segments/%s", tenantSvcUrl, ten.ID, segmentIDToUse)
log.Printf("Policy: Looking segment up at %s for %#v", segmentsUrl, endpoint)
err = policy.client.Get(segmentsUrl, &segment)
if err != nil {
return err
}
endpoint.SegmentNetworkID = &segment.NetworkID
} else if endpoint.SegmentExternalID != "" || endpoint.SegmentName != "" {
segmentsUrl := fmt.Sprintf("%s/findLast/segments?tenant_id=%d&", tenantSvcUrl, ten.ID)
if endpoint.SegmentExternalID != "" {
segmentsUrl += "external_id=" + endpoint.TenantExternalID + "&"
}
if endpoint.SegmentName != "" {
segmentsUrl += "name=" + endpoint.SegmentName
}
log.Printf("Policy: Finding segments at %s for %#v (Tenant %#v %t)", segmentsUrl, endpoint, ten, ten == nil)
err = policy.client.Get(segmentsUrl, &segment)
if err != nil {
return err
}
endpoint.SegmentNetworkID = &segment.NetworkID
}
}
return nil
}
// augmentPolicy augments the provided policy with information obtained from
// various services.
func (policy *PolicySvc) augmentPolicy(policyDoc *common.Policy) error {
// Get info from topology service
log.Printf("Augmenting policy %s", policyDoc.Name)
if policyDoc.ExternalID != "" {
// TODO
// Important! This should really be done in policy agent.
// Only done here as temporary measure.
externalId := makeId(policyDoc.AppliedTo, policyDoc.Name)
log.Printf("Constructing internal policy name = %s", externalId)
policyDoc.ExternalID = externalId
}
topoUrl, err := policy.client.GetServiceUrl("topology")
if err != nil {
return err
}
// Query topology for data center information
// TODO move this to root
index := common.IndexResponse{}
err = policy.client.Get(topoUrl, &index)
if err != nil {
return err
}
dcURL := index.Links.FindByRel("datacenter")
dc := &common.Datacenter{}
err = policy.client.Get(dcURL, dc)
if err != nil {
return err
}
log.Printf("Policy server received datacenter information from topology service: %+v\n", dc)
policyDoc.Datacenter = dc
for i, _ := range policyDoc.AppliedTo {
endpoint := &policyDoc.AppliedTo[i]
err = policy.augmentEndpoint(endpoint)
if err != nil {
return err
}
}
for j, _ := range policyDoc.Ingress {
for i, _ := range policyDoc.Ingress[j].Rules {
rule := &policyDoc.Ingress[j].Rules[i]
rule.Protocol = strings.ToUpper(rule.Protocol)
}
for i, _ := range policyDoc.Ingress[j].Peers {
endpoint := &policyDoc.Ingress[j].Peers[i]
err = policy.augmentEndpoint(endpoint)
if err != nil {
return err
}
}
}
return nil
}
// distributePolicy distributes policy to all agents.
// TODO how should error handling work here really?
func (policy *PolicySvc) distributePolicy(policyDoc *common.Policy) error {
hosts, err := policy.client.ListHosts()
if err != nil {
return err
}
errStr := make([]string, 0)
for _, host := range hosts {
// TODO make schema configurable
url := fmt.Sprintf("http://%s:%d/policies", host.Ip, host.AgentPort)
log.Printf("Sending policy %s to agent at %s", policyDoc.Name, url)
result := make(map[string]interface{})
err = policy.client.Post(url, policyDoc, &result)
log.Printf("Agent at %s returned %v", host.Ip, result)
if err != nil {
errStr = append(errStr, fmt.Sprintf("Error applying policy %d to host %s: %v. ", policyDoc.ID, host.Ip, err))
}
}
if len(errStr) > 0 {
return common.NewError500(errStr)
}
return nil
}
func (policy *PolicySvc) getPolicy(input interface{}, ctx common.RestContext) (interface{}, error) {
idStr := ctx.PathVariables["policyID"]
id, err := strconv.ParseUint(idStr, 10, 64)
if err != nil {
return nil, common.NewError404("policy", idStr)
}
policyDoc, err := policy.store.getPolicy(id, false)
log.Printf("Found policy for ID %d: %s (%v)", id, policyDoc, err)
return policyDoc, err
}
func (policy *PolicySvc) deletePolicyHandler(input interface{}, ctx common.RestContext) (interface{}, error) {
idStr := strings.TrimSpace(ctx.PathVariables["policyID"])
if idStr == "" {
if input == nil {
return nil, common.NewError400("Request must either be to /policies/{policyID} or have a body.")
}
policyDoc := input.(*common.Policy)
err := policyDoc.Validate()
if err != nil {
return nil, err
}
log.Printf("IN deletePolicyHandler with %v", policyDoc)
id, err := policy.store.lookupPolicy(policyDoc.ExternalID)
if err != nil {
// TODO
// Important! This should really be done in policy agent.
// Only done here as temporary measure.
externalId := makeId(policyDoc.AppliedTo, policyDoc.Name)
log.Printf("Constructing internal policy name = %s", externalId)
policyDoc.ExternalID = externalId
id, err = policy.store.lookupPolicy(policyDoc.ExternalID)
}
log.Printf("Found %d / %v (%T) from external ID %s", id, err, err, policyDoc.ExternalID)
if err != nil {
return nil, err
}
return policy.deletePolicy(id)
} else {
if input != nil {
return nil, common.NewError400("Request must either be to /policies/{policyID} or have a body.")
}
id, err := strconv.ParseUint(idStr, 10, 64)
if err != nil {
return nil, common.NewError404("policy", idStr)
}
return policy.deletePolicy(id)
}
}
// deletePolicy deletes a policy using the following algorithm:
// 1. Mark the policy as "deleted" (inactive) in the backend store.
// 2. Ask every agent to delete the policy, collecting per-host errors.
// 3. Once all agents succeed, remove the policy from the store.
func (policy *PolicySvc) deletePolicy(id uint6
|
{
return err
}
|
conditional_block
|
policy.go
|
center = dc
for i, _ := range policyDoc.AppliedTo {
endpoint := &policyDoc.AppliedTo[i]
err = policy.augmentEndpoint(endpoint)
if err != nil {
return err
}
}
for j, _ := range policyDoc.Ingress {
for i, _ := range policyDoc.Ingress[j].Rules {
rule := &policyDoc.Ingress[j].Rules[i]
rule.Protocol = strings.ToUpper(rule.Protocol)
}
for i, _ := range policyDoc.Ingress[j].Peers {
endpoint := &policyDoc.Ingress[j].Peers[i]
err = policy.augmentEndpoint(endpoint)
if err != nil {
return err
}
}
}
return nil
}
// distributePolicy distributes policy to all agents.
// TODO how should error handling work here really?
func (policy *PolicySvc) distributePolicy(policyDoc *common.Policy) error {
hosts, err := policy.client.ListHosts()
if err != nil {
return err
}
errStr := make([]string, 0)
for _, host := range hosts {
// TODO make schema configurable
url := fmt.Sprintf("http://%s:%d/policies", host.Ip, host.AgentPort)
log.Printf("Sending policy %s to agent at %s", policyDoc.Name, url)
result := make(map[string]interface{})
err = policy.client.Post(url, policyDoc, &result)
log.Printf("Agent at %s returned %v", host.Ip, result)
if err != nil {
errStr = append(errStr, fmt.Sprintf("Error applying policy %d to host %s: %v. ", policyDoc.ID, host.Ip, err))
}
}
if len(errStr) > 0 {
return common.NewError500(errStr)
}
return nil
}
func (policy *PolicySvc) getPolicy(input interface{}, ctx common.RestContext) (interface{}, error) {
idStr := ctx.PathVariables["policyID"]
id, err := strconv.ParseUint(idStr, 10, 64)
if err != nil {
return nil, common.NewError404("policy", idStr)
}
policyDoc, err := policy.store.getPolicy(id, false)
log.Printf("Found policy for ID %d: %s (%v)", id, policyDoc, err)
return policyDoc, err
}
func (policy *PolicySvc) deletePolicyHandler(input interface{}, ctx common.RestContext) (interface{}, error) {
idStr := strings.TrimSpace(ctx.PathVariables["policyID"])
if idStr == "" {
if input == nil {
return nil, common.NewError400("Request must either be to /policies/{policyID} or have a body.")
}
policyDoc := input.(*common.Policy)
err := policyDoc.Validate()
if err != nil {
return nil, err
}
log.Printf("IN deletePolicyHandler with %v", policyDoc)
id, err := policy.store.lookupPolicy(policyDoc.ExternalID)
if err != nil {
// TODO
// Important! This should really be done in policy agent.
// Only done here as temporary measure.
externalId := makeId(policyDoc.AppliedTo, policyDoc.Name)
log.Printf("Constructing internal policy name = %s", externalId)
policyDoc.ExternalID = externalId
id, err = policy.store.lookupPolicy(policyDoc.ExternalID)
}
log.Printf("Found %d / %v (%T) from external ID %s", id, err, err, policyDoc.ExternalID)
if err != nil {
return nil, err
}
return policy.deletePolicy(id)
} else {
if input != nil {
return nil, common.NewError400("Request must either be to /policies/{policyID} or have a body.")
}
id, err := strconv.ParseUint(idStr, 10, 64)
if err != nil {
return nil, common.NewError404("policy", idStr)
}
return policy.deletePolicy(id)
}
}
// deletePolicy deletes a policy using the following algorithm:
// 1. Mark the policy as "deleted" (inactive) in the backend store.
// 2. Ask every agent to delete the policy, collecting per-host errors.
// 3. Once all agents succeed, remove the policy from the store.
func (policy *PolicySvc) deletePolicy(id uint64) (interface{}, error) {
// TODO do we need this to be transactional or not ... case can be made for either.
err := policy.store.inactivatePolicy(id)
if err != nil {
return nil, err
}
policyDoc, err := policy.store.getPolicy(id, true)
log.Printf("Found policy for ID %d: %s (%v)", id, policyDoc, err)
if err != nil {
return nil, err
}
hosts, err := policy.client.ListHosts()
if err != nil {
return nil, err
}
if policyDoc.ExternalID == "" {
// TODO
// Important! This should really be done in policy agent.
// Only done here as temporary measure.
externalId := makeId(policyDoc.AppliedTo, policyDoc.Name)
log.Printf("Constructing internal policy name = %s", externalId)
policyDoc.ExternalID = externalId
}
errStr := make([]string, 0)
for _, host := range hosts {
// TODO make schema configurable
url := fmt.Sprintf("http://%s:%d/policies", host.Ip, host.AgentPort)
result := make(map[string]interface{})
err = policy.client.Delete(url, policyDoc, result)
log.Printf("Agent at %s returned %v", host.Ip, result)
if err != nil {
errStr = append(errStr, fmt.Sprintf("Error deleting policy %d (%s) from host %s: %v. ", id, policyDoc.Name, host.Ip, err))
}
}
if len(errStr) > 0 {
return nil, common.NewError500(errStr)
}
err = policy.store.deletePolicy(id)
if err != nil {
return nil, err
}
policyDoc.Datacenter = nil
return policyDoc, nil
}
// listPolicies lists all policies.
func (policy *PolicySvc) listPolicies(input interface{}, ctx common.RestContext) (interface{}, error) {
policies, err := policy.store.listPolicies()
if err != nil {
return nil, err
}
for i, _ := range policies {
policies[i].Datacenter = nil
}
return policies, nil
}
// findPolicyByName returns the first policy found with the given
// policy name. Unlike policy IDs, policy names are not unique.
func (policy *PolicySvc) findPolicyByName(input interface{}, ctx common.RestContext) (interface{}, error) {
nameStr := ctx.PathVariables["policyName"]
log.Printf("In findPolicy(%s)\n", nameStr)
if nameStr == "" {
return nil, common.NewError500(fmt.Sprintf("Expected policy name, got %s", nameStr))
}
policyDoc, err := policy.store.findPolicyByName(nameStr)
if err != nil {
return nil, err
}
policyDoc.Datacenter = nil
return policyDoc, nil
}
// addPolicy stores the new policy and sends it to all agents.
func (policy *PolicySvc) addPolicy(input interface{}, ctx common.RestContext) (interface{}, error) {
policyDoc := input.(*common.Policy)
log.Printf("addPolicy(): Request for a new policy to be added: %s", policyDoc.Name)
err := policyDoc.Validate()
if err != nil {
log.Printf("addPolicy(): Error validating: %v", err)
return nil, err
}
log.Printf("addPolicy(): Request for a new policy to be added: %v", policyDoc)
err = policy.augmentPolicy(policyDoc)
if err != nil {
log.Printf("addPolicy(): Error augmenting: %v", err)
return nil, err
}
// Save it
err = policy.store.addPolicy(policyDoc)
if err != nil {
log.Printf("addPolicy(): Error storing: %v", err)
return nil, err
}
log.Printf("addPolicy(): Stored policy %s", policyDoc.Name)
err = policy.distributePolicy(policyDoc)
if err != nil {
log.Printf("addPolicy(): Error distributing: %v", err)
return nil, err
}
policyDoc.Datacenter = nil
return policyDoc, nil
}
// Name provides the name of this service.
func (policy *PolicySvc) Name() string {
return "policy"
}
// SetConfig implements the SetConfig function of the Service interface.
// Returns an error if it cannot connect to the data store.
func (policy *PolicySvc) SetConfig(config common.ServiceConfig) error {
// TODO this is a copy-paste of topology service, to refactor
log.Println(config)
policy.config = config
// storeConfig := config.ServiceSpecific["store"].(map[string]interface{})
log.Printf("Policy port: %d", config.Common.Api.Port)
policy.store = policyStore{}
storeConfig := config.ServiceSpecific["store"].(map[string]interface{})
policy.store.ServiceStore = &policy.store
return policy.store.SetConfig(storeConfig)
}
func (policy *PolicySvc) CreateSchema(overwrite bool) error {
return policy.store.CreateSchema(overwrite)
}
func (policy *PolicySvc)
|
Initialize
|
identifier_name
|
|
policy.go
|
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package policy
import (
"crypto/sha1"
"encoding/hex"
"fmt"
"log"
"strconv"
"strings"
"github.com/romana/core/common"
"github.com/romana/core/tenant"
)
// PolicySvc provides Policy service.
type PolicySvc struct {
client *common.RestClient
config common.ServiceConfig
store policyStore
}
const (
infoListPath = "/info"
findPath = "/find"
policiesPath = "/policies"
policyNameQueryVar = "policyName"
)
func (policy *PolicySvc) Routes() common.Routes {
routes := common.Routes{
common.Route{
Method: "POST",
Pattern: policiesPath,
Handler: policy.addPolicy,
MakeMessage: func() interface{} { return &common.Policy{} },
UseRequestToken: false,
},
common.Route{
Method: "DELETE",
Pattern: policiesPath,
Handler: policy.deletePolicyHandler,
MakeMessage: func() interface{} { return &common.Policy{} },
UseRequestToken: false,
},
common.Route{
Method: "DELETE",
Pattern: policiesPath + "/{policyID}",
Handler: policy.deletePolicyHandler,
MakeMessage: func() interface{} { return &common.Policy{} },
UseRequestToken: false,
},
common.Route{
Method: "GET",
Pattern: policiesPath,
Handler: policy.listPolicies,
MakeMessage: nil,
UseRequestToken: false,
},
common.Route{
Method: "GET",
Pattern: policiesPath + "/{policyID}",
Handler: policy.getPolicy,
MakeMessage: nil,
UseRequestToken: false,
},
common.Route{
Method: "GET",
Pattern: findPath + policiesPath + "/{policyName}",
Handler: policy.findPolicyByName,
},
}
return routes
}
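// --- Added summary (not part of the original policy.go) --------------------
// The route table constructed above wires up:
//
//	POST   /policies                   -> addPolicy           (body: common.Policy)
//	DELETE /policies                   -> deletePolicyHandler (body: common.Policy)
//	DELETE /policies/{policyID}        -> deletePolicyHandler
//	GET    /policies                   -> listPolicies
//	GET    /policies/{policyID}        -> getPolicy
//	GET    /find/policies/{policyName} -> findPolicyByName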
// augmentEndpoint augments the endpoint provided with appropriate information
// by looking it up in the appropriate service.
func (policy *PolicySvc) augmentEndpoint(endpoint *common.Endpoint) error {
tenantSvcUrl, err := policy.client.GetServiceUrl("tenant")
if err != nil {
return err
}
if endpoint.Peer == common.Wildcard {
// If a wildcard is specified, there is nothing to augment
return nil
}
log.Printf("Policy: Augmenting %#v", endpoint)
// Code below tries to resolve tenant name into tenant_network_id if possible.
//
// TODO this will have to be changed once we implement
// https://paninetworks.kanbanize.com/ctrl_board/3/cards/319/details
ten := &tenant.Tenant{}
if endpoint.TenantNetworkID == nil {
if endpoint.TenantID != 0 {
tenantIDToUse := strconv.FormatUint(endpoint.TenantID, 10)
tenantsUrl := fmt.Sprintf("%s/tenants/%s", tenantSvcUrl, tenantIDToUse)
log.Printf("Policy: Looking tenant up at %s", tenantsUrl)
err = policy.client.Get(tenantsUrl, ten)
if err != nil {
return err
}
endpoint.TenantNetworkID = &ten.NetworkID
} else if endpoint.TenantExternalID != "" || endpoint.TenantName != "" {
if endpoint.TenantExternalID != "" {
ten.ExternalID = endpoint.TenantExternalID
}
if endpoint.TenantName != "" {
ten.Name = endpoint.TenantName
}
err = policy.client.Find(ten, common.FindLast)
if err != nil {
return err
}
endpoint.TenantNetworkID = &ten.NetworkID
}
}
if endpoint.SegmentNetworkID == nil {
if ten == nil && (endpoint.SegmentID != 0 || endpoint.SegmentExternalID != "" || endpoint.SegmentName != "") {
return common.NewError400("No tenant information specified, cannot look up segment.")
}
segment := &tenant.Segment{}
if endpoint.SegmentID != 0 {
segmentIDToUse := strconv.FormatUint(endpoint.SegmentID, 10)
segmentsUrl := fmt.Sprintf("%s/tenants/%d/segments/%s", tenantSvcUrl, ten.ID, segmentIDToUse)
log.Printf("Policy: Looking segment up at %s for %#v", segmentsUrl, endpoint)
err = policy.client.Get(segmentsUrl, &segment)
if err != nil {
return err
}
endpoint.SegmentNetworkID = &segment.NetworkID
} else if endpoint.SegmentExternalID != "" || endpoint.SegmentName != "" {
segmentsUrl := fmt.Sprintf("%s/findLast/segments?tenant_id=%d&", tenantSvcUrl, ten.ID)
if endpoint.SegmentExternalID != "" {
segmentsUrl += "external_id=" + endpoint.TenantExternalID + "&"
}
if endpoint.SegmentName != "" {
segmentsUrl += "name=" + endpoint.SegmentName
}
log.Printf("Policy: Finding segments at %s for %#v (Tenant %#v %t)", segmentsUrl, endpoint, ten, ten == nil)
err = policy.client.Get(segmentsUrl, &segment)
if err != nil {
return err
}
endpoint.SegmentNetworkID = &segment.NetworkID
}
}
return nil
}
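// --- Illustrative aside (not part of the original policy.go) ---------------
// augmentEndpoint above resolves a tenant in a fixed order: an already-known
// network ID wins, then a numeric-ID lookup, then a find-last by external
// ID/name. A self-contained sketch of that precedence, with the service
// lookups stubbed out (all names here are illustrative):
package main

import (
	"errors"
	"fmt"
)

func resolveNetworkID(known *uint64, id uint64, externalID, name string,
	byID func(uint64) (uint64, error),
	byAttrs func(extID, name string) (uint64, error)) (uint64, error) {
	switch {
	case known != nil:
		return *known, nil // nothing to augment
	case id != 0:
		return byID(id) // stand-in for GET /tenants/{id}
	case externalID != "" || name != "":
		return byAttrs(externalID, name) // stand-in for a findLast query
	default:
		return 0, errors.New("no tenant information specified")
	}
}

func main() {
	byID := func(id uint64) (uint64, error) { return 1000 + id, nil }
	byAttrs := func(e, n string) (uint64, error) { return 2000, nil }
	nid, _ := resolveNetworkID(nil, 7, "", "", byID, byAttrs)
	fmt.Println(nid) // 1007
}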
// augmentPolicy augments the provided policy with information obtained from
// various services.
func (policy *PolicySvc) augmentPolicy(policyDoc *common.Policy) error {
// Get info from topology service
log.Printf("Augmenting policy %s", policyDoc.Name)
if policyDoc.ExternalID != "" {
// TODO
// Important! This should really be done in policy agent.
// Only done here as temporary measure.
externalId := makeId(policyDoc.AppliedTo, policyDoc.Name)
log.Printf("Constructing internal policy name = %s", externalId)
policyDoc.ExternalID = externalId
}
topoUrl, err := policy.client.GetServiceUrl("topology")
if err != nil {
return err
}
// Query topology for data center information
// TODO move this to root
index := common.IndexResponse{}
err = policy.client.Get(topoUrl, &index)
if err != nil {
return err
}
dcURL := index.Links.FindByRel("datacenter")
dc := &common.Datacenter{}
err = policy.client.Get(dcURL, dc)
if err != nil {
return err
}
log.Printf("Policy server received datacenter information from topology service: %+v\n", dc)
policyDoc.Datacenter = dc
for i, _ := range policyDoc.AppliedTo {
endpoint := &policyDoc.AppliedTo[i]
err = policy.augmentEndpoint(endpoint)
if err != nil {
return err
}
}
for j, _ := range policyDoc.Ingress {
for i, _ := range policyDoc.Ingress[j].Rules {
rule := &policyDoc.Ingress[j].Rules[i]
rule.Protocol = strings.ToUpper(rule.Protocol)
}
for i, _ := range policyDoc.Ingress[j].Peers {
endpoint := &policyDoc.Ingress[j].Peers[i]
err = policy.augmentEndpoint(endpoint)
if err != nil {
return err
}
}
}
return nil
}
// distributePolicy distributes policy to all agents.
// TODO how should error handling work here really?
func (policy *PolicySvc) distributePolicy(policyDoc *common.Policy) error {
hosts, err := policy.client.ListHosts()
if err != nil {
return err
}
errStr := make([]string, 0)
for _, host := range hosts {
// TODO make schema configurable
url := fmt.Sprintf("http://%s:%d/policies", host.Ip, host.AgentPort)
log.Printf("Sending policy %s to agent at %s", policyDoc.Name, url)
result := make(map[string]interface{})
err = policy.client.Post(url, policyDoc, &result)
log.Printf("Agent at %s returned %v", host.Ip, result)
if err != nil {
errStr = append(errStr, fmt.Sprintf("Error applying policy %d to host %s: %v. ", policyDoc.ID, host.Ip, err))
}
}
if len(errStr) > 0 {
return common.NewError500(errStr)
}
return nil
}
func (policy *PolicySvc) getPolicy(input interface{}, ctx common.RestContext) (interface{}, error) {
idStr := ctx.PathVariables["policyID"]
id
|
random_line_split
|
|
|
views.py
|
, childcid, order_rule):
foodtyps = Foodtype.objects.all()
if childcid == ALL_TYPE:
goods_list = Goods.objects.filter(categoryid=categoryid)
else:
goods_list = Goods.objects.filter(categoryid=categoryid).filter(childcid=childcid)
foodtype = Foodtype.objects.get(typeid=categoryid)
"""
order_rule
0 代表综合排序
1 代表价格升序
2 代表价格降序
3 竞价排名
"""
if order_rule == ORDER_TOTAL:
pass
elif order_rule == PRICE_ASC:
goods_list = goods_list.order_by("price")
elif order_rule == PRICE_DESC:
goods_list = goods_list.order_by("-price")
"""
全部类型:0#进口水果:110#国产水果:120
"""
childtypenames = foodtype.childtypenames
"""
[全部类型:0, 进口水果:110, 国产水果:120]
"""
childtypename_list = childtypenames.split("#")
child_type_list = []
"""
[[全部类型, 0], [进口水果, 110], [国产水果, 120]]
"""
for childtypename in childtypename_list:
child_type_list.append(childtypename.split(":"))
data = {
'title': '闪购',
'foodtypes': foodtyps,
'goods_list': goods_list,
'categoryid': int(categoryid),
'child_type_list': child_type_list,
'childcid': childcid,
'order_rule': order_rule,
}
return render(request, 'market/market.html', context=data)
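# --- Illustrative aside (not part of the original views.py) ---------------
# A standalone version of the childtypenames parsing above: "#" separates the
# child types, ":" separates each display name from its id.
def parse_child_types(childtypenames):
    # "全部类型:0#进口水果:110" -> [["全部类型", "0"], ["进口水果", "110"]]
    return [item.split(":") for item in childtypenames.split("#")]

assert parse_child_types("a:0#b:110") == [["a", "0"], ["b", "110"]]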
def cart(request):
userid = request.session.get('user_id')
if not userid:
return redirect(reverse('axf:user_login'))
user = UserModel.objects.get(pk=userid)
cartmodels = user.cartmodel_set.all()
is_all_select = True
total_price = 0
for cartmodel in cartmodels:
if not cartmodel.c_goods_select:
is_all_select = False
else:
total_price += cartmodel.c_goods_num * cartmodel.c_goods.price
data = {
'title': '购物车',
'cartmodels': cartmodels,
'is_all_select': is_all_select,
'total_price': '{:.2f}'.format(total_price)
}
return render(request, 'cart/cart.html', context=data)
def mine(request):
is_login = False
user_id = request.session.get('user_id')
data = {
'title': '我的',
'is_login': is_login,
}
if user_id:
is_login = True
user = UserModel.objects.get(pk=user_id)
data['is_login'] = is_login
data['user_icon'] = '/static/upload/' + user.u_icon.url
data['username'] = user.u_name
ordered_count = OrderModel.objects.filter(o_user=user).filter(o_status=order_status.ORDERED).count()
if ordered_count > 0:
data['ordered_count'] = ordered_count
wait_receive_count = OrderModel.objects.filter(o_user=user).filter(o_status=order_status.PAYED).count()
if wait_receive_count > 0:
data['wait_receive_count'] = wait_receive_count
return render(request, 'mine/mine.html', context=data)
def user_register(request):
if request.method == "GET":
data = {
"title": '用户注册'
}
return render(request, 'user/user_register.html', context=data)
elif request.method == "POST":
username = request.POST.get('u_name')
password = request.POST.get('u_password')
email = request.POST.get('u_email')
icon = request.FILES.get('u_icon')
print(password)
user = UserModel()
user.u_name = username
user.u_email = email
user.u_icon = icon
user.set_password(password)
user.save()
request.session['user_id'] = user.id
send_mail_learn(username, email, user.id)
return redirect(reverse('axf:mine'))
def user_logout(request):
request.session.flush()
return redirect(reverse('axf:mine'))
def check_user(request):
username = request.GET.get("u_name")
# 0 or 1 matching users
users = UserModel.objects.filter(u_name=username)
data = {
'status': '200',
'msg': 'ok'
}
if users.exists():
# 801 means the user already exists
data['status'] = '801'
data['msg'] = 'already exists'
else:
data['msg'] = 'can use'
return JsonResponse(data)
def check_email(request):
email = request.GET.get('u_email')
users = UserModel.objects.filter(u_email=email)
data = {
'status': '200',
'msg': 'ok'
}
if users.exists():
data['status'] = '802'
data['msg'] = 'email already exists'
else:
data['msg'] = 'can use'
return JsonResponse(data)
def user_login(request):
if request.method == "GET":
msg = request.session.get('msg')
data = {
'title': '用户登录',
'msg': msg
}
return render(request, 'user/user_login.html', context=data)
elif request.method == "POST":
username = request.POST.get('u_name')
password = request.POST.get('u_password')
users = UserModel.objects.filter(u_name=username)
if users.exists():
user = users.first()
if user.check_password(password):
if not user.is_active:
request.session['msg'] = '用户未激活'
return redirect(reverse('axf:user_login'))
request.session['user_id'] = user.id
|
# wrong password
return redirect(reverse('axf:user_login'))
else:
request.session['msg'] = '用户不存在'
# user does not exist
return redirect(reverse('axf:user_login'))
"""
激活
能找到用户的方式
- 根据用户唯一标识
修改用户状态
"""
def send_mail_learn(username, email, userid):
subject = '爱鲜蜂VIP激活邮件'
message = ""
recipient_list = [email, ]
temp = loader.get_template('user/user_active.html')
token = str(uuid.uuid4())
cache.set(token, userid, timeout=60 * 60)
data = {
'username': username,
'active_url': 'http://127.0.0.1:8001/axf/activeuser/?utoken=%s' % token,
}
html = temp.render(data)
send_mail(subject, message, 'rongjiawei1204@163.com', recipient_list, html_message=html)
def active_user(request):
user_token = request.GET.get('utoken')
user_id = cache.get(user_token)
cache.delete(user_token)
if not user_id:
return HttpResponse("激活已过期,请重新申请激活邮件")
user = UserModel.objects.get(pk=user_id)
user.is_active = True
user.save()
return HttpResponse('用户激活成功')
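# --- Illustrative aside (not part of the original views.py) ---------------
# The activation flow above boils down to a consumable, expiring token: the
# cache maps token -> userid for an hour, and active_user deletes the token
# on use. A standalone sketch with a dict standing in for the Django cache:
import uuid

_cache = {}  # stand-in for django.core.cache.cache

def issue_token(userid):
    token = str(uuid.uuid4())
    _cache[token] = userid  # original: cache.set(token, userid, timeout=60 * 60)
    return token

def redeem_token(token):
    return _cache.pop(token, None)  # consumed: a token activates at most once

t = issue_token(42)
assert redeem_token(t) == 42
assert redeem_token(t) is None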
def add_to_cart(request):
goodsid = request.GET.get('goodsid')
userid = request.session.get('user_id')
print(goodsid)
data = {
'status': '200',
'msg': 'ok'
}
if not userid:
data['status'] = '302'
data['msg'] = 'not login'
else:
# add the cart record
goods = Goods.objects.get(pk=goodsid)
user = UserModel.objects.get(pk=userid)
cartmodels = CartModel.objects.filter(c_goods=goods).filter(c_user=user)
# cartmodels = CartModel.objects.filter(c_goods_id=goodsid).filter(c_user_id=userid)
# at most one matching row
if cartmodels.exists():
cartmodel = cartmodels.first()
cartmodel.c_goods_num = cartmodel.c_goods_num + 1
cartmodel.save()
else:
cartmodel = CartModel()
cartmodel.c_goods = goods
cartmodel.c_user = user
cartmodel.save()
data['goods_num'] = cartmodel.c_goods_num
return JsonResponse(data)
def sub_to_cart(request):
cartid = request.GET.get('cartid')
cart_model = CartModel.objects.get(pk=cartid)
data = {
'status': '200',
'msg': 'ok'
}
if cart_model.c_goods_num == 1:
cart_model.delete()
data['goods_num'] = 0
else:
cart_model.c_goods_num = cart_model.c_goods_num - 1
cart_model.save()
data['goods_num'] = cart_model.c_goods_num
data['total_price'] = '{:.2f}'.format(calc_total(request.session.get('user_id')))
return JsonResponse(data)
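# --- Illustrative aside (not part of the original views.py) ---------------
# sub_to_cart above implements "decrement, and drop the row at zero": a count
# of 1 deletes the cart row instead of storing 0. A tiny standalone check:
def decremented(num):
    """Return the new count, or None when the row should be deleted."""
    return None if num == 1 else num - 1

assert decremented(1) is None
assert decremented(3) == 2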
def change_cart_status(request):
carid = request.GET.get('cartid')
cartmodel = CartModel.objects.get(pk=carid)
cartmodel.c_goods_select = not cartmodel.c_goods_select
cartmodel
|
return redirect(reverse('axf:mine'))
else:
request.session['msg'] = '密码错误'
|
conditional_block
|
views.py
|
'user/user_register.html', context=data)
elif request.method == "POST":
username = request.POST.get('u_name')
password = request.POST.get('u_password')
email = request.POST.get('u_email')
icon = request.FILES.get('u_icon')
print(password)
user = UserModel()
user.u_name = username
user.u_email = email
user.u_icon = icon
user.set_password(password)
user.save()
request.session['user_id'] = user.id
send_mail_learn(username, email, user.id)
return redirect(reverse('axf:mine'))
def user_logout(request):
request.session.flush()
return redirect(reverse('axf:mine'))
def check_user(request):
username = request.GET.get("u_name")
# 0 or 1 matching users
users = UserModel.objects.filter(u_name=username)
data = {
'status': '200',
'msg': 'ok'
}
if users.exists():
# 801 means the user already exists
data['status'] = '801'
data['msg'] = 'already exists'
else:
data['msg'] = 'can use'
return JsonResponse(data)
def check_email(request):
email = request.GET.get('u_email')
users = UserModel.objects.filter(u_email=email)
data = {
'status': '200',
'msg': 'ok'
}
if users.exists():
data['status'] = '802'
data['msg'] = 'email already exists'
else:
data['msg'] = 'can use'
return JsonResponse(data)
def user_login(request):
if request.method == "GET":
msg = request.session.get('msg')
data = {
'title': '用户登录',
'msg': msg
}
return render(request, 'user/user_login.html', context=data)
elif request.method == "POST":
username = request.POST.get('u_name')
password = request.POST.get('u_password')
users = UserModel.objects.filter(u_name=username)
if users.exists():
user = users.first()
if user.check_password(password):
if not user.is_active:
request.session['msg'] = '用户未激活'
return redirect(reverse('axf:user_login'))
request.session['user_id'] = user.id
return redirect(reverse('axf:mine'))
else:
request.session['msg'] = '密码错误'
# wrong password
return redirect(reverse('axf:user_login'))
else:
request.session['msg'] = '用户不存在'
# user does not exist
return redirect(reverse('axf:user_login'))
"""
激活
能找到用户的方式
- 根据用户唯一标识
修改用户状态
"""
def send_mail_learn(username, email, userid):
subject = '爱鲜蜂VIP激活邮件'
message = ""
recipient_list = [email, ]
temp = loader.get_template('user/user_active.html')
token = str(uuid.uuid4())
cache.set(token, userid, timeout=60 * 60)
data = {
'username': username,
'active_url': 'http://127.0.0.1:8001/axf/activeuser/?utoken=%s' % token,
}
html = temp.render(data)
send_mail(subject, message, 'rongjiawei1204@163.com', recipient_list, html_message=html)
def active_user(request):
user_token = request.GET.get('utoken')
user_id = cache.get(user_token)
cache.delete(user_token)
if not user_id:
return HttpResponse("激活已过期,请重新申请激活邮件")
user = UserModel.objects.get(pk=user_id)
user.is_active = True
user.save()
return HttpResponse('用户激活成功')
def add_to_cart(request):
goodsid = request.GET.get('goodsid')
userid = request.session.get('user_id')
print(goodsid)
data = {
'status': '200',
'msg': 'ok'
}
if not userid:
data['status'] = '302'
data['msg'] = 'not login'
else:
# add the cart record
goods = Goods.objects.get(pk=goodsid)
user = UserModel.objects.get(pk=userid)
cartmodels = CartModel.objects.filter(c_goods=goods).filter(c_user=user)
# cartmodels = CartModel.objects.filter(c_goods_id=goodsid).filter(c_user_id=userid)
# at most one matching row
if cartmodels.exists():
cartmodel = cartmodels.first()
cartmodel.c_goods_num = cartmodel.c_goods_num + 1
cartmodel.save()
else:
cartmodel = CartModel()
cartmodel.c_goods = goods
cartmodel.c_user = user
cartmodel.save()
data['goods_num'] = cartmodel.c_goods_num
return JsonResponse(data)
def sub_to_cart(request):
cartid = request.GET.get('cartid')
cart_model = CartModel.objects.get(pk=cartid)
data = {
'status': '200',
'msg': 'ok'
}
if cart_model.c_goods_num == 1:
cart_model.delete()
data['goods_num'] = 0
else:
cart_model.c_goods_num = cart_model.c_goods_num - 1
cart_model.save()
data['goods_num'] = cart_model.c_goods_num
data['total_price'] = '{:.2f}'.format(calc_total(request.session.get('user_id')))
return JsonResponse(data)
def change_cart_status(request):
carid = request.GET.get('cartid')
cartmodel = CartModel.objects.get(pk=carid)
cartmodel.c_goods_select = not cartmodel.c_goods_select
cartmodel.save()
is_all_select = True
userid = request.session.get('user_id')
cartmodels = CartModel.objects.filter(c_user_id=userid).filter(c_goods_select=False)
if cartmodels.exists():
is_all_select = False
data = {
'status': '200',
'msg': 'ok',
'is_select': cartmodel.c_goods_select,
'is_all_select': is_all_select,
'total_price': '{:.2f}'.format(calc_total(userid))
}
return JsonResponse(data)
def change_carts_status(request):
carts = request.GET.get('carts')
print(carts)
cart_list = carts.split("#")
print(cart_list)
select = request.GET.get('select')
print(select)
print(type(select))
if select == 'true':
print('selected')
is_select = True
else:
print('not selected')
is_select = False
for cartid in cart_list:
cartmodel = CartModel.objects.get(pk=cartid)
cartmodel.c_goods_select = is_select
cartmodel.save()
data = {
'msg': 'ok',
'status': '200',
'total_price': '{:.2f}'.format(calc_total(request.session.get('user_id')))
}
return JsonResponse(data)
def calc_total(user_id):
cartmodels = CartModel.objects.filter(c_user_id=user_id).filter(c_goods_select=True)
total_price = 0
for cartmodel in cartmodels:
total_price += cartmodel.c_goods_num * cartmodel.c_goods.price
return total_price
def make_order(request):
carts = request.GET.get('carts')
cart_list = carts.split('#')
print(cart_list)
"""
从购物车到订单
- 从购物车中查出需要下单的数据
- 生成一个订单
- 生成订单商品信息(购物车表中导出)
- 删除购物车数据
"""
userid = request.session.get('user_id')
# create the order
order = OrderModel()
# bind the order to the user
order.o_user_id = userid
order.save()
# migrate the data
for cartid in cart_list:
# load the cart row to be purchased
cartmodel = CartModel.objects.get(pk=cartid)
# create the order-goods row
ordergoods = OrderGoods()
# attach it to the order
ordergoods.o_order = order
# copy over the goods and quantity
ordergoods.o_goods = cartmodel.c_goods
ordergoods.o_goods_num = cartmodel.c_goods_num
# persist it
ordergoods.save()
# remove the cart row
cartmodel.delete()
data = {
'msg': 'ok',
'status': '200',
'orderid': order.id
}
return JsonResponse(data)
def order_detail(request):
order_id = request.GET.get('order_id')
order = OrderModel.objects.get(pk=order_id)
# implicit reverse attribute: the parent side exposes an implicit manager object for its children (order.ordergoods_set)
# order.ordergoods_set
data = {
'order': order
}
return render(request, 'order/order_detail.html', context=data)
def alipay(request):
orderid = request.GET.get('orderid')
order = OrderModel.objects.get(pk=orderid)
order.o_status = order_status.PAYED
order.save()
data = {
'status': '200',
'msg': 'ok'
}
return JsonResponse(data)
def
|
order_list(
|
identifier_name
|
|
views.py
|
, childcid, order_rule):
foodtyps = Foodtype.objects.all()
if childcid == ALL_TYPE:
goods_list = Goods.objects.filter(categoryid=categoryid)
else:
goods_list = Goods.objects.filter(categoryid=categoryid).filter(childcid=childcid)
foodtype = Foodtype.objects.get(typeid=categoryid)
"""
order_rule
0 代表综合排序
1 代表价格升序
2 代表价格降序
3 竞价排名
"""
if order_rule == ORDER_TOTAL:
pass
elif order_rule == PRICE_ASC:
goods_list = goods_list.order_by("price")
elif order_rule == PRICE_DESC:
goods_list = goods_list.order_by("-price")
"""
全部类型:0#进口水果:110#国产水果:120
"""
childtypenames = foodtype.childtypenames
"""
[全部类型:0, 进口水果:110, 国产水果:120]
"""
childtypename_list = childtypenames.split("#")
child_type_list = []
"""
[[全部类型, 0], [进口水果, 110], [国产水果, 120]]
"""
for childtypename in childtypename_list:
child_type_list.append(childtypename.split(":"))
data = {
'title': '闪购',
'foodtypes': foodtyps,
'goods_list': goods_list,
'categoryid': int(categoryid),
'child_type_list': child_type_list,
'childcid': childcid,
'order_rule': order_rule,
}
return render(request, 'market/market.html', context=data)
def cart(request):
userid = request.session.get('user_id')
if not userid:
return redirect(reverse('axf:user_login'))
user = UserModel.objects.get(pk=userid)
cartmodels = user.cartmodel_set.all()
is_all_select = True
total_price = 0
for cartmodel in cartmodels:
if not cartmodel.c_goods_select:
is_all_select = False
else:
total_price += cartmodel.c_goods_num * cartmodel.c_goods.price
data = {
'title': '购物车',
'cartmodels': cartmodels,
'is_all_select': is_all_select,
'total_price': '{:.2f}'.format(total_price)
}
return render(request, 'cart/cart.html', context=data)
def mine(request):
is_login = False
user_id = request.session.get('user_id')
data = {
'title': '我的',
'is_login': is_login,
}
if user_id:
is_login = True
user = UserModel.objects.get(pk=user_id)
data['is_login'] = is_login
data['user_icon'] = '/static/upload/' + user.u_icon.url
data['username'] = user.u_name
ordered_count = OrderModel.objects.filter(o_user=user).filter(o_status=order_status.ORDERED).count()
if ordered_count > 0:
data['ordered_count'] = ordered_count
wait_receive_count = OrderModel.objects.filter(o_user=user).filter(o_status=order_status.PAYED).count()
if wait_receive_count > 0:
data['wait_receive_count'] = wait_receive_count
return render(request, 'mine/mine.html', context=data)
def user_register(request):
if request.method == "GET":
data = {
"title": '用户注册'
}
return render(request, 'user/user_register.html', context=data)
elif request.method == "POST":
username = request.POST.get('u_name')
password = request.POST.get('u_password')
email = request.POST.get('u_email')
icon = request.FILES.get('u_icon')
print(password)
user = UserModel()
user.u_name = username
user.u_email = email
user.u_icon = icon
user.set_password(password)
user.save()
request.session['user_id'] = user.id
send_mail_learn(username, email, user.id)
return redirect(reverse('axf:mine'))
def user_logout(request):
request.session.flush()
return redirect(reverse('axf:mine'))
def check_user(request):
username = request.GET.get("u_name")
# 0 or 1 matching users
users = UserModel.objects.filter(u_name=username)
data = {
'status': '200',
'msg': 'ok'
}
if users.exists():
# 801 means the user already exists
data['status'] = '801'
data['msg'] = 'already exists'
else:
data['msg'] = 'can use'
return JsonResponse(data)
def check_email(request):
email = request.GET.get('u_email')
users = UserModel.objects.filter(u_email=email)
data = {
'status': '200',
'msg': 'ok'
}
if users.exists():
data['status'] = '802'
data['msg'] = 'email already exists'
else:
data['msg'] = 'can use'
return JsonResponse(data)
def user_login(request):
if request.method == "GET":
msg = request.session.get('msg')
data = {
'title': '用户登录',
'msg': msg
}
return render(request, 'user/user_login.html', context=data)
elif request.method == "POST":
username = request.POST.get('u_name')
password = request.POST.get('u_password')
users = UserModel.objects.filter(u_name=username)
if users.exists():
user = users.first()
if user.check_password(password):
if not user.is_active:
request.session['msg'] = '用户未激活'
return redirect(reverse('axf:user_login'))
request.session['user_id'] = user.id
return redirect(reverse('axf:mine'))
else:
request.session['msg'] = '密码错误'
# wrong password
return redirect(reverse('axf:user_login'))
else:
request.session['msg'] = '用户不存在'
# user does not exist
return redirect(reverse('axf:user_login'))
"""
激活
能找到用户的方式
- 根据用户唯一标识
修改用户状态
"""
def send_mail_learn(username, email, userid):
subject = '爱鲜蜂VIP激活邮件'
message = ""
recipient_list = [email, ]
temp = loader.get_template('user/user_active.html')
token = str(uuid.uuid4())
cache.set(token, userid, timeout=60 * 60)
data = {
'username': username,
|
True
user.save()
return HttpResponse('用户激活成功')
def add_to_cart(request):
goodsid = request.GET.get('goodsid')
userid = request.session.get('user_id')
print(goodsid)
data = {
'status': '200',
'msg': 'ok'
}
if not userid:
data['status'] = '302'
data['msg'] = 'not login'
else:
# add the cart record
goods = Goods.objects.get(pk=goodsid)
user = UserModel.objects.get(pk=userid)
cartmodels = CartModel.objects.filter(c_goods=goods).filter(c_user=user)
# cartmodels = CartModel.objects.filter(c_goods_id=goodsid).filter(c_user_id=userid)
# at most one matching row
if cartmodels.exists():
cartmodel = cartmodels.first()
cartmodel.c_goods_num = cartmodel.c_goods_num + 1
cartmodel.save()
else:
cartmodel = CartModel()
cartmodel.c_goods = goods
cartmodel.c_user = user
cartmodel.save()
data['goods_num'] = cartmodel.c_goods_num
return JsonResponse(data)
def sub_to_cart(request):
cartid = request.GET.get('cartid')
cart_model = CartModel.objects.get(pk=cartid)
data = {
'status': '200',
'msg': 'ok'
}
if cart_model.c_goods_num == 1:
cart_model.delete()
data['goods_num'] = 0
else:
cart_model.c_goods_num = cart_model.c_goods_num - 1
cart_model.save()
data['goods_num'] = cart_model.c_goods_num
data['total_price'] = '{:.2f}'.format(calc_total(request.session.get('user_id')))
return JsonResponse(data)
def change_cart_status(request):
carid = request.GET.get('cartid')
cartmodel = CartModel.objects.get(pk=carid)
cartmodel.c_goods_select = not cartmodel.c_goods_select
cartmodel
|
'active_url': 'http://127.0.0.1:8001/axf/activeuser/?utoken=%s' % token,
}
html = temp.render(data)
send_mail(subject, message, 'rongjiawei1204@163.com', recipient_list, html_message=html)
def active_user(request):
user_token = request.GET.get('utoken')
user_id = cache.get(user_token)
cache.delete(user_token)
if not user_id:
return HttpResponse("激活已过期,请重新申请激活邮件")
user = UserModel.objects.get(pk=user_id)
user.is_active =
|
identifier_body
|
views.py
|
, childcid, order_rule):
foodtyps = Foodtype.objects.all()
if childcid == ALL_TYPE:
goods_list = Goods.objects.filter(categoryid=categoryid)
else:
goods_list = Goods.objects.filter(categoryid=categoryid).filter(childcid=childcid)
foodtype = Foodtype.objects.get(typeid=categoryid)
"""
order_rule
0 代表综合排序
1 代表价格升序
2 代表价格降序
3 竞价排名
"""
if order_rule == ORDER_TOTAL:
pass
elif order_rule == PRICE_ASC:
goods_list = goods_list.order_by("price")
elif order_rule == PRICE_DESC:
goods_list = goods_list.order_by("-price")
"""
全部类型:0#进口水果:110#国产水果:120
"""
childtypenames = foodtype.childtypenames
"""
[全部类型:0, 进口水果:110, 国产水果:120]
"""
childtypename_list = childtypenames.split("#")
child_type_list = []
"""
[[全部类型, 0], [进口水果, 110], [国产水果, 120]]
"""
for childtypename in childtypename_list:
child_type_list.append(childtypename.split(":"))
data = {
'title': '闪购',
'foodtypes': foodtyps,
'goods_list': goods_list,
'categoryid': int(categoryid),
'child_type_list': child_type_list,
'childcid': childcid,
'order_rule': order_rule,
}
return render(request, 'market/market.html', context=data)
def cart(request):
userid = request.session.get('user_id')
if not userid:
return redirect(reverse('axf:user_login'))
user = UserModel.objects.get(pk=userid)
cartmodels = user.cartmodel_set.all()
is_all_select = True
total_price = 0
for cartmodel in cartmodels:
if not cartmodel.c_goods_select:
is_all_select = False
else:
total_price += cartmodel.c_goods_num * cartmodel.c_goods.price
data = {
'title': '购物车',
'cartmodels': cartmodels,
'is_all_select': is_all_select,
'total_price': '{:.2f}'.format(total_price)
}
return render(request, 'cart/cart.html', context=data)
|
def mine(request):
is_login = False
user_id = request.session.get('user_id')
data = {
'title': '我的',
'is_login': is_login,
}
if user_id:
is_login = True
user = UserModel.objects.get(pk=user_id)
data['is_login'] = is_login
data['user_icon'] = '/static/upload/' + user.u_icon.url
data['username'] = user.u_name
ordered_count = OrderModel.objects.filter(o_user=user).filter(o_status=order_status.ORDERED).count()
if ordered_count > 0:
data['ordered_count'] = ordered_count
wait_receive_count = OrderModel.objects.filter(o_user=user).filter(o_status=order_status.PAYED).count()
if wait_receive_count > 0:
data['wait_receive_count'] = wait_receive_count
return render(request, 'mine/mine.html', context=data)
def user_register(request):
if request.method == "GET":
data = {
"title": '用户注册'
}
return render(request, 'user/user_register.html', context=data)
elif request.method == "POST":
username = request.POST.get('u_name')
password = request.POST.get('u_password')
email = request.POST.get('u_email')
icon = request.FILES.get('u_icon')
print(password)
user = UserModel()
user.u_name = username
user.u_email = email
user.u_icon = icon
user.set_password(password)
user.save()
request.session['user_id'] = user.id
send_mail_learn(username, email, user.id)
return redirect(reverse('axf:mine'))
def user_logout(request):
request.session.flush()
return redirect(reverse('axf:mine'))
def check_user(request):
username = request.GET.get("u_name")
# 0 or 1 matching users
users = UserModel.objects.filter(u_name=username)
data = {
'status': '200',
'msg': 'ok'
}
if users.exists():
# 801 means the user already exists
data['status'] = '801'
data['msg'] = 'already exists'
else:
data['msg'] = 'can use'
return JsonResponse(data)
def check_email(request):
email = request.GET.get('u_email')
users = UserModel.objects.filter(u_email=email)
data = {
'status': '200',
'msg': 'ok'
}
if users.exists():
data['status'] = '802'
data['msg'] = 'email already exists'
else:
data['msg'] = 'can use'
return JsonResponse(data)
def user_login(request):
if request.method == "GET":
msg = request.session.get('msg')
data = {
'title': '用户登录',
'msg': msg
}
return render(request, 'user/user_login.html', context=data)
elif request.method == "POST":
username = request.POST.get('u_name')
password = request.POST.get('u_password')
users = UserModel.objects.filter(u_name=username)
if users.exists():
user = users.first()
if user.check_password(password):
if not user.is_active:
request.session['msg'] = '用户未激活'
return redirect(reverse('axf:user_login'))
request.session['user_id'] = user.id
return redirect(reverse('axf:mine'))
else:
request.session['msg'] = '密码错误'
# wrong password
return redirect(reverse('axf:user_login'))
else:
request.session['msg'] = '用户不存在'
# user does not exist
return redirect(reverse('axf:user_login'))
"""
激活
能找到用户的方式
- 根据用户唯一标识
修改用户状态
"""
def send_mail_learn(username, email, userid):
subject = '爱鲜蜂VIP激活邮件'
message = ""
recipient_list = [email, ]
temp = loader.get_template('user/user_active.html')
token = str(uuid.uuid4())
cache.set(token, userid, timeout=60 * 60)
data = {
'username': username,
'active_url': 'http://127.0.0.1:8001/axf/activeuser/?utoken=%s' % token,
}
html = temp.render(data)
send_mail(subject, message, 'rongjiawei1204@163.com', recipient_list, html_message=html)
def active_user(request):
user_token = request.GET.get('utoken')
user_id = cache.get(user_token)
cache.delete(user_token)
if not user_id:
return HttpResponse("激活已过期,请重新申请激活邮件")
user = UserModel.objects.get(pk=user_id)
user.is_active = True
user.save()
return HttpResponse('用户激活成功')
def add_to_cart(request):
goodsid = request.GET.get('goodsid')
userid = request.session.get('user_id')
print(goodsid)
data = {
'status': '200',
'msg': 'ok'
}
if not userid:
data['status'] = '302'
data['msg'] = 'not login'
else:
# add the cart record
goods = Goods.objects.get(pk=goodsid)
user = UserModel.objects.get(pk=userid)
cartmodels = CartModel.objects.filter(c_goods=goods).filter(c_user=user)
# cartmodels = CartModel.objects.filter(c_goods_id=goodsid).filter(c_user_id=userid)
# at most one matching row
if cartmodels.exists():
cartmodel = cartmodels.first()
cartmodel.c_goods_num = cartmodel.c_goods_num + 1
cartmodel.save()
else:
cartmodel = CartModel()
cartmodel.c_goods = goods
cartmodel.c_user = user
cartmodel.save()
data['goods_num'] = cartmodel.c_goods_num
return JsonResponse(data)
def sub_to_cart(request):
cartid = request.GET.get('cartid')
cart_model = CartModel.objects.get(pk=cartid)
data = {
'status': '200',
'msg': 'ok'
}
if cart_model.c_goods_num == 1:
cart_model.delete()
data['goods_num'] = 0
else:
cart_model.c_goods_num = cart_model.c_goods_num - 1
cart_model.save()
data['goods_num'] = cart_model.c_goods_num
data['total_price'] = '{:.2f}'.format(calc_total(request.session.get('user_id')))
return JsonResponse(data)
def change_cart_status(request):
carid = request.GET.get('cartid')
cartmodel = CartModel.objects.get(pk=carid)
cartmodel.c_goods_select = not cartmodel.c_goods_select
cartmodel
|
random_line_split
|
|
elgamal.rs
|
V3bQvhB1tg5cCsTH~VNjts4taDTPWfDZmjtVaxxr\
PRII4NEDKqEzg3JBevM~yft-RDfMc8RVlm-gCGANrRQORFii7uD3o9~y~4P2tLnO7Fy3m5\
rdjRsOsWnCQZzw37mcBoT9rEZPrVpD8pjebJ1~HNc764xIpXDWVt8CbA==",
},
TestVector {
msg: "\0x00",
ct: "AHDZBKiWeaIYQS9R1l70IlRnoplwKTkLP2dLlXmVh1gB33kx65uX8OMb3hdZEO0Bbzxkkx\
quqlNn5w166nJO4nPbpEzVfgtY4ClUuv~W4H4CXBr0FcZM1COAkd6rtp6~lUp7cZ8FAkpH\
spl95IxlFM-F1HwiPcbmTjRO1AwCal4sH8S5WmJCvBU6jH6pBPo~9B9vAtP7vX1EwsG2Jf\
CQXkVkfvbWpSicbsWn77aECedS3HkIMrXrxojp7gAiPgQhX4NR387rcUPFsMHGeUraTUPZ\
D7ctk5tpUuYYwRQc5cRKHa4zOq~AQyljx5w5~FByLda--6yCe7qDcILyTygudJ4AHRs1pJ\
RU3uuRTHZx0XJQo~cPsoQ2piAOohITX9~yMCimCgv2EIhY3Z-mAgo8qQ4iMbItoE1cl93I\
u2YV2n4wMq9laBx0shuKOJqO3rjRnszzCbqMuFAXfc3KgGDEaCpI7049s3i2yIcv4vT9uU\
AlrM-dsrdw0JgJiFYl0JXh~TO0IyrcVcLpgZYgRhEvTAdkDNwTs-2GK4tzdPEd34os4a2c\
DPL8joh3jhp~eGoRzrpcdRekxENdzheL4w3wD1fJ9W2-leil1FH6EPc3FSL6e~nqbw69gN\
bsuXAMQ6CobukJdJEy37uKmEw4v6WPyfYMUUacchv1JoNfkHLpnAWifQ==",
},
TestVector {
msg: "\0x00\0x00\0x00",
ct: "AGwvKAMJcPAliP-n7F0Rrj0JMRaFGjww~zvBjyzc~SPJrBF831cMqZFRmMHotgA7S5BrH2\
6CL8okI2N-7as0F2l7OPx50dFEwSVSjqBjVV6SGRFC8oS-ii1FURMz2SCHSaj6kazAYq4s\
DwyqR7vnUrOtPnZujHSU~a02jinyn-QOaHkxRiUp-Oo0jlZiU5xomXgLdkhtuz6725WUDj\
3uVlMtIYfeKQsTdasujHe1oQhUmp58jfg5vgZ8g87cY8rn4p9DRwDBBuo6vi5on7T13sGx\
tY9wz6HTpwzDhEqpNrj~h4JibElfi0Jo8ZllmNTO1ZCNpUQgASoTtyFLD5rk6cIAMK0R7A\
7hjB0aelKM-V7AHkj-Fhrcm8xIgWhKaLn2wKbVNpAkllkiLALyfWJ9dhJ804RWQTMPE-GD\
kBMIFOOJ9MhpEN533OBQDwUKcoxMjl0zOMNCLx8IdCE6cLtUDKJXLB0atnDpLkBer6FwXP\
81EvKDYhtp1GsbiKvZDt8LSPJQnm2EdA3Pr9fpAisJ5Ocaxlfa6~uQCuqGA9nJ9n6w03u-\
ZpSMhSh4zm2s1MqijmaJRc-QNKmN~u1hh3R2hwWNi7FoStMA87sutEBXMdFI8un7StHNSE\
iCYwmmW2Nu3djkM-X8gGjSsdrphTU7uOXbwazmguobFGxI0JujYruM5Q==",
},
TestVector {
msg: "\0x00\0x01\0x02\0x00",
ct: "ALFYtPSwEEW3eTO4hLw6PZNlBKoSIseQNBi034gq6FwYEZsJOAo-1VXcvMviKw2MCP9ZkH\
lTNBfzc79ms2TU8kXxc7zwUc-l2HJLWh6dj2tIQLR8bbWM7U0iUx4XB1B-FEvdhbjz7dsu\
6SBXVhxo2ulrk7Q7vX3kPrePhZZldcNZcS0t65DHYYwL~E~ROjQwOO4Cb~8FgiIUjb8CCN\
w5zxJpBaEt7UvZffkVwj-EWTzFy3DIjWIRizxnsI~mUI-VspPE~xlmFX~TwPS9UbwJDpm8\
-WzINFcehSzF3y9rzSMX-KbU8m4YZj07itZOiIbWgLeulTUB-UgwEkfJBG0xiSUAspZf2~\
t~NthBlpcdrBLADXTJ7Jmkk4MIfysV~JpDB7IVg0v4WcUUwF3sYMmBCdPCwyYf0hTrl2Yb\
L6kmm4u97WgQqf0TyzXtVZYwjct4LzZlyH591y6O6AQ4Fydqos9ABInzu-SbXq6S1Hi6vr\
aNWU3mcy2myie32EEXtkX7P8eXWY35GCv9ThPEYHG5g1qKOk95ZCTYYwlpgeyaMKsnN3C~\
x9TJA8K8T44v7vE6--Nw4Z4zjepwkIOht9iQsA6D6wRUQpeYX8bjIyYDPC7GUHq0WhXR6E\
6Ojc9k8V5uh0SZ-rCQX6sccdk3JbyRhjGP4rSKr6MmvxVVsqBjcbpxsg==",
},
];
let enc = {
let mut data = [0u8; 256];
data.copy_from_slice(&I2P_BASE64.decode(pub_key.as_bytes()).unwrap());
Encryptor::from(&PublicKey(data))
};
let dec = {
let mut data = [0u8; 256];
data.copy_from_slice(&I2P_BASE64.decode(priv_key.as_bytes()).unwrap());
Decryptor::from(&PrivateKey(data))
};
for tv in test_vectors {
let msg = tv.msg.as_bytes();
let ct = I2P_BASE64.decode(tv.ct.as_bytes()).unwrap();
// Check round-trip
assert_eq!(
dec.decrypt(&enc.encrypt(msg, true).unwrap(), true).unwrap(),
msg
);
assert_eq!(
dec.decrypt(&enc.encrypt(msg, false).unwrap(), false)
.unwrap(),
msg
);
|
// Check test vector
assert_eq!(dec.decrypt(&ct, true).unwrap(), msg);
}
|
random_line_split
|
|
elgamal.rs
|
};
// γ = α^k mod p
let gamma = ELGAMAL_G.modpow(&k, &ELGAMAL_P);
(k, gamma)
}
/// Generates ElGamal keypairs.
pub struct KeyPairGenerator;
impl KeyPairGenerator {
/// ElGamal key generation, following algorithm 8.17.
pub fn generate() -> (PrivateKey, PublicKey) {
// Select a random integer a, 1 <= a <= p - 2
// Public key is α^a mod p
let (a, alpha_a) = gen_gamma_k();
let priv_key = {
let buf = rectify(&a, 256);
let mut x = [0u8; 256];
x.copy_from_slice(&buf[..]);
PrivateKey(x)
};
let pub_key = {
let buf = rectify(&alpha_a, 256);
let mut x = [0u8; 256];
x.copy_from_slice(&buf[..]);
PublicKey(x)
};
(priv_key, pub_key)
}
}
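// Note (added for clarity): key generation reuses gen_gamma_k() above, so the
// random exponent doubles as the private key a and γ = α^a mod p becomes the
// public key; both are then encoded into fixed 256-byte buffers via rectify().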
pub struct Encryptor(BigUint);
impl<'a> From<&'a PublicKey> for Encryptor {
fn from(pub_key: &PublicKey) -> Self {
Encryptor(BigUint::from_bytes_be(&pub_key.0[..]))
}
}
impl Encryptor {
/// Basic ElGamal encryption, following algorithm 8.18 1).
fn encrypt_basic(&self, msg: &[u8]) -> Result<(BigUint, BigUint), Error> {
// Represent the message as an integer m in the range {0, 1, ..., p - 1}
let m = BigUint::from_bytes_be(msg);
if m > *ELGAMAL_PM1 {
return Err(Error::InvalidMessage);
}
// Select a random integer k, 1 <= k <= p - 2
// γ = α^k mod p
let (k, gamma) = gen_gamma_k();
// δ = m * (α^a)^k mod p
let s = self.0.modpow(&k, &ELGAMAL_P);
let delta = m.mul(s).rem(&(*ELGAMAL_P));
Ok((gamma, delta))
}
/// ElGamal encryption using I2P's message and ciphertext encoding schemes.
pub fn encrypt(&self, msg: &[u8], include_zeroes: bool) -> Result<Vec<u8>, Error> {
// Message must be no more than 222 bytes
if msg.len() > 222 {
return Err(Error::InvalidMessage);
}
let mut rng = OsRng;
let hash = Sha256::digest(msg);
// ElGamal plaintext:
// 0 1 33
// | nonzero byte | SHA256(msg) | msg |
let mut data = Vec::with_capacity(33 + msg.len());
data.push(loop {
let val = rng.gen();
if val != 0 {
break val;
}
});
data.extend_from_slice(hash.as_slice());
data.extend_from_slice(msg);
self.encrypt_basic(&data).map(|(gamma, delta)| {
if include_zeroes {
// ElGamal ciphertext:
// 0 1 257 258 514
// | 0 | padding zeroes | gamma | 0 | padding zeroes | delta |
let gamma = rectify(&gamma, 256);
let delta = rectify(&delta, 256);
let mut ct = vec![0; 514];
ct[1..257].copy_from_slice(&gamma);
ct[258..514].copy_from_slice(&delta);
ct
} else {
// ElGamal ciphertext:
// 0 256 512
// | padding zeroes | gamma | padding zeroes | delta |
let gamma = rectify(&gamma, 256);
let delta = rectify(&delta, 256);
let mut ct = vec![0; 512];
ct[0..256].copy_from_slice(&gamma);
ct[256..512].copy_from_slice(&delta);
ct
}
})
}
}
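// The two ciphertext encodings above differ only by a marker zero byte
// before each 256-byte half. A small sketch converting the 514-byte form
// to the 512-byte form (the helper name is illustrative, not part of this
// file's API):
fn strip_marker_zeroes(ct: &[u8]) -> Option<Vec<u8>> {
    if ct.len() != 514 || ct[0] != 0 || ct[257] != 0 {
        return None;
    }
    let mut out = Vec::with_capacity(512);
    out.extend_from_slice(&ct[1..257]);   // gamma
    out.extend_from_slice(&ct[258..514]); // delta
    Some(out)
}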
#[derive(Clone)]
pub struct Decryptor(BigUint);
impl<'a> From<&'a PrivateKey> for Decryptor {
fn from(priv_key: &PrivateKey) -> Self {
Decryptor(BigUint::from_bytes_be(&priv_key.0[..]))
}
}
impl Decryptor {
/// Basic ElGamal decryption, following algorithm 8.18 2).
fn decrypt_basic(&self, (gamma, delta): (BigUint, BigUint)) -> Vec<u8> {
// γ^{-a} = γ^{p-1-a}
let gamma_neg_a = gamma.modpow(&(&(*ELGAMAL_PM1)).sub(&self.0), &ELGAMAL_P);
// m = (γ^{-a}) * δ mod p
let m = gamma_neg_a.mul(delta).rem(&(*ELGAMAL_P));
m.to_bytes_be()
}
/// ElGamal decryption using I2P's message and ciphertext encoding schemes.
pub fn decrypt(&self, ct: &[u8], has_zeroes: bool) -> Result<Vec<u8>, Error> {
let (gamma, delta) = if has_zeroes {
// Ciphertext must be 514 bytes
if ct.len() != 514 {
return Err(Error::InvalidCiphertext);
}
// ElGamal ciphertext:
// 0 1 257 258 514
// | 0 | padding zeroes | gamma | 0 | padding zeroes | delta |
let gamma = BigUint::from_bytes_be(&ct[..257]);
let delta = BigUint::from_bytes_be(&ct[257..]);
(gamma, delta)
} else {
// Ciphertext must be 512 bytes
if ct.len() != 512 {
return Err(Error::InvalidCiphertext);
}
// ElGamal ciphertext:
// 0 256 512
// | padding zeroes | gamma | padding zeroes | delta |
let gamma = BigUint::from_bytes_be(&ct[..256]);
let delta = BigUint::from_bytes_be(&ct[256..]);
(gamma, delta)
};
let data = self.decrypt_basic((gamma, delta));
if data.len() < 33 {
// Decrypted data is too small
return Err(Error::InvalidCiphertext);
}
// ElGamal plaintext:
// 0 1 33
// | nonzero byte | SHA256(msg) | msg |
let msg = data[33..].to_vec();
let hash = Sha256::digest(&msg);
if hash.as_slice() == &data[1..33] {
Ok(msg)
} else {
Err(Error::InvalidCiphertext)
}
}
}
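// Why the leading nonzero byte matters: `decrypt_basic` returns
// `m.to_bytes_be()`, and BigUint serialization drops leading zero bytes.
// Forcing plaintext byte 0 to be nonzero guarantees no bytes are silently
// stripped, so the digest really sits at data[1..33] and the message at
// data[33..].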
#[cfg(test)]
mod tests {
use super::{Decryptor, Encryptor, KeyPairGenerator};
use crate::constants::I2P_BASE64;
use crate::crypto::{PrivateKey, PublicKey};
#[test]
fn round_trip_basic() {
let (priv_key, pub_key) = KeyPairGenerator::generate();
let enc = Encryptor::from(&pub_key);
let dec = Decryptor::from(&priv_key);
// All-zeroes message is returned as a single byte
let msg = [0u8; 256];
let ct = enc.encrypt_basic(&msg[..]).unwrap();
let pt = dec.decrypt_basic(ct);
assert_eq!(&pt, &[0]);
// All-ones message is returned as-is
let msg = [1u8; 256];
let ct = enc.encrypt_basic(&msg[..]).unwrap();
let pt = dec.decrypt_basic(ct);
assert_eq!(&pt[..], &msg[..]);
}
#[test]
fn round_trip() {
let (priv_key, pub_key) = KeyPairGenerator::generate();
let enc = Encryptor::from(&pub_key);
let dec = Decryptor::from(&priv_key);
// Message too long
assert!(enc.encrypt(&[0u8; 223], true).is_err());
// Full-width all-zeroes message
let msg = [0u8; 222];
let ct = enc.encrypt(&msg[..], true).unwrap();
let pt = dec.decrypt(&ct, true).unwrap();
assert_eq!(&pt[..], &msg[..]);
// Short all-zeroes message
let msg = [0u8; 8];
let ct = enc.encrypt(&msg[..], true).unwrap();
let pt = dec.decrypt(&ct, true).unwrap();
assert_eq!(&pt[..], &msg[..]);
// Full-width all-ones message
let msg = [1u8; 222];
let ct = enc.encrypt(&msg[..], true
|
{
break k;
}
|
conditional_block
|
|
elgamal.rs
|
self.encrypt_basic(&data).map(|(gamma, delta)| {
if include_zeroes {
// ElGamal ciphertext:
// 0 1 257 258 514
// | 0 | padding zeroes | gamma | 0 | padding zeroes | delta |
let gamma = rectify(&gamma, 256);
let delta = rectify(&delta, 256);
let mut ct = vec![0; 514];
ct[1..257].copy_from_slice(&gamma);
ct[258..514].copy_from_slice(&delta);
ct
} else {
// ElGamal ciphertext:
// 0 256 512
// | padding zeroes | gamma | padding zeroes | delta |
let gamma = rectify(&gamma, 256);
let delta = rectify(&delta, 256);
let mut ct = vec![0; 512];
ct[0..256].copy_from_slice(&gamma);
ct[256..512].copy_from_slice(&delta);
ct
}
})
}
}
#[derive(Clone)]
pub struct Decryptor(BigUint);
impl<'a> From<&'a PrivateKey> for Decryptor {
fn from(priv_key: &PrivateKey) -> Self {
Decryptor(BigUint::from_bytes_be(&priv_key.0[..]))
}
}
impl Decryptor {
/// Basic ElGamal decryption, following algorithm 8.18 2).
fn decrypt_basic(&self, (gamma, delta): (BigUint, BigUint)) -> Vec<u8> {
// γ^{-a} = γ^{p-1-a}
let gamma_neg_a = gamma.modpow(&(&(*ELGAMAL_PM1)).sub(&self.0), &ELGAMAL_P);
// m = (γ^{-a}) * δ mod p
let m = gamma_neg_a.mul(delta).rem(&(*ELGAMAL_P));
m.to_bytes_be()
}
/// ElGamal decryption using I2P's message and ciphertext encoding schemes.
pub fn decrypt(&self, ct: &[u8], has_zeroes: bool) -> Result<Vec<u8>, Error> {
let (gamma, delta) = if has_zeroes {
// Ciphertext must be 514 bytes
if ct.len() != 514 {
return Err(Error::InvalidCiphertext);
}
// ElGamal ciphertext:
// 0 1 257 258 514
// | 0 | padding zeroes | gamma | 0 | padding zeroes | delta |
let gamma = BigUint::from_bytes_be(&ct[..257]);
let delta = BigUint::from_bytes_be(&ct[257..]);
(gamma, delta)
} else {
// Ciphertext must be 512 bytes
if ct.len() != 512 {
return Err(Error::InvalidCiphertext);
}
// ElGamal ciphertext:
// 0 256 512
// | padding zeroes | gamma | padding zeroes | delta |
let gamma = BigUint::from_bytes_be(&ct[..256]);
let delta = BigUint::from_bytes_be(&ct[256..]);
(gamma, delta)
};
let data = self.decrypt_basic((gamma, delta));
if data.len() < 33 {
// Decrypted data is too small
return Err(Error::InvalidCiphertext);
}
// ElGamal plaintext:
// 0 1 33
// | nonzero byte | SHA256(msg) | msg |
let msg = data[33..].to_vec();
let hash = Sha256::digest(&msg);
if hash.as_slice() == &data[1..33] {
Ok(msg)
} else {
Err(Error::InvalidCiphertext)
}
}
}
#[cfg(test)]
mod tests {
use super::{Decryptor, Encryptor, KeyPairGenerator};
use crate::constants::I2P_BASE64;
use crate::crypto::{PrivateKey, PublicKey};
#[test]
fn round_trip_basic() {
let (priv_key, pub_key) = KeyPairGenerator::generate();
let enc = Encryptor::from(&pub_key);
let dec = Decryptor::from(&priv_key);
// All-zeroes message is returned as a single byte
let msg = [0u8; 256];
let ct = enc.encrypt_basic(&msg[..]).unwrap();
let pt = dec.decrypt_basic(ct);
assert_eq!(&pt, &[0]);
// All-ones message is returned as-is
let msg = [1u8; 256];
let ct = enc.encrypt_basic(&msg[..]).unwrap();
let pt = dec.decrypt_basic(ct);
assert_eq!(&pt[..], &msg[..]);
}
#[test]
fn round_trip() {
let (priv_key, pub_key) = KeyPairGenerator::generate();
let enc = Encryptor::from(&pub_key);
let dec = Decryptor::from(&priv_key);
// Message too long
assert!(enc.encrypt(&[0u8; 223], true).is_err());
// Full-width all-zeroes message
let msg = [0u8; 222];
let ct = enc.encrypt(&msg[..], true).unwrap();
let pt = dec.decrypt(&ct, true).unwrap();
assert_eq!(&pt[..], &msg[..]);
// Short all-zeroes message
let msg = [0u8; 8];
let ct = enc.encrypt(&msg[..], true).unwrap();
let pt = dec.decrypt(&ct, true).unwrap();
assert_eq!(&pt[..], &msg[..]);
// Full-width all-ones message
let msg = [1u8; 222];
let ct = enc.encrypt(&msg[..], true).unwrap();
let pt = dec.decrypt(&ct, true).unwrap();
assert_eq!(&pt[..], &msg[..]);
// Short all-ones message
let msg = [1u8; 8];
let ct = enc.encrypt(&msg[..], true).unwrap();
let pt = dec.decrypt(&ct, true).unwrap();
assert_eq!(&pt[..], &msg[..]);
}
/// From `core/java/test/junit/net/i2p/crypto/ElGamalTest.java` in Java I2P.
#[test]
fn test_vectors() {
let pub_key = "pOvBUMrSUUeN5awynzbPbCAwe3MqWprhSpp3OR7pvdfm9PhWaNbPoKRLeEmDoUwyNDoHE0\
E6mcZSG8qPQ8XUZFlczpilOl0MJBvsI9u9SMyi~bEqzSgzh9FNfS-NcGji3q2wI~Ux~q5B\
KOjGlyMLgd1nxl5R5wIYL4uHKZNaYuArsRYmtV~MgMQPGvDtIbdGTV6aL6UbOYryzQSUMY\
OuO3S~YoBjA6Nmi0SeJM3tyTxlI6U1EYjR6oQcI4SOFUW4L~8pfYWijcncCODAqpXVN6ZI\
AJ3a6vjxGu56IDp4xCcKlOEHgdXvqmEC67dR5qf2btH6dtWoB3-Z6QPsS6tPTQ==";
let priv_key = "gMlIhURVXU8uPube20Xr8E1K11g-3qZxOj1riThHqt-rBx72MPq5ivT1rr28cE9mzOmsXi\
bbsuBuQKYDvF7hGICRB3ROSPePYhcupV3j7XiXUIYjWNw9hvylHXK~nTT7jkpIBazBJZfr\
LJPcDZTDB0YnCOHOL-KFn4N1R5B22g0iYRABN~O10AUjQmf1ep
|
// Message must be no more than 222 bytes
if msg.len() > 222 {
return Err(Error::InvalidMessage);
}
let mut rng = OsRng;
let hash = Sha256::digest(msg);
// ElGamal plaintext:
// 0 1 33
// | nonzero byte | SHA256(msg) | msg |
let mut data = Vec::with_capacity(33 + msg.len());
data.push(loop {
let val = rng.gen();
if val != 0 {
break val;
}
});
data.extend_from_slice(hash.as_slice());
data.extend_from_slice(msg);
|
identifier_body
|
|
elgamal.rs
|
(gamma, delta): (BigUint, BigUint)) -> Vec<u8> {
// γ^{-a} = γ^{p-1-a}
let gamma_neg_a = gamma.modpow(&(&(*ELGAMAL_PM1)).sub(&self.0), &ELGAMAL_P);
// m = (γ^{-a}) * δ mod p
let m = gamma_neg_a.mul(delta).rem(&(*ELGAMAL_P));
m.to_bytes_be()
}
/// ElGamal decryption using I2P's message and ciphertext encoding schemes.
pub fn decrypt(&self, ct: &[u8], has_zeroes: bool) -> Result<Vec<u8>, Error> {
let (gamma, delta) = if has_zeroes {
// Ciphertext must be 514 bytes
if ct.len() != 514 {
return Err(Error::InvalidCiphertext);
}
// ElGamal ciphertext:
// 0 1 257 258 514
// | 0 | padding zeroes | gamma | 0 | padding zeroes | delta |
let gamma = BigUint::from_bytes_be(&ct[..257]);
let delta = BigUint::from_bytes_be(&ct[257..]);
(gamma, delta)
} else {
// Ciphertext must be 512 bytes
if ct.len() != 512 {
return Err(Error::InvalidCiphertext);
}
// ElGamal ciphertext:
// 0 256 512
// | padding zeroes | gamma | padding zeroes | delta |
let gamma = BigUint::from_bytes_be(&ct[..256]);
let delta = BigUint::from_bytes_be(&ct[256..]);
(gamma, delta)
};
let data = self.decrypt_basic((gamma, delta));
if data.len() < 33 {
// Decrypted data is too small
return Err(Error::InvalidCiphertext);
}
// ElGamal plaintext:
// 0 1 33
// | nonzero byte | SHA256(msg) | msg |
let msg = data[33..].to_vec();
let hash = Sha256::digest(&msg);
if hash.as_slice() == &data[1..33] {
Ok(msg)
} else {
Err(Error::InvalidCiphertext)
}
}
}
#[cfg(test)]
mod tests {
use super::{Decryptor, Encryptor, KeyPairGenerator};
use crate::constants::I2P_BASE64;
use crate::crypto::{PrivateKey, PublicKey};
#[test]
fn round_trip_basic() {
let (priv_key, pub_key) = KeyPairGenerator::generate();
let enc = Encryptor::from(&pub_key);
let dec = Decryptor::from(&priv_key);
// All-zeroes message is returned as a single byte
let msg = [0u8; 256];
let ct = enc.encrypt_basic(&msg[..]).unwrap();
let pt = dec.decrypt_basic(ct);
assert_eq!(&pt, &[0]);
// All-ones message is returned as-is
let msg = [1u8; 256];
let ct = enc.encrypt_basic(&msg[..]).unwrap();
let pt = dec.decrypt_basic(ct);
assert_eq!(&pt[..], &msg[..]);
}
#[test]
fn round_trip() {
let (priv_key, pub_key) = KeyPairGenerator::generate();
let enc = Encryptor::from(&pub_key);
let dec = Decryptor::from(&priv_key);
// Message too long
assert!(enc.encrypt(&[0u8; 223], true).is_err());
// Full-width all-zeroes message
let msg = [0u8; 222];
let ct = enc.encrypt(&msg[..], true).unwrap();
let pt = dec.decrypt(&ct, true).unwrap();
assert_eq!(&pt[..], &msg[..]);
// Short all-zeroes message
let msg = [0u8; 8];
let ct = enc.encrypt(&msg[..], true).unwrap();
let pt = dec.decrypt(&ct, true).unwrap();
assert_eq!(&pt[..], &msg[..]);
// Full-width all-ones message
let msg = [1u8; 222];
let ct = enc.encrypt(&msg[..], true).unwrap();
let pt = dec.decrypt(&ct, true).unwrap();
assert_eq!(&pt[..], &msg[..]);
// Short all-ones message
let msg = [1u8; 8];
let ct = enc.encrypt(&msg[..], true).unwrap();
let pt = dec.decrypt(&ct, true).unwrap();
assert_eq!(&pt[..], &msg[..]);
}
/// From `core/java/test/junit/net/i2p/crypto/ElGamalTest.java` in Java I2P.
#[test]
fn test_vectors() {
let pub_key = "pOvBUMrSUUeN5awynzbPbCAwe3MqWprhSpp3OR7pvdfm9PhWaNbPoKRLeEmDoUwyNDoHE0\
E6mcZSG8qPQ8XUZFlczpilOl0MJBvsI9u9SMyi~bEqzSgzh9FNfS-NcGji3q2wI~Ux~q5B\
KOjGlyMLgd1nxl5R5wIYL4uHKZNaYuArsRYmtV~MgMQPGvDtIbdGTV6aL6UbOYryzQSUMY\
OuO3S~YoBjA6Nmi0SeJM3tyTxlI6U1EYjR6oQcI4SOFUW4L~8pfYWijcncCODAqpXVN6ZI\
AJ3a6vjxGu56IDp4xCcKlOEHgdXvqmEC67dR5qf2btH6dtWoB3-Z6QPsS6tPTQ==";
let priv_key = "gMlIhURVXU8uPube20Xr8E1K11g-3qZxOj1riThHqt-rBx72MPq5ivT1rr28cE9mzOmsXi\
bbsuBuQKYDvF7hGICRB3ROSPePYhcupV3j7XiXUIYjWNw9hvylHXK~nTT7jkpIBazBJZfr\
LJPcDZTDB0YnCOHOL-KFn4N1R5B22g0iYRABN~O10AUjQmf1epklAXPqYlzmOYeJSfTPBI\
E44nEccWJp0M0KynhKVbDI0v9VYm6sPFK7WrzRyWwHL~r735wiRkwywuMmKJtA7-PuJjcW\
NLkJwx6WScH2msMzhzYPi8JSZJBl~PosX934l-L0T-KNV4jg1Ih6yoCnm1748A==";
struct TestVector<'a> {
msg: &'a str,
ct: &'a str,
};
let test_vectors = vec![
TestVector {
msg: "",
ct: "AMfISa8KvTpaC7KXZzSvC2axyiSk0xPexBAf29yU~IKq21DzaU19wQcGJg-ktpG4hjGSg7\
u-mJ07b61yo-EGmVGZsv3nYuQYW-GjvsZQa9nm98VljlMtWrxu7TsRXw~SQlWQxMvthqJB\
1A7Y7Qa~C7-UlRytkD-cpVdgUfM-esuMWmjGs6Vc33N5U-tce5Fywa-9y7PSn3ukBO8KGR\
wm7T12~H2gvhgxrVeK2roOzsV7f5dGkvBQRZJ309Vg3j0kjaxWutgI3vli0pzDbSK9d5NR\
-GUDtdOb6IIfLiOckBegcv6I-wlSXjYJe8mIoaK45Ok3rEpHwWKVKS2MeuI7AmsAWgkQmW\
f8irmZaKc9X910VWSO5GYu6006hSc~r2TL3O7vwtW-Z9Oq~sAam9av1PPVJzAx8A4g~m~1\
avtN
|
_basic(&self,
|
identifier_name
|
|
nelder_mead.rs
|
<Obs<Vec<f64>, V>>,
alpha: f64,
beta: f64,
gamma: f64,
delta: f64,
initial: Vec<Vec<f64>>,
centroid: Vec<f64>,
evaluating: Option<ObsId>,
state: State<V>,
}
impl<V> NelderMeadOptimizer<V>
where
V: Ord,
{
/// Makes a new `NelderMeadOptimizer`.
pub fn new<R: Rng>(params_domain: Vec<ContinuousDomain>, mut rng: R) -> Result<Self> {
let point = params_domain
.iter()
.map(|p| p.sample(&mut rng))
.collect::<Vec<_>>();
track!(Self::with_initial_point(params_domain, &point))
}
/// Makes a new `NelderMeadOptimizer` that starts from the given search point.
pub fn with_initial_point(params_domain: Vec<ContinuousDomain>, point: &[f64]) -> Result<Self> {
let mut initial_simplex = vec![point.to_vec()];
for i in 0..params_domain.len() {
let tau = if point[i] == 0.0 { 0.00025 } else { 0.05 };
let x = point
.iter()
.enumerate()
.map(|(j, &x0)| if i == j { x0 + tau } else { x0 })
.collect();
initial_simplex.push(x);
}
track!(Self::with_initial_simplex(params_domain, initial_simplex))
}
/// Makes a new `NelderMeadOptimizer` with the given simplex.
pub fn with_initial_simplex(
params_domain: Vec<ContinuousDomain>,
initial_simplex: Vec<Vec<f64>>,
) -> Result<Self> {
track_assert!(
params_domain.len() >= 2,
ErrorKind::InvalidInput,
"Too few dimensions: {}",
params_domain.len()
);
track_assert_eq!(
params_domain.len() + 1,
initial_simplex.len(),
ErrorKind::InvalidInput
);
let dim = params_domain.len() as f64;
Ok(Self {
params_domain,
simplex: Vec::with_capacity(initial_simplex.len()),
alpha: 1.0,
beta: 1.0 + 2.0 / dim,
gamma: 0.75 - 1.0 / (2.0 * dim),
delta: 1.0 - 1.0 / dim,
initial: initial_simplex,
centroid: Vec::new(),
evaluating: None,
state: State::Initialize,
})
}
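// The coefficients above are the dimension-adaptive Nelder-Mead
// parameters (reflection 1, expansion 1 + 2/n, contraction 3/4 - 1/(2n),
// shrink 1 - 1/n), commonly attributed to Gao & Han. For example, a
// sketch evaluating them for a 2-dimensional domain:
//
//     let n = 2.0_f64;
//     assert_eq!(1.0 + 2.0 / n, 2.0);          // beta
//     assert_eq!(0.75 - 1.0 / (2.0 * n), 0.5); // gamma
//     assert_eq!(1.0 - 1.0 / n, 0.5);          // delta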
fn dim(&self) -> usize {
self.params_domain.len()
}
fn adjust(&self, x: Vec<f64>) -> Vec<f64> {
self.params_domain
.iter()
.zip(x.into_iter())
.map(|(p, v)| {
let v = p.low().max(v);
let mut v = (p.high() - std::f64::EPSILON).min(v);
for i in 2.. {
if (v - p.high()).abs() > EPSILON {
break;
}
v -= EPSILON * f64::from(i);
}
v
})
.collect()
}
fn initial_ask(&mut self) -> Vec<f64> {
self.initial.pop().unwrap_or_else(|| unreachable!())
}
fn initial_tell(&mut self, obs: Obs<Vec<f64>, V>) {
|
fn reflect_ask(&mut self) -> Vec<f64> {
self.centroid
.iter()
.zip(self.highest().param.iter())
.map(|(&x0, &xh)| x0 + self.alpha * (x0 - xh))
.collect()
}
fn reflect_tell(&mut self, obs: Obs<Vec<f64>, V>) {
if obs.value < self.lowest().value {
self.state = State::Expand(obs);
} else if obs.value < self.second_highest().value {
self.accept(obs);
} else if obs.value < self.highest().value {
self.state = State::ContractOutside(obs);
} else {
self.state = State::ContractInside(obs);
}
}
fn expand_ask(&mut self, prev: Vec<f64>) -> Vec<f64> {
self.centroid
.iter()
.zip(prev.iter())
.map(|(&c, &x)| c + self.beta * (x - c))
.collect()
}
fn expand_tell(&mut self, prev: Obs<Vec<f64>, V>, curr: Obs<Vec<f64>, V>) {
if prev.value < curr.value {
self.accept(prev);
} else {
self.accept(curr);
}
}
fn contract_outside_ask(&mut self, prev: Vec<f64>) -> Vec<f64> {
self.centroid
.iter()
.zip(prev.iter())
.map(|(&c, &x)| c + self.gamma * (x - c))
.collect()
}
fn contract_outside_tell(&mut self, prev: Obs<Vec<f64>, V>, curr: Obs<Vec<f64>, V>) {
if curr.value <= prev.value {
self.accept(curr);
} else {
self.shrink();
}
}
fn contract_inside_ask(&mut self, prev: Vec<f64>) -> Vec<f64> {
self.centroid
.iter()
.zip(prev.iter())
.map(|(&c, &x)| c - self.gamma * (x - c))
.collect()
}
fn contract_inside_tell(&mut self, _prev: Obs<Vec<f64>, V>, curr: Obs<Vec<f64>, V>) {
if curr.value < self.highest().value {
self.accept(curr);
} else {
self.shrink();
}
}
fn shrink_ask(&mut self, index: usize) -> Vec<f64> {
self.lowest()
.param
.iter()
.zip(self.simplex[index].param.iter())
.map(|(&xl, &xi)| xl + self.delta * (xi - xl))
.collect()
}
fn shrink_tell(&mut self, obs: Obs<Vec<f64>, V>, index: usize) {
self.simplex[index] = obs;
if index < self.simplex.len() - 1 {
self.state = State::Shrink { index: index + 1 };
} else {
self.update_centroid();
self.state = State::Reflect;
}
}
fn accept(&mut self, obs: Obs<Vec<f64>, V>) {
// FIXME: optimize
self.simplex.push(obs);
self.simplex.sort_by(|a, b| a.value.cmp(&b.value));
self.simplex.pop();
self.update_centroid();
self.state = State::Reflect;
}
fn shrink(&mut self) {
self.state = State::Shrink { index: 1 };
}
fn lowest(&self) -> &Obs<Vec<f64>, V> {
&self.simplex[0]
}
fn second_highest(&self) -> &Obs<Vec<f64>, V> {
&self.simplex[self.simplex.len() - 2]
}
fn highest(&self) -> &Obs<Vec<f64>, V> {
&self.simplex[self.simplex.len() - 1]
}
fn update_centroid(&mut self) {
assert!(self.simplex.len() == self.dim() + 1);
// NOTE: We assume that `self.simplex` has been sorted by value.
let n = self.dim();
let mut c = vec![f64::default(); n];
for t in self.simplex.iter().take(n) {
for (i, c) in c.iter_mut().enumerate() {
*c += t.param[i];
}
}
let n = n as f64;
for c in &mut c {
*c /= n;
}
self.centroid = c
}
}
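// Hedged driver sketch for the ask/tell cycle. `tell` is not visible in
// this excerpt, so the call below is an assumption for illustration, not
// this crate's confirmed API:
//
//     let mut opt = NelderMeadOptimizer::new(params_domain, &mut rng)?;
//     loop {
//         let obs = opt.ask(&mut rng, &mut idg)?; // next point to evaluate
//         let value = objective(&obs.param);      // user-supplied objective
//         // Reporting the value drives the state machine
//         // (Initialize -> Reflect -> Expand/Contract/Shrink -> Reflect).
//         opt.tell(obs, value)?;
//     }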
impl<V> Optimizer for NelderMeadOptimizer<V>
where
V: Ord,
{
type Param = Vec<f64>;
type Value = V;
fn ask<R: Rng, G: IdGen>(&mut self, _rng: R, idg: G) -> Result<Obs<Self::Param>> {
track_assert!(self.evaluating.is_none(), ErrorKind::Other);
let x = match &self.state {
State::Initialize => self.initial_ask(),
State::Reflect => self.reflect_ask(),
State::Expand(prev) => {
let prev = prev.param.clone();
self.expand_ask(prev)
}
State::ContractOutside(prev) => {
let prev = prev.param.clone();
self.contract_outside_ask(prev)
}
State::ContractInside(prev) => {
let prev = prev.param.clone();
self.contract_inside_ask(prev)
}
|
self.simplex.push(obs);
if self.simplex.len() == self.dim() + 1 {
self.simplex.sort_by(|a, b| a.value.cmp(&b.value));
self.update_centroid();
self.state = State::Reflect;
}
}
|
identifier_body
|
nelder_mead.rs
|
<Obs<Vec<f64>, V>>,
alpha: f64,
beta: f64,
gamma: f64,
delta: f64,
initial: Vec<Vec<f64>>,
centroid: Vec<f64>,
evaluating: Option<ObsId>,
state: State<V>,
}
impl<V> NelderMeadOptimizer<V>
where
V: Ord,
{
/// Makes a new `NelderMeadOptimizer`.
pub fn new<R: Rng>(params_domain: Vec<ContinuousDomain>, mut rng: R) -> Result<Self> {
let point = params_domain
.iter()
.map(|p| p.sample(&mut rng))
.collect::<Vec<_>>();
track!(Self::with_initial_point(params_domain, &point))
}
/// Makes a new `NelderMeadOptimizer` that starts from the given search point.
pub fn with_initial_point(params_domain: Vec<ContinuousDomain>, point: &[f64]) -> Result<Self> {
let mut initial_simplex = vec![point.to_vec()];
for i in 0..params_domain.len() {
let tau = if point[i] == 0.0 { 0.00025 } else { 0.05 };
let x = point
.iter()
.enumerate()
.map(|(j, &x0)| if i == j { x0 + tau } else {
|
.collect();
initial_simplex.push(x);
}
track!(Self::with_initial_simplex(params_domain, initial_simplex))
}
/// Makes a new `NelderMeadOptimizer` with the given simplex.
pub fn with_initial_simplex(
params_domain: Vec<ContinuousDomain>,
initial_simplex: Vec<Vec<f64>>,
) -> Result<Self> {
track_assert!(
params_domain.len() >= 2,
ErrorKind::InvalidInput,
"Too few dimensions: {}",
params_domain.len()
);
track_assert_eq!(
params_domain.len() + 1,
initial_simplex.len(),
ErrorKind::InvalidInput
);
let dim = params_domain.len() as f64;
Ok(Self {
params_domain,
simplex: Vec::with_capacity(initial_simplex.len()),
alpha: 1.0,
beta: 1.0 + 2.0 / dim,
gamma: 0.75 - 1.0 / (2.0 * dim),
delta: 1.0 - 1.0 / dim,
initial: initial_simplex,
centroid: Vec::new(),
evaluating: None,
state: State::Initialize,
})
}
fn dim(&self) -> usize {
self.params_domain.len()
}
fn adjust(&self, x: Vec<f64>) -> Vec<f64> {
self.params_domain
.iter()
.zip(x.into_iter())
.map(|(p, v)| {
let v = p.low().max(v);
let mut v = (p.high() - std::f64::EPSILON).min(v);
for i in 2.. {
if (v - p.high()).abs() > EPSILON {
break;
}
v -= EPSILON * f64::from(i);
}
v
})
.collect()
}
fn initial_ask(&mut self) -> Vec<f64> {
self.initial.pop().unwrap_or_else(|| unreachable!())
}
fn initial_tell(&mut self, obs: Obs<Vec<f64>, V>) {
self.simplex.push(obs);
if self.simplex.len() == self.dim() + 1 {
self.simplex.sort_by(|a, b| a.value.cmp(&b.value));
self.update_centroid();
self.state = State::Reflect;
}
}
fn reflect_ask(&mut self) -> Vec<f64> {
self.centroid
.iter()
.zip(self.highest().param.iter())
.map(|(&x0, &xh)| x0 + self.alpha * (x0 - xh))
.collect()
}
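// Reflection geometry: with centroid c of the n best vertices and worst
// vertex x_h, the proposal is x_r = c + alpha * (c - x_h), i.e. x_h
// mirrored through c (alpha = 1 gives the exact mirror). For example,
// c = 1.0, x_h = 3.0, alpha = 1.0 gives x_r = -1.0.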
fn reflect_tell(&mut self, obs: Obs<Vec<f64>, V>) {
if obs.value < self.lowest().value {
self.state = State::Expand(obs);
} else if obs.value < self.second_highest().value {
self.accept(obs);
} else if obs.value < self.highest().value {
self.state = State::ContractOutside(obs);
} else {
self.state = State::ContractInside(obs);
}
}
fn expand_ask(&mut self, prev: Vec<f64>) -> Vec<f64> {
self.centroid
.iter()
.zip(prev.iter())
.map(|(&c, &x)| c + self.beta * (x - c))
.collect()
}
fn expand_tell(&mut self, prev: Obs<Vec<f64>, V>, curr: Obs<Vec<f64>, V>) {
if prev.value < curr.value {
self.accept(prev);
} else {
self.accept(curr);
}
}
fn contract_outside_ask(&mut self, prev: Vec<f64>) -> Vec<f64> {
self.centroid
.iter()
.zip(prev.iter())
.map(|(&c, &x)| c + self.gamma * (x - c))
.collect()
}
fn contract_outside_tell(&mut self, prev: Obs<Vec<f64>, V>, curr: Obs<Vec<f64>, V>) {
if curr.value <= prev.value {
self.accept(curr);
} else {
self.shrink();
}
}
fn contract_inside_ask(&mut self, prev: Vec<f64>) -> Vec<f64> {
self.centroid
.iter()
.zip(prev.iter())
.map(|(&c, &x)| c - self.gamma * (x - c))
.collect()
}
fn contract_inside_tell(&mut self, _prev: Obs<Vec<f64>, V>, curr: Obs<Vec<f64>, V>) {
if curr.value < self.highest().value {
self.accept(curr);
} else {
self.shrink();
}
}
fn shrink_ask(&mut self, index: usize) -> Vec<f64> {
self.lowest()
.param
.iter()
.zip(self.simplex[index].param.iter())
.map(|(&xl, &xi)| xl + self.delta * (xi - xl))
.collect()
}
fn shrink_tell(&mut self, obs: Obs<Vec<f64>, V>, index: usize) {
self.simplex[index] = obs;
if index < self.simplex.len() - 1 {
self.state = State::Shrink { index: index + 1 };
} else {
self.update_centroid();
self.state = State::Reflect;
}
}
fn accept(&mut self, obs: Obs<Vec<f64>, V>) {
// FIXME: optimize
self.simplex.push(obs);
self.simplex.sort_by(|a, b| a.value.cmp(&b.value));
self.simplex.pop();
self.update_centroid();
self.state = State::Reflect;
}
fn shrink(&mut self) {
self.state = State::Shrink { index: 1 };
}
fn lowest(&self) -> &Obs<Vec<f64>, V> {
&self.simplex[0]
}
fn second_highest(&self) -> &Obs<Vec<f64>, V> {
&self.simplex[self.simplex.len() - 2]
}
fn highest(&self) -> &Obs<Vec<f64>, V> {
&self.simplex[self.simplex.len() - 1]
}
fn update_centroid(&mut self) {
assert!(self.simplex.len() == self.dim() + 1);
// NOTE: We assume that `self.simplex` has been sorted by value.
let n = self.dim();
let mut c = vec![f64::default(); n];
for t in self.simplex.iter().take(n) {
for (i, c) in c.iter_mut().enumerate() {
*c += t.param[i];
}
}
let n = n as f64;
for c in &mut c {
*c /= n;
}
self.centroid = c
}
}
impl<V> Optimizer for NelderMeadOptimizer<V>
where
V: Ord,
{
type Param = Vec<f64>;
type Value = V;
fn ask<R: Rng, G: IdGen>(&mut self, _rng: R, idg: G) -> Result<Obs<Self::Param>> {
track_assert!(self.evaluating.is_none(), ErrorKind::Other);
let x = match &self.state {
State::Initialize => self.initial_ask(),
State::Reflect => self.reflect_ask(),
State::Expand(prev) => {
let prev = prev.param.clone();
self.expand_ask(prev)
}
State::ContractOutside(prev) => {
let prev = prev.param.clone();
self.contract_outside_ask(prev)
}
State::ContractInside(prev) => {
let prev = prev.param.clone();
self.contract_inside_ask(prev)
|
x0 })
|
conditional_block
|
nelder_mead.rs
|
> {
params_domain: Vec<ContinuousDomain>,
simplex: Vec<Obs<Vec<f64>, V>>,
alpha: f64,
beta: f64,
gamma: f64,
delta: f64,
initial: Vec<Vec<f64>>,
centroid: Vec<f64>,
evaluating: Option<ObsId>,
state: State<V>,
}
impl<V> NelderMeadOptimizer<V>
where
V: Ord,
{
/// Makes a new `NelderMeadOptimizer`.
pub fn new<R: Rng>(params_domain: Vec<ContinuousDomain>, mut rng: R) -> Result<Self> {
let point = params_domain
.iter()
.map(|p| p.sample(&mut rng))
.collect::<Vec<_>>();
track!(Self::with_initial_point(params_domain, &point))
}
/// Makes a new `NelderMeadOptimizer` that starts from the given search point.
pub fn with_initial_point(params_domain: Vec<ContinuousDomain>, point: &[f64]) -> Result<Self> {
let mut initial_simplex = vec![point.to_vec()];
for i in 0..params_domain.len() {
let tau = if point[i] == 0.0 { 0.00025 } else { 0.05 };
let x = point
.iter()
.enumerate()
.map(|(j, &x0)| if i == j { x0 + tau } else { x0 })
.collect();
initial_simplex.push(x);
}
track!(Self::with_initial_simplex(params_domain, initial_simplex))
}
/// Makes a new `NelderMeadOptimizer` with the given simplex.
pub fn with_initial_simplex(
params_domain: Vec<ContinuousDomain>,
initial_simplex: Vec<Vec<f64>>,
) -> Result<Self> {
track_assert!(
params_domain.len() >= 2,
ErrorKind::InvalidInput,
"Too few dimensions: {}",
params_domain.len()
);
track_assert_eq!(
params_domain.len() + 1,
initial_simplex.len(),
ErrorKind::InvalidInput
);
let dim = params_domain.len() as f64;
Ok(Self {
params_domain,
simplex: Vec::with_capacity(initial_simplex.len()),
alpha: 1.0,
beta: 1.0 + 2.0 / dim,
gamma: 0.75 - 1.0 / (2.0 * dim),
delta: 1.0 - 1.0 / dim,
initial: initial_simplex,
centroid: Vec::new(),
evaluating: None,
state: State::Initialize,
})
}
fn dim(&self) -> usize {
self.params_domain.len()
}
fn adjust(&self, x: Vec<f64>) -> Vec<f64> {
self.params_domain
.iter()
.zip(x.into_iter())
.map(|(p, v)| {
let v = p.low().max(v);
let mut v = (p.high() - std::f64::EPSILON).min(v);
for i in 2.. {
if (v - p.high()).abs() > EPSILON {
break;
}
v -= EPSILON * f64::from(i);
}
v
})
.collect()
}
fn initial_ask(&mut self) -> Vec<f64> {
self.initial.pop().unwrap_or_else(|| unreachable!())
}
fn initial_tell(&mut self, obs: Obs<Vec<f64>, V>) {
self.simplex.push(obs);
if self.simplex.len() == self.dim() + 1 {
self.simplex.sort_by(|a, b| a.value.cmp(&b.value));
self.update_centroid();
self.state = State::Reflect;
}
}
fn reflect_ask(&mut self) -> Vec<f64> {
self.centroid
.iter()
.zip(self.highest().param.iter())
.map(|(&x0, &xh)| x0 + self.alpha * (x0 - xh))
.collect()
}
fn reflect_tell(&mut self, obs: Obs<Vec<f64>, V>) {
if obs.value < self.lowest().value {
self.state = State::Expand(obs);
} else if obs.value < self.second_highest().value {
self.accept(obs);
} else if obs.value < self.highest().value {
self.state = State::ContractOutside(obs);
} else {
self.state = State::ContractInside(obs);
}
}
fn expand_ask(&mut self, prev: Vec<f64>) -> Vec<f64> {
self.centroid
.iter()
.zip(prev.iter())
.map(|(&c, &x)| c + self.beta * (x - c))
.collect()
}
fn expand_tell(&mut self, prev: Obs<Vec<f64>, V>, curr: Obs<Vec<f64>, V>) {
if prev.value < curr.value {
self.accept(prev);
} else {
self.accept(curr);
}
}
fn contract_outside_ask(&mut self, prev: Vec<f64>) -> Vec<f64> {
self.centroid
.iter()
.zip(prev.iter())
.map(|(&c, &x)| c + self.gamma * (x - c))
.collect()
}
fn contract_outside_tell(&mut self, prev: Obs<Vec<f64>, V>, curr: Obs<Vec<f64>, V>) {
if curr.value <= prev.value {
self.accept(curr);
} else {
self.shrink();
}
}
fn contract_inside_ask(&mut self, prev: Vec<f64>) -> Vec<f64> {
self.centroid
.iter()
.zip(prev.iter())
.map(|(&c, &x)| c - self.gamma * (x - c))
.collect()
}
fn contract_inside_tell(&mut self, _prev: Obs<Vec<f64>, V>, curr: Obs<Vec<f64>, V>) {
if curr.value < self.highest().value {
self.accept(curr);
} else {
self.shrink();
}
}
fn shrink_ask(&mut self, index: usize) -> Vec<f64> {
self.lowest()
.param
.iter()
.zip(self.simplex[index].param.iter())
.map(|(&xl, &xi)| xl + self.delta * (xi - xl))
.collect()
}
fn shrink_tell(&mut self, obs: Obs<Vec<f64>, V>, index: usize) {
self.simplex[index] = obs;
if index < self.simplex.len() - 1 {
self.state = State::Shrink { index: index + 1 };
} else {
self.update_centroid();
self.state = State::Reflect;
}
}
fn accept(&mut self, obs: Obs<Vec<f64>, V>) {
// FIXME: optimize
self.simplex.push(obs);
self.simplex.sort_by(|a, b| a.value.cmp(&b.value));
self.simplex.pop();
self.update_centroid();
self.state = State::Reflect;
}
fn shrink(&mut self) {
self.state = State::Shrink { index: 1 };
}
fn lowest(&self) -> &Obs<Vec<f64>, V> {
&self.simplex[0]
}
fn second_highest(&self) -> &Obs<Vec<f64>, V> {
&self.simplex[self.simplex.len() - 2]
}
fn highest(&self) -> &Obs<Vec<f64>, V> {
&self.simplex[self.simplex.len() - 1]
}
fn update_centroid(&mut self) {
assert!(self.simplex.len() == self.dim() + 1);
// NOTE: We assume that `self.simplex` has been sorted by value.
let n = self.dim();
let mut c = vec![f64::default(); n];
for t in self.simplex.iter().take(n) {
for (i, c) in c.iter_mut().enumerate() {
*c += t.param[i];
}
}
let n = n as f64;
for c in &mut c {
*c /= n;
}
self.centroid = c
}
}
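// Centroid math illustrated: with the simplex sorted best to worst and
// the worst vertex excluded, c = (x_0 + ... + x_{n-1}) / n. For a
// 2-dimensional problem with vertices (0,0), (2,0), (1,3) sorted by
// value, the centroid of the two best is ((0+2)/2, (0+0)/2) = (1, 0).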
impl<V> Optimizer for NelderMeadOptimizer<V>
where
V: Ord,
{
type Param = Vec<f64>;
type Value = V;
fn ask<R: Rng, G: IdGen>(&mut self, _rng: R, idg: G) -> Result<Obs<Self::Param>> {
track_assert!(self.evaluating.is_none(), ErrorKind::Other);
let x = match &self.state {
State::Initialize => self.initial_ask(),
State::Reflect => self.reflect_ask(),
State::Expand(prev) => {
let prev = prev.param.clone();
self.expand_ask(prev)
}
State::ContractOutside(prev) => {
let prev = prev.param.clone();
self.contract_outside_ask(prev)
}
State::ContractInside(prev
|
lderMeadOptimizer<V
|
identifier_name
|
|
nelder_mead.rs
|
<Obs<Vec<f64>, V>>,
alpha: f64,
beta: f64,
gamma: f64,
delta: f64,
initial: Vec<Vec<f64>>,
centroid: Vec<f64>,
evaluating: Option<ObsId>,
state: State<V>,
}
impl<V> NelderMeadOptimizer<V>
where
V: Ord,
{
/// Makes a new `NelderMeadOptimizer`.
pub fn new<R: Rng>(params_domain: Vec<ContinuousDomain>, mut rng: R) -> Result<Self> {
let point = params_domain
.iter()
.map(|p| p.sample(&mut rng))
.collect::<Vec<_>>();
track!(Self::with_initial_point(params_domain, &point))
}
/// Makes a new `NelderMeadOptimizer` that starts from the given search point.
pub fn with_initial_point(params_domain: Vec<ContinuousDomain>, point: &[f64]) -> Result<Self> {
let mut initial_simplex = vec![point.to_vec()];
for i in 0..params_domain.len() {
let tau = if point[i] == 0.0 { 0.00025 } else { 0.05 };
let x = point
.iter()
.enumerate()
.map(|(j, &x0)| if i == j { x0 + tau } else { x0 })
.collect();
initial_simplex.push(x);
}
track!(Self::with_initial_simplex(params_domain, initial_simplex))
}
/// Makes a new `NelderMeadOptimizer` with the given simplex.
pub fn with_initial_simplex(
params_domain: Vec<ContinuousDomain>,
initial_simplex: Vec<Vec<f64>>,
) -> Result<Self> {
track_assert!(
params_domain.len() >= 2,
ErrorKind::InvalidInput,
"Too few dimensions: {}",
params_domain.len()
);
track_assert_eq!(
params_domain.len() + 1,
initial_simplex.len(),
ErrorKind::InvalidInput
);
let dim = params_domain.len() as f64;
Ok(Self {
params_domain,
simplex: Vec::with_capacity(initial_simplex.len()),
alpha: 1.0,
beta: 1.0 + 2.0 / dim,
gamma: 0.75 - 1.0 / (2.0 * dim),
delta: 1.0 - 1.0 / dim,
initial: initial_simplex,
centroid: Vec::new(),
evaluating: None,
state: State::Initialize,
})
}
fn dim(&self) -> usize {
self.params_domain.len()
}
fn adjust(&self, x: Vec<f64>) -> Vec<f64> {
self.params_domain
.iter()
.zip(x.into_iter())
.map(|(p, v)| {
let v = p.low().max(v);
let mut v = (p.high() - std::f64::EPSILON).min(v);
for i in 2.. {
if (v - p.high()).abs() > EPSILON {
break;
}
v -= EPSILON * f64::from(i);
}
v
})
.collect()
}
fn initial_ask(&mut self) -> Vec<f64> {
self.initial.pop().unwrap_or_else(|| unreachable!())
}
fn initial_tell(&mut self, obs: Obs<Vec<f64>, V>) {
self.simplex.push(obs);
if self.simplex.len() == self.dim() + 1 {
self.simplex.sort_by(|a, b| a.value.cmp(&b.value));
self.update_centroid();
self.state = State::Reflect;
}
}
fn reflect_ask(&mut self) -> Vec<f64> {
self.centroid
.iter()
.zip(self.highest().param.iter())
.map(|(&x0, &xh)| x0 + self.alpha * (x0 - xh))
.collect()
}
fn reflect_tell(&mut self, obs: Obs<Vec<f64>, V>) {
if obs.value < self.lowest().value {
self.state = State::Expand(obs);
} else if obs.value < self.second_highest().value {
|
self.state = State::ContractInside(obs);
}
}
fn expand_ask(&mut self, prev: Vec<f64>) -> Vec<f64> {
self.centroid
.iter()
.zip(prev.iter())
.map(|(&c, &x)| c + self.beta * (x - c))
.collect()
}
fn expand_tell(&mut self, prev: Obs<Vec<f64>, V>, curr: Obs<Vec<f64>, V>) {
if prev.value < curr.value {
self.accept(prev);
} else {
self.accept(curr);
}
}
fn contract_outside_ask(&mut self, prev: Vec<f64>) -> Vec<f64> {
self.centroid
.iter()
.zip(prev.iter())
.map(|(&c, &x)| c + self.gamma * (x - c))
.collect()
}
fn contract_outside_tell(&mut self, prev: Obs<Vec<f64>, V>, curr: Obs<Vec<f64>, V>) {
if curr.value <= prev.value {
self.accept(curr);
} else {
self.shrink();
}
}
fn contract_inside_ask(&mut self, prev: Vec<f64>) -> Vec<f64> {
self.centroid
.iter()
.zip(prev.iter())
.map(|(&c, &x)| c - self.gamma * (x - c))
.collect()
}
fn contract_inside_tell(&mut self, _prev: Obs<Vec<f64>, V>, curr: Obs<Vec<f64>, V>) {
if curr.value < self.highest().value {
self.accept(curr);
} else {
self.shrink();
}
}
fn shrink_ask(&mut self, index: usize) -> Vec<f64> {
self.lowest()
.param
.iter()
.zip(self.simplex[index].param.iter())
.map(|(&xl, &xi)| xl + self.delta * (xi - xl))
.collect()
}
fn shrink_tell(&mut self, obs: Obs<Vec<f64>, V>, index: usize) {
self.simplex[index] = obs;
if index < self.simplex.len() - 1 {
self.state = State::Shrink { index: index + 1 };
} else {
self.update_centroid();
self.state = State::Reflect;
}
}
fn accept(&mut self, obs: Obs<Vec<f64>, V>) {
// FIXME: optimize
self.simplex.push(obs);
self.simplex.sort_by(|a, b| a.value.cmp(&b.value));
self.simplex.pop();
self.update_centroid();
self.state = State::Reflect;
}
fn shrink(&mut self) {
self.state = State::Shrink { index: 1 };
}
fn lowest(&self) -> &Obs<Vec<f64>, V> {
&self.simplex[0]
}
fn second_highest(&self) -> &Obs<Vec<f64>, V> {
&self.simplex[self.simplex.len() - 2]
}
fn highest(&self) -> &Obs<Vec<f64>, V> {
&self.simplex[self.simplex.len() - 1]
}
fn update_centroid(&mut self) {
assert!(self.simplex.len() == self.dim() + 1);
// NOTE: We assume that `self.simplex` has been sorted by value.
let n = self.dim();
let mut c = vec![f64::default(); n];
for t in self.simplex.iter().take(n) {
for (i, c) in c.iter_mut().enumerate() {
*c += t.param[i];
}
}
let n = n as f64;
for c in &mut c {
*c /= n;
}
self.centroid = c
}
}
impl<V> Optimizer for NelderMeadOptimizer<V>
where
V: Ord,
{
type Param = Vec<f64>;
type Value = V;
fn ask<R: Rng, G: IdGen>(&mut self, _rng: R, idg: G) -> Result<Obs<Self::Param>> {
track_assert!(self.evaluating.is_none(), ErrorKind::Other);
let x = match &self.state {
State::Initialize => self.initial_ask(),
State::Reflect => self.reflect_ask(),
State::Expand(prev) => {
let prev = prev.param.clone();
self.expand_ask(prev)
}
State::ContractOutside(prev) => {
let prev = prev.param.clone();
self.contract_outside_ask(prev)
}
State::ContractInside(prev) => {
let prev = prev.param.clone();
self.contract_inside_ask(prev)
}
|
self.accept(obs);
} else if obs.value < self.highest().value {
self.state = State::ContractOutside(obs);
} else {
|
random_line_split
|
from_module.go
|
.Diagnostics
// The way this function works is pretty ugly, but we accept it because
// -from-module is a less important case than normal module installation
// and so it's better to keep this ugly complexity out here rather than
// adding even more complexity to the normal module installer.
// The target directory must exist but be empty.
{
entries, err := ioutil.ReadDir(rootDir)
if err != nil {
if os.IsNotExist(err) {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Target directory does not exist",
fmt.Sprintf("Cannot initialize non-existent directory %s.", rootDir),
))
} else {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Failed to read target directory",
fmt.Sprintf("Error reading %s to ensure it is empty: %s.", rootDir, err),
))
}
return diags
}
haveEntries := false
for _, entry := range entries {
if entry.Name() == "." || entry.Name() == ".." || entry.Name() == ".terraform" {
continue
}
haveEntries = true
}
if haveEntries {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Can't populate non-empty directory",
fmt.Sprintf("The target directory %s is not empty, so it cannot be initialized with the -from-module=... option.", rootDir),
))
return diags
}
}
instDir := filepath.Join(rootDir, ".terraform/init-from-module")
inst := NewModuleInstaller(instDir, reg)
log.Printf("[DEBUG] installing modules in %s to initialize working directory from %q", instDir, sourceAddr)
os.RemoveAll(instDir) // if this fails then we'll fail on MkdirAll below too
err := os.MkdirAll(instDir, os.ModePerm)
if err != nil {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Failed to create temporary directory",
fmt.Sprintf("Failed to create temporary directory %s: %s.", instDir, err),
))
return diags
}
instManifest := make(modsdir.Manifest)
retManifest := make(modsdir.Manifest)
fakeFilename := fmt.Sprintf("-from-module=%q", sourceAddr)
fakePos := tfconfig.SourcePos{
Filename: fakeFilename,
Line: 1,
}
// -from-module allows relative paths but it's different from a normal
// module address where it'd be resolved relative to the module call
// (which is synthetic here). To address this, we'll just patch up any
// relative paths to be absolute paths before we run, ensuring we'll
// get the right result. This also, as an important side-effect, ensures
// that the result will be "downloaded" with go-getter (copied from the
// source location), rather than just recorded as a relative path.
{
maybePath := filepath.ToSlash(sourceAddr)
if maybePath == "." || strings.HasPrefix(maybePath, "./") || strings.HasPrefix(maybePath, "../") {
if wd, err := os.Getwd(); err == nil {
sourceAddr = filepath.Join(wd, sourceAddr)
log.Printf("[TRACE] -from-module relative path rewritten to absolute path %s", sourceAddr)
}
}
}
// Now we need to create an artificial root module that will seed our
// installation process.
fakeRootModule := &tfconfig.Module{
ModuleCalls: map[string]*tfconfig.ModuleCall{
initFromModuleRootCallName: {
Name: initFromModuleRootCallName,
Source: sourceAddr,
Pos: fakePos,
},
},
}
// wrapHooks filters hook notifications to only include Download calls
// and to trim off the initFromModuleRootCallName prefix. We'll produce
// our own Install notifications directly below.
wrapHooks := installHooksInitDir{
Wrapped: hooks,
}
getter := reusingGetter{}
_, instDiags := inst.installDescendentModules(fakeRootModule, rootDir, instManifest, true, wrapHooks, getter)
diags = append(diags, instDiags...)
if instDiags.HasErrors() {
return diags
}
// If all of that succeeded then we'll now migrate what was installed
|
if err != nil {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Failed to create local modules directory",
fmt.Sprintf("Failed to create modules directory %s: %s.", modulesDir, err),
))
return diags
}
recordKeys := make([]string, 0, len(instManifest))
for k := range instManifest {
recordKeys = append(recordKeys, k)
}
sort.Strings(recordKeys)
for _, recordKey := range recordKeys {
record := instManifest[recordKey]
if record.Key == initFromModuleRootCallName {
// We've found the module the user requested, which we must
// now copy into rootDir so it can be used directly.
log.Printf("[TRACE] copying new root module from %s to %s", record.Dir, rootDir)
err := copyDir(rootDir, record.Dir)
if err != nil {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Failed to copy root module",
fmt.Sprintf("Error copying root module %q from %s to %s: %s.", sourceAddr, record.Dir, rootDir, err),
))
continue
}
// We'll try to load the newly-copied module here just so we can
// sniff for any module calls that ../ out of the root directory
// and must thus be rewritten to be absolute addresses again.
// For now we can't do this rewriting automatically, but we'll
// generate an error to help the user do it manually.
mod, _ := earlyconfig.LoadModule(rootDir) // ignore diagnostics since we're just doing value-add here anyway
if mod != nil {
for _, mc := range mod.ModuleCalls {
if pathTraversesUp(mc.Source) {
packageAddr, givenSubdir := splitAddrSubdir(sourceAddr)
newSubdir := filepath.Join(givenSubdir, mc.Source)
if pathTraversesUp(newSubdir) {
// This should never happen in any reasonable
// configuration since this suggests a path that
// traverses up out of the package root. We'll just
// ignore this, since we'll fail soon enough anyway
// trying to resolve this path when this module is
// loaded.
continue
}
var newAddr = packageAddr
if newSubdir != "" {
newAddr = fmt.Sprintf("%s//%s", newAddr, filepath.ToSlash(newSubdir))
}
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Root module references parent directory",
fmt.Sprintf("The requested module %q refers to a module via its parent directory. To use this as a new root module this source string must be rewritten as a remote source address, such as %q.", sourceAddr, newAddr),
))
continue
}
}
}
retManifest[""] = modsdir.Record{
Key: "",
Dir: rootDir,
}
continue
}
if !strings.HasPrefix(record.Key, initFromModuleRootKeyPrefix) {
// Ignore the *real* root module, whose key is empty, since
// we're only interested in the module named "root" and its
// descendants.
continue
}
newKey := record.Key[len(initFromModuleRootKeyPrefix):]
instPath := filepath.Join(modulesDir, newKey)
tempPath := filepath.Join(instDir, record.Key)
// tempPath won't be present for a module that was installed from
// a relative path, so in that case we just record the installation
// directory and assume it was already copied into place as part
// of its parent.
if _, err := os.Stat(tempPath); err != nil {
if !os.IsNotExist(err) {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Failed to stat temporary module install directory",
fmt.Sprintf("Error from stat %s for module %s: %s.", instPath, newKey, err),
))
continue
}
var parentKey string
if lastDot := strings.LastIndexByte(newKey, '.'); lastDot != -1 {
parentKey = newKey[:lastDot]
} else {
parentKey = "" // parent is the root module
}
parentOld := instManifest[initFromModuleRootKeyPrefix+parentKey]
parentNew := retManifest[parentKey]
|
// into the final directory structure.
err = os.MkdirAll(modulesDir, os.ModePerm)
|
random_line_split
|
from_module.go
|
own Install notifications directly below.
wrapHooks := installHooksInitDir{
Wrapped: hooks,
}
getter := reusingGetter{}
_, instDiags := inst.installDescendentModules(fakeRootModule, rootDir, instManifest, true, wrapHooks, getter)
diags = append(diags, instDiags...)
if instDiags.HasErrors() {
return diags
}
// If all of that succeeded then we'll now migrate what was installed
// into the final directory structure.
err = os.MkdirAll(modulesDir, os.ModePerm)
if err != nil {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Failed to create local modules directory",
fmt.Sprintf("Failed to create modules directory %s: %s.", modulesDir, err),
))
return diags
}
recordKeys := make([]string, 0, len(instManifest))
for k := range instManifest {
recordKeys = append(recordKeys, k)
}
sort.Strings(recordKeys)
for _, recordKey := range recordKeys {
record := instManifest[recordKey]
if record.Key == initFromModuleRootCallName {
// We've found the module the user requested, which we must
// now copy into rootDir so it can be used directly.
log.Printf("[TRACE] copying new root module from %s to %s", record.Dir, rootDir)
err := copyDir(rootDir, record.Dir)
if err != nil {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Failed to copy root module",
fmt.Sprintf("Error copying root module %q from %s to %s: %s.", sourceAddr, record.Dir, rootDir, err),
))
continue
}
// We'll try to load the newly-copied module here just so we can
// sniff for any module calls that ../ out of the root directory
// and must thus be rewritten to be absolute addresses again.
// For now we can't do this rewriting automatically, but we'll
// generate an error to help the user do it manually.
mod, _ := earlyconfig.LoadModule(rootDir) // ignore diagnostics since we're just doing value-add here anyway
if mod != nil {
for _, mc := range mod.ModuleCalls {
if pathTraversesUp(mc.Source) {
packageAddr, givenSubdir := splitAddrSubdir(sourceAddr)
newSubdir := filepath.Join(givenSubdir, mc.Source)
if pathTraversesUp(newSubdir) {
// This should never happen in any reasonable
// configuration since this suggests a path that
// traverses up out of the package root. We'll just
// ignore this, since we'll fail soon enough anyway
// trying to resolve this path when this module is
// loaded.
continue
}
var newAddr = packageAddr
if newSubdir != "" {
newAddr = fmt.Sprintf("%s//%s", newAddr, filepath.ToSlash(newSubdir))
}
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Root module references parent directory",
fmt.Sprintf("The requested module %q refers to a module via its parent directory. To use this as a new root module this source string must be rewritten as a remote source address, such as %q.", sourceAddr, newAddr),
))
continue
}
}
}
retManifest[""] = modsdir.Record{
Key: "",
Dir: rootDir,
}
continue
}
if !strings.HasPrefix(record.Key, initFromModuleRootKeyPrefix) {
// Ignore the *real* root module, whose key is empty, since
// we're only interested in the module named "root" and its
// descendants.
continue
}
newKey := record.Key[len(initFromModuleRootKeyPrefix):]
instPath := filepath.Join(modulesDir, newKey)
tempPath := filepath.Join(instDir, record.Key)
// tempPath won't be present for a module that was installed from
// a relative path, so in that case we just record the installation
// directory and assume it was already copied into place as part
// of its parent.
if _, err := os.Stat(tempPath); err != nil {
if !os.IsNotExist(err) {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Failed to stat temporary module install directory",
fmt.Sprintf("Error from stat %s for module %s: %s.", instPath, newKey, err),
))
continue
}
var parentKey string
if lastDot := strings.LastIndexByte(newKey, '.'); lastDot != -1 {
parentKey = newKey[:lastDot]
} else {
parentKey = "" // parent is the root module
}
parentOld := instManifest[initFromModuleRootKeyPrefix+parentKey]
parentNew := retManifest[parentKey]
// We need to figure out which portion of our directory is the
// parent package path and which portion is the subdirectory
// under that.
baseDirRel, err := filepath.Rel(parentOld.Dir, record.Dir)
if err != nil {
// Should never happen, because we constructed both directories
// from the same base and so they must have a common prefix.
panic(err)
}
newDir := filepath.Join(parentNew.Dir, baseDirRel)
log.Printf("[TRACE] relative reference for %s rewritten from %s to %s", newKey, record.Dir, newDir)
newRecord := record // shallow copy
newRecord.Dir = newDir
newRecord.Key = newKey
retManifest[newKey] = newRecord
hooks.Install(newRecord.Key, newRecord.Version, newRecord.Dir)
continue
}
err = os.MkdirAll(instPath, os.ModePerm)
if err != nil {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Failed to create module install directory",
fmt.Sprintf("Error creating directory %s for module %s: %s.", instPath, newKey, err),
))
continue
}
// We copy rather than "rename" here because renaming between directories
// can be tricky in edge-cases like network filesystems, etc.
log.Printf("[TRACE] copying new module %s from %s to %s", newKey, record.Dir, instPath)
err := copyDir(instPath, tempPath)
if err != nil {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Failed to copy descendent module",
fmt.Sprintf("Error copying module %q from %s to %s: %s.", newKey, tempPath, rootDir, err),
))
continue
}
subDir, err := filepath.Rel(tempPath, record.Dir)
if err != nil {
// Should never happen, because we constructed both directories
// from the same base and so they must have a common prefix.
panic(err)
}
newRecord := record // shallow copy
newRecord.Dir = filepath.Join(instPath, subDir)
newRecord.Key = newKey
retManifest[newKey] = newRecord
hooks.Install(newRecord.Key, newRecord.Version, newRecord.Dir)
}
err = retManifest.WriteSnapshotToDir(modulesDir)
if err != nil {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Failed to write module manifest",
fmt.Sprintf("Error writing module manifest: %s.", err),
))
}
if !diags.HasErrors() {
// Try to clean up our temporary directory, but don't worry if we don't
// succeed since it shouldn't hurt anything.
os.RemoveAll(instDir)
}
return diags
}
func pathTraversesUp(path string) bool {
return strings.HasPrefix(filepath.ToSlash(path), "../")
}
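// A minimal illustration of the prefix check above. Note the path is not
// filepath.Clean'ed first, so "sub/../mod" does not count as traversing
// up, while a Windows-style `..\mod` does after ToSlash:
//
//	pathTraversesUp("../mod")     // true
//	pathTraversesUp("./mod")      // false
//	pathTraversesUp("sub/../mod") // false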
// installHooksInitDir is an adapter wrapper for an InstallHooks that
// does some fakery to make downloads look like they are happening in their
// final locations, rather than in the temporary loader we use.
//
// It also suppresses "Install" calls entirely, since InitDirFromModule
// does its own installation steps after the initial installation pass
// has completed.
type installHooksInitDir struct {
Wrapped ModuleInstallHooks
ModuleInstallHooksImpl
}
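// Embedding ModuleInstallHooksImpl gives installHooksInitDir no-op
// defaults for the rest of the ModuleInstallHooks interface, so only the
// notifications it cares about (like Download below) need overriding.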
func (h installHooksInitDir) Download(moduleAddr, packageAddr string, version *version.Version)
|
{
if !strings.HasPrefix(moduleAddr, initFromModuleRootKeyPrefix) {
// We won't announce the root module, since hook implementations
// don't expect to see that and the caller will usually have produced
// its own user-facing notification about what it's doing anyway.
return
}
trimAddr := moduleAddr[len(initFromModuleRootKeyPrefix):]
h.Wrapped.Download(trimAddr, packageAddr, version)
}
|
identifier_body
|
|
from_module.go
|
our
// installation process.
fakeRootModule := &tfconfig.Module{
ModuleCalls: map[string]*tfconfig.ModuleCall{
initFromModuleRootCallName: {
Name: initFromModuleRootCallName,
Source: sourceAddr,
Pos: fakePos,
},
},
}
// wrapHooks filters hook notifications to only include Download calls
// and to trim off the initFromModuleRootCallName prefix. We'll produce
// our own Install notifications directly below.
wrapHooks := installHooksInitDir{
Wrapped: hooks,
}
getter := reusingGetter{}
_, instDiags := inst.installDescendentModules(fakeRootModule, rootDir, instManifest, true, wrapHooks, getter)
diags = append(diags, instDiags...)
if instDiags.HasErrors() {
return diags
}
// If all of that succeeded then we'll now migrate what was installed
// into the final directory structure.
err = os.MkdirAll(modulesDir, os.ModePerm)
if err != nil {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Failed to create local modules directory",
fmt.Sprintf("Failed to create modules directory %s: %s.", modulesDir, err),
))
return diags
}
recordKeys := make([]string, 0, len(instManifest))
for k := range instManifest {
recordKeys = append(recordKeys, k)
}
sort.Strings(recordKeys)
for _, recordKey := range recordKeys {
record := instManifest[recordKey]
if record.Key == initFromModuleRootCallName {
// We've found the module the user requested, which we must
// now copy into rootDir so it can be used directly.
log.Printf("[TRACE] copying new root module from %s to %s", record.Dir, rootDir)
err := copyDir(rootDir, record.Dir)
if err != nil {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Failed to copy root module",
fmt.Sprintf("Error copying root module %q from %s to %s: %s.", sourceAddr, record.Dir, rootDir, err),
))
continue
}
// We'll try to load the newly-copied module here just so we can
// sniff for any module calls that ../ out of the root directory
// and must thus be rewritten to be absolute addresses again.
// For now we can't do this rewriting automatically, but we'll
// generate an error to help the user do it manually.
mod, _ := earlyconfig.LoadModule(rootDir) // ignore diagnostics since we're just doing value-add here anyway
if mod != nil {
for _, mc := range mod.ModuleCalls {
if pathTraversesUp(mc.Source) {
packageAddr, givenSubdir := splitAddrSubdir(sourceAddr)
newSubdir := filepath.Join(givenSubdir, mc.Source)
if pathTraversesUp(newSubdir) {
// This should never happen in any reasonable
// configuration since this suggests a path that
// traverses up out of the package root. We'll just
// ignore this, since we'll fail soon enough anyway
// trying to resolve this path when this module is
// loaded.
continue
}
var newAddr = packageAddr
if newSubdir != "" {
newAddr = fmt.Sprintf("%s//%s", newAddr, filepath.ToSlash(newSubdir))
}
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Root module references parent directory",
fmt.Sprintf("The requested module %q refers to a module via its parent directory. To use this as a new root module this source string must be rewritten as a remote source address, such as %q.", sourceAddr, newAddr),
))
continue
}
}
}
retManifest[""] = modsdir.Record{
Key: "",
Dir: rootDir,
}
continue
}
if !strings.HasPrefix(record.Key, initFromModuleRootKeyPrefix) {
// Ignore the *real* root module, whose key is empty, since
// we're only interested in the module named "root" and its
// descendants.
continue
}
newKey := record.Key[len(initFromModuleRootKeyPrefix):]
instPath := filepath.Join(modulesDir, newKey)
tempPath := filepath.Join(instDir, record.Key)
// tempPath won't be present for a module that was installed from
// a relative path, so in that case we just record the installation
// directory and assume it was already copied into place as part
// of its parent.
if _, err := os.Stat(tempPath); err != nil {
if !os.IsNotExist(err) {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Failed to stat temporary module install directory",
fmt.Sprintf("Error from stat %s for module %s: %s.", instPath, newKey, err),
))
continue
}
var parentKey string
if lastDot := strings.LastIndexByte(newKey, '.'); lastDot != -1 {
parentKey = newKey[:lastDot]
} else {
parentKey = "" // parent is the root module
}
parentOld := instManifest[initFromModuleRootKeyPrefix+parentKey]
parentNew := retManifest[parentKey]
// We need to figure out which portion of our directory is the
// parent package path and which portion is the subdirectory
// under that.
baseDirRel, err := filepath.Rel(parentOld.Dir, record.Dir)
if err != nil {
// Should never happen, because we constructed both directories
// from the same base and so they must have a common prefix.
panic(err)
}
newDir := filepath.Join(parentNew.Dir, baseDirRel)
log.Printf("[TRACE] relative reference for %s rewritten from %s to %s", newKey, record.Dir, newDir)
newRecord := record // shallow copy
newRecord.Dir = newDir
newRecord.Key = newKey
retManifest[newKey] = newRecord
hooks.Install(newRecord.Key, newRecord.Version, newRecord.Dir)
continue
}
err = os.MkdirAll(instPath, os.ModePerm)
if err != nil {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Failed to create module install directory",
fmt.Sprintf("Error creating directory %s for module %s: %s.", instPath, newKey, err),
))
continue
}
// We copy rather than "rename" here because renaming between directories
// can be tricky in edge-cases like network filesystems, etc.
log.Printf("[TRACE] copying new module %s from %s to %s", newKey, record.Dir, instPath)
err := copyDir(instPath, tempPath)
if err != nil {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Failed to copy descendent module",
fmt.Sprintf("Error copying module %q from %s to %s: %s.", newKey, tempPath, rootDir, err),
))
continue
}
subDir, err := filepath.Rel(tempPath, record.Dir)
if err != nil {
// Should never happen, because we constructed both directories
// from the same base and so they must have a common prefix.
panic(err)
}
newRecord := record // shallow copy
newRecord.Dir = filepath.Join(instPath, subDir)
newRecord.Key = newKey
retManifest[newKey] = newRecord
hooks.Install(newRecord.Key, newRecord.Version, newRecord.Dir)
}
err = retManifest.WriteSnapshotToDir(modulesDir)
if err != nil {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Failed to write module manifest",
fmt.Sprintf("Error writing module manifest: %s.", err),
))
}
if !diags.HasErrors() {
// Try to clean up our temporary directory, but don't worry if we don't
// succeed since it shouldn't hurt anything.
os.RemoveAll(instDir)
}
return diags
}
func pathTraversesUp(path string) bool {
return strings.HasPrefix(filepath.ToSlash(path), "../")
}
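// Editor's sketch (not part of the original source): pathTraversesUp only
// looks for a literal "../" prefix, so callers are expected to pass paths
// that have already been cleaned, e.g. by filepath.Join, which normalizes
// "a/../../mod" down to "../mod". A hypothetical illustration, assuming the
// fmt and path/filepath imports already present in this file:
func examplePathTraversesUp() {
	fmt.Println(pathTraversesUp("../mod"))                              // true
	fmt.Println(pathTraversesUp(filepath.Join("a", "..", "..", "mod"))) // true: Join cleans to "../mod"
	fmt.Println(pathTraversesUp("mod/sub"))                             // false
}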
// installHooksInitDir is an adapter wrapper for an InstallHooks that
// does some fakery to make downloads look like they are happening in their
// final locations, rather than in the temporary loader we use.
//
// It also suppresses "Install" calls entirely, since InitDirFromModule
// does its own installation steps after the initial installation pass
// has completed.
type installHooksInitDir struct {
Wrapped ModuleInstallHooks
ModuleInstallHooksImpl
}
func (h installHooksInitDir)
|
Download
|
identifier_name
|
|
from_module.go
|
.Diagnostics
// The way this function works is pretty ugly, but we accept it because
// -from-module is a less important case than normal module installation
// and so it's better to keep this ugly complexity out here rather than
// adding even more complexity to the normal module installer.
// The target directory must exist but be empty.
{
entries, err := ioutil.ReadDir(rootDir)
if err != nil {
if os.IsNotExist(err) {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Target directory does not exist",
fmt.Sprintf("Cannot initialize non-existent directory %s.", rootDir),
))
} else {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Failed to read target directory",
fmt.Sprintf("Error reading %s to ensure it is empty: %s.", rootDir, err),
))
}
return diags
}
haveEntries := false
for _, entry := range entries {
if entry.Name() == "." || entry.Name() == ".." || entry.Name() == ".terraform" {
continue
}
haveEntries = true
}
if haveEntries {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Can't populate non-empty directory",
fmt.Sprintf("The target directory %s is not empty, so it cannot be initialized with the -from-module=... option.", rootDir),
))
return diags
}
}
instDir := filepath.Join(rootDir, ".terraform/init-from-module")
inst := NewModuleInstaller(instDir, reg)
log.Printf("[DEBUG] installing modules in %s to initialize working directory from %q", instDir, sourceAddr)
os.RemoveAll(instDir) // if this fails then we'll fail on MkdirAll below too
err := os.MkdirAll(instDir, os.ModePerm)
if err != nil {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Failed to create temporary directory",
fmt.Sprintf("Failed to create temporary directory %s: %s.", instDir, err),
))
return diags
}
instManifest := make(modsdir.Manifest)
retManifest := make(modsdir.Manifest)
fakeFilename := fmt.Sprintf("-from-module=%q", sourceAddr)
fakePos := tfconfig.SourcePos{
Filename: fakeFilename,
Line: 1,
}
// -from-module allows relative paths but it's different than a normal
// module address where it'd be resolved relative to the module call
// (which is synthetic, here.) To address this, we'll just patch up any
// relative paths to be absolute paths before we run, ensuring we'll
// get the right result. This also, as an important side-effect, ensures
// that the result will be "downloaded" with go-getter (copied from the
// source location), rather than just recorded as a relative path.
{
maybePath := filepath.ToSlash(sourceAddr)
if maybePath == "." || strings.HasPrefix(maybePath, "./") || strings.HasPrefix(maybePath, "../") {
if wd, err := os.Getwd(); err == nil {
sourceAddr = filepath.Join(wd, sourceAddr)
log.Printf("[TRACE] -from-module relative path rewritten to absolute path %s", sourceAddr)
}
}
}
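// Editor's note (illustrative, not in the original source): with a working
// directory of /home/user/proj, "-from-module=./modules/app" is rewritten by
// the block above to "/home/user/proj/modules/app", which go-getter then
// treats as a local package to copy rather than a relative path to record.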
// Now we need to create an artificial root module that will seed our
// installation process.
fakeRootModule := &tfconfig.Module{
ModuleCalls: map[string]*tfconfig.ModuleCall{
initFromModuleRootCallName: {
Name: initFromModuleRootCallName,
Source: sourceAddr,
Pos: fakePos,
},
},
}
// wrapHooks filters hook notifications to only include Download calls
// and to trim off the initFromModuleRootCallName prefix. We'll produce
// our own Install notifications directly below.
wrapHooks := installHooksInitDir{
Wrapped: hooks,
}
getter := reusingGetter{}
_, instDiags := inst.installDescendentModules(fakeRootModule, rootDir, instManifest, true, wrapHooks, getter)
diags = append(diags, instDiags...)
if instDiags.HasErrors() {
return diags
}
// If all of that succeeded then we'll now migrate what was installed
// into the final directory structure.
err = os.MkdirAll(modulesDir, os.ModePerm)
if err != nil
|
recordKeys := make([]string, 0, len(instManifest))
for k := range instManifest {
recordKeys = append(recordKeys, k)
}
sort.Strings(recordKeys)
for _, recordKey := range recordKeys {
record := instManifest[recordKey]
if record.Key == initFromModuleRootCallName {
// We've found the module the user requested, which we must
// now copy into rootDir so it can be used directly.
log.Printf("[TRACE] copying new root module from %s to %s", record.Dir, rootDir)
err := copyDir(rootDir, record.Dir)
if err != nil {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Failed to copy root module",
fmt.Sprintf("Error copying root module %q from %s to %s: %s.", sourceAddr, record.Dir, rootDir, err),
))
continue
}
// We'll try to load the newly-copied module here just so we can
// sniff for any module calls that ../ out of the root directory
// and must thus be rewritten to be absolute addresses again.
// For now we can't do this rewriting automatically, but we'll
// generate an error to help the user do it manually.
mod, _ := earlyconfig.LoadModule(rootDir) // ignore diagnostics since we're just doing value-add here anyway
if mod != nil {
for _, mc := range mod.ModuleCalls {
if pathTraversesUp(mc.Source) {
packageAddr, givenSubdir := splitAddrSubdir(sourceAddr)
newSubdir := filepath.Join(givenSubdir, mc.Source)
if pathTraversesUp(newSubdir) {
// This should never happen in any reasonable
// configuration since this suggests a path that
// traverses up out of the package root. We'll just
// ignore this, since we'll fail soon enough anyway
// trying to resolve this path when this module is
// loaded.
continue
}
var newAddr = packageAddr
if newSubdir != "" {
newAddr = fmt.Sprintf("%s//%s", newAddr, filepath.ToSlash(newSubdir))
}
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Root module references parent directory",
fmt.Sprintf("The requested module %q refers to a module via its parent directory. To use this as a new root module this source string must be rewritten as a remote source address, such as %q.", sourceAddr, newAddr),
))
continue
}
}
}
retManifest[""] = modsdir.Record{
Key: "",
Dir: rootDir,
}
continue
}
if !strings.HasPrefix(record.Key, initFromModuleRootKeyPrefix) {
// Ignore the *real* root module, whose key is empty, since
// we're only interested in the module named "root" and its
// descendents.
continue
}
newKey := record.Key[len(initFromModuleRootKeyPrefix):]
instPath := filepath.Join(modulesDir, newKey)
tempPath := filepath.Join(instDir, record.Key)
// tempPath won't be present for a module that was installed from
// a relative path, so in that case we just record the installation
// directory and assume it was already copied into place as part
// of its parent.
if _, err := os.Stat(tempPath); err != nil {
if !os.IsNotExist(err) {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Failed to stat temporary module install directory",
fmt.Sprintf("Error from stat %s for module %s: %s.", instPath, newKey, err),
))
continue
}
var parentKey string
if lastDot := strings.LastIndexByte(newKey, '.'); lastDot != -1 {
parentKey = newKey[:lastDot]
} else {
parentKey = "" // parent is the root module
}
parentOld := instManifest[initFromModuleRootKeyPrefix+parentKey]
parentNew := retManifest[parentKey]
|
{
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Failed to create local modules directory",
fmt.Sprintf("Failed to create modules directory %s: %s.", modulesDir, err),
))
return diags
}
|
conditional_block
|
proxyserver.go
|
(servers []types.DatabaseServer) []types.DatabaseServer {
sort.Sort(types.DatabaseServers(servers))
return servers
}
var (
// mu protects the shuffleFunc global access.
mu sync.RWMutex
// shuffleFunc provides shuffle behavior for multiple database agents.
shuffleFunc ShuffleFunc = ShuffleRandom
)
// SetShuffleFunc sets the shuffle behavior when proxying to multiple agents.
func SetShuffleFunc(fn ShuffleFunc) {
mu.Lock()
defer mu.Unlock()
shuffleFunc = fn
}
// getShuffleFunc returns the configured function used to shuffle agents.
func getShuffleFunc() ShuffleFunc {
mu.RLock()
defer mu.RUnlock()
return shuffleFunc
}
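// Editor's sketch (hypothetical, not in the original source): tests can make
// agent selection deterministic by swapping the shuffle behavior before
// dialing and restoring the random default afterwards:
func useIdentityShuffleForTests() {
	SetShuffleFunc(func(servers []types.DatabaseServer) []types.DatabaseServer {
		return servers // keep whatever order the access point returned
	})
	// ... exercise the proxy ...
	SetShuffleFunc(ShuffleRandom)
}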
// CheckAndSetDefaults validates the config and sets default values.
func (c *ProxyServerConfig) CheckAndSetDefaults() error {
if c.AccessPoint == nil {
return trace.BadParameter("missing AccessPoint")
}
if c.AuthClient == nil {
return trace.BadParameter("missing AuthClient")
}
if c.Authorizer == nil {
return trace.BadParameter("missing Authorizer")
}
if c.Tunnel == nil {
return trace.BadParameter("missing Tunnel")
}
if c.TLSConfig == nil {
return trace.BadParameter("missing TLSConfig")
}
if c.Clock == nil {
c.Clock = clockwork.NewRealClock()
}
if c.ServerID == "" {
return trace.BadParameter("missing ServerID")
}
if c.LockWatcher == nil {
return trace.BadParameter("missing LockWatcher")
}
if c.Limiter == nil {
// Empty config means no connection limit.
connLimiter, err := limiter.NewLimiter(limiter.Config{})
if err != nil {
return trace.Wrap(err)
}
c.Limiter = connLimiter
}
return nil
}
// NewProxyServer creates a new instance of the database proxy server.
func NewProxyServer(ctx context.Context, config ProxyServerConfig) (*ProxyServer, error) {
if err := config.CheckAndSetDefaults(); err != nil {
return nil, trace.Wrap(err)
}
server := &ProxyServer{
cfg: config,
middleware: &auth.Middleware{
AccessPoint: config.AccessPoint,
AcceptedUsage: []string{teleport.UsageDatabaseOnly},
},
closeCtx: ctx,
log: logrus.WithField(trace.Component, "db:proxy"),
}
server.cfg.TLSConfig.ClientAuth = tls.RequireAndVerifyClientCert
server.cfg.TLSConfig.GetConfigForClient = getConfigForClient(
server.cfg.TLSConfig, server.cfg.AccessPoint, server.log, types.UserCA)
return server, nil
}
// ServePostgres starts accepting Postgres connections from the provided listener.
func (s *ProxyServer) ServePostgres(listener net.Listener) error {
s.log.Debug("Started database proxy.")
defer s.log.Debug("Database proxy exited.")
for {
// Accept the connection from the database client, such as psql.
// The connection is expected to come through via multiplexer.
clientConn, err := listener.Accept()
if err != nil {
if utils.IsOKNetworkError(err) || trace.IsConnectionProblem(err) {
return nil
}
return trace.Wrap(err)
}
// Let the appropriate proxy handle the connection and go back
// to listening.
go func() {
defer clientConn.Close()
err := s.PostgresProxy().HandleConnection(s.closeCtx, clientConn)
if err != nil && !utils.IsOKNetworkError(err) {
s.log.WithError(err).Warn("Failed to handle Postgres client connection.")
}
}()
}
}
// ServeMySQL starts accepting MySQL client connections.
func (s *ProxyServer) ServeMySQL(listener net.Listener) error {
s.log.Debug("Started MySQL proxy.")
defer s.log.Debug("MySQL proxy exited.")
for {
// Accept the connection from a MySQL client.
clientConn, err := listener.Accept()
if err != nil {
if utils.IsOKNetworkError(err) || trace.IsConnectionProblem(err) {
return nil
}
return trace.Wrap(err)
}
// Pass over to the MySQL proxy handler.
go func() {
defer clientConn.Close()
err := s.MySQLProxy().HandleConnection(s.closeCtx, clientConn)
if err != nil && !utils.IsOKNetworkError(err) {
s.log.WithError(err).Error("Failed to handle MySQL client connection.")
}
}()
}
}
// ServeMongo starts accepting Mongo client connections.
func (s *ProxyServer) ServeMongo(listener net.Listener, tlsConfig *tls.Config) error {
return s.serveGenericTLS(listener, tlsConfig, defaults.ProtocolMongoDB)
}
// serveGenericTLS starts accepting plain TLS database client connections.
// dbName is used only for logging purposes.
func (s *ProxyServer) serveGenericTLS(listener net.Listener, tlsConfig *tls.Config, dbName string) error {
s.log.Debugf("Started %s proxy.", dbName)
defer s.log.Debugf("%s proxy exited.", dbName)
for {
clientConn, err := listener.Accept()
if err != nil {
if utils.IsOKNetworkError(err) || trace.IsConnectionProblem(err) {
return nil
}
return trace.Wrap(err)
}
go func() {
defer clientConn.Close()
tlsConn := tls.Server(clientConn, tlsConfig)
if err := tlsConn.Handshake(); err != nil {
if !utils.IsOKNetworkError(err) {
s.log.WithError(err).Errorf("%s TLS handshake failed.", dbName)
}
return
}
err := s.handleConnection(tlsConn)
if err != nil {
s.log.WithError(err).Errorf("Failed to handle %s client connection.", dbName)
}
}()
}
}
// ServeTLS starts accepting database connections that use a plain TLS connection.
func (s *ProxyServer) ServeTLS(listener net.Listener) error {
s.log.Debug("Started database TLS proxy.")
defer s.log.Debug("Database TLS proxy exited.")
for {
clientConn, err := listener.Accept()
if err != nil {
if utils.IsOKNetworkError(err) || trace.IsConnectionProblem(err) {
return nil
}
return trace.Wrap(err)
}
go func() {
defer clientConn.Close()
err := s.handleConnection(clientConn)
if err != nil {
s.log.WithError(err).Error("Failed to handle database TLS connection.")
}
}()
}
}
func (s *ProxyServer) handleConnection(conn net.Conn) error {
s.log.Debugf("Accepted TLS database connection from %v.", conn.RemoteAddr())
tlsConn, ok := conn.(*tls.Conn)
if !ok {
return trace.BadParameter("expected *tls.Conn, got %T", conn)
}
clientIP, err := utils.ClientIPFromConn(conn)
if err != nil {
return trace.Wrap(err)
}
// Apply connection and rate limiting.
release, err := s.cfg.Limiter.RegisterRequestAndConnection(clientIP)
if err != nil {
return trace.Wrap(err)
}
defer release()
proxyCtx, err := s.Authorize(s.closeCtx, tlsConn, common.ConnectParams{
ClientIP: clientIP,
})
if err != nil {
return trace.Wrap(err)
}
switch proxyCtx.Identity.RouteToDatabase.Protocol {
case defaults.ProtocolPostgres, defaults.ProtocolCockroachDB:
return s.PostgresProxyNoTLS().HandleConnection(s.closeCtx, tlsConn)
case defaults.ProtocolMySQL:
version := getMySQLVersionFromServer(proxyCtx.Servers)
// Set the version in the context to match a behavior in other handlers.
ctx := context.WithValue(s.closeCtx, dbutils.ContextMySQLServerVersion, version)
return s.MySQLProxyNoTLS().HandleConnection(ctx, tlsConn)
case defaults.ProtocolSQLServer:
return s.SQLServerProxy().HandleConnection(s.closeCtx, proxyCtx, tlsConn)
}
serviceConn, err := s.Connect(s.closeCtx, proxyCtx)
if err != nil {
return trace.Wrap(err)
}
defer serviceConn.Close()
err = s.Proxy(s.closeCtx, proxyCtx, tlsConn, serviceConn)
if err != nil {
return trace.Wrap(err)
}
return nil
}
// getMySQLVersionFromServer returns the MySQL version reported by an instance
// on its last connection, or the MySQL.ServerVersion set in configuration if
// the former is not available. It picks a random server when more than one is available.
func getMySQLVersionFromServer(servers []types.DatabaseServer) string {
count := len(servers)
db := servers[rand.Intn(count)].GetDatabase()
return db.GetMySQLServerVersion()
}
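// Editor's note: rand.Intn panics when its argument is zero, so the function
// above assumes servers is non-empty -- which appears to hold here because
// Authorize only yields a ProxyContext when matching servers were found.
// A defensive variant might look like this (hypothetical, not upstream):
func getMySQLVersionFromServerOrDefault(servers []types.DatabaseServer) string {
	if len(servers) == 0 {
		return "" // hypothetical fallback; a real caller would pick its own default
	}
	return servers[rand.Intn(len(servers))].GetDatabase().GetMySQLServerVersion()
}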
// PostgresProxy returns a new instance of the Postgres protocol aware proxy.
func (s *ProxyServer) PostgresProxy() *postgres.Proxy {
return &postgres.Proxy{
TLSConfig: s.cfg.TLSConfig,
Middleware: s.middleware,
Service: s,
Limiter: s.cfg.Limiter,
Log: s.log,
}
}
// PostgresProxyNoTLS returns a new instance of the non-TLS Postgres proxy.
func (s *ProxyServer) PostgresProxyNoTLS() *postgres.Proxy {
return &postgres.Proxy{
Middleware: s.middleware,
Service: s,
Limiter: s.cfg.Limiter,
Log: s.log,
}
}
// MySQLProxy returns a new instance of the MySQL protocol aware proxy.
func (s *ProxyServer) MySQLProxy() *mysql.Proxy
|
ShuffleSort
|
identifier_name
|
|
proxyserver.go
|
return trace.BadParameter("missing AuthClient")
}
if c.Authorizer == nil {
return trace.BadParameter("missing Authorizer")
}
if c.Tunnel == nil {
return trace.BadParameter("missing Tunnel")
}
if c.TLSConfig == nil {
return trace.BadParameter("missing TLSConfig")
}
if c.Clock == nil {
c.Clock = clockwork.NewRealClock()
}
if c.ServerID == "" {
return trace.BadParameter("missing ServerID")
}
if c.LockWatcher == nil {
return trace.BadParameter("missing LockWatcher")
}
if c.Limiter == nil {
// Empty config means no connection limit.
connLimiter, err := limiter.NewLimiter(limiter.Config{})
if err != nil {
return trace.Wrap(err)
}
c.Limiter = connLimiter
}
return nil
}
// NewProxyServer creates a new instance of the database proxy server.
func NewProxyServer(ctx context.Context, config ProxyServerConfig) (*ProxyServer, error) {
if err := config.CheckAndSetDefaults(); err != nil {
return nil, trace.Wrap(err)
}
server := &ProxyServer{
cfg: config,
middleware: &auth.Middleware{
AccessPoint: config.AccessPoint,
AcceptedUsage: []string{teleport.UsageDatabaseOnly},
},
closeCtx: ctx,
log: logrus.WithField(trace.Component, "db:proxy"),
}
server.cfg.TLSConfig.ClientAuth = tls.RequireAndVerifyClientCert
server.cfg.TLSConfig.GetConfigForClient = getConfigForClient(
server.cfg.TLSConfig, server.cfg.AccessPoint, server.log, types.UserCA)
return server, nil
}
// ServePostgres starts accepting Postgres connections from the provided listener.
func (s *ProxyServer) ServePostgres(listener net.Listener) error {
s.log.Debug("Started database proxy.")
defer s.log.Debug("Database proxy exited.")
for {
// Accept the connection from the database client, such as psql.
// The connection is expected to come through via multiplexer.
clientConn, err := listener.Accept()
if err != nil {
if utils.IsOKNetworkError(err) || trace.IsConnectionProblem(err)
|
return trace.Wrap(err)
}
// Let the appropriate proxy handle the connection and go back
// to listening.
go func() {
defer clientConn.Close()
err := s.PostgresProxy().HandleConnection(s.closeCtx, clientConn)
if err != nil && !utils.IsOKNetworkError(err) {
s.log.WithError(err).Warn("Failed to handle Postgres client connection.")
}
}()
}
}
// ServeMySQL starts accepting MySQL client connections.
func (s *ProxyServer) ServeMySQL(listener net.Listener) error {
s.log.Debug("Started MySQL proxy.")
defer s.log.Debug("MySQL proxy exited.")
for {
// Accept the connection from a MySQL client.
clientConn, err := listener.Accept()
if err != nil {
if utils.IsOKNetworkError(err) || trace.IsConnectionProblem(err) {
return nil
}
return trace.Wrap(err)
}
// Pass over to the MySQL proxy handler.
go func() {
defer clientConn.Close()
err := s.MySQLProxy().HandleConnection(s.closeCtx, clientConn)
if err != nil && !utils.IsOKNetworkError(err) {
s.log.WithError(err).Error("Failed to handle MySQL client connection.")
}
}()
}
}
// ServeMongo starts accepting Mongo client connections.
func (s *ProxyServer) ServeMongo(listener net.Listener, tlsConfig *tls.Config) error {
return s.serveGenericTLS(listener, tlsConfig, defaults.ProtocolMongoDB)
}
// serveGenericTLS starts accepting plain TLS database client connections.
// dbName is used only for logging purposes.
func (s *ProxyServer) serveGenericTLS(listener net.Listener, tlsConfig *tls.Config, dbName string) error {
s.log.Debugf("Started %s proxy.", dbName)
defer s.log.Debugf("%s proxy exited.", dbName)
for {
clientConn, err := listener.Accept()
if err != nil {
if utils.IsOKNetworkError(err) || trace.IsConnectionProblem(err) {
return nil
}
return trace.Wrap(err)
}
go func() {
defer clientConn.Close()
tlsConn := tls.Server(clientConn, tlsConfig)
if err := tlsConn.Handshake(); err != nil {
if !utils.IsOKNetworkError(err) {
s.log.WithError(err).Errorf("%s TLS handshake failed.", dbName)
}
return
}
err := s.handleConnection(tlsConn)
if err != nil {
s.log.WithError(err).Errorf("Failed to handle %s client connection.", dbName)
}
}()
}
}
// ServeTLS starts accepting database connections that use a plain TLS connection.
func (s *ProxyServer) ServeTLS(listener net.Listener) error {
s.log.Debug("Started database TLS proxy.")
defer s.log.Debug("Database TLS proxy exited.")
for {
clientConn, err := listener.Accept()
if err != nil {
if utils.IsOKNetworkError(err) || trace.IsConnectionProblem(err) {
return nil
}
return trace.Wrap(err)
}
go func() {
defer clientConn.Close()
err := s.handleConnection(clientConn)
if err != nil {
s.log.WithError(err).Error("Failed to handle database TLS connection.")
}
}()
}
}
func (s *ProxyServer) handleConnection(conn net.Conn) error {
s.log.Debugf("Accepted TLS database connection from %v.", conn.RemoteAddr())
tlsConn, ok := conn.(*tls.Conn)
if !ok {
return trace.BadParameter("expected *tls.Conn, got %T", conn)
}
clientIP, err := utils.ClientIPFromConn(conn)
if err != nil {
return trace.Wrap(err)
}
// Apply connection and rate limiting.
release, err := s.cfg.Limiter.RegisterRequestAndConnection(clientIP)
if err != nil {
return trace.Wrap(err)
}
defer release()
proxyCtx, err := s.Authorize(s.closeCtx, tlsConn, common.ConnectParams{
ClientIP: clientIP,
})
if err != nil {
return trace.Wrap(err)
}
switch proxyCtx.Identity.RouteToDatabase.Protocol {
case defaults.ProtocolPostgres, defaults.ProtocolCockroachDB:
return s.PostgresProxyNoTLS().HandleConnection(s.closeCtx, tlsConn)
case defaults.ProtocolMySQL:
version := getMySQLVersionFromServer(proxyCtx.Servers)
// Set the version in the context to match a behavior in other handlers.
ctx := context.WithValue(s.closeCtx, dbutils.ContextMySQLServerVersion, version)
return s.MySQLProxyNoTLS().HandleConnection(ctx, tlsConn)
case defaults.ProtocolSQLServer:
return s.SQLServerProxy().HandleConnection(s.closeCtx, proxyCtx, tlsConn)
}
serviceConn, err := s.Connect(s.closeCtx, proxyCtx)
if err != nil {
return trace.Wrap(err)
}
defer serviceConn.Close()
err = s.Proxy(s.closeCtx, proxyCtx, tlsConn, serviceConn)
if err != nil {
return trace.Wrap(err)
}
return nil
}
// getMySQLVersionFromServer returns the MySQL version reported by an instance
// on its last connection, or the MySQL.ServerVersion set in configuration if
// the former is not available. It picks a random server when more than one is available.
func getMySQLVersionFromServer(servers []types.DatabaseServer) string {
count := len(servers)
db := servers[rand.Intn(count)].GetDatabase()
return db.GetMySQLServerVersion()
}
// PostgresProxy returns a new instance of the Postgres protocol aware proxy.
func (s *ProxyServer) PostgresProxy() *postgres.Proxy {
return &postgres.Proxy{
TLSConfig: s.cfg.TLSConfig,
Middleware: s.middleware,
Service: s,
Limiter: s.cfg.Limiter,
Log: s.log,
}
}
// PostgresProxyNoTLS returns a new instance of the non-TLS Postgres proxy.
func (s *ProxyServer) PostgresProxyNoTLS() *postgres.Proxy {
return &postgres.Proxy{
Middleware: s.middleware,
Service: s,
Limiter: s.cfg.Limiter,
Log: s.log,
}
}
// MySQLProxy returns a new instance of the MySQL protocol aware proxy.
func (s *ProxyServer) MySQLProxy() *mysql.Proxy {
return &mysql.Proxy{
TLSConfig: s.cfg.TLSConfig,
Middleware: s.middleware,
Service: s,
Limiter: s.cfg.Limiter,
Log: s.log,
}
}
// MySQLProxyNoTLS returns a new instance of the non-TLS MySQL proxy.
func (s *ProxyServer) MySQLProxyNoTLS() *mysql.Proxy {
return &mysql.Proxy{
Middleware: s.middleware,
Service: s,
Limiter: s.cfg.Limiter,
Log: s.log,
}
}
// SQLServerProxy returns a new instance of the SQL Server protocol aware proxy.
func (s *ProxyServer) SQLServerProxy() *sqlserver.Proxy {
return &sqlserver.Proxy{
Middleware: s.middleware,
Service: s,
Log: s.log,
}
}
// Connect connects to the database server running on a remote cluster
// over reverse tunnel
|
{
return nil
}
|
conditional_block
|
proxyserver.go
|
return trace.BadParameter("missing AuthClient")
}
if c.Authorizer == nil {
return trace.BadParameter("missing Authorizer")
}
if c.Tunnel == nil {
return trace.BadParameter("missing Tunnel")
}
if c.TLSConfig == nil {
return trace.BadParameter("missing TLSConfig")
}
if c.Clock == nil {
c.Clock = clockwork.NewRealClock()
}
if c.ServerID == "" {
return trace.BadParameter("missing ServerID")
}
if c.LockWatcher == nil {
return trace.BadParameter("missing LockWatcher")
}
if c.Limiter == nil {
// Empty config means no connection limit.
connLimiter, err := limiter.NewLimiter(limiter.Config{})
if err != nil {
return trace.Wrap(err)
}
c.Limiter = connLimiter
}
return nil
}
// NewProxyServer creates a new instance of the database proxy server.
func NewProxyServer(ctx context.Context, config ProxyServerConfig) (*ProxyServer, error) {
if err := config.CheckAndSetDefaults(); err != nil {
return nil, trace.Wrap(err)
}
server := &ProxyServer{
cfg: config,
middleware: &auth.Middleware{
AccessPoint: config.AccessPoint,
AcceptedUsage: []string{teleport.UsageDatabaseOnly},
},
closeCtx: ctx,
log: logrus.WithField(trace.Component, "db:proxy"),
}
server.cfg.TLSConfig.ClientAuth = tls.RequireAndVerifyClientCert
server.cfg.TLSConfig.GetConfigForClient = getConfigForClient(
server.cfg.TLSConfig, server.cfg.AccessPoint, server.log, types.UserCA)
return server, nil
}
// ServePostgres starts accepting Postgres connections from the provided listener.
func (s *ProxyServer) ServePostgres(listener net.Listener) error {
s.log.Debug("Started database proxy.")
defer s.log.Debug("Database proxy exited.")
for {
// Accept the connection from the database client, such as psql.
// The connection is expected to come through via multiplexer.
clientConn, err := listener.Accept()
if err != nil {
if utils.IsOKNetworkError(err) || trace.IsConnectionProblem(err) {
return nil
}
return trace.Wrap(err)
}
// Let the appropriate proxy handle the connection and go back
// to listening.
go func() {
defer clientConn.Close()
err := s.PostgresProxy().HandleConnection(s.closeCtx, clientConn)
if err != nil && !utils.IsOKNetworkError(err) {
s.log.WithError(err).Warn("Failed to handle Postgres client connection.")
}
}()
}
}
// ServeMySQL starts accepting MySQL client connections.
func (s *ProxyServer) ServeMySQL(listener net.Listener) error {
s.log.Debug("Started MySQL proxy.")
defer s.log.Debug("MySQL proxy exited.")
for {
// Accept the connection from a MySQL client.
clientConn, err := listener.Accept()
if err != nil {
if utils.IsOKNetworkError(err) || trace.IsConnectionProblem(err) {
return nil
}
return trace.Wrap(err)
}
// Pass over to the MySQL proxy handler.
go func() {
defer clientConn.Close()
err := s.MySQLProxy().HandleConnection(s.closeCtx, clientConn)
if err != nil && !utils.IsOKNetworkError(err) {
s.log.WithError(err).Error("Failed to handle MySQL client connection.")
}
}()
}
}
// ServeMongo starts accepting Mongo client connections.
func (s *ProxyServer) ServeMongo(listener net.Listener, tlsConfig *tls.Config) error {
return s.serveGenericTLS(listener, tlsConfig, defaults.ProtocolMongoDB)
}
// serveGenericTLS starts accepting plain TLS database client connections.
// dbName is used only for logging purposes.
func (s *ProxyServer) serveGenericTLS(listener net.Listener, tlsConfig *tls.Config, dbName string) error {
s.log.Debugf("Started %s proxy.", dbName)
defer s.log.Debugf("%s proxy exited.", dbName)
for {
clientConn, err := listener.Accept()
if err != nil {
if utils.IsOKNetworkError(err) || trace.IsConnectionProblem(err) {
return nil
}
return trace.Wrap(err)
}
go func() {
defer clientConn.Close()
tlsConn := tls.Server(clientConn, tlsConfig)
if err := tlsConn.Handshake(); err != nil {
if !utils.IsOKNetworkError(err) {
s.log.WithError(err).Errorf("%s TLS handshake failed.", dbName)
}
return
}
err := s.handleConnection(tlsConn)
if err != nil {
s.log.WithError(err).Errorf("Failed to handle %s client connection.", dbName)
}
}()
}
}
// ServeTLS starts accepting database connections that use a plain TLS connection.
func (s *ProxyServer) ServeTLS(listener net.Listener) error {
s.log.Debug("Started database TLS proxy.")
defer s.log.Debug("Database TLS proxy exited.")
for {
clientConn, err := listener.Accept()
if err != nil {
if utils.IsOKNetworkError(err) || trace.IsConnectionProblem(err) {
return nil
}
return trace.Wrap(err)
}
go func() {
defer clientConn.Close()
err := s.handleConnection(clientConn)
if err != nil {
s.log.WithError(err).Error("Failed to handle database TLS connection.")
}
}()
}
}
func (s *ProxyServer) handleConnection(conn net.Conn) error {
s.log.Debugf("Accepted TLS database connection from %v.", conn.RemoteAddr())
tlsConn, ok := conn.(*tls.Conn)
if !ok {
return trace.BadParameter("expected *tls.Conn, got %T", conn)
}
clientIP, err := utils.ClientIPFromConn(conn)
if err != nil {
return trace.Wrap(err)
}
// Apply connection and rate limiting.
release, err := s.cfg.Limiter.RegisterRequestAndConnection(clientIP)
if err != nil {
return trace.Wrap(err)
}
defer release()
proxyCtx, err := s.Authorize(s.closeCtx, tlsConn, common.ConnectParams{
ClientIP: clientIP,
})
if err != nil {
return trace.Wrap(err)
}
switch proxyCtx.Identity.RouteToDatabase.Protocol {
case defaults.ProtocolPostgres, defaults.ProtocolCockroachDB:
return s.PostgresProxyNoTLS().HandleConnection(s.closeCtx, tlsConn)
case defaults.ProtocolMySQL:
version := getMySQLVersionFromServer(proxyCtx.Servers)
// Set the version in the context to match a behavior in other handlers.
ctx := context.WithValue(s.closeCtx, dbutils.ContextMySQLServerVersion, version)
return s.MySQLProxyNoTLS().HandleConnection(ctx, tlsConn)
case defaults.ProtocolSQLServer:
return s.SQLServerProxy().HandleConnection(s.closeCtx, proxyCtx, tlsConn)
}
serviceConn, err := s.Connect(s.closeCtx, proxyCtx)
if err != nil {
return trace.Wrap(err)
}
defer serviceConn.Close()
err = s.Proxy(s.closeCtx, proxyCtx, tlsConn, serviceConn)
if err != nil {
return trace.Wrap(err)
}
return nil
}
// getMySQLVersionFromServer returns the MySQL version reported by an instance
// on its last connection, or the MySQL.ServerVersion set in configuration if
// the former is not available. It picks a random server when more than one is available.
func getMySQLVersionFromServer(servers []types.DatabaseServer) string {
count := len(servers)
db := servers[rand.Intn(count)].GetDatabase()
return db.GetMySQLServerVersion()
}
// PostgresProxy returns a new instance of the Postgres protocol aware proxy.
func (s *ProxyServer) PostgresProxy() *postgres.Proxy {
return &postgres.Proxy{
TLSConfig: s.cfg.TLSConfig,
Middleware: s.middleware,
Service: s,
Limiter: s.cfg.Limiter,
Log: s.log,
}
}
// PostgresProxyNoTLS returns a new instance of the non-TLS Postgres proxy.
func (s *ProxyServer) PostgresProxyNoTLS() *postgres.Proxy {
return &postgres.Proxy{
Middleware: s.middleware,
Service: s,
Limiter: s.cfg.Limiter,
Log: s.log,
}
}
// MySQLProxy returns a new instance of the MySQL protocol aware proxy.
func (s *ProxyServer) MySQLProxy() *mysql.Proxy {
return &mysql.Proxy{
TLSConfig: s.cfg.TLSConfig,
Middleware: s.middleware,
Service: s,
Limiter: s.cfg.Limiter,
Log: s.log,
}
}
// MySQLProxyNoTLS returns a new instance of the non-TLS MySQL proxy.
func (s *ProxyServer) MySQLProxyNoTLS() *mysql.Proxy {
return &mysql.Proxy{
Middleware: s.middleware,
Service: s,
|
// SQLServerProxy returns a new instance of the SQL Server protocol aware proxy.
func (s *ProxyServer) SQLServerProxy() *sqlserver.Proxy {
return &sqlserver.Proxy{
Middleware: s.middleware,
Service: s,
Log: s.log,
}
}
// Connect connects to the database server running on a remote cluster
// over reverse tunnel and
|
Limiter: s.cfg.Limiter,
Log: s.log,
}
}
|
random_line_split
|
proxyserver.go
|
// SQLServerProxy returns a new instance of the SQL Server protocol aware proxy.
func (s *ProxyServer) SQLServerProxy() *sqlserver.Proxy {
return &sqlserver.Proxy{
Middleware: s.middleware,
Service: s,
Log: s.log,
}
}
// Connect connects to the database server running on a remote cluster
// over reverse tunnel and upgrades this end of the connection to TLS so
// the identity can be passed over it.
//
// The passed in context is expected to contain the identity information
// decoded from the client certificate by auth.Middleware.
//
// Implements common.Service.
func (s *ProxyServer) Connect(ctx context.Context, proxyCtx *common.ProxyContext) (net.Conn, error) {
// There may be multiple database servers proxying the same database. If
// we get a connection problem error trying to dial one of them, likely
// the database server is down so try the next one.
for _, server := range getShuffleFunc()(proxyCtx.Servers) {
s.log.Debugf("Dialing to %v.", server)
tlsConfig, err := s.getConfigForServer(ctx, proxyCtx.Identity, server)
if err != nil {
return nil, trace.Wrap(err)
}
serviceConn, err := proxyCtx.Cluster.Dial(reversetunnel.DialParams{
From: &utils.NetAddr{AddrNetwork: "tcp", Addr: "@db-proxy"},
To: &utils.NetAddr{AddrNetwork: "tcp", Addr: reversetunnel.LocalNode},
ServerID: fmt.Sprintf("%v.%v", server.GetHostID(), proxyCtx.Cluster.GetName()),
ConnType: types.DatabaseTunnel,
ProxyIDs: server.GetProxyIDs(),
})
if err != nil {
// If an agent is down, we'll retry on the next one (if available).
if isReverseTunnelDownError(err) {
s.log.WithError(err).Warnf("Failed to dial database %v.", server)
continue
}
return nil, trace.Wrap(err)
}
// Upgrade the connection so the client identity can be passed to the
// remote server during TLS handshake. On the remote side, the connection
// received from the reverse tunnel will be handled by tls.Server.
serviceConn = tls.Client(serviceConn, tlsConfig)
return serviceConn, nil
}
return nil, trace.BadParameter("failed to connect to any of the database servers")
}
// isReverseTunnelDownError returns true if the provided error indicates that
// the reverse tunnel connection is down, e.g. because the agent is down.
func isReverseTunnelDownError(err error) bool {
return trace.IsConnectionProblem(err) ||
strings.Contains(err.Error(), reversetunnel.NoDatabaseTunnel)
}
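// Editor's sketch (hypothetical, not in the original source): this predicate
// is what lets Connect above fail over between agents -- connection-problem
// errors move on to the next server, anything else aborts the dial loop:
func exampleIsReverseTunnelDownError() {
	err := trace.ConnectionProblem(nil, "agent tunnel is offline")
	fmt.Println(isReverseTunnelDownError(err)) // true: safe to try the next agent
}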
// Proxy starts proxying all traffic received from database client between
// this proxy and Teleport database service over reverse tunnel.
//
// Implements common.Service.
func (s *ProxyServer) Proxy(ctx context.Context, proxyCtx *common.ProxyContext, clientConn, serviceConn net.Conn) error {
// Wrap a client connection into monitor that auto-terminates
// idle connection and connection with expired cert.
tc, err := monitorConn(ctx, monitorConnConfig{
conn: clientConn,
lockWatcher: s.cfg.LockWatcher,
lockTargets: proxyCtx.AuthContext.LockTargets(),
identity: proxyCtx.AuthContext.Identity.GetIdentity(),
checker: proxyCtx.AuthContext.Checker,
clock: s.cfg.Clock,
serverID: s.cfg.ServerID,
authClient: s.cfg.AuthClient,
teleportUser: proxyCtx.AuthContext.Identity.GetIdentity().Username,
emitter: s.cfg.Emitter,
log: s.log,
ctx: s.closeCtx,
})
if err != nil {
clientConn.Close()
serviceConn.Close()
return trace.Wrap(err)
}
errCh := make(chan error, 2)
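// Editor's note: the buffer of 2 lets both copy goroutines deliver their
// results even if this function returns early on ctx.Done(), so neither
// goroutine is left blocked sending on the channel.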
go func() {
defer s.log.Debug("Stop proxying from client to service.")
defer serviceConn.Close()
defer tc.Close()
_, err := io.Copy(serviceConn, tc)
errCh <- err
}()
go func() {
defer s.log.Debug("Stop proxying from service to client.")
defer serviceConn.Close()
defer tc.Close()
_, err := io.Copy(tc, serviceConn)
errCh <- err
}()
var errs []error
for i := 0; i < 2; i++ {
select {
case err := <-errCh:
if err != nil && !utils.IsOKNetworkError(err) {
s.log.WithError(err).Warn("Connection problem.")
errs = append(errs, err)
}
case <-ctx.Done():
return trace.ConnectionProblem(nil, "context is closing")
}
}
return trace.NewAggregate(errs...)
}
// monitorConnConfig is a monitorConn configuration.
type monitorConnConfig struct {
conn net.Conn
lockWatcher *services.LockWatcher
lockTargets []types.LockTarget
checker services.AccessChecker
identity tlsca.Identity
clock clockwork.Clock
serverID string
authClient *auth.Client
teleportUser string
emitter events.Emitter
log logrus.FieldLogger
ctx context.Context
}
// monitorConn wraps a client connection with TrackingReadConn and starts a
// connection monitor. It returns a tracking connection that is auto-terminated
// if disconnect_expired_cert or an idle timeout is configured, and the unmodified client connection otherwise.
func monitorConn(ctx context.Context, cfg monitorConnConfig) (net.Conn, error) {
authPref, err := cfg.authClient.GetAuthPreference(ctx)
if err != nil {
return nil, trace.Wrap(err)
}
netConfig, err := cfg.authClient.GetClusterNetworkingConfig(ctx)
if err != nil {
return nil, trace.Wrap(err)
}
certExpires := cfg.identity.Expires
var disconnectCertExpired time.Time
if !certExpires.IsZero() && cfg.checker.AdjustDisconnectExpiredCert(authPref.GetDisconnectExpiredCert()) {
disconnectCertExpired = certExpires
}
idleTimeout := cfg.checker.AdjustClientIdleTimeout(netConfig.GetClientIdleTimeout())
ctx, cancel := context.WithCancel(ctx)
tc, err := srv.NewTrackingReadConn(srv.TrackingReadConnConfig{
Conn: cfg.conn,
Clock: cfg.clock,
Context: ctx,
Cancel: cancel,
})
if err != nil {
return nil, trace.Wrap(err)
}
// Start monitoring client connection. When client connection is closed the monitor goroutine exits.
err = srv.StartMonitor(srv.MonitorConfig{
LockWatcher: cfg.lockWatcher,
LockTargets: cfg.lockTargets,
DisconnectExpiredCert: disconnectCertExpired,
ClientIdleTimeout: idleTimeout,
Conn: cfg.conn,
Tracker: tc,
Context: cfg.ctx,
Clock: cfg.clock,
ServerID: cfg.serverID,
TeleportUser: cfg.teleportUser,
Emitter: cfg.emitter,
Entry: cfg.log,
})
if err != nil {
tc.Close()
return nil, trace.Wrap(err)
}
return tc, nil
}
// Authorize authorizes the provided client TLS connection.
func (s *ProxyServer) Authorize(ctx context.Context, tlsConn *tls.Conn, params common.ConnectParams) (*common.ProxyContext, error) {
ctx, err := s.middleware.WrapContextWithUser(ctx, tlsConn)
if err != nil {
return nil, trace.Wrap(err)
}
authContext, err := s.cfg.Authorizer.Authorize(ctx)
if err != nil {
return nil, trace.Wrap(err)
}
identity := authContext.Identity.GetIdentity()
if params.User != "" {
identity.RouteToDatabase.Username = params.User
}
if params.Database != "" {
identity.RouteToDatabase.Database = params.Database
}
if params.ClientIP != "" {
identity.ClientIP = params.ClientIP
}
cluster, servers, err := s.getDatabaseServers(ctx, identity)
if err != nil {
return nil, trace.Wrap(err)
}
return &common.ProxyContext{
Identity: identity,
Cluster: cluster,
Servers: servers,
AuthContext: authContext,
}, nil
}
// getDatabaseServers finds database servers that proxy the database instance
// encoded in the provided identity.
func (s *ProxyServer) getDatabaseServers(ctx context.Context, identity tlsca.Identity) (reversetunnel.RemoteSite, []types.DatabaseServer, error)
|
{
cluster, err := s.cfg.Tunnel.GetSite(identity.RouteToCluster)
if err != nil {
return nil, nil, trace.Wrap(err)
}
accessPoint, err := cluster.CachingAccessPoint()
if err != nil {
return nil, nil, trace.Wrap(err)
}
servers, err := accessPoint.GetDatabaseServers(ctx, apidefaults.Namespace)
if err != nil {
return nil, nil, trace.Wrap(err)
}
s.log.Debugf("Available databases in %v: %s.", cluster.GetName(), servers)
// Find out which database servers proxy the database a user is
// connecting to using routing information from identity.
var result []types.DatabaseServer
for _, server := range servers {
if server.GetDatabase().GetName() == identity.RouteToDatabase.ServiceName {
result = append(result, server)
|
identifier_body
|
|
mod.rs
|
use std::char::{self, DecodeUtf16};
use std::fmt::{self, Write};
use std::hash::{Hash, Hasher};
use std::iter::Cloned;
use std::mem;
use std::ops::Deref;
use std::slice;
#[macro_use]
#[allow(improper_ctypes, non_camel_case_types, missing_docs)]
pub mod atom_macro {
include!(concat!(env!("OUT_DIR"), "/gecko/atom_macro.rs"));
}
#[macro_use]
pub mod namespace;
pub use self::namespace::{Namespace, WeakNamespace};
macro_rules! local_name {
($s: tt) => { atom!($s) }
}
/// A strong reference to a Gecko atom.
#[derive(Eq, PartialEq)]
pub struct Atom(*mut WeakAtom);
/// An atom *without* a strong reference.
///
/// Only usable as `&'a WeakAtom`,
/// where `'a` is the lifetime of something that holds a strong reference to that atom.
pub struct WeakAtom(nsAtom);
/// A BorrowedAtom for Gecko is just a weak reference to an `nsAtom` whose
/// refcount hasn't been bumped.
pub type BorrowedAtom<'a> = &'a WeakAtom;
impl Deref for Atom {
type Target = WeakAtom;
#[inline]
fn deref(&self) -> &WeakAtom {
unsafe {
&*self.0
}
}
}
impl PrecomputedHash for Atom {
#[inline]
fn precomputed_hash(&self) -> u32 {
self.get_hash()
}
}
impl Borrow<WeakAtom> for Atom {
#[inline]
fn borrow(&self) -> &WeakAtom {
self
}
}
impl Eq for WeakAtom {}
impl PartialEq for WeakAtom {
#[inline]
fn eq(&self, other: &Self) -> bool {
let weak: *const WeakAtom = self;
let other: *const WeakAtom = other;
weak == other
}
}
unsafe impl Send for Atom {}
unsafe impl Sync for Atom {}
unsafe impl Sync for WeakAtom {}
impl WeakAtom {
/// Construct a `WeakAtom` from a raw `nsAtom`.
#[inline]
pub unsafe fn new<'a>(atom: *const nsAtom) -> &'a mut Self {
&mut *(atom as *mut WeakAtom)
}
/// Clone this atom, bumping the refcount if the atom is not static.
#[inline]
pub fn clone(&self) -> Atom {
Atom::from(self.as_ptr())
}
/// Get the atom hash.
#[inline]
pub fn get_hash(&self) -> u32 {
self.0.mHash
}
/// Get the atom as a slice of utf-16 chars.
#[inline]
pub fn as_slice(&self) -> &[u16] {
unsafe {
slice::from_raw_parts((*self.as_ptr()).mString, self.len() as usize)
}
}
// NOTE: don't expose this, since it's slow and easy to misuse.
fn chars(&self) -> DecodeUtf16<Cloned<slice::Iter<u16>>> {
char::decode_utf16(self.as_slice().iter().cloned())
}
/// Execute `cb` with the string that this atom represents.
///
/// Find alternatives to this function when possible, please, since it's
/// pretty slow.
pub fn with_str<F, Output>(&self, cb: F) -> Output
where F: FnOnce(&str) -> Output
{
// FIXME(bholley): We should measure whether it makes more sense to
// cache the UTF-8 version in the Gecko atom table somehow.
let owned = self.to_string();
cb(&owned)
}
/// Convert this Atom into a string, decoding the UTF-16 bytes.
///
/// Find alternatives to this function when possible, please, since it's
/// pretty slow.
#[inline]
pub fn to_string(&self) -> String {
String::from_utf16(self.as_slice()).unwrap()
}
/// Returns whether this atom is static.
#[inline]
pub fn is_static(&self) -> bool {
unsafe {
(*self.as_ptr()).mKind() == nsAtom_AtomKind::StaticAtom as u32
}
}
/// Returns the length of the atom string.
#[inline]
pub fn len(&self) -> u32 {
unsafe {
(*self.as_ptr()).mLength()
}
}
/// Returns whether this atom is the empty string.
#[inline]
pub fn is_empty(&self) -> bool {
self.len() == 0
}
/// Returns the atom as a mutable pointer.
#[inline]
pub fn as_ptr(&self) -> *mut nsAtom {
let const_ptr: *const nsAtom = &self.0;
const_ptr as *mut nsAtom
}
/// Convert this atom to ASCII lower-case
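/// (editor's note: short atoms are lowered in a 64-unit stack buffer and
/// longer ones fall back to a heap-allocated Vec, as the body below shows).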
pub fn to_ascii_lowercase(&self) -> Atom {
let slice = self.as_slice();
match slice.iter().position(|&char16| (b'A' as u16) <= char16 && char16 <= (b'Z' as u16)) {
None => self.clone(),
Some(i) => {
let mut buffer: [u16; 64] = unsafe { mem::uninitialized() };
let mut vec;
let mutable_slice = if let Some(buffer_prefix) = buffer.get_mut(..slice.len()) {
buffer_prefix.copy_from_slice(slice);
buffer_prefix
} else {
vec = slice.to_vec();
&mut vec
};
for char16 in &mut mutable_slice[i..] {
if *char16 <= 0x7F {
*char16 = (*char16 as u8).to_ascii_lowercase() as u16
}
}
Atom::from(&*mutable_slice)
}
}
}
/// Return whether two atoms are ASCII-case-insensitive matches
pub fn eq_ignore_ascii_case(&self, other: &Self) -> bool {
if self == other {
return true;
}
let a = self.as_slice();
let b = other.as_slice();
a.len() == b.len() && a.iter().zip(b).all(|(&a16, &b16)| {
if a16 <= 0x7F && b16 <= 0x7F {
(a16 as u8).eq_ignore_ascii_case(&(b16 as u8))
} else {
a16 == b16
}
})
}
/// Return whether this atom is an ASCII-case-insensitive match for the given string
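/// (editor's note: UTF-16 decode errors from unpaired surrogates compare
/// unequal to every `Ok(char)` on the `&str` side, so malformed atoms never match).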
pub fn eq_str_ignore_ascii_case(&self, other: &str) -> bool {
self.chars().map(|r| r.map(|c: char| c.to_ascii_lowercase()))
.eq(other.chars().map(|c: char| Ok(c.to_ascii_lowercase())))
}
}
impl fmt::Debug for WeakAtom {
fn fmt(&self, w: &mut fmt::Formatter) -> fmt::Result {
write!(w, "Gecko WeakAtom({:p}, {})", self, self)
}
}
impl fmt::Display for WeakAtom {
fn fmt(&self, w: &mut fmt::Formatter) -> fmt::Result {
for c in self.chars() {
w.write_char(c.unwrap_or(char::REPLACEMENT_CHARACTER))?
}
Ok(())
}
}
impl Atom {
/// Execute a callback with the atom represented by `ptr`.
pub unsafe fn with<F, R>(ptr: *mut nsAtom, callback: F) -> R where F: FnOnce(&Atom) -> R {
let atom = Atom(WeakAtom::new(ptr));
let ret = callback(&atom);
mem::forget(atom);
ret
}
/// Creates an atom from a static atom pointer without checking in release
/// builds.
///
/// Right now it's only used by the atom macro, and ideally it should stay
/// that way; now that we have sugar for is_static, creating atoms using
/// Atom::from should involve almost no overhead.
#[inline]
unsafe fn from_static(ptr: *mut nsAtom) -> Self {
let atom = Atom(ptr as *mut WeakAtom);
debug_assert!(atom.is_static(),
"Called from_static for a non-static atom!");
atom
}
/// Creates an atom from a dynamic atom pointer that has already had AddRef
/// called on it.
#[inline]
pub unsafe fn from_addrefed(ptr: *mut nsAtom) -> Self {
assert!(!ptr.is_null());
unsafe {
Atom(WeakAtom::new(ptr))
}
}
/// Convert this atom into an addrefed nsAtom pointer.
#[inline]
pub fn into_addrefed(self) -> *mut nsAtom {
let ptr = self.as_ptr();
mem::forget(self);
ptr
}
}
impl Hash for Atom {
fn hash<H>(&self, state: &mut H) where H: Hasher {
state.write_u32(self.get_hash());
}
}
impl Hash for WeakAtom {
fn hash<H>(&self, state: &mut H) where H: Hasher {
state.write_u32(self.get_hash());
}
}
impl Clone for Atom {
#[inline(always
|
use std::borrow::{Cow, Borrow};
|
random_line_split
|
|
mod.rs
|
fn eq(&self, other: &Self) -> bool {
let weak: *const WeakAtom = self;
let other: *const WeakAtom = other;
weak == other
}
}
unsafe impl Send for Atom {}
unsafe impl Sync for Atom {}
unsafe impl Sync for WeakAtom {}
impl WeakAtom {
/// Construct a `WeakAtom` from a raw `nsAtom`.
#[inline]
pub unsafe fn new<'a>(atom: *const nsAtom) -> &'a mut Self {
&mut *(atom as *mut WeakAtom)
}
/// Clone this atom, bumping the refcount if the atom is not static.
#[inline]
pub fn clone(&self) -> Atom {
Atom::from(self.as_ptr())
}
/// Get the atom hash.
#[inline]
pub fn get_hash(&self) -> u32 {
self.0.mHash
}
/// Get the atom as a slice of utf-16 chars.
#[inline]
pub fn as_slice(&self) -> &[u16] {
unsafe {
slice::from_raw_parts((*self.as_ptr()).mString, self.len() as usize)
}
}
// NOTE: don't expose this, since it's slow and easy to misuse.
fn chars(&self) -> DecodeUtf16<Cloned<slice::Iter<u16>>> {
char::decode_utf16(self.as_slice().iter().cloned())
}
/// Execute `cb` with the string that this atom represents.
///
/// Find alternatives to this function when possible, please, since it's
/// pretty slow.
pub fn with_str<F, Output>(&self, cb: F) -> Output
where F: FnOnce(&str) -> Output
{
// FIXME(bholley): We should measure whether it makes more sense to
// cache the UTF-8 version in the Gecko atom table somehow.
let owned = self.to_string();
cb(&owned)
}
/// Convert this Atom into a string, decoding the UTF-16 bytes.
///
/// Find alternatives to this function when possible, please, since it's
/// pretty slow.
#[inline]
pub fn to_string(&self) -> String {
String::from_utf16(self.as_slice()).unwrap()
}
/// Returns whether this atom is static.
#[inline]
pub fn is_static(&self) -> bool {
unsafe {
(*self.as_ptr()).mKind() == nsAtom_AtomKind::StaticAtom as u32
}
}
/// Returns the length of the atom string.
#[inline]
pub fn len(&self) -> u32 {
unsafe {
(*self.as_ptr()).mLength()
}
}
/// Returns whether this atom is the empty string.
#[inline]
pub fn is_empty(&self) -> bool {
self.len() == 0
}
/// Returns the atom as a mutable pointer.
#[inline]
pub fn as_ptr(&self) -> *mut nsAtom {
let const_ptr: *const nsAtom = &self.0;
const_ptr as *mut nsAtom
}
/// Convert this atom to ASCII lower-case
pub fn to_ascii_lowercase(&self) -> Atom {
let slice = self.as_slice();
match slice.iter().position(|&char16| (b'A' as u16) <= char16 && char16 <= (b'Z' as u16)) {
None => self.clone(),
Some(i) => {
let mut buffer: [u16; 64] = unsafe { mem::uninitialized() };
let mut vec;
let mutable_slice = if let Some(buffer_prefix) = buffer.get_mut(..slice.len()) {
buffer_prefix.copy_from_slice(slice);
buffer_prefix
} else {
vec = slice.to_vec();
&mut vec
};
for char16 in &mut mutable_slice[i..] {
if *char16 <= 0x7F {
*char16 = (*char16 as u8).to_ascii_lowercase() as u16
}
}
Atom::from(&*mutable_slice)
}
}
}
/// Return whether two atoms are ASCII-case-insensitive matches
pub fn eq_ignore_ascii_case(&self, other: &Self) -> bool {
if self == other {
return true;
}
let a = self.as_slice();
let b = other.as_slice();
a.len() == b.len() && a.iter().zip(b).all(|(&a16, &b16)| {
if a16 <= 0x7F && b16 <= 0x7F {
(a16 as u8).eq_ignore_ascii_case(&(b16 as u8))
} else {
a16 == b16
}
})
}
/// Return whether this atom is an ASCII-case-insensitive match for the given string
pub fn eq_str_ignore_ascii_case(&self, other: &str) -> bool {
self.chars().map(|r| r.map(|c: char| c.to_ascii_lowercase()))
.eq(other.chars().map(|c: char| Ok(c.to_ascii_lowercase())))
}
}
impl fmt::Debug for WeakAtom {
fn fmt(&self, w: &mut fmt::Formatter) -> fmt::Result {
write!(w, "Gecko WeakAtom({:p}, {})", self, self)
}
}
impl fmt::Display for WeakAtom {
fn fmt(&self, w: &mut fmt::Formatter) -> fmt::Result {
for c in self.chars() {
w.write_char(c.unwrap_or(char::REPLACEMENT_CHARACTER))?
}
Ok(())
}
}
impl Atom {
/// Execute a callback with the atom represented by `ptr`.
pub unsafe fn with<F, R>(ptr: *mut nsAtom, callback: F) -> R where F: FnOnce(&Atom) -> R {
let atom = Atom(WeakAtom::new(ptr));
let ret = callback(&atom);
mem::forget(atom);
ret
}
/// Creates an atom from a static atom pointer without checking in release
/// builds.
///
/// Right now it's only used by the atom macro, and ideally it should stay
/// that way; now that we have sugar for is_static, creating atoms using
/// Atom::from should involve almost no overhead.
#[inline]
unsafe fn from_static(ptr: *mut nsAtom) -> Self {
let atom = Atom(ptr as *mut WeakAtom);
debug_assert!(atom.is_static(),
"Called from_static for a non-static atom!");
atom
}
/// Creates an atom from a dynamic atom pointer that has already had AddRef
/// called on it.
#[inline]
pub unsafe fn from_addrefed(ptr: *mut nsAtom) -> Self {
assert!(!ptr.is_null());
unsafe {
Atom(WeakAtom::new(ptr))
}
}
/// Convert this atom into an addrefed nsAtom pointer.
#[inline]
pub fn into_addrefed(self) -> *mut nsAtom {
let ptr = self.as_ptr();
mem::forget(self);
ptr
}
}
impl Hash for Atom {
fn hash<H>(&self, state: &mut H) where H: Hasher {
state.write_u32(self.get_hash());
}
}
impl Hash for WeakAtom {
fn hash<H>(&self, state: &mut H) where H: Hasher {
state.write_u32(self.get_hash());
}
}
impl Clone for Atom {
#[inline(always)]
fn clone(&self) -> Atom {
Atom::from(self.as_ptr())
}
}
impl Drop for Atom {
#[inline]
fn drop(&mut self) {
if !self.is_static() {
unsafe {
Gecko_ReleaseAtom(self.as_ptr());
}
}
}
}
impl Default for Atom {
#[inline]
fn default() -> Self {
atom!("")
}
}
impl fmt::Debug for Atom {
fn fmt(&self, w: &mut fmt::Formatter) -> fmt::Result {
write!(w, "Gecko Atom({:p}, {})", self.0, self)
}
}
impl fmt::Display for Atom {
fn fmt(&self, w: &mut fmt::Formatter) -> fmt::Result {
unsafe {
(&*self.0).fmt(w)
}
}
}
impl<'a> From<&'a str> for Atom {
#[inline]
fn from(string: &str) -> Atom {
debug_assert!(string.len() <= u32::max_value() as usize);
unsafe {
Atom(WeakAtom::new(
Gecko_Atomize(string.as_ptr() as *const _, string.len() as u32)
))
}
}
}
impl<'a> From<&'a [u16]> for Atom {
#[inline]
fn from(slice: &[u16]) -> Atom {
Atom::from(&*nsStr::from(slice))
}
}
impl<'a> From<&'a nsAString> for Atom {
#[inline]
fn from(string: &nsAString) -> Atom {
unsafe {
Atom(WeakAtom::new(
Gecko_Atomize16(string)
))
}
}
}
impl<'a> From<Cow<'a, str>> for Atom {
#[inline]
fn
|
from
|
identifier_name
|
|
mod.rs
|
use std::hash::{Hash, Hasher};
use std::iter::Cloned;
use std::mem;
use std::ops::Deref;
use std::slice;
#[macro_use]
#[allow(improper_ctypes, non_camel_case_types, missing_docs)]
pub mod atom_macro {
include!(concat!(env!("OUT_DIR"), "/gecko/atom_macro.rs"));
}
#[macro_use]
pub mod namespace;
pub use self::namespace::{Namespace, WeakNamespace};
macro_rules! local_name {
($s: tt) => { atom!($s) }
}
/// A strong reference to a Gecko atom.
#[derive(Eq, PartialEq)]
pub struct Atom(*mut WeakAtom);
/// An atom *without* a strong reference.
///
/// Only usable as `&'a WeakAtom`,
/// where `'a` is the lifetime of something that holds a strong reference to that atom.
pub struct WeakAtom(nsAtom);
/// A BorrowedAtom for Gecko is just a weak reference to an `nsAtom` whose
/// refcount hasn't been bumped.
pub type BorrowedAtom<'a> = &'a WeakAtom;
impl Deref for Atom {
type Target = WeakAtom;
#[inline]
fn deref(&self) -> &WeakAtom {
unsafe {
&*self.0
}
}
}
impl PrecomputedHash for Atom {
#[inline]
fn precomputed_hash(&self) -> u32 {
self.get_hash()
}
}
impl Borrow<WeakAtom> for Atom {
#[inline]
fn borrow(&self) -> &WeakAtom {
self
}
}
impl Eq for WeakAtom {}
impl PartialEq for WeakAtom {
#[inline]
fn eq(&self, other: &Self) -> bool {
let weak: *const WeakAtom = self;
let other: *const WeakAtom = other;
weak == other
}
}
unsafe impl Send for Atom {}
unsafe impl Sync for Atom {}
unsafe impl Sync for WeakAtom {}
impl WeakAtom {
/// Construct a `WeakAtom` from a raw `nsAtom`.
#[inline]
pub unsafe fn new<'a>(atom: *const nsAtom) -> &'a mut Self {
&mut *(atom as *mut WeakAtom)
}
/// Clone this atom, bumping the refcount if the atom is not static.
#[inline]
pub fn clone(&self) -> Atom {
Atom::from(self.as_ptr())
}
/// Get the atom hash.
#[inline]
pub fn get_hash(&self) -> u32
|
/// Get the atom as a slice of utf-16 chars.
#[inline]
pub fn as_slice(&self) -> &[u16] {
unsafe {
slice::from_raw_parts((*self.as_ptr()).mString, self.len() as usize)
}
}
// NOTE: don't expose this, since it's slow and easy to misuse.
fn chars(&self) -> DecodeUtf16<Cloned<slice::Iter<u16>>> {
char::decode_utf16(self.as_slice().iter().cloned())
}
/// Execute `cb` with the string that this atom represents.
///
/// Find alternatives to this function when possible, please, since it's
/// pretty slow.
pub fn with_str<F, Output>(&self, cb: F) -> Output
where F: FnOnce(&str) -> Output
{
// FIXME(bholley): We should measure whether it makes more sense to
// cache the UTF-8 version in the Gecko atom table somehow.
let owned = self.to_string();
cb(&owned)
}
/// Convert this Atom into a string, decoding the UTF-16 bytes.
///
/// Find alternatives to this function when possible, please, since it's
/// pretty slow.
#[inline]
pub fn to_string(&self) -> String {
String::from_utf16(self.as_slice()).unwrap()
}
/// Returns whether this atom is static.
#[inline]
pub fn is_static(&self) -> bool {
unsafe {
(*self.as_ptr()).mKind() == nsAtom_AtomKind::StaticAtom as u32
}
}
/// Returns the length of the atom string.
#[inline]
pub fn len(&self) -> u32 {
unsafe {
(*self.as_ptr()).mLength()
}
}
/// Returns whether this atom is the empty string.
#[inline]
pub fn is_empty(&self) -> bool {
self.len() == 0
}
/// Returns the atom as a mutable pointer.
#[inline]
pub fn as_ptr(&self) -> *mut nsAtom {
let const_ptr: *const nsAtom = &self.0;
const_ptr as *mut nsAtom
}
/// Convert this atom to ASCII lower-case
pub fn to_ascii_lowercase(&self) -> Atom {
let slice = self.as_slice();
match slice.iter().position(|&char16| (b'A' as u16) <= char16 && char16 <= (b'Z' as u16)) {
None => self.clone(),
Some(i) => {
let mut buffer: [u16; 64] = unsafe { mem::uninitialized() };
let mut vec;
let mutable_slice = if let Some(buffer_prefix) = buffer.get_mut(..slice.len()) {
buffer_prefix.copy_from_slice(slice);
buffer_prefix
} else {
vec = slice.to_vec();
&mut vec
};
for char16 in &mut mutable_slice[i..] {
if *char16 <= 0x7F {
*char16 = (*char16 as u8).to_ascii_lowercase() as u16
}
}
Atom::from(&*mutable_slice)
}
}
}
/// Return whether two atoms are ASCII-case-insensitive matches
pub fn eq_ignore_ascii_case(&self, other: &Self) -> bool {
if self == other {
return true;
}
let a = self.as_slice();
let b = other.as_slice();
a.len() == b.len() && a.iter().zip(b).all(|(&a16, &b16)| {
if a16 <= 0x7F && b16 <= 0x7F {
(a16 as u8).eq_ignore_ascii_case(&(b16 as u8))
} else {
a16 == b16
}
})
}
/// Return whether this atom is an ASCII-case-insensitive match for the given string
pub fn eq_str_ignore_ascii_case(&self, other: &str) -> bool {
self.chars().map(|r| r.map(|c: char| c.to_ascii_lowercase()))
.eq(other.chars().map(|c: char| Ok(c.to_ascii_lowercase())))
}
}
impl fmt::Debug for WeakAtom {
fn fmt(&self, w: &mut fmt::Formatter) -> fmt::Result {
write!(w, "Gecko WeakAtom({:p}, {})", self, self)
}
}
impl fmt::Display for WeakAtom {
fn fmt(&self, w: &mut fmt::Formatter) -> fmt::Result {
for c in self.chars() {
w.write_char(c.unwrap_or(char::REPLACEMENT_CHARACTER))?
}
Ok(())
}
}
impl Atom {
/// Execute a callback with the atom represented by `ptr`.
pub unsafe fn with<F, R>(ptr: *mut nsAtom, callback: F) -> R where F: FnOnce(&Atom) -> R {
let atom = Atom(WeakAtom::new(ptr));
let ret = callback(&atom);
mem::forget(atom);
ret
}
/// Creates an atom from a static atom pointer without checking in release
/// builds.
///
/// Right now it's only used by the atom macro, and ideally it should stay
/// that way; now that we have sugar for is_static, creating atoms using
/// Atom::from should involve almost no overhead.
#[inline]
unsafe fn from_static(ptr: *mut nsAtom) -> Self {
let atom = Atom(ptr as *mut WeakAtom);
debug_assert!(atom.is_static(),
"Called from_static for a non-static atom!");
atom
}
/// Creates an atom from a dynamic atom pointer that has already had AddRef
/// called on it.
#[inline]
pub unsafe fn from_addrefed(ptr: *mut nsAtom) -> Self {
assert!(!ptr.is_null());
unsafe {
Atom(WeakAtom::new(ptr))
}
}
/// Convert this atom into an addrefed nsAtom pointer.
#[inline]
pub fn into_addrefed(self) -> *mut nsAtom {
let ptr = self.as_ptr();
mem::forget(self);
ptr
}
}
impl Hash for Atom {
fn hash<H>(&self, state: &mut H) where H: Hasher {
state.write_u32(self.get_hash());
}
}
impl Hash for WeakAtom {
fn hash<H>(&self, state: &mut H) where H: Hasher {
state.write_u32(self.get_hash());
}
}
impl Clone for Atom {
#[inline(always)]
fn clone(&self) -> Atom {
Atom::from(self.as_ptr())
}
}
impl Drop for Atom {
#[inline]
fn drop
|
{
self.0.mHash
}
|
identifier_body
|
di_tools.py
|
self):
d = {}
for cn in self.cols:
ci = self.cols[cn]
d[cn] = self.fields[ci]
return str(d)
def __repr__(self):
return self.__str__()
class _DelimitedFileReader:
def __init__(self, strip):
self.coldict = None
self.handle = None
self.line_reader = None
self.lineno = 0
self.strip = strip
self.original_line = None
def __iter__(self):
return self
def next(self):
done = 0
while done == 0:
self.original_line = self.line_reader(self.handle)
if self.original_line == None:
raise StopIteration
l = self.original_line.rstrip("\r\n")
if len(l) == 0:
raise StopIteration
self.lineno = self.lineno + 1
self.fields = l.split("\t")
if self.strip:
self.fields = [f.strip() for f in self.fields]
if len(self.fields) == len(self.coldict):
return ColAccessor(self.coldict, self.fields)
# Apparently for NewBalance we need to handle filenames flipping case at any
# drop. The segments lookup file, segment_lu.txt, may suddenly be called
# Segment_lu.txt. Call init_filename_resolver() with the path where the feed
# files are, then pass every filename through resolve_filename() and it'll
# pick the one with the highest timestamp.
resolve_filename_map = None # after init_filename_resolver(), this will
# contain a mapping from lower-cased filename to a tuple with the
# actual filename and timestamp
class DictedData_cb(_DelimitedFileReader):
"""This is a generalized DictedData that takes a callback function to
get a line from some handle. This is useful if what you want to
iterate over is not a file. update.py uses this to iterate over rows
of tab-delimited text that is stored in a table."""
def __init__(self, dictfile, data_handle, line_reader, strip = 0):
_DelimitedFileReader.__init__(self, strip)
if resolve_filename_map != None:
if dictfile[0] != '/':
dictfile = resolve_filename_map[dictfile.lower()][0]
self.coldict = parse_dict(open(dictfile).read())
self.handle = data_handle
self.line_reader = line_reader
self.strip = strip
class DictedData(DictedData_cb):
'Easily iterate over a tab-delimited text file with a dictionary.'
def __init__(self, dictfile, datafile, strip = 0):
if resolve_filename_map != None:
if datafile[0] != '/':
datafile = resolve_filename_map[datafile.lower()][0]
def line_reader(handle):
line = handle.readline()
return line
DictedData_cb.__init__(self, dictfile, open(datafile), line_reader, strip)
class FileWithHeaders(_DelimitedFileReader):
'Easily iterate over a tab-delimited text file with a header line.'
def __init__(self, datafile, strip = 0):
_DelimitedFileReader.__init__(self, strip)
self.handle = open(datafile)
self.coldict = {}
i = 0
for h in self.handle.readline().strip().split("\t"):
self.coldict[h.lower()] = i
i = i + 1
def line_reader(handle):
return handle.readline()
self.line_reader = line_reader
def join(iterables, columns):
"""Given a list of DictedData (or anything else iterable that returns
dictionaries) and a list of columns to join on, produce an iterable
that gives the rows from the first iterable joined on those columns
with each of the other iterables."""
hashes = []
for iterable in iterables[1:]:
hash = {}
for r in iterable:
key = [ r[x] for x in columns ]
value_columns = r.keys()[:]
for x in columns:
value_columns.remove(x)
values = dict([ (x, r[x]) for x in value_columns ])
hash[HashableList(key)] = values
hashes.append(hash)
for r in iterables[0]:
r = dict(r)
key = HashableList([ r[x] for x in columns ])
oops = 0
for h in hashes:
if key in h:
for k, v in h[key].items():
r[k] = v
else:
oops = 1
if oops:
continue
yield r
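# A minimal usage sketch for join() (hypothetical rows, not from any real
# feed file): plain dicts work as well as DictedData.
#
#   left  = [{'id': '1', 'name': 'alpha'}, {'id': '2', 'name': 'beta'}]
#   right = [{'id': '1', 'size': 'L'}]
#   for row in join([left, right], ['id']):
#       print row   # -> {'id': '1', 'name': 'alpha', 'size': 'L'}
#
# Rows from the first iterable whose key is missing in any later iterable
# are skipped, which is why '2'/'beta' never comes out.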
def init_filename_resolver(path):
global resolve_filename_map
resolve_filename_map = {}
for fname in dircache.listdir(path):
status = os.stat(path + "/" + fname)
if S_ISREG(status[ST_MODE]):
mtime = status[ST_MTIME]
if resolve_filename_map.has_key(fname.lower()):
entry = resolve_filename_map[fname.lower()]
if entry[1] < mtime:
resolve_filename_map[fname.lower()] = (fname, mtime)
else:
resolve_filename_map[fname.lower()] = (fname, mtime)
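# The comments above refer to a resolve_filename() helper that is not part of
# this excerpt; a minimal sketch of what it presumably looks like, given the
# map built by init_filename_resolver() (an assumption, not the original code):
#
#   def resolve_filename(fname):
#       if resolve_filename_map is None:
#           return fname
#       return resolve_filename_map.get(fname.lower(), (fname, 0))[0]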
def set_up_html_logger(filename_html, filename_css, level=logging.INFO):
"""Set up the standard Python logging infrastructure to log in a basic
HTML format to the given file, and have that file point to some CSS."""
logfile = open(filename_html, "w")
logfile.write('<html><head><link rel="stylesheet" type="text/css" href="%s"/><title>Update log</title></head><body>' % filename_css)
levelmap = {
logging.INFO: '<a class="info">INFO</a>',
logging.DEBUG: '<a class="debug">DEBUG</a>',
logging.ERROR: '<a class="error">ERROR</a>',
logging.WARNING: '<a class="warning">WARNING</a>',
}
class HTMLFormatter(logging.Formatter):
def format(self, record):
t = time.strftime("%Y-%m-%d %H:%M:%S")
msg = record.msg.replace('\t', ' ')
return "<br><a class='time'>%s</a> %s <a class='msg'>%s</a>" % (t, levelmap[record.levelno], msg)
loghandler = logging.StreamHandler(logfile)
loghandler.setFormatter(HTMLFormatter())
logging.root.setLevel(level)
logging.root.addHandler(loghandler)
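# Typical setup (hypothetical file names): call once at startup, then use the
# stock logging calls as usual.
#
#   set_up_html_logger("update_log.html", "update_log.css")
#   logging.info("feed import started")   # rendered as one HTML log row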
class ImmutableDict(dict):
'A hashable wrapper around the standard dict.'
def __init__(self, *args, **kwds):
dict.__init__(self, *args, **kwds)
def __setitem__(self, key, value):
raise NotImplementedError, "don't do that"
def __delitem__(self, key):
raise NotImplementedError, "don't do that"
def clear(self):
raise NotImplementedError, "don't do that"
def setdefault(self, k, default=None):
raise NotImplementedError, "don't do that"
def popitem(self):
raise NotImplementedError, "don't do that"
def update(self, other):
raise NotImplementedError, "don't do that"
def __hash__(self):
return hash(tuple(self.iteritems()))
class HashableList(list):
'A hashable wrapper around the standard list.'
def __init__(self, l):
if l:
self.list = tuple(l)
else:
self.list = []
def __str__(self):
return str(self.list)
def __hash__(self):
return hash(self.list)
def __len__(self):
return len(self.list)
def __getitem__(self, i):
return self.list[i]
def __iter__(self):
return iter(self.list)
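# Both wrappers exist so composite values can serve as dictionary keys,
# e.g. (illustrative values):
#
#   seen = {}
#   seen[HashableList(['a', 'b'])] = 1
#   assert HashableList(['a', 'b']) in seen
#   cache = {ImmutableDict({'x': 1}): 'result'}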
def di_pickle(o):
"""Given an object, which can be an integer, string, float, list,
tuple, set, frozenset, dictionary, ImmutableDict or any combination
thereof, return a string such that di_unpickle() will reconstruct
the object. The point, of course, being that di_unpickle() need
not be in this process on this computer, or even be di_unpickle()
itself but some function in another language entirely."""
if o == None:
return "None"
t = type(o)
if t is types.IntType or t is types.LongType:
return "i%d" % o
if t is types.BooleanType:
if o:
return "i1"
else:
return "i0"
if t is types.FloatType:
return "d%f" % o
if t is types.StringType:
return "s'%s'" % o.replace("'", "\\'")
if t is types.ListType:
return "L(%s)" % "".join([ di_pickle(x) for x in o])
if isinstance(o, HashableList):
return "L(%s)" % "".join([ di_pickle(x) for x in o.list])
if t is types.TupleType:
return "T(%s)" % "".join([ di_pickle(x) for x in o])
if isinstance(o, (set, frozenset)):
return "S(%s)" % "".join([ di_pickle(x) for x in o])
if t is types.DictType or isinstance(o, ImmutableDict):
return "D(%s)" % "".join([ "%s%s" % (di_pickle(k), di_pickle(v)) for k, v in o.items()])
raise "Unsupported type in di_pickle()", str(t)
def
|
di_unpickle
|
identifier_name
|
|
di_tools.py
|
for l in lines:
l = l.strip()
if l.startswith("Name="):
l = l[6:-1]
cols[l.lower()] = i
i = i + 1
return cols
class ColAccessor:
"""This is a helper class that DictData will return an instance of
when it's being iterated over. Its job is to return the relevant value
when an unknown fieldname (which should be a column name) is read."""
def __init__(self, cols, fields):
self.cols = cols
self.fields = fields
def __getitem__(self, i):
return self.fields[self.cols[i.lower()]]
def __getattr__(self, name):
return self.fields[self.cols[name.lower()]]
def keys(self):
return self.cols.keys()
def __str__(self):
d = {}
for cn in self.cols:
ci = self.cols[cn]
d[cn] = self.fields[ci]
return str(d)
def __repr__(self):
return self.__str__()
class _DelimitedFileReader:
def __init__(self, strip):
self.coldict = None
self.handle = None
self.line_reader = None
self.lineno = 0
self.strip = strip
self.original_line = None
def __iter__(self):
return self
def next(self):
done = 0
while done == 0:
self.original_line = self.line_reader(self.handle)
if self.original_line == None:
raise StopIteration
l = self.original_line.rstrip("\r\n")
if len(l) == 0:
raise StopIteration
self.lineno = self.lineno + 1
self.fields = l.split("\t")
if self.strip:
self.fields = [f.strip() for f in self.fields]
if len(self.fields) == len(self.coldict):
return ColAccessor(self.coldict, self.fields)
# Apparently for NewBalance we need to handle filenames flipping case at any
# drop. The segments lookup file, segment_lu.txt, may suddenly be called
# Segment_lu.txt. Call init_filename_resolver() with the path where the feed
# files are, then pass every filename through resolve_filename() and it'll
# pick the one with the highest timestamp.
resolve_filename_map = None # after init_filename_resolver(), this will
# contain a mapping from lower-cased filename to a tuple with the
# actual filename and timestamp
class DictedData_cb(_DelimitedFileReader):
"""This is a generalized DictedData that takes a callback function to
get a line from some handle. This is useful if what you want to
iterate over is not a file. update.py uses this to iterate over rows
of tab-delimited text that is stored in a table."""
def __init__(self, dictfile, data_handle, line_reader, strip = 0):
_DelimitedFileReader.__init__(self, strip)
if resolve_filename_map != None:
if dictfile[0] != '/':
dictfile = resolve_filename_map[dictfile.lower()][0]
self.coldict = parse_dict(open(dictfile).read())
self.handle = data_handle
self.line_reader = line_reader
self.strip = strip
class DictedData(DictedData_cb):
'Easily iterate over a tab-delimited text file with a dictionary.'
def __init__(self, dictfile, datafile, strip = 0):
if resolve_filename_map != None:
if datafile[0] != '/':
datafile = resolve_filename_map[datafile.lower()][0]
def line_reader(handle):
line = handle.readline()
return line
DictedData_cb.__init__(self, dictfile, open(datafile), line_reader, strip)
class FileWithHeaders(_DelimitedFileReader):
'Easily iterate over a tab-delimited text file with a header line.'
def __init__(self, datafile, strip = 0):
_DelimitedFileReader.__init__(self, strip)
self.handle = open(datafile)
self.coldict = {}
i = 0
for h in self.handle.readline().strip().split("\t"):
self.coldict[h.lower()] = i
i = i + 1
def line_reader(handle):
return handle.readline()
self.line_reader = line_reader
def join(iterables, columns):
"""Given a list of DictedData (or anything else iterable that returns
dictionaries) and a list of columns to join on, produce an iterable
that gives the rows from the first iterable joined on those columns
with each of the other iterables."""
hashes = []
for iterable in iterables[1:]:
hash = {}
|
key = [ r[x] for x in columns ]
value_columns = r.keys()[:]
for x in columns:
value_columns.remove(x)
values = dict([ (x, r[x]) for x in value_columns ])
hash[HashableList(key)] = values
hashes.append(hash)
for r in iterables[0]:
r = dict(r)
key = HashableList([ r[x] for x in columns ])
oops = 0
for h in hashes:
if key in h:
for k, v in h[key].items():
r[k] = v
else:
oops = 1
if oops:
continue
yield r
def init_filename_resolver(path):
global resolve_filename_map
resolve_filename_map = {}
for fname in dircache.listdir(path):
status = os.stat(path + "/" + fname)
if S_ISREG(status[ST_MODE]):
mtime = status[ST_MTIME]
if resolve_filename_map.has_key(fname.lower()):
entry = resolve_filename_map[fname.lower()]
if entry[1] < mtime:
resolve_filename_map[fname.lower()] = (fname, mtime)
else:
resolve_filename_map[fname.lower()] = (fname, mtime)
def set_up_html_logger(filename_html, filename_css, level=logging.INFO):
"""Set up the standard Python logging infrastructure to log in a basic
HTML format to the given file, and have that file point to some CSS."""
logfile = open(filename_html, "w")
logfile.write('<html><head><link rel="stylesheet" type="text/css" href="%s"/><title>Update log</title></head><body>' % filename_css)
levelmap = {
logging.INFO: '<a class="info">INFO</a>',
logging.DEBUG: '<a class="debug">DEBUG</a>',
logging.ERROR: '<a class="error">ERROR</a>',
logging.WARNING: '<a class="warning">WARNING</a>',
}
class HTMLFormatter(logging.Formatter):
def format(self, record):
t = time.strftime("%Y-%m-%d %H:%M:%S")
msg = record.msg.replace('\t', ' ')
return "<br><a class='time'>%s</a> %s <a class='msg'>%s</a>" % (t, levelmap[record.levelno], msg)
loghandler = logging.StreamHandler(logfile)
loghandler.setFormatter(HTMLFormatter())
logging.root.setLevel(level)
logging.root.addHandler(loghandler)
class ImmutableDict(dict):
'A hashable wrapper around the standard dict.'
def __init__(self, *args, **kwds):
dict.__init__(self, *args, **kwds)
def __setitem__(self, key, value):
raise NotImplementedError, "don't do that"
def __delitem__(self, key):
raise NotImplementedError, "don't do that"
def clear(self):
raise NotImplementedError, "don't do that"
def setdefault(self, k, default=None):
raise NotImplementedError, "don't do that"
def popitem(self):
raise NotImplementedError, "don't do that"
def update(self, other):
raise NotImplementedError, "don't do that"
def __hash__(self):
return hash(tuple(self.iteritems()))
class HashableList(list):
'A hashable wrapper around the standard list.'
def __init__(self, l):
if l:
self.list = tuple(l)
else:
self.list = []
def __str__(self):
return str(self.list)
def __hash__(self):
return hash(self.list)
def __len__(self):
return len(self.list)
def __getitem__(self, i):
return self.list[i]
def __iter__(self):
return iter(self.list)
def di_pickle(o):
"""Given an object, which can be an integer, string, float, list,
tuple, set, frozenset, dictionary, ImmutableDict or any combination
thereof, return a string such that di_unpickle() will reconstruct
the object. The point, of course, being that di_unpickle() need
not be in this process on this computer, or even be di_unpickle()
itself but some function in another language entirely."""
if o == None:
return "None"
t = type(o)
if t is types.IntType or t is types.LongType:
return "i%d" % o
if t is types.BooleanType:
if o:
return "i1"
else:
return "i0"
if t is types.FloatType:
return "d%f" % o
if t is types.StringType:
return "s'%s'" % o.replace("'", "\\'")
if t is types
|
for r in iterable:
|
random_line_split
|
di_tools.py
|
for l in lines:
l = l.strip()
if l.startswith("Name="):
l = l[6:-1]
cols[l.lower()] = i
i = i + 1
return cols
class ColAccessor:
"""This is a helper class that DictData will return an instance of
when it's being iterated over. Its job is to return the relevant value
when an unknown fieldname (which should be a column name) is read."""
def __init__(self, cols, fields):
self.cols = cols
self.fields = fields
def __getitem__(self, i):
return self.fields[self.cols[i.lower()]]
def __getattr__(self, name):
return self.fields[self.cols[name.lower()]]
def keys(self):
return self.cols.keys()
def __str__(self):
d = {}
for cn in self.cols:
ci = self.cols[cn]
d[cn] = self.fields[ci]
return str(d)
def __repr__(self):
return self.__str__()
class _DelimitedFileReader:
def __init__(self, strip):
self.coldict = None
self.handle = None
self.line_reader = None
self.lineno = 0
self.strip = strip
self.original_line = None
def __iter__(self):
return self
def next(self):
done = 0
while done == 0:
self.original_line = self.line_reader(self.handle)
if self.original_line == None:
raise StopIteration
l = self.original_line.rstrip("\r\n")
if len(l) == 0:
raise StopIteration
self.lineno = self.lineno + 1
self.fields = l.split("\t")
if self.strip:
self.fields = [f.strip() for f in self.fields]
if len(self.fields) == len(self.coldict):
return ColAccessor(self.coldict, self.fields)
# Apparently for NewBalance we need to handle filenames flipping case at any
# drop. The segments lookup file, segment_lu.txt, may suddenly be called
# Segment_lu.txt. Call init_filename_resolver() with the path where the feed
# files are, then pass every filename through resolve_filename() and it'll
# pick the one with the highest timestamp.
resolve_filename_map = None # after init_filename_resolver(), this will
# contain a mapping from lower-cased filename to a tuple with the
# actual filename and timestamp
class DictedData_cb(_DelimitedFileReader):
"""This is a generalized DictedData that takes a callback function to
get a line from some handle. This is useful if what you want to
iterate over is not a file. update.py uses this to iterate over rows
of tab-delimited text that is stored in a table."""
def __init__(self, dictfile, data_handle, line_reader, strip = 0):
_DelimitedFileReader.__init__(self, strip)
if resolve_filename_map != None:
if dictfile[0] != '/':
dictfile = resolve_filename_map[dictfile.lower()][0]
self.coldict = parse_dict(open(dictfile).read())
self.handle = data_handle
self.line_reader = line_reader
self.strip = strip
class DictedData(DictedData_cb):
'Easily iterate over a tab-delimited text file with a dictionary.'
def __init__(self, dictfile, datafile, strip = 0):
if resolve_filename_map != None:
if datafile[0] != '/':
datafile = resolve_filename_map[datafile.lower()][0]
def line_reader(handle):
line = handle.readline()
return line
DictedData_cb.__init__(self, dictfile, open(datafile), line_reader, strip)
class FileWithHeaders(_DelimitedFileReader):
'Easily iterate over a tab-delimited text file with a header line.'
def __init__(self, datafile, strip = 0):
_DelimitedFileReader.__init__(self, strip)
self.handle = open(datafile)
self.coldict = {}
i = 0
for h in self.handle.readline().strip().split("\t"):
self.coldict[h.lower()] = i
i = i + 1
def line_reader(handle):
return handle.readline()
self.line_reader = line_reader
def join(iterables, columns):
"""Given a list of DictedData (or anything else iterable that returns
dictionaries) and a list of columns to join on, produce an iterable
that gives the rows from the first iterable joined on those columns
with each of the other iterables."""
hashes = []
for iterable in iterables[1:]:
hash = {}
for r in iterable:
key = [ r[x] for x in columns ]
value_columns = r.keys()[:]
for x in columns:
value_columns.remove(x)
values = dict([ (x, r[x]) for x in value_columns ])
hash[HashableList(key)] = values
hashes.append(hash)
for r in iterables[0]:
r = dict(r)
key = HashableList([ r[x] for x in columns ])
oops = 0
for h in hashes:
if key in h:
for k, v in h[key].items():
r[k] = v
else:
oops = 1
if oops:
continue
yield r
def init_filename_resolver(path):
global resolve_filename_map
resolve_filename_map = {}
for fname in dircache.listdir(path):
status = os.stat(path + "/" + fname)
if S_ISREG(status[ST_MODE]):
mtime = status[ST_MTIME]
if resolve_filename_map.has_key(fname.lower()):
entry = resolve_filename_map[fname.lower()]
if entry[1] < mtime:
resolve_filename_map[fname.lower()] = (fname, mtime)
else:
resolve_filename_map[fname.lower()] = (fname, mtime)
def set_up_html_logger(filename_html, filename_css, level=logging.INFO):
"""Set up the standard Python logging infrastructure to log in a basic
HTML format to the given file, and have that file point to some CSS."""
logfile = open(filename_html, "w")
logfile.write('<html><head><link rel="stylesheet" type="text/css" href="%s"/><title>Update log</title></head><body>' % filename_css)
levelmap = {
logging.INFO: '<a class="info">INFO</a>',
logging.DEBUG: '<a class="debug">DEBUG</a>',
logging.ERROR: '<a class="error">ERROR</a>',
logging.WARNING: '<a class="warning">WARNING</a>',
}
class HTMLFormatter(logging.Formatter):
def format(self, record):
t = time.strftime("%Y-%m-%d %H:%M:%S")
msg = record.msg.replace('\t', ' ')
return "<br><a class='time'>%s</a> %s <a class='msg'>%s</a>" % (t, levelmap[record.levelno], msg)
loghandler = logging.StreamHandler(logfile)
loghandler.setFormatter(HTMLFormatter())
logging.root.setLevel(level)
logging.root.addHandler(loghandler)
class ImmutableDict(dict):
'A hashable wrapper around the standard dict.'
def __init__(self, *args, **kwds):
dict.__init__(self, *args, **kwds)
def __setitem__(self, key, value):
raise NotImplementedError, "don't do that"
def __delitem__(self, key):
raise NotImplementedError, "don't do that"
def clear(self):
raise NotImplementedError, "don't do that"
def setdefault(self, k, default=None):
raise NotImplementedError, "don't do that"
def popitem(self):
raise NotImplementedError, "don't do that"
def update(self, other):
raise NotImplementedError, "don't do that"
def __hash__(self):
return hash(tuple(self.iteritems()))
class HashableList(list):
'A hashable wrapper around the standard list.'
def __init__(self, l):
if l:
self.list = tuple(l)
else:
self.list = []
def __str__(self):
return str(self.list)
def __hash__(self):
return hash(self.list)
def __len__(self):
return len(self.list)
def __getitem__(self, i):
|
def __iter__(self):
return iter(self.list)
def di_pickle(o):
"""Given an object, which can be an integer, string, float, list,
tuple, set, frozenset, dictionary, ImmutableDict or any combination
thereof, return a string such that di_unpickle() will reconstruct
the object. The point, of course, being that di_unpickle() need
not be in this process on this computer, or even be di_unpickle()
itself but some function in another language entirely."""
if o == None:
return "None"
t = type(o)
if t is types.IntType or t is types.LongType:
return "i%d" % o
if t is types.BooleanType:
if o:
return "i1"
else:
return "i0"
if t is types.FloatType:
return "d%f" % o
if t is types.StringType:
return "s'%s'" % o.replace("'", "\\'")
if t is
|
return self.list[i]
|
identifier_body
|
di_tools.py
|
resolve_filename() and it'll
# pick the one with the highest timestamp.
resolve_filename_map = None # after init_filename_resolver(), this will
# contain a mapping from lower-cased filename to a tuple with the
# actual filename and timestamp
class DictedData_cb(_DelimitedFileReader):
"""This is a generalized DictedData that takes a callback function to
get a line from some handle. This is useful if what you want to
iterate over is not a file. update.py uses this to iterate over rows
of tab-delimited text that is stored in a table."""
def __init__(self, dictfile, data_handle, line_reader, strip = 0):
_DelimitedFileReader.__init__(self, strip)
if resolve_filename_map != None:
if dictfile[0] != '/':
dictfile = resolve_filename_map[dictfile.lower()][0]
self.coldict = parse_dict(open(dictfile).read())
self.handle = data_handle
self.line_reader = line_reader
self.strip = strip
class DictedData(DictedData_cb):
'Easily iterate over a tab-delimited text file with a dictionary.'
def __init__(self, dictfile, datafile, strip = 0):
if resolve_filename_map != None:
if datafile[0] != '/':
datafile = resolve_filename_map[datafile.lower()][0]
def line_reader(handle):
line = handle.readline()
return line
DictedData_cb.__init__(self, dictfile, open(datafile), line_reader, strip)
class FileWithHeaders(_DelimitedFileReader):
'Easily iterate over a tab-delimited text file with a header line.'
def __init__(self, datafile, strip = 0):
_DelimitedFileReader.__init__(self, strip)
self.handle = open(datafile)
self.coldict = {}
i = 0
for h in self.handle.readline().strip().split("\t"):
self.coldict[h.lower()] = i
i = i + 1
def line_reader(handle):
return handle.readline()
self.line_reader = line_reader
def join(iterables, columns):
"""Given a list of DictedData (or anything else iterable that returns
dictionaries) and a list of columns to join on, produce an iterable
that gives the rows from the first iterable joined on those columns
with each of the other iterables."""
hashes = []
for iterable in iterables[1:]:
hash = {}
for r in iterable:
key = [ r[x] for x in columns ]
value_columns = r.keys()[:]
for x in columns:
value_columns.remove(x)
values = dict([ (x, r[x]) for x in value_columns ])
hash[HashableList(key)] = values
hashes.append(hash)
for r in iterables[0]:
r = dict(r)
key = HashableList([ r[x] for x in columns ])
oops = 0
for h in hashes:
if key in h:
for k, v in h[key].items():
r[k] = v
else:
oops = 1
if oops:
continue
yield r
def init_filename_resolver(path):
global resolve_filename_map
resolve_filename_map = {}
for fname in dircache.listdir(path):
status = os.stat(path + "/" + fname)
if S_ISREG(status[ST_MODE]):
mtime = status[ST_MTIME]
if resolve_filename_map.has_key(fname.lower()):
entry = resolve_filename_map[fname.lower()]
if entry[1] < mtime:
resolve_filename_map[fname.lower()] = (fname, mtime)
else:
resolve_filename_map[fname.lower()] = (fname, mtime)
def set_up_html_logger(filename_html, filename_css, level=logging.INFO):
"""Set up the standard Python logging infrastructure to log in a basic
HTML format to the given file, and have that file point to some CSS."""
logfile = open(filename_html, "w")
logfile.write('<html><head><link rel="stylesheet" type="text/css" href="%s"/><title>Update log</title></head><body>' % filename_css)
levelmap = {
logging.INFO: '<a class="info">INFO</a>',
logging.DEBUG: '<a class="debug">DEBUG</a>',
logging.ERROR: '<a class="error">ERROR</a>',
logging.WARNING: '<a class="warning">WARNING</a>',
}
class HTMLFormatter(logging.Formatter):
def format(self, record):
t = time.strftime("%Y-%m-%d %H:%M:%S")
msg = record.msg.replace('\t', ' ')
return "<br><a class='time'>%s</a> %s <a class='msg'>%s</a>" % (t, levelmap[record.levelno], msg)
loghandler = logging.StreamHandler(logfile)
loghandler.setFormatter(HTMLFormatter())
logging.root.setLevel(level)
logging.root.addHandler(loghandler)
class ImmutableDict(dict):
'A hashable wrapper around the standard dict.'
def __init__(self, *args, **kwds):
dict.__init__(self, *args, **kwds)
def __setitem__(self, key, value):
raise NotImplementedError, "don't do that"
def __delitem__(self, key):
raise NotImplementedError, "don't do that"
def clear(self):
raise NotImplementedError, "don't do that"
def setdefault(self, k, default=None):
raise NotImplementedError, "don't do that"
def popitem(self):
raise NotImplementedError, "don't do that"
def update(self, other):
raise NotImplementedError, "don't do that"
def __hash__(self):
return hash(tuple(self.iteritems()))
class HashableList(list):
'A hashable wrapper around the standard list.'
def __init__(self, l):
if l:
self.list = tuple(l)
else:
self.list = []
def __str__(self):
return str(self.list)
def __hash__(self):
return hash(self.list)
def __len__(self):
return len(self.list)
def __getitem__(self, i):
return self.list[i]
def __iter__(self):
return iter(self.list)
def di_pickle(o):
"""Given an object, which can be an integer, string, float, list,
tuple, set, frozenset, dictionary, ImmutableDict or any combination
thereof, return a string such that di_unpickle() will reconstruct
the object. The point, of course, being that di_unpickle() need
not be in this process on this computer, or even be di_unpickle()
itself but some function in another language entirely."""
if o == None:
return "None"
t = type(o)
if t is types.IntType or t is types.LongType:
return "i%d" % o
if t is types.BooleanType:
if o:
return "i1"
else:
return "i0"
if t is types.FloatType:
return "d%f" % o
if t is types.StringType:
return "s'%s'" % o.replace("'", "\\'")
if t is types.ListType:
return "L(%s)" % "".join([ di_pickle(x) for x in o])
if isinstance(o, HashableList):
return "L(%s)" % "".join([ di_pickle(x) for x in o.list])
if t is types.TupleType:
return "T(%s)" % "".join([ di_pickle(x) for x in o])
if isinstance(o, (set, frozenset)):
return "S(%s)" % "".join([ di_pickle(x) for x in o])
if t is types.DictType or isinstance(o, ImmutableDict):
return "D(%s)" % "".join([ "%s%s" % (di_pickle(k), di_pickle(v)) for k, v in o.items()])
raise "Unsupported type in di_pickle()", str(t)
def di_unpickle(s):
"""This is the converse of di_pickle(), or anything that encodes like
it. Given a string, reconstruct the encoded object."""
def do_unpickle(s):
if s[:4] == "None":
return None, s[4:]
if s[0] == 'i':
j = 1
if s[j] == '-':
negative = 1
j = j + 1
else:
negative = 0
while s[j].isdigit():
j = j + 1
if negative:
return -int(s[2:j]), s[j:]
else:
return int(s[1:j]), s[j:]
if s[0] == 'd':
j = 1
if s[j] == '-':
negative = 1
j = j + 1
else:
negative = 0
while s[j].isdigit() or s[j] == '.':
j = j + 1
if negative:
return -float(s[2:j]), s[j:]
else:
return float(s[1:j]), s[j:]
if s[0] == 's':
if s[1] != "'":
raise "Malformed input in di_unpickle()"
j = 2
while s[j] != "'" or s[j - 1] == "\\":
|
j = j + 1
|
conditional_block
|
|
post.go
|
IF NOT EXISTS admins (
username text PRIMARY KEY,
password text NOT NULL
)`
stmt, err = db.Prepare(create_q)
panicErr(err)
_, err = stmt.Exec()
panicErr(err)
// only these tables so far...
}
func initDbCmd() {
fmt.Print("initialising database...")
db := openSQL()
defer db.Close()
initDatabase(db)
fmt.Print(" done.\n")
}
func validBoardName(name string) bool {
ok, _ := regexp.MatchString("^[a-z0-9]{1,10}$", name)
if !ok {
return false
}
switch name {
case "static":
case "mod":
return false
}
return true
}
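// Illustrative results (given the regexp and reserved-name check above):
//   validBoardName("b")      -> true
//   validBoardName("static") -> false (reserved path name)
//   validBoardName("Board!") -> false (rejected by the regexp)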
func makeNewBoard(db *sql.DB, dbi *newBoardInfo) {
// prepare schema
stmt, err := db.Prepare("CREATE SCHEMA IF NOT EXISTS $1")
panicErr(err)
_, err = stmt.Exec(dbi.Name) // result isn't very meaningful for us, we check err regardless
panicErr(err)
// prepare tables
create_q := `CREATE TABLE IF NOT EXISTS %s.posts (
id bigserial PRIMARY KEY,
thread bigint,
name text NOT NULL,
trip text NOT NULL,
subject text NOT NULL,
email text NOT NULL,
date bigint NOT NULL,
message text NOT NULL,
file text NOT NULL,
original text NOT NULL,
thumb text NOT NULL,
ip_addr inet
)`
stmt, err = db.Prepare(fmt.Sprintf(create_q, dbi.Name))
panicErr(err)
_, err = stmt.Exec()
panicErr(err)
create_q = `CREATE INDEX ON %s.posts (thread)`
stmt, err = db.Prepare(fmt.Sprintf(create_q, dbi.Name))
panicErr(err)
_, err = stmt.Exec()
panicErr(err)
create_q = `CREATE TABLE IF NOT EXISTS %s.threads (
id bigint PRIMARY KEY,
bump bigint NOT NULL,
bumpnum integer NOT NULL
)`
stmt, err = db.Prepare(fmt.Sprintf(create_q, dbi.Name))
panicErr(err)
_, err = stmt.Exec()
panicErr(err)
// create dir tree
err = os.MkdirAll(pathBoardDir(dbi.Name), os.ModePerm)
panicErr(err)
err = os.MkdirAll(pathSrcDir(dbi.Name), os.ModePerm)
panicErr(err)
err = os.MkdirAll(pathThumbDir(dbi.Name), os.ModePerm)
panicErr(err)
err = os.MkdirAll(pathStaticDir(dbi.Name), os.ModePerm)
panicErr(err)
// insert to board list
create_q = `INSERT INTO boards (name, description, info) VALUES ($1, $2, $3)`
stmt, err = db.Prepare(create_q)
panicErr(err)
_, err = stmt.Exec(dbi.Name, dbi.Desc, dbi.Info)
panicErr(err)
// we're done
}
func deleteBoard(db *sql.DB, name string) bool {
var bname string
err := db.QueryRow("DELETE FROM boards WHERE name=$1 RETURNING name", name).Scan(&bname)
if err == sql.ErrNoRows {
// already deleted or invalid name, we have nothing to do there
return false
}
panicErr(err)
stmt, err := db.Prepare("DROP SCHEMA IF EXISTS $1")
panicErr(err)
_, err = stmt.Exec(bname)
panicErr(err)
os.RemoveAll(pathBoardDir(name))
return true
}
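// deleteBoard leans on DELETE ... RETURNING so a single round trip both
// removes the boards row and tells us whether the board existed at all
// (sql.ErrNoRows means it didn't, so there is nothing else to clean up).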
func postNewBoard(w http.ResponseWriter, r *http.Request) {
var nbi newBoardInfo
r.ParseForm()
bname, ok := r.Form["name"]
if !ok {
http.Error(w, "400 bad request: no name field", 400)
return
}
nbi.Name = bname[0]
if !validBoardName(nbi.Name) {
http.Error(w, "400 bad request: invalid board name", 400)
return
}
bdesc, ok := r.Form["desc"]
if !ok {
http.Error(w, "400 bad request: no desc field", 400)
return
}
nbi.Desc = bdesc[0]
binfo, ok := r.Form["info"]
if !ok {
http.Error(w, "400 bad request: no info field", 400)
return
}
nbi.Info = binfo[0]
db := openSQL()
defer db.Close()
makeNewBoard(db, &nbi)
execTemplate(w, "boardcreated", &nbi)
}
func postDelBoard(w http.ResponseWriter, r *http.Request) {
var board string
r.ParseForm() // without this r.Form is nil, so the lookup below would always fail
bname, ok := r.Form["name"]
if !ok {
http.Error(w, "400 bad request: no name field", 400)
return
}
board = bname[0]
db := openSQL()
defer db.Close()
ok = deleteBoard(db, board)
if !ok {
http.Error(w, "500 internal server error: board deletion failed", 500)
return
}
execTemplate(w, "boarddeleted", &board)
}
// postinfo for writing
type wPostInfo struct {
Name string
Trip string
Subject string
Email string
Message string
File string
Original string // original filename
Thumb string
}
type postResult struct {
Board string
Thread uint64
Post uint64
}
func (r *postResult) HasThread() bool {
return r.Thread != 0
}
func (r *postResult) IsThread() bool {
return r.Thread == r.Post
}
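// Semantics (illustrative): an OP post has Thread == Post, so IsThread()
// reports true; a reply keeps the OP's id in Thread and gets a larger Post,
// while Thread == 0 means the result is not attached to any thread.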
func acceptPost(w http.ResponseWriter, r *http.Request, p *wPostInfo, board string, isop bool) bool {
var err error
err = r.ParseMultipartForm(1 << 20)
if err != nil {
http.Error(w, fmt.Sprintf("400 bad request: ParseMultipartForm failed: %s", err), 400)
return false
}
pname, ok := r.Form["name"]
if !ok {
http.Error(w, "400 bad request: has no name field", 400)
return false
}
p.Name, p.Trip = MakeTrip(pname[0])
psubject, ok := r.Form["subject"]
if !ok {
http.Error(w, "400 bad request: has no subject field", 400)
return false
}
p.Subject = psubject[0]
pemail, ok := r.Form["email"]
if !ok {
http.Error(w, "400 bad request: has no email field", 400)
return false
}
p.Email = pemail[0]
pmessage, ok := r.Form["message"]
if !ok {
http.Error(w, "400 bad request: has no message field", 400)
return false
}
p.Message = pmessage[0]
f, h, err := r.FormFile("file")
if err == nil
|
http.Error(w, "file type not allowed", 403) // 403 Forbidden
return false
}
if size > maxSize {
http.Error(w, "file too big", 403) // 403 Forbidden
return false
}
fname := strconv.FormatInt(uniqueTimestamp(), 10) + ext
fullname := pathSrcFile(board, fname)
tmpname := pathSrcFile(board, ".tmp."+fname)
nf, err := os.OpenFile(tmpname, os.O_WRONLY|os.O_CREATE, 0666)
if err != nil {
http.Error(w, fmt.Sprintf("500 internal server error: %s", err), 500)
return false
}
io.Copy(nf, f)
nf.Close()
os.Rename(tmpname, fullname) // atomic :^)
p.File = fname
p.Original = h.Filename
tname, err := makeThumb(fullname, fname, board, ext, mt, isop)
if err != nil {
fmt.Printf("error generating thumb for %s: %s\n", fname, err)
}
p.Thumb = tname
}
return true
}
func postNewThread(w http.ResponseWriter, r *http.Request, board string) {
var p wPostInfo
db := openSQL()
defer db.Close()
var bname string
var maxthreads sql.NullInt64
err
|
{
defer f.Close()
size, err := f.Seek(0, os.SEEK_END)
if err != nil {
http.Error(w, fmt.Sprintf("500 internal server error: %s", err), 500)
return false
}
_, err = f.Seek(0, os.SEEK_SET)
if err != nil {
http.Error(w, fmt.Sprintf("500 internal server error: %s", err), 500)
return false
}
ext := filepath.Ext(h.Filename)
mt := mime.TypeByExtension(ext)
if mt != "" {
mt, _, _ = mime.ParseMediaType(mt)
}
maxSize, ok := allowedTypes[mt]
if !ok {
|
conditional_block
|
post.go
|
IF NOT EXISTS admins (
username text PRIMARY KEY,
password text NOT NULL
)`
stmt, err = db.Prepare(create_q)
panicErr(err)
_, err = stmt.Exec()
panicErr(err)
// only these tables so far...
}
func initDbCmd() {
fmt.Print("initialising database...")
db := openSQL()
defer db.Close()
initDatabase(db)
fmt.Print(" done.\n")
}
func validBoardName(name string) bool {
ok, _ := regexp.MatchString("^[a-z0-9]{1,10}$", name)
if !ok {
return false
}
switch name {
case "static":
case "mod":
return false
}
return true
}
func makeNewBoard(db *sql.DB, dbi *newBoardInfo) {
// prepare schema
stmt, err := db.Prepare("CREATE SCHEMA IF NOT EXISTS $1")
panicErr(err)
_, err = stmt.Exec(dbi.Name) // result isn't very meaningful for us, we check err regardless
panicErr(err)
// prepare tables
create_q := `CREATE TABLE IF NOT EXISTS %s.posts (
id bigserial PRIMARY KEY,
thread bigint,
name text NOT NULL,
trip text NOT NULL,
subject text NOT NULL,
email text NOT NULL,
date bigint NOT NULL,
message text NOT NULL,
file text NOT NULL,
original text NOT NULL,
thumb text NOT NULL,
ip_addr inet
)`
stmt, err = db.Prepare(fmt.Sprintf(create_q, dbi.Name))
panicErr(err)
_, err = stmt.Exec()
panicErr(err)
create_q = `CREATE INDEX ON %s.posts (thread)`
stmt, err = db.Prepare(fmt.Sprintf(create_q, dbi.Name))
panicErr(err)
_, err = stmt.Exec()
panicErr(err)
create_q = `CREATE TABLE IF NOT EXISTS %s.threads (
id bigint PRIMARY KEY,
bump bigint NOT NULL,
bumpnum integer NOT NULL
)`
stmt, err = db.Prepare(fmt.Sprintf(create_q, dbi.Name))
panicErr(err)
_, err = stmt.Exec()
panicErr(err)
// create dir tree
err = os.MkdirAll(pathBoardDir(dbi.Name), os.ModePerm)
panicErr(err)
err = os.MkdirAll(pathSrcDir(dbi.Name), os.ModePerm)
panicErr(err)
err = os.MkdirAll(pathThumbDir(dbi.Name), os.ModePerm)
panicErr(err)
err = os.MkdirAll(pathStaticDir(dbi.Name), os.ModePerm)
panicErr(err)
// insert to board list
create_q = `INSERT INTO boards (name, description, info) VALUES ($1, $2, $3)`
stmt, err = db.Prepare(create_q)
panicErr(err)
_, err = stmt.Exec(dbi.Name, dbi.Desc, dbi.Info)
panicErr(err)
// we're done
}
func deleteBoard(db *sql.DB, name string) bool {
var bname string
err := db.QueryRow("DELETE FROM boards WHERE name=$1 RETURNING name", name).Scan(&bname)
if err == sql.ErrNoRows {
// already deleted or invalid name, we have nothing to do there
return false
}
panicErr(err)
stmt, err := db.Prepare("DROP SCHEMA IF EXISTS $1")
panicErr(err)
_, err = stmt.Exec(bname)
panicErr(err)
os.RemoveAll(pathBoardDir(name))
return true
}
func postNewBoard(w http.ResponseWriter, r *http.Request) {
var nbi newBoardInfo
r.ParseForm()
bname, ok := r.Form["name"]
if !ok {
http.Error(w, "400 bad request: no name field", 400)
return
}
nbi.Name = bname[0]
if !validBoardName(nbi.Name) {
http.Error(w, "400 bad request: invalid board name", 400)
return
}
bdesc, ok := r.Form["desc"]
if !ok {
http.Error(w, "400 bad request: no desc field", 400)
return
}
nbi.Desc = bdesc[0]
binfo, ok := r.Form["info"]
if !ok {
http.Error(w, "400 bad request: no info field", 400)
return
}
nbi.Info = binfo[0]
db := openSQL()
defer db.Close()
makeNewBoard(db, &nbi)
execTemplate(w, "boardcreated", &nbi)
}
func postDelBoard(w http.ResponseWriter, r *http.Request) {
var board string
r.ParseForm() // without this r.Form is nil, so the lookup below would always fail
bname, ok := r.Form["name"]
if !ok {
http.Error(w, "400 bad request: no name field", 400)
return
}
board = bname[0]
db := openSQL()
defer db.Close()
ok = deleteBoard(db, board)
if !ok {
http.Error(w, "500 internal server error: board deletion failed", 500)
return
}
execTemplate(w, "boarddeleted", &board)
}
// postinfo for writing
type wPostInfo struct {
Name string
Trip string
Subject string
Email string
Message string
File string
Original string // original filename
Thumb string
}
type postResult struct {
Board string
Thread uint64
Post uint64
}
func (r *postResult) HasThread() bool
|
func (r *postResult) IsThread() bool {
return r.Thread == r.Post
}
func acceptPost(w http.ResponseWriter, r *http.Request, p *wPostInfo, board string, isop bool) bool {
var err error
err = r.ParseMultipartForm(1 << 20)
if err != nil {
http.Error(w, fmt.Sprintf("400 bad request: ParseMultipartForm failed: %s", err), 400)
return false
}
pname, ok := r.Form["name"]
if !ok {
http.Error(w, "400 bad request: has no name field", 400)
return false
}
p.Name, p.Trip = MakeTrip(pname[0])
psubject, ok := r.Form["subject"]
if !ok {
http.Error(w, "400 bad request: has no subject field", 400)
return false
}
p.Subject = psubject[0]
pemail, ok := r.Form["email"]
if !ok {
http.Error(w, "400 bad request: has no email field", 400)
return false
}
p.Email = pemail[0]
pmessage, ok := r.Form["message"]
if !ok {
http.Error(w, "400 bad request: has no message field", 400)
return false
}
p.Message = pmessage[0]
f, h, err := r.FormFile("file")
if err == nil {
defer f.Close()
size, err := f.Seek(0, os.SEEK_END)
if err != nil {
http.Error(w, fmt.Sprintf("500 internal server error: %s", err), 500)
return false
}
_, err = f.Seek(0, os.SEEK_SET)
if err != nil {
http.Error(w, fmt.Sprintf("500 internal server error: %s", err), 500)
return false
}
ext := filepath.Ext(h.Filename)
mt := mime.TypeByExtension(ext)
if mt != "" {
mt, _, _ = mime.ParseMediaType(mt)
}
maxSize, ok := allowedTypes[mt]
if !ok {
http.Error(w, "file type not allowed", 403) // 403 Forbidden
return false
}
if size > maxSize {
http.Error(w, "file too big", 403) // 403 Forbidden
return false
}
fname := strconv.FormatInt(uniqueTimestamp(), 10) + ext
fullname := pathSrcFile(board, fname)
tmpname := pathSrcFile(board, ".tmp."+fname)
nf, err := os.OpenFile(tmpname, os.O_WRONLY|os.O_CREATE, 0666)
if err != nil {
http.Error(w, fmt.Sprintf("500 internal server error: %s", err), 500)
return false
}
io.Copy(nf, f)
nf.Close()
os.Rename(tmpname, fullname) // atomic :^)
p.File = fname
p.Original = h.Filename
tname, err := makeThumb(fullname, fname, board, ext, mt, isop)
if err != nil {
fmt.Printf("error generating thumb for %s: %s\n", fname, err)
}
p.Thumb = tname
}
return true
}
func postNewThread(w http.ResponseWriter, r *http.Request, board string) {
var p wPostInfo
db := openSQL()
defer db.Close()
var bname string
var maxthreads sql.NullInt64
|
{
return r.Thread != 0
}
|
identifier_body
|
post.go
|
IF NOT EXISTS admins (
username text PRIMARY KEY,
password text NOT NULL
)`
stmt, err = db.Prepare(create_q)
panicErr(err)
_, err = stmt.Exec()
panicErr(err)
// only these tables so far...
}
func initDbCmd() {
fmt.Print("initialising database...")
db := openSQL()
defer db.Close()
initDatabase(db)
fmt.Print(" done.\n")
}
func validBoardName(name string) bool {
ok, _ := regexp.MatchString("^[a-z0-9]{1,10}$", name)
if !ok {
return false
}
switch name {
case "static":
case "mod":
return false
}
return true
}
func makeNewBoard(db *sql.DB, dbi *newBoardInfo) {
// prepare schema
stmt, err := db.Prepare("CREATE SCHEMA IF NOT EXISTS $1")
panicErr(err)
_, err = stmt.Exec(dbi.Name) // result isn't very meaningful for us, we check err regardless
panicErr(err)
// prepare tables
create_q := `CREATE TABLE IF NOT EXISTS %s.posts (
id bigserial PRIMARY KEY,
thread bigint,
name text NOT NULL,
trip text NOT NULL,
subject text NOT NULL,
email text NOT NULL,
date bigint NOT NULL,
message text NOT NULL,
file text NOT NULL,
original text NOT NULL,
thumb text NOT NULL,
ip_addr inet
)`
stmt, err = db.Prepare(fmt.Sprintf(create_q, dbi.Name))
panicErr(err)
_, err = stmt.Exec()
panicErr(err)
create_q = `CREATE INDEX ON %s.posts (thread)`
stmt, err = db.Prepare(fmt.Sprintf(create_q, dbi.Name))
panicErr(err)
_, err = stmt.Exec()
panicErr(err)
create_q = `CREATE TABLE IF NOT EXISTS %s.threads (
id bigint PRIMARY KEY,
bump bigint NOT NULL,
bumpnum integer NOT NULL
)`
stmt, err = db.Prepare(fmt.Sprintf(create_q, dbi.Name))
panicErr(err)
_, err = stmt.Exec()
panicErr(err)
// create dir tree
err = os.MkdirAll(pathBoardDir(dbi.Name), os.ModePerm)
panicErr(err)
err = os.MkdirAll(pathSrcDir(dbi.Name), os.ModePerm)
panicErr(err)
err = os.MkdirAll(pathThumbDir(dbi.Name), os.ModePerm)
panicErr(err)
err = os.MkdirAll(pathStaticDir(dbi.Name), os.ModePerm)
panicErr(err)
// insert to board list
create_q = `INSERT INTO boards (name, description, info) VALUES ($1, $2, $3)`
stmt, err = db.Prepare(create_q)
panicErr(err)
_, err = stmt.Exec(dbi.Name, dbi.Desc, dbi.Info)
panicErr(err)
// we're done
}
func deleteBoard(db *sql.DB, name string) bool {
var bname string
err := db.QueryRow("DELETE FROM boards WHERE name=$1 RETURNING name", name).Scan(&bname)
if err == sql.ErrNoRows {
// already deleted or invalid name, we have nothing to do there
return false
}
panicErr(err)
stmt, err := db.Prepare("DROP SCHEMA IF EXISTS $1")
panicErr(err)
_, err = stmt.Exec(bname)
panicErr(err)
os.RemoveAll(pathBoardDir(name))
return true
}
func postNewBoard(w http.ResponseWriter, r *http.Request) {
var nbi newBoardInfo
r.ParseForm()
bname, ok := r.Form["name"]
if !ok {
http.Error(w, "400 bad request: no name field", 400)
return
}
nbi.Name = bname[0]
if !validBoardName(nbi.Name) {
http.Error(w, "400 bad request: invalid board name", 400)
return
}
bdesc, ok := r.Form["desc"]
if !ok {
http.Error(w, "400 bad request: no desc field", 400)
return
}
nbi.Desc = bdesc[0]
binfo, ok := r.Form["info"]
if !ok {
http.Error(w, "400 bad request: no info field", 400)
return
}
nbi.Info = binfo[0]
db := openSQL()
defer db.Close()
makeNewBoard(db, &nbi)
execTemplate(w, "boardcreated", &nbi)
}
func
|
(w http.ResponseWriter, r *http.Request) {
var board string
r.ParseForm() // without this r.Form is nil, so the lookup below would always fail
bname, ok := r.Form["name"]
if !ok {
http.Error(w, "400 bad request: no name field", 400)
return
}
board = bname[0]
db := openSQL()
defer db.Close()
ok = deleteBoard(db, board)
if !ok {
http.Error(w, "500 internal server error: board deletion failed", 500)
return
}
execTemplate(w, "boarddeleted", &board)
}
// postinfo for writing
type wPostInfo struct {
Name string
Trip string
Subject string
Email string
Message string
File string
Original string // original filename
Thumb string
}
type postResult struct {
Board string
Thread uint64
Post uint64
}
func (r *postResult) HasThread() bool {
return r.Thread != 0
}
func (r *postResult) IsThread() bool {
return r.Thread == r.Post
}
func acceptPost(w http.ResponseWriter, r *http.Request, p *wPostInfo, board string, isop bool) bool {
var err error
err = r.ParseMultipartForm(1 << 20)
if err != nil {
http.Error(w, fmt.Sprintf("400 bad request: ParseMultipartForm failed: %s", err), 400)
return false
}
pname, ok := r.Form["name"]
if !ok {
http.Error(w, "400 bad request: has no name field", 400)
return false
}
p.Name, p.Trip = MakeTrip(pname[0])
psubject, ok := r.Form["subject"]
if !ok {
http.Error(w, "400 bad request: has no subject field", 400)
return false
}
p.Subject = psubject[0]
pemail, ok := r.Form["email"]
if !ok {
http.Error(w, "400 bad request: has no email field", 400)
return false
}
p.Email = pemail[0]
pmessage, ok := r.Form["message"]
if !ok {
http.Error(w, "400 bad request: has no message field", 400)
return false
}
p.Message = pmessage[0]
f, h, err := r.FormFile("file")
if err == nil {
defer f.Close()
size, err := f.Seek(0, os.SEEK_END)
if err != nil {
http.Error(w, fmt.Sprintf("500 internal server error: %s", err), 500)
return false
}
_, err = f.Seek(0, os.SEEK_SET)
if err != nil {
http.Error(w, fmt.Sprintf("500 internal server error: %s", err), 500)
return false
}
ext := filepath.Ext(h.Filename)
mt := mime.TypeByExtension(ext)
if mt != "" {
mt, _, _ = mime.ParseMediaType(mt)
}
maxSize, ok := allowedTypes[mt]
if !ok {
http.Error(w, "file type not allowed", 403) // 403 Forbidden
return false
}
if size > maxSize {
http.Error(w, "file too big", 403) // 403 Forbidden
return false
}
fname := strconv.FormatInt(uniqueTimestamp(), 10) + ext
fullname := pathSrcFile(board, fname)
tmpname := pathSrcFile(board, ".tmp."+fname)
nf, err := os.OpenFile(tmpname, os.O_WRONLY|os.O_CREATE, 0666)
if err != nil {
http.Error(w, fmt.Sprintf("500 internal server error: %s", err), 500)
return false
}
io.Copy(nf, f)
nf.Close()
os.Rename(tmpname, fullname) // atomic :^)
p.File = fname
p.Original = h.Filename
tname, err := makeThumb(fullname, fname, board, ext, mt, isop)
if err != nil {
fmt.Printf("error generating thumb for %s: %s\n", fname, err)
}
p.Thumb = tname
}
return true
}
func postNewThread(w http.ResponseWriter, r *http.Request, board string) {
var p wPostInfo
db := openSQL()
defer db.Close()
var bname string
var maxthreads sql.NullInt64
err
|
postDelBoard
|
identifier_name
|
post.go
|
IF NOT EXISTS admins (
username text PRIMARY KEY,
password text NOT NULL
)`
stmt, err = db.Prepare(create_q)
panicErr(err)
_, err = stmt.Exec()
panicErr(err)
// only these tables so far...
}
func initDbCmd() {
fmt.Print("initialising database...")
db := openSQL()
defer db.Close()
initDatabase(db)
fmt.Print(" done.\n")
}
func validBoardName(name string) bool {
ok, _ := regexp.MatchString("^[a-z0-9]{1,10}$", name)
if !ok {
return false
}
switch name {
case "static":
case "mod":
return false
}
return true
}
func makeNewBoard(db *sql.DB, dbi *newBoardInfo) {
// prepare schema
stmt, err := db.Prepare("CREATE SCHEMA IF NOT EXISTS $1")
panicErr(err)
_, err = stmt.Exec(dbi.Name) // result isn't very meaningful for us, we check err regardless
panicErr(err)
// prepare tables
create_q := `CREATE TABLE IF NOT EXISTS %s.posts (
id bigserial PRIMARY KEY,
thread bigint,
name text NOT NULL,
trip text NOT NULL,
subject text NOT NULL,
email text NOT NULL,
date bigint NOT NULL,
message text NOT NULL,
file text NOT NULL,
original text NOT NULL,
thumb text NOT NULL,
ip_addr inet
)`
stmt, err = db.Prepare(fmt.Sprintf(create_q, dbi.Name))
panicErr(err)
_, err = stmt.Exec()
panicErr(err)
create_q = `CREATE INDEX ON %s.posts (thread)`
stmt, err = db.Prepare(fmt.Sprintf(create_q, dbi.Name))
panicErr(err)
_, err = stmt.Exec()
panicErr(err)
create_q = `CREATE TABLE IF NOT EXISTS %s.threads (
id bigint PRIMARY KEY,
bump bigint NOT NULL,
bumpnum integer NOT NULL
)`
stmt, err = db.Prepare(fmt.Sprintf(create_q, dbi.Name))
panicErr(err)
_, err = stmt.Exec()
panicErr(err)
// create dir tree
err = os.MkdirAll(pathBoardDir(dbi.Name), os.ModePerm)
panicErr(err)
err = os.MkdirAll(pathSrcDir(dbi.Name), os.ModePerm)
panicErr(err)
err = os.MkdirAll(pathThumbDir(dbi.Name), os.ModePerm)
panicErr(err)
err = os.MkdirAll(pathStaticDir(dbi.Name), os.ModePerm)
panicErr(err)
// insert to board list
create_q = `INSERT INTO boards (name, description, info) VALUES ($1, $2, $3)`
stmt, err = db.Prepare(create_q)
panicErr(err)
_, err = stmt.Exec(dbi.Name, dbi.Desc, dbi.Info)
panicErr(err)
// we're done
}
func deleteBoard(db *sql.DB, name string) bool {
var bname string
err := db.QueryRow("DELETE FROM boards WHERE name=$1 RETURNING name", name).Scan(&bname)
if err == sql.ErrNoRows {
// already deleted or invalid name, we have nothing to do there
return false
}
panicErr(err)
stmt, err := db.Prepare("DROP SCHEMA IF EXISTS $1")
panicErr(err)
_, err = stmt.Exec(bname)
panicErr(err)
os.RemoveAll(pathBoardDir(name))
return true
}
func postNewBoard(w http.ResponseWriter, r *http.Request) {
var nbi newBoardInfo
r.ParseForm()
bname, ok := r.Form["name"]
if !ok {
http.Error(w, "400 bad request: no name field", 400)
return
}
nbi.Name = bname[0]
if !validBoardName(nbi.Name) {
http.Error(w, "400 bad request: invalid board name", 400)
return
}
bdesc, ok := r.Form["desc"]
if !ok {
http.Error(w, "400 bad request: no desc field", 400)
return
}
nbi.Desc = bdesc[0]
binfo, ok := r.Form["info"]
if !ok {
http.Error(w, "400 bad request: no info field", 400)
return
}
nbi.Info = binfo[0]
db := openSQL()
defer db.Close()
makeNewBoard(db, &nbi)
execTemplate(w, "boardcreated", &nbi)
}
func postDelBoard(w http.ResponseWriter, r *http.Request) {
var board string
r.ParseForm() // without this r.Form is nil, so the lookup below would always fail
bname, ok := r.Form["name"]
if !ok {
http.Error(w, "400 bad request: no name field", 400)
return
}
board = bname[0]
db := openSQL()
defer db.Close()
ok = deleteBoard(db, board)
if !ok {
http.Error(w, "500 internal server error: board deletion failed", 500)
return
}
execTemplate(w, "boarddeleted", &board)
}
// postinfo for writing
type wPostInfo struct {
Name string
Trip string
Subject string
Email string
Message string
File string
Original string // original filename
Thumb string
}
type postResult struct {
Board string
Thread uint64
Post uint64
}
func (r *postResult) HasThread() bool {
return r.Thread != 0
}
func (r *postResult) IsThread() bool {
return r.Thread == r.Post
}
func acceptPost(w http.ResponseWriter, r *http.Request, p *wPostInfo, board string, isop bool) bool {
var err error
err = r.ParseMultipartForm(1 << 20)
if err != nil {
http.Error(w, fmt.Sprintf("400 bad request: ParseMultipartForm failed: %s", err), 400)
return false
}
pname, ok := r.Form["name"]
if !ok {
http.Error(w, "400 bad request: has no name field", 400)
return false
}
p.Name, p.Trip = MakeTrip(pname[0])
psubject, ok := r.Form["subject"]
if !ok {
http.Error(w, "400 bad request: has no subject field", 400)
return false
}
p.Subject = psubject[0]
pemail, ok := r.Form["email"]
if !ok {
http.Error(w, "400 bad request: has no email field", 400)
return false
}
p.Email = pemail[0]
pmessage, ok := r.Form["message"]
if !ok {
http.Error(w, "400 bad request: has no message field", 400)
return false
}
p.Message = pmessage[0]
f, h, err := r.FormFile("file")
if err == nil {
defer f.Close()
size, err := f.Seek(0, os.SEEK_END)
if err != nil {
http.Error(w, fmt.Sprintf("500 internal server error: %s", err), 500)
return false
}
_, err = f.Seek(0, os.SEEK_SET)
if err != nil {
http.Error(w, fmt.Sprintf("500 internal server error: %s", err), 500)
return false
}
ext := filepath.Ext(h.Filename)
mt := mime.TypeByExtension(ext)
if mt != "" {
mt, _, _ = mime.ParseMediaType(mt)
}
maxSize, ok := allowedTypes[mt]
if !ok {
http.Error(w, "file type not allowed", 403) // 403 Forbidden
return false
}
if size > maxSize {
http.Error(w, "file too big", 403) // 403 Forbidden
return false
}
fname := strconv.FormatInt(uniqueTimestamp(), 10) + ext
fullname := pathSrcFile(board, fname)
tmpname := pathSrcFile(board, ".tmp."+fname)
nf, err := os.OpenFile(tmpname, os.O_WRONLY|os.O_CREATE, 0666)
if err != nil {
http.Error(w, fmt.Sprintf("500 internal server error: %s", err), 500)
return false
}
io.Copy(nf, f)
nf.Close()
os.Rename(tmpname, fullname) // atomic :^)
p.File = fname
p.Original = h.Filename
tname, err := makeThumb(fullname, fname, board, ext, mt, isop)
if err != nil {
fmt.Printf("error generating thumb for %s: %s\n", fname, err)
}
p.Thumb = tname
}
return true
}
|
db := openSQL()
defer db.Close()
var bname string
var maxthreads sql.NullInt64
err
|
func postNewThread(w http.ResponseWriter, r *http.Request, board string) {
var p wPostInfo
|
random_line_split
|
motor_tools.py
|
except serial.SerialException:
print "%s is not a connected port, trying next.\n" %port
if x_port_flag is False or y_port_flag is False:
print "Connection failed waiting 5 seconds and trying again.\n"
try:
self.con1.close()
sleep(1)
self.con2.close()
sleep(1)
except:
pass
if self.connection_attempts < 4:
sleep(5)
self.connection_attempts += 1
self.connect_to_ports()
else:
raise Exception, "The x or y motor has not been connected, 4 attempts have been made.\n"
def _loop_structure(self, con, pat):
'''A method used to complete all the queries in this class. The MDrive motor
echoes all sent commands and returns values as a list. This method uses regex to
sift out the desired information from that list. Notice the pattern sent in is a
regex pattern pertinent to the task at hand.'''
sleep(0.1)
re_obj = re.compile(pat)
readback = con.readlines()
for item in readback:
if re_obj.match(item) is not None:
return item
return "unknown"
def assign_serial_num(self, sn, x_port, y_port, con):
if sn == '269120375' and y_port is False:
y_port = True
self.motor_y = Motor(con)
elif sn == '074130197' and x_port is False:
x_port = True
self.motor_x = Motor(con)
return x_port, y_port
class Motor(object):
'''This is a collection of methods which control an Mdrive 23 step motor. '''
def __init__(self, serial_con):
#self.con = serial.Serial(None, 9600, timeout = 0, writeTimeout = 0)
self.con = serial_con
self.codetools = CodeTools()
self._steps_per_rev = {'256':51200,'128':25600,'64':12800,'32':6400,
'16':3200,'8':1600,'4':800,'2':400,'1':200,
'250':50000,'200':40000,'125':25000,'100':20000,
'50':10000,'25':5000,'10':2000,'5':1000}
self._meters_per_rev = 0.005
'''5mm per 1 full revolution '''
self.Error_codes = {20:'Tried to set unknown variable or flag',
21:'Tried to set an incorrect value',
30:'Unknown label or user variable',
24:'Illegal data entered',
25: 'Tried to set a read only variable'
}
def main(self, acceleration, max_vel, init_vel):
'''Sets the basic parameters of the motor. '''
self.clear_error()
self.MicroStep = self._get_ms()
self._set_var('S1','3,0,0') # limit switch for home
self._set_var('S2','2,0,0')# limit switch for farthest end
self._set_var('LM', 2)# decel ramp, stops all motion that dir
self._set_var('A',acceleration) # acceleration
self._set_var('VM',max_vel)# max velocity
self._set_var('VI',init_vel)# initial velocity
# when a motor is instantiated, use the current P for the linear dist.
self.CurrentPos = self._calculate_pos(self._get_current_step())
def _check_reached_limit_switch(self):
'''Query the motor to determine if either limit switch has been reached. Returns
True if a limit has been tripped or False if it has not. '''
error_status = False
self.con.write('PR ER\r\n')#check if reached limit switch
sleep(0.1)
pat = '83\r\n|84\r\n' # boolean "or" in the regex
out_put = self._loop_structure(pat)
if out_put == '83\r\n' or out_put == '84\r\n':
error_status = True
if out_put == '84\r\n':
print "Reached limit switch at HOME. \n"
self.y_lock = True
elif out_put == '83\r\n':
print "Reached limit switch at farthest end. \n"
return error_status
def clear_error(self):
'''Query the motor for errors, return the error code and clear the errors. '''
print "clearing errors \n"
# self._set_var('ER', 0, True) # error code
self.write('PR ER')
self.write('ER 0') # not using _set_var() b/c there's not = sign used for this op
# sleep(0.1)
# self.write('PR EF')
# sleep(0.1)
# self.con.readlines()
def _set_step(self, step):
'''Set the number of steps that the motor uses for a full rotation. '''
if not isinstance(step,int):
print "Enter an integer only."
return
self._set_var('P',step)
self.CurrentPos = self._calculate_pos(step)
def _calculate_pos(self, steps):
'''Calculate the position of the motor using the conversion factors.
X steps * 1 rev/Y steps * 5 mm/1 rev '''
CurrentPos = steps * (1.0/(self._steps_per_rev[str(self.MicroStep)])) * self._meters_per_rev
'''position = X steps * 1 rev/Y steps * 5 mm/1 rev '''
return CurrentPos
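As a worked example of the formula above, with the default MS of 256 (51200 steps per revolution) and 5 mm of travel per revolution:

steps_per_rev = 51200           # MS = 256
meters_per_rev = 0.005          # 5 mm lead per revolution
steps = 102400                  # two full revolutions
pos = steps * (1.0 / steps_per_rev) * meters_per_rev
print(pos)                      # 0.01, i.e. 10 mm of linear travel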
def _get_ms(self):
'''Query the motor for the current microstep setting. '''
self.flush()
sleep(0.1)
self.con.write('PR MS\r\n')
sleep(0.1)
pat = '[0-9]+\r\n'
ms = self._loop_structure(pat)
return int(ms.strip('\n').strip('\r'))
def _get_current_step(self):
'''Return the current step or position 'P' of the motor. '''
self.flush()
self.con.write('PR P\r\n')# P is really the step
pat = '\-*[0-9]+\r\n'
output = self._loop_structure(pat)
return int(output.strip('\r\n'))
def _calc_steps(self,linear_dist):
'''How many steps it takes to get to a linear dist input.'''
steps = float(linear_dist)/self._meters_per_rev * float((self._steps_per_rev[str(self.MicroStep)]))
return int(round(steps))
def _motor_stopped(self):
'''Used as a poll of the location of a motor, so that the program can spit out the
new position after the motor has arrived. It also checks if a limit switch has been
tripped. '''
Flag = True
self.con.flushOutput()# readlines() flushes the output too
sleep(0.1)
while Flag:
self.con.write('PR MP\r\n') # flag for moving to position
sleep(0.5)
list = self.con.readlines()
if list[2].strip('\r\n') == '0':
Flag = False
def return_home(self):
'''Return the motor to home position, which is at the limit switch in the bottom left corner. '''
self.clear_error()
error_status = False
self.con.write('SL -51200\r\n')# slew at 5mm per second; raw con.write needs an explicit \r\n
while error_status == False:
error_status = self._check_reached_limit_switch()
self.write('SL 0') # stop the slew movement
sleep(0.1)
self.clear_error()
sleep(0.1)
self.set_pos_as_start()
def move_rel(self, linear_dist):
'''Tell the motor to move a linear distance as a relative position.'''
steps = self._calc_steps(linear_dist)
sleep(0.1)
self.write('MR %i' %steps, echk = True)
self._motor_stopped()
self._CurrentStep = self._get_current_step()
self.CurrentPos = float(self._calculate_pos(self._CurrentStep))
print "New Pos: %s " %self.codetools.ToSI(self.CurrentPos)
limit_flag = self._check_reached_limit_switch()
if limit_flag:
self.clear_error()
def write(self,
|
self.con2.port = '/dev/ttyUSB%s' %port
self.con2.open()
sleep(2)
sn = self._get_sn(self.con2)
if sn == 'unknown':
sn = self._get_sn(self.con2)
x_port_flag, y_port_flag = self.assign_serial_num(sn, x_port_flag, y_port_flag, self.con2)
|
conditional_block
|
|
motor_tools.py
|
a connected port, trying next.\n" %port
if x_port_flag is False or y_port_flag is False:
print "Connection failed waiting 5 seconds and trying again.\n"
try:
self.con1.close()
sleep(1)
self.con2.close()
sleep(1)
except:
pass
if self.connection_attempts < 4:
sleep(5)
self.connection_attempts += 1
self.connect_to_ports()
else:
raise Exception, "The x or y motor has not been connected, 4 attempts have been made.\n"
def _loop_structure(self, con, pat):
'''A helper used by all the queries in this class. The MDrive motor
echoes every sent command and returns values as a list of lines. This method
uses a regex to sift the desired information out of that list; the pattern
passed in is specific to the task at hand.'''
sleep(0.1)
re_obj = re.compile(pat)
readback = con.readlines()
for item in readback:
if re_obj.match(item) is not None:
return item
return "unknown"
def assign_serial_num(self, sn, x_port, y_port, con):
if sn == '269120375' and y_port is False:
y_port = True
self.motor_y = Motor(con)
elif sn == '074130197' and x_port is False:
x_port = True
self.motor_x = Motor(con)
return x_port, y_port
class Motor(object):
'''This is a collection of methods which control an Mdrive 23 step motor. '''
def __init__(self, serial_con):
#self.con = serial.Serial(None, 9600, timeout = 0, writeTimeout = 0)
self.con = serial_con
self.codetools = CodeTools()
self._steps_per_rev = {'256':51200,'128':25600,'64':12800,'32':6400,
'16':3200,'8':1600,'4':800,'2':400,'1':200,
'250':50000,'200':40000,'125':25000,'100':20000,
'50':10000,'25':5000,'10':2000,'5':1000}
self._meters_per_rev = 0.005
'''5mm per 1 full revolution '''
self.Error_codes = {20:'Tried to set unknown variable or flag',
21:'Tried to set an incorrect value',
30:'Unknown label or user variable',
24:'Illegal data entered',
25: 'Tried to set a read only variable'
}
def main(self, acceleration, max_vel, init_vel):
'''Sets the basic parameters of the motor. '''
self.clear_error()
self.MicroStep = self._get_ms()
self._set_var('S1','3,0,0') # limit switch for home
self._set_var('S2','2,0,0')# limit switch for farthest end
self._set_var('LM', 2)# decel ramp, stops all motion that dir
self._set_var('A',acceleration) # acceleration
self._set_var('VM',max_vel)# max velocity
self._set_var('VI',init_vel)# initial velocity
# when a motor is instantiated, use the current P for the linear dist.
self.CurrentPos = self._calculate_pos(self._get_current_step())
def _check_reached_limit_switch(self):
'''Query the motor to determine if either limit switch has been reached. Returns
True if a limit has been tripped or False if it has not. '''
error_status = False
self.con.write('PR ER\r\n')#check if reached limit switch
sleep(0.1)
pat = '83\r\n|84\r\n' # boolean "or" in the regex
out_put = self._loop_structure(pat)
if out_put == '83\r\n' or out_put == '84\r\n':
error_status = True
if out_put == '84\r\n':
print "Reached limit switch at HOME. \n"
self.y_lock = True
elif out_put == '83\r\n':
print "Reached limit switch at farthest end. \n"
return error_status
def clear_error(self):
'''Query the motor for errors, return the error code and clear the errors. '''
print "clearing errors \n"
# self._set_var('ER', 0, True) # error code
self.write('PR ER')
self.write('ER 0') # not using _set_var() b/c there's not = sign used for this op
# sleep(0.1)
# self.write('PR EF')
# sleep(0.1)
# self.con.readlines()
def _set_step(self, step):
'''Set the number of steps that the motor uses for a full rotation. '''
if not isinstance(step,int):
print "Enter an integer only."
return
self._set_var('P',step)
self.CurrentPos = self._calculate_pos(step)
def _calculate_pos(self, steps):
'''Calculate the position of the motor using the conversion factors.
X steps * 1 rev/Y steps * 5 mm/1 rev '''
CurrentPos = steps * (1.0/(self._steps_per_rev[str(self.MicroStep)])) * self._meters_per_rev
'''position = X steps * 1 rev/Y steps * 5 mm/1 rev '''
return CurrentPos
def _get_ms(self):
'''Query the motor for the current microstep setting. '''
self.flush()
sleep(0.1)
self.con.write('PR MS\r\n')
sleep(0.1)
pat = '[0-9]+\r\n'
ms = self._loop_structure(pat)
return int(ms.strip('\n').strip('\r'))
|
'''Return the current step or position 'P' of the motor. '''
self.flush()
self.con.write('PR P\r\n')# P is really the step
pat = '\-*[0-9]+\r\n'
output = self._loop_structure(pat)
return int(output.strip('\r\n'))
def _calc_steps(self,linear_dist):
'''How many steps it takes to get to a linear dist input.'''
steps = float(linear_dist)/self._meters_per_rev * float((self._steps_per_rev[str(self.MicroStep)]))
return int(round(steps))
def _motor_stopped(self):
'''Used as a poll of the location of a motor, so that the program can spit out the
new position after the motor has arrived. It also checks if a limit switch has been
tripped. '''
Flag = True
self.con.flushOutput()# readlines() flushes the output too
sleep(0.1)
while Flag:
self.con.write('PR MP\r\n') # flag for moving to position
sleep(0.5)
list = self.con.readlines()
if list[2].strip('\r\n') == '0':
Flag = False
def return_home(self):
'''Return the motor to home position, which is at the limit switch in the bottom left corner. '''
self.clear_error()
error_status = False
self.con.write('SL -51200\r\n')# slew at 5mm per second; raw con.write needs an explicit \r\n
while error_status == False:
error_status = self._check_reached_limit_switch()
self.write('SL 0') # stop the slew movement
sleep(0.1)
self.clear_error()
sleep(0.1)
self.set_pos_as_start()
def move_rel(self, linear_dist):
'''Tell the motor to move a linear distance as a relative position.'''
steps = self._calc_steps(linear_dist)
sleep(0.1)
self.write('MR %i' %steps, echk = True)
self._motor_stopped()
self._CurrentStep = self._get_current_step()
self.CurrentPos = float(self._calculate_pos(self._CurrentStep))
print "New Pos: %s " %self.codetools.ToSI(self.CurrentPos)
limit_flag = self._check_reached_limit_switch()
if limit_flag:
self.clear_error()
def write(self, arg, echk = False):
'''Write a command to the motor; the carriage return and line feed are appended
automatically, unlike the underlying pySerial connection, where you must send the \r\n yourself. '''
self.con.write("%s\r\n" %arg)
sleep(0.1)
list = self.con.readlines()
print list
if echk == True:
self._check_for_mcode_error()
def _set
|
def _get_current_step(self):
|
random_line_split
|
motor_tools.py
|
connected port, trying next.\n" %port
if x_port_flag is False or y_port_flag is False:
print "Connection failed waiting 5 seconds and trying again.\n"
try:
self.con1.close()
sleep(1)
self.con2.close()
sleep(1)
except:
pass
if self.connection_attempts < 4:
sleep(5)
self.connection_attempts += 1
self.connect_to_ports()
else:
raise Exception, "The x or y motor has not been connected, 4 attempts have been made.\n"
def _loop_structure(self, con, pat):
'''A helper used by all the queries in this class. The MDrive motor
echoes every sent command and returns values as a list of lines. This method
uses a regex to sift the desired information out of that list; the pattern
passed in is specific to the task at hand.'''
sleep(0.1)
re_obj = re.compile(pat)
readback = con.readlines()
for item in readback:
if re_obj.match(item) is not None:
return item
return "unknown"
def assign_serial_num(self, sn, x_port, y_port, con):
if sn == '269120375' and y_port is False:
y_port = True
self.motor_y = Motor(con)
elif sn == '074130197' and x_port is False:
x_port = True
self.motor_x = Motor(con)
return x_port, y_port
class Motor(object):
'''This is a collection of methods which control an Mdrive 23 step motor. '''
def __init__(self, serial_con):
#self.con = serial.Serial(None, 9600, timeout = 0, writeTimeout = 0)
self.con = serial_con
self.codetools = CodeTools()
self._steps_per_rev = {'256':51200,'128':25600,'64':12800,'32':6400,
'16':3200,'8':1600,'4':800,'2':400,'1':200,
'250':50000,'200':40000,'125':25000,'100':20000,
'50':10000,'25':5000,'10':2000,'5':1000}
self._meters_per_rev = 0.005
'''5mm per 1 full revolution '''
self.Error_codes = {20:'Tried to set unknown variable or flag',
21:'Tried to set an incorrect value',
30:'Unknown label or user variable',
24:'Illegal data entered',
25: 'Tried to set a read only variable'
}
def main(self, acceleration, max_vel, init_vel):
'''Sets the basic parameters of the motor. '''
self.clear_error()
self.MicroStep = self._get_ms()
self._set_var('S1','3,0,0') # limit switch for home
self._set_var('S2','2,0,0')# limit switch for farthest end
self._set_var('LM', 2)# decel ramp, stops all motion that dir
self._set_var('A',acceleration) # acceleration
self._set_var('VM',max_vel)# max velocity
self._set_var('VI',init_vel)# initial velocity
# when a motor is instantiated, use the current P for the linear dist.
self.CurrentPos = self._calculate_pos(self._get_current_step())
def _check_reached_limit_switch(self):
'''Query the motor to determine if either limit switch has been reached. Returns
True if a limit has been tripped or False if it has not. '''
error_status = False
self.con.write('PR ER\r\n')#check if reached limit switch
sleep(0.1)
pat = '83\r\n|84\r\n' # boolean "or" in the regex
out_put = self._loop_structure(pat)
if out_put == '83\r\n' or out_put == '84\r\n':
error_status = True
if out_put == '84\r\n':
print "Reached limit switch at HOME. \n"
self.y_lock = True
elif out_put == '83\r\n':
print "Reached limit switch at farthest end. \n"
return error_status
def clear_error(self):
'''Query the motor for errors, return the error code and clear the errors. '''
print "clearing errors \n"
# self._set_var('ER', 0, True) # error code
self.write('PR ER')
self.write('ER 0') # not using _set_var() b/c there's not = sign used for this op
# sleep(0.1)
# self.write('PR EF')
# sleep(0.1)
# self.con.readlines()
def _set_step(self, step):
'''Set the number of steps that the motor uses for a full rotation. '''
if not isinstance(step,int):
print "Enter an integer only."
return
self._set_var('P',step)
self.CurrentPos = self._calculate_pos(step)
def _calculate_pos(self, steps):
'''Calculate the position of the motor using the conversion factors.
X steps * 1 rev/Y steps * 5 mm/1 rev '''
CurrentPos = steps * (1.0/(self._steps_per_rev[str(self.MicroStep)])) * self._meters_per_rev
'''position = X steps * 1 rev/Y steps * 5 mm/1 rev '''
return CurrentPos
def _get_ms(self):
'''Query the motor for the current microstep setting. '''
self.flush()
sleep(0.1)
self.con.write('PR MS\r\n')
sleep(0.1)
pat = '[0-9]+\r\n'
ms = self._loop_structure(pat)
return int(ms.strip('\n').strip('\r'))
def _get_current_step(self):
'''Return the current step or position 'P' of the motor. '''
self.flush()
self.con.write('PR P\r\n')# P is really the step
pat = '\-*[0-9]+\r\n'
output = self._loop_structure(pat)
return int(output.strip('\r\n'))
def _calc_steps(self,linear_dist):
'''How many steps it takes to get to a linear dist input.'''
steps = float(linear_dist)/self._meters_per_rev * float((self._steps_per_rev[str(self.MicroStep)]))
return int(round(steps))
def _motor_stopped(self):
'''Used as a poll of the location of a motor, so that the program can spit out the
new position after the motor has arrived. It also checks if a limit switch has been
tripped. '''
Flag = True
self.con.flushOutput()# readlines() flushes the output too
sleep(0.1)
while Flag:
self.con.write('PR MP\r\n') # flag for moving to position
sleep(0.5)
list = self.con.readlines()
if list[2].strip('\r\n') == '0':
Flag = False
def return_home(self):
'''Return the motor to home position, which is at the limit switch in the bottom left corner. '''
self.clear_error()
error_status = False
self.con.write('SL -51200\r\n')# slew at 5mm per second; raw con.write needs an explicit \r\n
while error_status == False:
error_status = self._check_reached_limit_switch()
self.write('SL 0') # stop the slew movement
sleep(0.1)
self.clear_error()
sleep(0.1)
self.set_pos_as_start()
def move_rel(self, linear_dist):
'''Tell the motor to move a linear distance as a relative position.'''
steps = self._calc_steps(linear_dist)
sleep(0.1)
self.write('MR %i' %steps, echk = True)
self._motor_stopped()
self._CurrentStep = self._get_current_step()
self.CurrentPos = float(self._calculate_pos(self._CurrentStep))
print "New Pos: %s " %self.codetools.ToSI(self.CurrentPos)
limit_flag = self._check_reached_limit_switch()
if limit_flag:
self.clear_error()
def
|
(self, arg, echk = False):
'''Write a command to the motor; the carriage return and line feed are appended
automatically, unlike the underlying pySerial connection, where you must send the \r\n yourself. '''
self.con.write("%s\r\n" %arg)
sleep(0.1)
list = self.con.readlines()
print list
if echk == True:
self._check_for_mcode_error()
def _set_var
|
write
|
identifier_name
|
motor_tools.py
|
_set_var() b/c there's not = sign used for this op
# sleep(0.1)
# self.write('PR EF')
# sleep(0.1)
# self.con.readlines()
def _set_step(self, step):
'''Set the number of steps that the motor uses for a full rotation. '''
if not isinstance(step,int):
print "Enter an integer only."
return
self._set_var('P',step)
self.CurrentPos = self._calculate_pos(step)
def _calculate_pos(self, steps):
'''Calculate the position of the motor using the conversion factors.
X steps * 1 rev/Y steps * 5 mm/1 rev '''
CurrentPos = steps * (1.0/(self._steps_per_rev[str(self.MicroStep)])) * self._meters_per_rev
'''position = X steps * 1 rev/Y steps * 5 mm/1 rev '''
return CurrentPos
def _get_ms(self):
'''Query the motor for the current microstep setting. '''
self.flush()
sleep(0.1)
self.con.write('PR MS\r\n')
sleep(0.1)
pat = '[0-9]+\r\n'
ms = self._loop_structure(pat)
return int(ms.strip('\n').strip('\r'))
def _get_current_step(self):
'''Return the current step or position 'P' of the motor. '''
self.flush()
self.con.write('PR P\r\n')# P is really the step
pat = '\-*[0-9]+\r\n'
output = self._loop_structure(pat)
return int(output.strip('\r\n'))
def _calc_steps(self,linear_dist):
'''How many steps it takes to get to a linear dist input.'''
steps = float(linear_dist)/self._meters_per_rev * float((self._steps_per_rev[str(self.MicroStep)]))
return int(round(steps))
def _motor_stopped(self):
'''Used as a poll of the location of a motor, so that the program can spit out the
new position after the motor has arrived. It also checks if a limit switch has been
tripped. '''
Flag = True
self.con.flushOutput()# readlines() flushes the output too
sleep(0.1)
while Flag:
self.con.write('PR MP\r\n') # flag for moving to position
sleep(0.5)
list = self.con.readlines()
if list[2].strip('\r\n') == '0':
Flag = False
def return_home(self):
'''Return the motor to home position, which is at the limit switch in the bottom left corner. '''
self.clear_error()
error_status = False
self.con.write('SL -51200\r\n')# slew at 5mm per second; raw con.write needs an explicit \r\n
while error_status == False:
error_status = self._check_reached_limit_switch()
self.write('SL 0') # stop the slew movement
sleep(0.1)
self.clear_error()
sleep(0.1)
self.set_pos_as_start()
def move_rel(self, linear_dist):
'''Tell the motor to move a linear distance as a relative position.'''
steps = self._calc_steps(linear_dist)
sleep(0.1)
self.write('MR %i' %steps, echk = True)
self._motor_stopped()
self._CurrentStep = self._get_current_step()
self.CurrentPos = float(self._calculate_pos(self._CurrentStep))
print "New Pos: %s " %self.codetools.ToSI(self.CurrentPos)
limit_flag = self._check_reached_limit_switch()
if limit_flag:
self.clear_error()
def write(self, arg, echk = False):
'''Write a command to the motor; the carriage return and line feed are appended
automatically, unlike the underlying pySerial connection, where you must send the \r\n yourself. '''
self.con.write("%s\r\n" %arg)
sleep(0.1)
list = self.con.readlines()
print list
if echk == True:
self._check_for_mcode_error()
def _set_var(self, var, val, echk = False):
string = '\r%(var)s=%(val)s\r' %{'var':var,'val':val}
self.con.write(string)
sleep(0.1)
if echk == True:
self._check_for_mcode_error()
sleep(0.1)
output = self.write('PR %s' %var)# mdrive echoes input
def set_micro_step(self, val, echk = True):
'''A microstep is a division of one full 2-pi rotation into smaller steps.
The default is 256, which is the finest resolution. There are choices in powers of two
and decimal amounts. '''
self._set_var('MS', str(val), True)
sleep(0.2)
self.MicroStep = self._get_ms()
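Each microstep setting divides the motor's 200 full steps per revolution, so every entry in the _steps_per_rev table is just 200 * MS; a quick consistency check:

for ms in (256, 128, 100, 10, 1):
    print(ms, 200 * ms)  # 256 -> 51200, 128 -> 25600, 100 -> 20000, 10 -> 2000, 1 -> 200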
def set_device_name(self, name, echk = False):
'''No longer used. All motor names are "!" '''
pat = '[A-Z]'
if not re.match(pat, name):
print "Names must be one Char only.\n"
return
self._set_var('DN','"%s"' %name)
sleep(0.2)
self.DeviceName = self._getDeviceName()
def flush(self):
'''Flush the input and output buffers of the motor. '''
self.con.flushInput()
sleep(0.1)
self.con.flushOutput()
sleep(0.1)
def _check_for_mcode_error(self):
self.con.flushOutput()
sleep(0.1)
self.con.write('PR EF\r\n')
sleep(0.1)
list = self.con.readlines() #output: ['PR EF\r\n', '\n', '1\r\n', '>']
if list[2].strip('\r\n') == '1':
self._get_error_code()
#pat = '.*\?.*'
#item = self._loop_structure(pat)
#if item is not "unknown":
# self._get_error_code()
def _get_error_code(self):
self.con.write('PR ER\r\n')
pat = '[1-9]+\r\n'
error_code = self._loop_structure(pat)
error_code = int(error_code.replace('\r\n',''))
if self.Error_codes.has_key(error_code):
print self.Error_codes[error_code]
else:
print "Error code %i was raised by the motor." %error_code
def _getDeviceName(self):
self.flush()
sleep(0.1)
self.con.write('PR DN\r\n')
sleep(0.2)
# the name returns like: '"Name"\r\n'
pat = '"[A-Z]"\r\n|"!"\r\n'
DeviceName = self._loop_structure(pat)
return DeviceName.strip('\n').strip('\r')
def _loop_structure(self, pat):
'''A helper used by all the queries in this class. The MDrive motor
echoes every sent command and returns values as a list of lines. This method
uses a regex to sift the desired information out of that list; the pattern
passed in is specific to the task at hand.'''
sleep(0.1)
re_obj = re.compile(pat)
readback = self.con.readlines()
for item in readback:
if re_obj.match(item) is not None:
return item
return "unknown"
def reset(self):
'''Flush the input and output buffer, clear errors and send Control C. '''
sleep(0.1)
self.flush()
sleep(0.1)
self.clear_error()
sleep(0.1)
print "Returning to factory defaults and rebooting motor.\n"
self.write('FD')
sleep(3)
self._send_control_C()
self._CurrentStep = 0
self.CurrentPos = 0
def _send_control_C(self):
self.write('\03')
sleep(2)
print self.con.readlines()
def close(self):
'''Flush the motors and close the serial connection. '''
self.flush()
sleep(0.1)
self.con.close()
still_open = self.con.isOpen()
if not still_open:
print "Motor port closed"
else:
print "Closing motor failed, still connected"
def open(self):
'''Open a connection to a motor. Uses pySerial instance called self.con.'''
#sleep(0.3)
self.con.open()
def set_pos_as_start(self):
'''Using the limit switches, the motor returns to home,
and that position is then set as the start.'''
self._set_var('P', 0, True)
self._CurrentStep = 0
self.CurrentPos = 0
def move_absolute(self, pos):
|
'''Move the motor to an absolute position wrt to the limit switch HOME. '''
steps = self._calc_steps(pos)
self.con.write('MA %i\r\n' %steps)
sleep(0.1)
self._motor_stopped()
self._CurrentStep = self._get_current_step()
self.CurrentPos = float(self._calculate_pos(self._CurrentStep))
|
identifier_body
|
|
parser.py
|
_text',
# 'regex', 'regex_re']
def __init__(self, rule_text):
self.raw_rule_text = rule_text
self.regex_re = None
rule_text = rule_text.strip()
self.is_comment = rule_text.startswith(('!', '[Adblock'))
if self.is_comment:
self.is_html_rule = self.is_exception = False
else:
self.is_html_rule = '##' in rule_text or '#@#' in rule_text # or rule_text.startswith('#')
self.is_exception = rule_text.startswith('@@') or '#@#' in rule_text
if self.is_exception and not self.is_html_rule:
rule_text = rule_text[2:]
if not self.is_comment and '$' in rule_text:
rule_text, options_text = rule_text.split('$', 1)
self.raw_options = self._split_options(options_text)
self.options = dict(self._parse_option(opt) for opt in self.raw_options)
else:
self.raw_options = []
self.options = {}
self._options_keys = frozenset(self.options.keys()) - set(['match-case'])
self.rule_text = rule_text
if self.is_comment:
self.regex = ''
elif self.is_html_rule:
url, selector = self.rule_text.split('#@#' if self.is_exception else '##')
self.regex = self.rule_to_regex(url) if url else ''
self.html_selector = selector
else:
self.regex = self.rule_to_regex(rule_text)
def match_url(self, url, options=None):
"""
Return whether this rule matches the URL.
What to do when a rule matches is up to the developer; most likely
the ``.is_exception`` attribute should be taken into account.
"""
options = options or {}
for optname in self.options:
if optname == 'match-case': # TODO
continue
if optname not in options:
raise ValueError("Rule requires option %s" % optname)
if optname == 'domain':
if not self._domain_matches(options['domain']):
return False
continue
if options[optname] != self.options[optname]:
return False
return self._url_matches(url)
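A usage sketch of match_url with an option-carrying rule; the URL and option values are illustrative:

rule = AdblockRule("||ads.example.com^$third-party")
rule.match_url("http://ads.example.com/banner.gif", {"third-party": True})   # True
rule.match_url("http://ads.example.com/banner.gif", {"third-party": False})  # False
rule.match_url("http://ads.example.com/banner.gif")  # raises ValueError: option not supplied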
def _domain_matches(self, domain):
domain_rules = self.options['domain']
for domain in _domain_variants(domain):
if domain in domain_rules:
return domain_rules[domain]
return not any(domain_rules.values())
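The final not any(...) fall-through means an unlisted domain matches only when every listed domain is negated, i.e. a rule with at least one positive domain never applies elsewhere. A sketch under that reading, with a hypothetical stand-in for _domain_variants:

def domain_variants(domain):
    # Hypothetical stand-in for _domain_variants: the domain plus each parent suffix.
    parts = domain.split('.')
    for i in range(len(parts) - 1):
        yield '.'.join(parts[i:])

def domain_matches(domain, domain_rules):
    for d in domain_variants(domain):
        if d in domain_rules:
            return domain_rules[d]
    return not any(domain_rules.values())

rules = {"example.com": True, "shop.example.com": False}
print(domain_matches("ads.example.com", rules))   # True: falls back to example.com
print(domain_matches("shop.example.com", rules))  # False: explicitly negated
print(domain_matches("other.org", rules))         # False: a positive domain is required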
def _url_matches(self, url):
if self.regex_re is None:
self.regex_re = re.compile(self.regex)
return bool(self.regex_re.search(url))
def matching_supported(self, options=None):
"""
Return whether this rule can return meaningful result,
given the `options` dict. If some options are missing,
then rule shouldn't be matched against, and this function
returns False.
No options:
>>> rule = AdblockRule("swf|")
>>> rule.matching_supported({})
True
Option is used in the rule, but its value is not available
at matching time:
>>> rule = AdblockRule("swf|$third-party")
>>> rule.matching_supported({})
False
Option is used in the rule, and option value is available
at matching time:
>>> rule = AdblockRule("swf|$third-party")
>>> rule.matching_supported({'domain': 'example.com', 'third-party': False})
True
Rule is a comment:
>>> rule = AdblockRule("!this is not a rule")
>>> rule.matching_supported({})
False
"""
if self.is_comment:
return False
if self.is_html_rule: # HTML rules are not supported yet
return False
options = options or {}
keys = set(options.keys())
if not keys.issuperset(self._options_keys):
# some of the required options are not given
return False
return True
@classmethod
def _split_options(cls, options_text):
return cls.OPTIONS_SPLIT_RE.split(options_text)
@classmethod
|
parts = domains.replace(',', '|').split('|')
return dict(cls._parse_option_negation(p) for p in parts)
@classmethod
def _parse_option_negation(cls, text):
return (text.lstrip('~'), not text.startswith('~'))
@classmethod
def _parse_option(cls, text):
if text.startswith("domain="):
return ("domain", cls._parse_domain_option(text))
return cls._parse_option_negation(text)
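For example, the option text domain=example.com|~sub.example.com parses into a dict mapping each domain to whether it is required (True) or excluded (False):

AdblockRule._parse_domain_option("domain=example.com|~sub.example.com")
# -> {'example.com': True, 'sub.example.com': False}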
@classmethod
def rule_to_regex(cls, rule):
"""
Convert AdBlock rule to a regular expression.
"""
if not rule:
raise ValueError("Invalid rule")
# return rule
# escape special regex characters
rule = re.sub(r"([.$+?{}()\[\]\\])", r"\\\1", rule)
# XXX: the resulting regex must use non-capturing groups (?:
# for performance reasons; also, there is a limit on the number
# of capturing groups, so avoiding them is what makes it possible
# to build a single regex out of several rules.
# Separator character ^ matches anything but a letter, a digit, or
# one of the following: _ - . %. The end of the address is also
# accepted as separator.
rule = rule.replace("^", "(?:[^\w\d_\-.%]|$)")
# * symbol
rule = rule.replace("*", ".*")
# | in the end means the end of the address
if rule[-1] == '|':
rule = rule[:-1] + '$'
# || in the beginning means beginning of the domain name
if rule[:2] == '||':
# XXX: it is better to use urlparse for such things,
# but urlparse doesn't give us a single regex.
# Regex is based on http://tools.ietf.org/html/rfc3986#appendix-B
if len(rule) > 2:
# | | complete part |
# | scheme | of the domain |
rule = r"^(?:[^:/?#]+:)?(?://(?:[^/?#]*\.)?)?" + rule[2:]
elif rule[0] == '|':
# | in the beginning means start of the address
rule = '^' + rule[1:]
# other | symbols should be escaped
# we have "|$" in our regexp - do not touch it
rule = re.sub("(\|)[^$]", r"\|", rule)
return rule
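Tracing the transformations above on a typical domain-anchored rule:

AdblockRule.rule_to_regex("||ads.example.com^")
# The dots are escaped, '^' expands to the separator class, and '||' anchors
# the domain after an optional scheme, yielding the pattern:
#   ^(?:[^:/?#]+:)?(?://(?:[^/?#]*\.)?)?ads\.example\.com(?:[^\w\d_\-.%]|$)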
def __repr__(self):
return "AdblockRule(%r)" % self.raw_rule_text
def __str__(self):
if self.is_html_rule:
return (('un-hide' if self.is_exception else 'hide')
+ ' elements matching CSS selector: {}'.format(self.html_selector))
elif self.is_comment:
return ('Comment: {}'.format(self.rule_text))
template = '{b_w}{options} requests{domains} to {url}'
domain_text = ''
if 'domain' in self.options:
for domain, status in self.options['domain'].items():
domain_text = domain_text + (' from ' if status else ' not from ') + domain
if self.options:
explanations = {
'object-subrequest': 'plugin (i.e. Flash)',
'subdocument': 'embedded page (iframe)',
'document': 'this page',
}
options_text = ''
for option, status in self.options.items():
if option == 'domain':
continue
else:
options_text = (options_text + ('not ' if not status else ' ') +
(explanations[option] if option in explanations else option))
else:
options_text = ' all'
url = ''.join([char for char in self.rule_text if char not in '@|^'])
entries = {
'b_w': 'whitelist' if self.is_exception else 'blacklist',
'options': options_text,
'domains': domain_text,
'url': url if url not in ['https://', 'http://'] else 'anywhere',
}
return template.format(**entries)
class AdblockRules(object):
"""
AdblockRules is a class for checking URLs against multiple AdBlock rules.
It is more efficient to use AdblockRules instead of creating AdblockRule
instances manually and checking them one-by-one because AdblockRules
optimizes some common cases.
"""
def __init__(self, rules, supported_options=None, skip_unsupported_rules=True,
use_re2='auto', max_mem=256*1024*1024, rule_cls=AdblockRule):
if supported_options is None:
self.supported_options = rule_cls.BINARY_OPTIONS + ['domain']
else:
self.supported_options = supported_options
self.uses_re2 = _is_re2_supported() if use_re2 == 'auto' else use_re2
self.re2_max_mem = max_mem
self.rule_cls = rule_cls
self.skip_unsupported_rules = skip_unsupported_rules
_params = dict((opt, True) for opt in self.supported_options)
self.rules = [
r for r in (
r if isinstance(r, rule_cls) else rule_cls(r)
for r in rules
|
def _parse_domain_option(cls, text):
domains = text[len('domain='):]
|
random_line_split
|
parser.py
|
_text',
# 'regex', 'regex_re']
def __init__(self, rule_text):
self.raw_rule_text = rule_text
self.regex_re = None
rule_text = rule_text.strip()
self.is_comment = rule_text.startswith(('!', '[Adblock'))
if self.is_comment:
self.is_html_rule = self.is_exception = False
else:
self.is_html_rule = '##' in rule_text or '#@#' in rule_text # or rule_text.startswith('#')
self.is_exception = rule_text.startswith('@@') or '#@#' in rule_text
if self.is_exception and not self.is_html_rule:
rule_text = rule_text[2:]
if not self.is_comment and '$' in rule_text:
rule_text, options_text = rule_text.split('$', 1)
self.raw_options = self._split_options(options_text)
self.options = dict(self._parse_option(opt) for opt in self.raw_options)
else:
self.raw_options = []
self.options = {}
self._options_keys = frozenset(self.options.keys()) - set(['match-case'])
self.rule_text = rule_text
if self.is_comment:
self.regex = ''
elif self.is_html_rule:
url, selector = self.rule_text.split('#@#' if self.is_exception else '##')
self.regex = self.rule_to_regex(url) if url else ''
self.html_selector = selector
else:
self.regex = self.rule_to_regex(rule_text)
def match_url(self, url, options=None):
"""
Return whether this rule matches the URL.
What to do when a rule matches is up to the developer; most likely
the ``.is_exception`` attribute should be taken into account.
"""
options = options or {}
for optname in self.options:
if optname == 'match-case': # TODO
continue
if optname not in options:
raise ValueError("Rule requires option %s" % optname)
if optname == 'domain':
if not self._domain_matches(options['domain']):
return False
continue
if options[optname] != self.options[optname]:
return False
return self._url_matches(url)
def _domain_matches(self, domain):
domain_rules = self.options['domain']
for domain in _domain_variants(domain):
if domain in domain_rules:
return domain_rules[domain]
return not any(domain_rules.values())
def _url_matches(self, url):
if self.regex_re is None:
self.regex_re = re.compile(self.regex)
return bool(self.regex_re.search(url))
def matching_supported(self, options=None):
"""
Return whether this rule can return meaningful result,
given the `options` dict. If some options are missing,
then rule shouldn't be matched against, and this function
returns False.
No options:
>>> rule = AdblockRule("swf|")
>>> rule.matching_supported({})
True
Option is used in the rule, but its value is not available
at matching time:
>>> rule = AdblockRule("swf|$third-party")
>>> rule.matching_supported({})
False
Option is used in the rule, and option value is available
at matching time:
>>> rule = AdblockRule("swf|$third-party")
>>> rule.matching_supported({'domain': 'example.com', 'third-party': False})
True
Rule is a comment:
>>> rule = AdblockRule("!this is not a rule")
>>> rule.matching_supported({})
False
"""
if self.is_comment:
return False
if self.is_html_rule: # HTML rules are not supported yet
return False
options = options or {}
keys = set(options.keys())
if not keys.issuperset(self._options_keys):
# some of the required options are not given
return False
return True
@classmethod
def _split_options(cls, options_text):
return cls.OPTIONS_SPLIT_RE.split(options_text)
@classmethod
def _parse_domain_option(cls, text):
domains = text[len('domain='):]
parts = domains.replace(',', '|').split('|')
return dict(cls._parse_option_negation(p) for p in parts)
@classmethod
def _parse_option_negation(cls, text):
return (text.lstrip('~'), not text.startswith('~'))
@classmethod
def
|
(cls, text):
if text.startswith("domain="):
return ("domain", cls._parse_domain_option(text))
return cls._parse_option_negation(text)
@classmethod
def rule_to_regex(cls, rule):
"""
Convert AdBlock rule to a regular expression.
"""
if not rule:
raise ValueError("Invalid rule")
# return rule
# escape special regex characters
rule = re.sub(r"([.$+?{}()\[\]\\])", r"\\\1", rule)
# XXX: the resulting regex must use non-capturing groups (?:
# for performance reasons; also, there is a limit on the number
# of capturing groups, so avoiding them is what makes it possible
# to build a single regex out of several rules.
# Separator character ^ matches anything but a letter, a digit, or
# one of the following: _ - . %. The end of the address is also
# accepted as separator.
rule = rule.replace("^", "(?:[^\w\d_\-.%]|$)")
# * symbol
rule = rule.replace("*", ".*")
# | in the end means the end of the address
if rule[-1] == '|':
rule = rule[:-1] + '$'
# || in the beginning means beginning of the domain name
if rule[:2] == '||':
# XXX: it is better to use urlparse for such things,
# but urlparse doesn't give us a single regex.
# Regex is based on http://tools.ietf.org/html/rfc3986#appendix-B
if len(rule) > 2:
# | | complete part |
# | scheme | of the domain |
rule = r"^(?:[^:/?#]+:)?(?://(?:[^/?#]*\.)?)?" + rule[2:]
elif rule[0] == '|':
# | in the beginning means start of the address
rule = '^' + rule[1:]
# other | symbols should be escaped
# we have "|$" in our regexp - do not touch it
rule = re.sub("(\|)[^$]", r"\|", rule)
return rule
def __repr__(self):
return "AdblockRule(%r)" % self.raw_rule_text
def __str__(self):
if self.is_html_rule:
return (('un-hide' if self.is_exception else 'hide')
+ ' elements matching CSS selector: {}'.format(self.html_selector))
elif self.is_comment:
return ('Comment: {}'.format(self.rule_text))
template = '{b_w}{options} requests{domains} to {url}'
domain_text = ''
if 'domain' in self.options:
for domain, status in self.options['domain'].items():
domain_text = domain_text + (' from ' if status else ' not from ') + domain
if self.options:
explanations = {
'object-subrequest': 'plugin (i.e. Flash)',
'subdocument': 'embedded page (iframe)',
'document': 'this page',
}
options_text = ''
for option, status in self.options.items():
if option == 'domain':
continue
else:
options_text = (options_text + ('not ' if not status else ' ') +
(explanations[option] if option in explanations else option))
else:
options_text = ' all'
url = ''.join([char for char in self.rule_text if char not in '@|^'])
entries = {
'b_w': 'whitelist' if self.is_exception else 'blacklist',
'options': options_text,
'domains': domain_text,
'url': url if url not in ['https://', 'http://'] else 'anywhere',
}
return template.format(**entries)
class AdblockRules(object):
"""
AdblockRules is a class for checking URLs against multiple AdBlock rules.
It is more efficient to use AdblockRules instead of creating AdblockRule
instances manually and checking them one-by-one because AdblockRules
optimizes some common cases.
"""
def __init__(self, rules, supported_options=None, skip_unsupported_rules=True,
use_re2='auto', max_mem=256*1024*1024, rule_cls=AdblockRule):
if supported_options is None:
self.supported_options = rule_cls.BINARY_OPTIONS + ['domain']
else:
self.supported_options = supported_options
self.uses_re2 = _is_re2_supported() if use_re2 == 'auto' else use_re2
self.re2_max_mem = max_mem
self.rule_cls = rule_cls
self.skip_unsupported_rules = skip_unsupported_rules
_params = dict((opt, True) for opt in self.supported_options)
self.rules = [
r for r in (
r if isinstance(r, rule_cls) else rule_cls(r)
for r in
|
_parse_option
|
identifier_name
|
parser.py
|
_text',
# 'regex', 'regex_re']
def __init__(self, rule_text):
self.raw_rule_text = rule_text
self.regex_re = None
rule_text = rule_text.strip()
self.is_comment = rule_text.startswith(('!', '[Adblock'))
if self.is_comment:
self.is_html_rule = self.is_exception = False
else:
self.is_html_rule = '##' in rule_text or '#@#' in rule_text # or rule_text.startswith('#')
self.is_exception = rule_text.startswith('@@') or '#@#' in rule_text
if self.is_exception and not self.is_html_rule:
rule_text = rule_text[2:]
if not self.is_comment and '$' in rule_text:
rule_text, options_text = rule_text.split('$', 1)
self.raw_options = self._split_options(options_text)
self.options = dict(self._parse_option(opt) for opt in self.raw_options)
else:
self.raw_options = []
self.options = {}
self._options_keys = frozenset(self.options.keys()) - set(['match-case'])
self.rule_text = rule_text
if self.is_comment:
self.regex = ''
elif self.is_html_rule:
url, selector = self.rule_text.split('#@#' if self.is_exception else '##')
self.regex = self.rule_to_regex(url) if url else ''
self.html_selector = selector
else:
self.regex = self.rule_to_regex(rule_text)
def match_url(self, url, options=None):
"""
Return whether this rule matches the URL.
What to do when a rule matches is up to the developer; most likely
the ``.is_exception`` attribute should be taken into account.
"""
options = options or {}
for optname in self.options:
if optname == 'match-case': # TODO
continue
if optname not in options:
raise ValueError("Rule requires option %s" % optname)
if optname == 'domain':
if not self._domain_matches(options['domain']):
return False
continue
if options[optname] != self.options[optname]:
return False
return self._url_matches(url)
def _domain_matches(self, domain):
domain_rules = self.options['domain']
for domain in _domain_variants(domain):
if domain in domain_rules:
return domain_rules[domain]
return not any(domain_rules.values())
def _url_matches(self, url):
if self.regex_re is None:
self.regex_re = re.compile(self.regex)
return bool(self.regex_re.search(url))
def matching_supported(self, options=None):
|
>>> rule.matching_supported({'domain': 'example.com', 'third-party': False})
True
Rule is a comment:
>>> rule = AdblockRule("!this is not a rule")
>>> rule.matching_supported({})
False
"""
if self.is_comment:
return False
if self.is_html_rule: # HTML rules are not supported yet
return False
options = options or {}
keys = set(options.keys())
if not keys.issuperset(self._options_keys):
# some of the required options are not given
return False
return True
@classmethod
def _split_options(cls, options_text):
return cls.OPTIONS_SPLIT_RE.split(options_text)
@classmethod
def _parse_domain_option(cls, text):
domains = text[len('domain='):]
parts = domains.replace(',', '|').split('|')
return dict(cls._parse_option_negation(p) for p in parts)
@classmethod
def _parse_option_negation(cls, text):
return (text.lstrip('~'), not text.startswith('~'))
@classmethod
def _parse_option(cls, text):
if text.startswith("domain="):
return ("domain", cls._parse_domain_option(text))
return cls._parse_option_negation(text)
@classmethod
def rule_to_regex(cls, rule):
"""
Convert AdBlock rule to a regular expression.
"""
if not rule:
raise ValueError("Invalid rule")
# return rule
# escape special regex characters
rule = re.sub(r"([.$+?{}()\[\]\\])", r"\\\1", rule)
# XXX: the resulting regex must use non-capturing groups (?:
# for performance reasons; also, there is a limit on the number
# of capturing groups, so avoiding them is what makes it possible
# to build a single regex out of several rules.
# Separator character ^ matches anything but a letter, a digit, or
# one of the following: _ - . %. The end of the address is also
# accepted as separator.
rule = rule.replace("^", "(?:[^\w\d_\-.%]|$)")
# * symbol
rule = rule.replace("*", ".*")
# | in the end means the end of the address
if rule[-1] == '|':
rule = rule[:-1] + '$'
# || in the beginning means beginning of the domain name
if rule[:2] == '||':
# XXX: it is better to use urlparse for such things,
# but urlparse doesn't give us a single regex.
# Regex is based on http://tools.ietf.org/html/rfc3986#appendix-B
if len(rule) > 2:
# | | complete part |
# | scheme | of the domain |
rule = r"^(?:[^:/?#]+:)?(?://(?:[^/?#]*\.)?)?" + rule[2:]
elif rule[0] == '|':
# | in the beginning means start of the address
rule = '^' + rule[1:]
# other | symbols should be escaped
# we have "|$" in our regexp - do not touch it
rule = re.sub("(\|)[^$]", r"\|", rule)
return rule
def __repr__(self):
return "AdblockRule(%r)" % self.raw_rule_text
def __str__(self):
if self.is_html_rule:
return (('un-hide' if self.is_exception else 'hide')
+ ' elements matching CSS selector: {}'.format(self.html_selector))
elif self.is_comment:
return ('Comment: {}'.format(self.rule_text))
template = '{b_w}{options} requests{domains} to {url}'
domain_text = ''
if 'domain' in self.options:
for domain, status in self.options['domain'].items():
domain_text = domain_text + (' from ' if status else ' not from ') + domain
if self.options:
explanations = {
'object-subrequest': 'plugin (i.e. Flash)',
'subdocument': 'embedded page (iframe)',
'document': 'this page',
}
options_text = ''
for option, status in self.options.items():
if option == 'domain':
continue
else:
options_text = (options_text + ('not ' if not status else ' ') +
(explanations[option] if option in explanations else option))
else:
options_text = ' all'
url = ''.join([char for char in self.rule_text if char not in '@|^'])
entries = {
'b_w': 'whitelist' if self.is_exception else 'blacklist',
'options': options_text,
'domains': domain_text,
'url': url if url not in ['https://', 'http://'] else 'anywhere',
}
return template.format(**entries)
class AdblockRules(object):
"""
AdblockRules is a class for checking URLs against multiple AdBlock rules.
It is more efficient to use AdblockRules instead of creating AdblockRule
instances manually and checking them one-by-one because AdblockRules
optimizes some common cases.
"""
def __init__(self, rules, supported_options=None, skip_unsupported_rules=True,
use_re2='auto', max_mem=256*1024*1024, rule_cls=AdblockRule):
if supported_options is None:
self.supported_options = rule_cls.BINARY_OPTIONS + ['domain']
else:
self.supported_options = supported_options
self.uses_re2 = _is_re2_supported() if use_re2 == 'auto' else use_re2
self.re2_max_mem = max_mem
self.rule_cls = rule_cls
self.skip_unsupported_rules = skip_unsupported_rules
_params = dict((opt, True) for opt in self.supported_options)
self.rules = [
r for r in (
r if isinstance(r, rule_cls) else rule_cls(r)
for r in
|
"""
Return whether this rule can return meaningful result,
given the `options` dict. If some options are missing,
then rule shouldn't be matched against, and this function
returns False.
No options:
>>> rule = AdblockRule("swf|")
>>> rule.matching_supported({})
True
Option is used in the rule, but its value is not available
at matching time:
>>> rule = AdblockRule("swf|$third-party")
>>> rule.matching_supported({})
False
Option is used in the rule, and option value is available
at matching time:
>>> rule = AdblockRule("swf|$third-party")
|
identifier_body
|
parser.py
|
:
if optname == 'match-case': # TODO
continue
if optname not in options:
raise ValueError("Rule requires option %s" % optname)
if optname == 'domain':
if not self._domain_matches(options['domain']):
return False
continue
if options[optname] != self.options[optname]:
return False
return self._url_matches(url)
def _domain_matches(self, domain):
domain_rules = self.options['domain']
for domain in _domain_variants(domain):
if domain in domain_rules:
return domain_rules[domain]
return not any(domain_rules.values())
def _url_matches(self, url):
if self.regex_re is None:
self.regex_re = re.compile(self.regex)
return bool(self.regex_re.search(url))
def matching_supported(self, options=None):
"""
Return whether this rule can return meaningful result,
given the `options` dict. If some options are missing,
then rule shouldn't be matched against, and this function
returns False.
No options:
>>> rule = AdblockRule("swf|")
>>> rule.matching_supported({})
True
Option is used in the rule, but its value is not available
at matching time:
>>> rule = AdblockRule("swf|$third-party")
>>> rule.matching_supported({})
False
Option is used in the rule, and option value is available
at matching time:
>>> rule = AdblockRule("swf|$third-party")
>>> rule.matching_supported({'domain': 'example.com', 'third-party': False})
True
Rule is a comment:
>>> rule = AdblockRule("!this is not a rule")
>>> rule.matching_supported({})
False
"""
if self.is_comment:
return False
if self.is_html_rule: # HTML rules are not supported yet
return False
options = options or {}
keys = set(options.keys())
if not keys.issuperset(self._options_keys):
# some of the required options are not given
return False
return True
@classmethod
def _split_options(cls, options_text):
return cls.OPTIONS_SPLIT_RE.split(options_text)
@classmethod
def _parse_domain_option(cls, text):
domains = text[len('domain='):]
parts = domains.replace(',', '|').split('|')
return dict(cls._parse_option_negation(p) for p in parts)
@classmethod
def _parse_option_negation(cls, text):
return (text.lstrip('~'), not text.startswith('~'))
@classmethod
def _parse_option(cls, text):
if text.startswith("domain="):
return ("domain", cls._parse_domain_option(text))
return cls._parse_option_negation(text)
@classmethod
def rule_to_regex(cls, rule):
"""
Convert AdBlock rule to a regular expression.
"""
if not rule:
raise ValueError("Invalid rule")
# return rule
# escape special regex characters
rule = re.sub(r"([.$+?{}()\[\]\\])", r"\\\1", rule)
# XXX: the resulting regex must use non-capturing groups (?:
# for performance reasons; also, there is a limit on the number
# of capturing groups, so avoiding them is what makes it possible
# to build a single regex out of several rules.
# Separator character ^ matches anything but a letter, a digit, or
# one of the following: _ - . %. The end of the address is also
# accepted as separator.
rule = rule.replace("^", "(?:[^\w\d_\-.%]|$)")
# * symbol
rule = rule.replace("*", ".*")
# | in the end means the end of the address
if rule[-1] == '|':
rule = rule[:-1] + '$'
# || in the beginning means beginning of the domain name
if rule[:2] == '||':
# XXX: it is better to use urlparse for such things,
# but urlparse doesn't give us a single regex.
# Regex is based on http://tools.ietf.org/html/rfc3986#appendix-B
if len(rule) > 2:
# | | complete part |
# | scheme | of the domain |
rule = r"^(?:[^:/?#]+:)?(?://(?:[^/?#]*\.)?)?" + rule[2:]
elif rule[0] == '|':
# | in the beginning means start of the address
rule = '^' + rule[1:]
# other | symbols should be escaped
# we have "|$" in our regexp - do not touch it
rule = re.sub("(\|)[^$]", r"\|", rule)
return rule
def __repr__(self):
return "AdblockRule(%r)" % self.raw_rule_text
def __str__(self):
if self.is_html_rule:
return (('un-hide' if self.is_exception else 'hide')
+ ' elements matching CSS selector: {}'.format(self.html_selector))
elif self.is_comment:
return ('Comment: {}'.format(self.rule_text))
template = '{b_w}{options} requests{domains} to {url}'
domain_text = ''
if 'domain' in self.options:
for domain, status in self.options['domain'].items():
domain_text = domain_text + (' from ' if status else ' not from ') + domain
if self.options:
explanations = {
'object-subrequest': 'plugin (i.e. Flash)',
'subdocument': 'embedded page (iframe)',
'document': 'this page',
}
options_text = ''
for option, status in self.options.items():
if option == 'domain':
continue
else:
options_text = (options_text + ('not ' if not status else ' ') +
(explanations[option] if option in explanations else option))
else:
options_text = ' all'
url = ''.join([char for char in self.rule_text if char not in '@|^'])
entries = {
'b_w': 'whitelist' if self.is_exception else 'blacklist',
'options': options_text,
'domains': domain_text,
'url': url if url not in ['https://', 'http://'] else 'anywhere',
}
return template.format(**entries)
class AdblockRules(object):
"""
AdblockRules is a class for checking URLs against multiple AdBlock rules.
It is more efficient to use AdblockRules instead of creating AdblockRule
instances manually and checking them one-by-one because AdblockRules
optimizes some common cases.
"""
def __init__(self, rules, supported_options=None, skip_unsupported_rules=True,
use_re2='auto', max_mem=256*1024*1024, rule_cls=AdblockRule):
if supported_options is None:
self.supported_options = rule_cls.BINARY_OPTIONS + ['domain']
else:
self.supported_options = supported_options
self.uses_re2 = _is_re2_supported() if use_re2 == 'auto' else use_re2
self.re2_max_mem = max_mem
self.rule_cls = rule_cls
self.skip_unsupported_rules = skip_unsupported_rules
_params = dict((opt, True) for opt in self.supported_options)
self.rules = [
r for r in (
r if isinstance(r, rule_cls) else rule_cls(r)
for r in rules
)
if r.regex and r.matching_supported(_params)
]
# "advanced" rules are rules with options,
# "basic" rules are rules without options
advanced_rules, basic_rules = split_data(self.rules, lambda r: r.options)
# Rules with domain option are handled separately:
# if the user passes a domain we can discard all rules which
# require another domain. So we build an index:
# {domain: [rules_which_require_it]}, and only check
# rules which require our domain. Rules that don't require any
# domain are kept in a separate, always-checked group.
# TODO: what about ~rules? Should we match them earlier?
domain_required_rules, non_domain_rules = split_data(
advanced_rules,
lambda r: (
'domain' in r.options
and any(r.options["domain"].values())
)
)
# split rules into blacklists and whitelists
self.blacklist, self.whitelist = self._split_bw(basic_rules)
_combined = partial(_combined_regex, use_re2=self.uses_re2, max_mem=max_mem)
self.blacklist_re = _combined([r.regex for r in self.blacklist])
self.whitelist_re = _combined([r.regex for r in self.whitelist])
self.blacklist_with_options, self.whitelist_with_options = \
self._split_bw(non_domain_rules)
self.blacklist_require_domain, self.whitelist_require_domain = \
self._split_bw_domain(domain_required_rules)
def should_block(self, url, options=None):
# TODO: group rules with similar options and match them in bigger steps
options = options or {}
if self._is_whitelisted(url, options):
return False
if self._is_blacklisted(url, options):
|
return True
|
conditional_block
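A usage sketch of the two-phase check above (whitelist first, then blacklist), consistent with the classes as defined:

rules = AdblockRules([
    "||ads.example.com^",            # blacklist
    "@@||ads.example.com/allowed",   # whitelist exception
])
print(rules.should_block("http://ads.example.com/banner.gif"))  # True: blacklisted
print(rules.should_block("http://ads.example.com/allowed/x"))   # False: whitelisted first
print(rules.should_block("http://example.com/index.html"))      # False: no rule matches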
|
|
lookup_ref_delta_objects.rs
|
use std::convert::TryInto;
use gix_hash::ObjectId;
use crate::data::{entry::Header, input};
/// An iterator to resolve thin packs on the fly.
pub struct LookupRefDeltaObjectsIter<I, LFn> {
/// The inner iterator whose entries we will resolve.
pub inner: I,
lookup: LFn,
/// The cached delta to provide next time we are called, it's the delta to go with the base we just resolved in its place.
next_delta: Option<input::Entry>,
/// Fuse to stop iteration after first missing object.
error: bool,
/// The overall pack-offset we accumulated thus far. Each inserted entry offsets all following
/// objects by its length. We need to determine exactly where the object was inserted to see if it's affected at all.
inserted_entry_length_at_offset: Vec<Change>,
/// The sum of all entries added so far, as a cache to avoid recomputation
inserted_entries_length_in_bytes: i64,
buf: Vec<u8>,
}
impl<I, LFn> LookupRefDeltaObjectsIter<I, LFn>
where
I: Iterator<Item = Result<input::Entry, input::Error>>,
LFn: for<'a> FnMut(ObjectId, &'a mut Vec<u8>) -> Option<gix_object::Data<'a>>,
{
/// Create a new instance wrapping `iter` and using `lookup` as function to retrieve objects that will serve as bases
/// for ref deltas seen while traversing `iter`.
pub fn new(iter: I, lookup: LFn) -> Self {
LookupRefDeltaObjectsIter {
inner: iter,
lookup,
error: false,
inserted_entry_length_at_offset: Vec::new(),
inserted_entries_length_in_bytes: 0,
next_delta: None,
buf: Vec::new(),
}
}
fn shifted_pack_offset(&self, pack_offset: u64) -> u64 {
let new_ofs = pack_offset as i64 + self.inserted_entries_length_in_bytes;
new_ofs.try_into().expect("offset value is never becomes negative")
}
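An illustrative Python model of the same bookkeeping (not this crate's API): every insertion adds its byte length to a running total, and any later pack offset is shifted by that total:

class OffsetShifter(object):
    def __init__(self):
        self.inserted_bytes = 0  # mirrors inserted_entries_length_in_bytes

    def track_insertion(self, size):
        self.inserted_bytes += size

    def shifted(self, pack_offset):
        new_ofs = pack_offset + self.inserted_bytes
        assert new_ofs >= 0, "offset never becomes negative"
        return new_ofs

s = OffsetShifter()
s.track_insertion(120)  # a resolved ref-delta base, 120 bytes, spliced into the stream
print(s.shifted(1000))  # 1120: every entry after the insertion is pushed back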
/// Positive `size_change` values mean an object grew or, more commonly, was inserted. Negative values
/// mean the object shrank, usually because its header changed from ref-delta to ofs-delta.
fn track_change(
&mut self,
shifted_pack_offset: u64,
pack_offset: u64,
size_change: i64,
oid: impl Into<Option<ObjectId>>,
) {
if size_change == 0 {
return;
}
self.inserted_entry_length_at_offset.push(Change {
shifted_pack_offset,
pack_offset,
size_change_in_bytes: size_change,
oid: oid.into().unwrap_or_else(||
// NOTE: this value acts as sentinel and the actual hash kind doesn't matter.
gix_hash::Kind::Sha1.null()),
});
self.inserted_entries_length_in_bytes += size_change;
}
fn shift_entry_and_point_to_base_by_offset(&mut self, entry: &mut input::Entry, base_distance: u64) {
let pack_offset = entry.pack_offset;
entry.pack_offset = self.shifted_pack_offset(pack_offset);
entry.header = Header::OfsDelta { base_distance };
let previous_header_size = entry.header_size;
entry.header_size = entry.header.size(entry.decompressed_size) as u16;
let change = entry.header_size as i64 - previous_header_size as i64;
entry.crc32 = Some(entry.compute_crc32());
self.track_change(entry.pack_offset, pack_offset, change, None);
}
}
impl<I, LFn> Iterator for LookupRefDeltaObjectsIter<I, LFn>
where
I: Iterator<Item = Result<input::Entry, input::Error>>,
LFn: for<'a> FnMut(ObjectId, &'a mut Vec<u8>) -> Option<gix_object::Data<'a>>,
{
type Item = Result<input::Entry, input::Error>;
fn next(&mut self) -> Option<Self::Item> {
if self.error {
return None;
}
if let Some(delta) = self.next_delta.take() {
return Some(Ok(delta));
}
match self.inner.next() {
Some(Ok(mut entry)) => match entry.header {
Header::RefDelta { base_id } => {
match self.inserted_entry_length_at_offset.iter().rfind(|e| e.oid == base_id) {
None => {
let base_entry = match (self.lookup)(base_id, &mut self.buf) {
Some(obj) => {
let current_pack_offset = entry.pack_offset;
let mut entry = match input::Entry::from_data_obj(&obj, 0) {
Ok(e) => e,
Err(err) => return Some(Err(err)),
};
entry.pack_offset = self.shifted_pack_offset(current_pack_offset);
self.track_change(
entry.pack_offset,
current_pack_offset,
entry.bytes_in_pack() as i64,
base_id,
);
entry
}
None => {
self.error = true;
return Some(Err(input::Error::NotFound { object_id: base_id }));
}
};
{
self.shift_entry_and_point_to_base_by_offset(&mut entry, base_entry.bytes_in_pack());
self.next_delta = Some(entry);
}
Some(Ok(base_entry))
}
|
Some(Ok(entry))
}
}
}
_ => {
if self.inserted_entries_length_in_bytes != 0 {
if let Header::OfsDelta { base_distance } = entry.header {
// We have to find the new distance based on the previous distance to the base, using the absolute
// pack offset computed from it as stored in `base_pack_offset`.
let base_pack_offset = entry
.pack_offset
.checked_sub(base_distance)
.expect("distance to be in range of pack");
match self
.inserted_entry_length_at_offset
.binary_search_by_key(&base_pack_offset, |c| c.pack_offset)
{
Ok(index) => {
let index = {
let maybe_index_of_actual_entry = index + 1;
self.inserted_entry_length_at_offset
.get(maybe_index_of_actual_entry)
.and_then(|c| {
(c.pack_offset == base_pack_offset)
.then_some(maybe_index_of_actual_entry)
})
.unwrap_or(index)
};
let new_distance = self
.shifted_pack_offset(entry.pack_offset)
.checked_sub(self.inserted_entry_length_at_offset[index].shifted_pack_offset)
.expect("a base that is behind us in the pack");
self.shift_entry_and_point_to_base_by_offset(&mut entry, new_distance);
}
Err(index) => {
let change_since_offset = self.inserted_entry_length_at_offset[index..]
.iter()
.map(|c| c.size_change_in_bytes)
.sum::<i64>();
let new_distance: u64 = {
(base_distance as i64 + change_since_offset)
.try_into()
.expect("it still points behind us")
};
self.shift_entry_and_point_to_base_by_offset(&mut entry, new_distance);
}
}
} else {
// Offset this entry by all changes (positive or negative) that we saw thus far.
entry.pack_offset = self.shifted_pack_offset(entry.pack_offset);
}
}
Some(Ok(entry))
}
},
other => other,
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
let (min, max) = self.inner.size_hint();
max.map_or_else(|| (min * 2, None), |max| (min, Some(max * 2)))
}
}
#[derive(Debug)]
struct Change {
    /// The original pack offset as mentioned in the entry we saw. It is used to find this entry as a base object when
    /// deltas refer to it by its old offset.
pack_offset: u64,
/// The new pack offset that is the shifted location of the pack entry in the pack.
shifted_pack_offset: u64,
/// The size change of the entry header, negative values denote shrinking, positive denote growing.
size_change_in_bytes: i64,
/// The object id of the entry responsible for the change, or null if it's an entry just for tracking an insertion.
oid: ObjectId,
}
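// A hedged usage sketch (not from the original file): resolving a thin pack by
// wrapping an entry iterator with a HashMap-backed lookup. The map contents and
// the `resolve_thin_pack` helper are hypothetical; only `LookupRefDeltaObjectsIter::new`
// and the `gix_object::Data { kind, data }` shape come from the code above.
#[cfg(test)]
mod usage_sketch {
    use std::collections::HashMap;

    use gix_hash::ObjectId;

    use super::LookupRefDeltaObjectsIter;
    use crate::data::input;

    #[allow(dead_code)]
    fn resolve_thin_pack(
        entries: impl Iterator<Item = Result<input::Entry, input::Error>>,
        known: HashMap<ObjectId, (gix_object::Kind, Vec<u8>)>,
    ) -> impl Iterator<Item = Result<input::Entry, input::Error>> {
        LookupRefDeltaObjectsIter::new(entries, move |id, buf: &mut Vec<u8>| {
            // Copy the known object into the caller-provided buffer and hand out
            // a view of it; returning `None` makes the iterator emit `Error::NotFound`.
            let (kind, data) = known.get(&id)?;
            buf.clear();
            buf.extend_from_slice(data);
            Some(gix_object::Data {
                kind: *kind,
                data: buf.as_slice(),
            })
        })
    }
}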
|
Some(base_entry) => {
let base_distance =
self.shifted_pack_offset(entry.pack_offset) - base_entry.shifted_pack_offset;
self.shift_entry_and_point_to_base_by_offset(&mut entry, base_distance);
|
random_line_split
|
lookup_ref_delta_objects.rs
|
use std::convert::TryInto;
use gix_hash::ObjectId;
use crate::data::{entry::Header, input};
/// An iterator to resolve thin packs on the fly.
pub struct LookupRefDeltaObjectsIter<I, LFn> {
/// The inner iterator whose entries we will resolve.
pub inner: I,
lookup: LFn,
    /// The cached delta to provide the next time we are called; it's the delta that goes with the base we just resolved in its place.
next_delta: Option<input::Entry>,
/// Fuse to stop iteration after first missing object.
error: bool,
    /// The pack-offset changes accumulated thus far. Each inserted entry shifts all following
    /// objects by its length, so we need to know exactly where an object was inserted to see if it's affected at all.
inserted_entry_length_at_offset: Vec<Change>,
    /// The summed size change of all entries added so far, cached to avoid recomputation.
inserted_entries_length_in_bytes: i64,
buf: Vec<u8>,
}
impl<I, LFn> LookupRefDeltaObjectsIter<I, LFn>
where
I: Iterator<Item = Result<input::Entry, input::Error>>,
LFn: for<'a> FnMut(ObjectId, &'a mut Vec<u8>) -> Option<gix_object::Data<'a>>,
{
    /// Create a new instance wrapping `iter`, using `lookup` as the function to retrieve objects that serve as bases
    /// for the ref-deltas encountered while traversing `iter`.
pub fn new(iter: I, lookup: LFn) -> Self {
LookupRefDeltaObjectsIter {
inner: iter,
lookup,
error: false,
inserted_entry_length_at_offset: Vec::new(),
inserted_entries_length_in_bytes: 0,
next_delta: None,
buf: Vec::new(),
}
}
fn shifted_pack_offset(&self, pack_offset: u64) -> u64 {
let new_ofs = pack_offset as i64 + self.inserted_entries_length_in_bytes;
        new_ofs.try_into().expect("offset value never becomes negative")
}
    /// Positive `size_change` values mean an object grew or, more commonly, was inserted. Negative values
    /// mean the object shrank, usually because its header changed from a ref-delta to an ofs-delta.
fn track_change(
&mut self,
shifted_pack_offset: u64,
pack_offset: u64,
size_change: i64,
oid: impl Into<Option<ObjectId>>,
) {
if size_change == 0 {
return;
}
self.inserted_entry_length_at_offset.push(Change {
shifted_pack_offset,
pack_offset,
size_change_in_bytes: size_change,
oid: oid.into().unwrap_or_else(||
// NOTE: this value acts as sentinel and the actual hash kind doesn't matter.
gix_hash::Kind::Sha1.null()),
});
self.inserted_entries_length_in_bytes += size_change;
}
fn shift_entry_and_point_to_base_by_offset(&mut self, entry: &mut input::Entry, base_distance: u64) {
let pack_offset = entry.pack_offset;
entry.pack_offset = self.shifted_pack_offset(pack_offset);
entry.header = Header::OfsDelta { base_distance };
let previous_header_size = entry.header_size;
entry.header_size = entry.header.size(entry.decompressed_size) as u16;
let change = entry.header_size as i64 - previous_header_size as i64;
entry.crc32 = Some(entry.compute_crc32());
self.track_change(entry.pack_offset, pack_offset, change, None);
}
}
impl<I, LFn> Iterator for LookupRefDeltaObjectsIter<I, LFn>
where
I: Iterator<Item = Result<input::Entry, input::Error>>,
LFn: for<'a> FnMut(ObjectId, &'a mut Vec<u8>) -> Option<gix_object::Data<'a>>,
{
type Item = Result<input::Entry, input::Error>;
fn next(&mut self) -> Option<Self::Item> {
if self.error {
return None;
}
if let Some(delta) = self.next_delta.take() {
return Some(Ok(delta));
}
match self.inner.next() {
Some(Ok(mut entry)) => match entry.header {
Header::RefDelta { base_id } => {
match self.inserted_entry_length_at_offset.iter().rfind(|e| e.oid == base_id) {
None => {
let base_entry = match (self.lookup)(base_id, &mut self.buf) {
Some(obj) => {
let current_pack_offset = entry.pack_offset;
let mut entry = match input::Entry::from_data_obj(&obj, 0) {
Ok(e) => e,
Err(err) => return Some(Err(err)),
};
entry.pack_offset = self.shifted_pack_offset(current_pack_offset);
self.track_change(
entry.pack_offset,
current_pack_offset,
entry.bytes_in_pack() as i64,
base_id,
);
entry
}
None => {
self.error = true;
return Some(Err(input::Error::NotFound { object_id: base_id }));
}
};
{
self.shift_entry_and_point_to_base_by_offset(&mut entry, base_entry.bytes_in_pack());
self.next_delta = Some(entry);
}
Some(Ok(base_entry))
}
Some(base_entry) => {
let base_distance =
self.shifted_pack_offset(entry.pack_offset) - base_entry.shifted_pack_offset;
self.shift_entry_and_point_to_base_by_offset(&mut entry, base_distance);
Some(Ok(entry))
}
}
}
_ => {
if self.inserted_entries_length_in_bytes != 0 {
if let Header::OfsDelta { base_distance } = entry.header {
// We have to find the new distance based on the previous distance to the base, using the absolute
// pack offset computed from it as stored in `base_pack_offset`.
let base_pack_offset = entry
.pack_offset
.checked_sub(base_distance)
.expect("distance to be in range of pack");
match self
.inserted_entry_length_at_offset
.binary_search_by_key(&base_pack_offset, |c| c.pack_offset)
{
Ok(index) => {
let index = {
let maybe_index_of_actual_entry = index + 1;
self.inserted_entry_length_at_offset
.get(maybe_index_of_actual_entry)
.and_then(|c| {
(c.pack_offset == base_pack_offset)
.then_some(maybe_index_of_actual_entry)
})
.unwrap_or(index)
};
let new_distance = self
.shifted_pack_offset(entry.pack_offset)
.checked_sub(self.inserted_entry_length_at_offset[index].shifted_pack_offset)
.expect("a base that is behind us in the pack");
self.shift_entry_and_point_to_base_by_offset(&mut entry, new_distance);
}
Err(index) => {
let change_since_offset = self.inserted_entry_length_at_offset[index..]
.iter()
.map(|c| c.size_change_in_bytes)
.sum::<i64>();
let new_distance: u64 = {
(base_distance as i64 + change_since_offset)
.try_into()
.expect("it still points behind us")
};
self.shift_entry_and_point_to_base_by_offset(&mut entry, new_distance);
}
}
} else {
// Offset this entry by all changes (positive or negative) that we saw thus far.
entry.pack_offset = self.shifted_pack_offset(entry.pack_offset);
}
}
Some(Ok(entry))
}
},
other => other,
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
let (min, max) = self.inner.size_hint();
max.map_or_else(|| (min * 2, None), |max| (min, Some(max * 2)))
}
}
#[derive(Debug)]
struct
|
{
    /// The original pack offset as mentioned in the entry we saw. It is used to find this entry as a base object when
    /// deltas refer to it by its old offset.
pack_offset: u64,
/// The new pack offset that is the shifted location of the pack entry in the pack.
shifted_pack_offset: u64,
/// The size change of the entry header, negative values denote shrinking, positive denote growing.
size_change_in_bytes: i64,
/// The object id of the entry responsible for the change, or null if it's an entry just for tracking an insertion.
oid: ObjectId,
}
|
Change
|
identifier_name
|
lookup_ref_delta_objects.rs
|
use std::convert::TryInto;
use gix_hash::ObjectId;
use crate::data::{entry::Header, input};
/// An iterator to resolve thin packs on the fly.
pub struct LookupRefDeltaObjectsIter<I, LFn> {
/// The inner iterator whose entries we will resolve.
pub inner: I,
lookup: LFn,
    /// The cached delta to provide the next time we are called; it's the delta that goes with the base we just resolved in its place.
next_delta: Option<input::Entry>,
/// Fuse to stop iteration after first missing object.
error: bool,
    /// The pack-offset changes accumulated thus far. Each inserted entry shifts all following
    /// objects by its length, so we need to know exactly where an object was inserted to see if it's affected at all.
inserted_entry_length_at_offset: Vec<Change>,
    /// The summed size change of all entries added so far, cached to avoid recomputation.
inserted_entries_length_in_bytes: i64,
buf: Vec<u8>,
}
impl<I, LFn> LookupRefDeltaObjectsIter<I, LFn>
where
I: Iterator<Item = Result<input::Entry, input::Error>>,
LFn: for<'a> FnMut(ObjectId, &'a mut Vec<u8>) -> Option<gix_object::Data<'a>>,
{
    /// Create a new instance wrapping `iter`, using `lookup` as the function to retrieve objects that serve as bases
    /// for the ref-deltas encountered while traversing `iter`.
pub fn new(iter: I, lookup: LFn) -> Self {
LookupRefDeltaObjectsIter {
inner: iter,
lookup,
error: false,
inserted_entry_length_at_offset: Vec::new(),
inserted_entries_length_in_bytes: 0,
next_delta: None,
buf: Vec::new(),
}
}
fn shifted_pack_offset(&self, pack_offset: u64) -> u64 {
let new_ofs = pack_offset as i64 + self.inserted_entries_length_in_bytes;
        new_ofs.try_into().expect("offset value never becomes negative")
}
    /// Positive `size_change` values mean an object grew or, more commonly, was inserted. Negative values
    /// mean the object shrank, usually because its header changed from a ref-delta to an ofs-delta.
fn track_change(
&mut self,
shifted_pack_offset: u64,
pack_offset: u64,
size_change: i64,
oid: impl Into<Option<ObjectId>>,
) {
if size_change == 0 {
return;
}
self.inserted_entry_length_at_offset.push(Change {
shifted_pack_offset,
pack_offset,
size_change_in_bytes: size_change,
oid: oid.into().unwrap_or_else(||
// NOTE: this value acts as sentinel and the actual hash kind doesn't matter.
gix_hash::Kind::Sha1.null()),
});
self.inserted_entries_length_in_bytes += size_change;
}
fn shift_entry_and_point_to_base_by_offset(&mut self, entry: &mut input::Entry, base_distance: u64) {
let pack_offset = entry.pack_offset;
entry.pack_offset = self.shifted_pack_offset(pack_offset);
entry.header = Header::OfsDelta { base_distance };
let previous_header_size = entry.header_size;
entry.header_size = entry.header.size(entry.decompressed_size) as u16;
let change = entry.header_size as i64 - previous_header_size as i64;
entry.crc32 = Some(entry.compute_crc32());
self.track_change(entry.pack_offset, pack_offset, change, None);
}
}
impl<I, LFn> Iterator for LookupRefDeltaObjectsIter<I, LFn>
where
I: Iterator<Item = Result<input::Entry, input::Error>>,
LFn: for<'a> FnMut(ObjectId, &'a mut Vec<u8>) -> Option<gix_object::Data<'a>>,
{
type Item = Result<input::Entry, input::Error>;
fn next(&mut self) -> Option<Self::Item> {
if self.error {
return None;
}
if let Some(delta) = self.next_delta.take() {
return Some(Ok(delta));
}
match self.inner.next() {
Some(Ok(mut entry)) => match entry.header {
Header::RefDelta { base_id } => {
match self.inserted_entry_length_at_offset.iter().rfind(|e| e.oid == base_id) {
None => {
let base_entry = match (self.lookup)(base_id, &mut self.buf) {
Some(obj) => {
let current_pack_offset = entry.pack_offset;
let mut entry = match input::Entry::from_data_obj(&obj, 0) {
Ok(e) => e,
Err(err) => return Some(Err(err)),
};
entry.pack_offset = self.shifted_pack_offset(current_pack_offset);
self.track_change(
entry.pack_offset,
current_pack_offset,
entry.bytes_in_pack() as i64,
base_id,
);
entry
}
None => {
self.error = true;
return Some(Err(input::Error::NotFound { object_id: base_id }));
}
};
{
self.shift_entry_and_point_to_base_by_offset(&mut entry, base_entry.bytes_in_pack());
self.next_delta = Some(entry);
}
Some(Ok(base_entry))
}
Some(base_entry) => {
let base_distance =
self.shifted_pack_offset(entry.pack_offset) - base_entry.shifted_pack_offset;
self.shift_entry_and_point_to_base_by_offset(&mut entry, base_distance);
Some(Ok(entry))
}
}
}
_ => {
if self.inserted_entries_length_in_bytes != 0 {
if let Header::OfsDelta { base_distance } = entry.header {
// We have to find the new distance based on the previous distance to the base, using the absolute
// pack offset computed from it as stored in `base_pack_offset`.
let base_pack_offset = entry
.pack_offset
.checked_sub(base_distance)
.expect("distance to be in range of pack");
match self
.inserted_entry_length_at_offset
.binary_search_by_key(&base_pack_offset, |c| c.pack_offset)
{
Ok(index) => {
let index = {
let maybe_index_of_actual_entry = index + 1;
self.inserted_entry_length_at_offset
.get(maybe_index_of_actual_entry)
.and_then(|c| {
(c.pack_offset == base_pack_offset)
.then_some(maybe_index_of_actual_entry)
})
.unwrap_or(index)
};
let new_distance = self
.shifted_pack_offset(entry.pack_offset)
.checked_sub(self.inserted_entry_length_at_offset[index].shifted_pack_offset)
.expect("a base that is behind us in the pack");
self.shift_entry_and_point_to_base_by_offset(&mut entry, new_distance);
}
Err(index) => {
let change_since_offset = self.inserted_entry_length_at_offset[index..]
.iter()
.map(|c| c.size_change_in_bytes)
.sum::<i64>();
let new_distance: u64 = {
(base_distance as i64 + change_since_offset)
.try_into()
.expect("it still points behind us")
};
self.shift_entry_and_point_to_base_by_offset(&mut entry, new_distance);
}
}
} else {
// Offset this entry by all changes (positive or negative) that we saw thus far.
entry.pack_offset = self.shifted_pack_offset(entry.pack_offset);
}
}
Some(Ok(entry))
}
},
other => other,
}
}
fn size_hint(&self) -> (usize, Option<usize>)
|
}
#[derive(Debug)]
struct Change {
    /// The original pack offset as mentioned in the entry we saw. It is used to find this entry as a base object when
    /// deltas refer to it by its old offset.
pack_offset: u64,
/// The new pack offset that is the shifted location of the pack entry in the pack.
shifted_pack_offset: u64,
/// The size change of the entry header, negative values denote shrinking, positive denote growing.
size_change_in_bytes: i64,
/// The object id of the entry responsible for the change, or null if it's an entry just for tracking an insertion.
oid: ObjectId,
}
|
{
let (min, max) = self.inner.size_hint();
max.map_or_else(|| (min * 2, None), |max| (min, Some(max * 2)))
}
|
identifier_body
|
ospf_sh_request_list.pb.go
|
return ""
}
func (m *OspfShRequestList_KEYS) GetInterfaceName() string {
if m != nil {
return m.InterfaceName
}
return ""
}
func (m *OspfShRequestList_KEYS) GetNeighborAddress() string {
if m != nil {
return m.NeighborAddress
}
return ""
}
type OspfShLsaSum struct {
HeaderLsaType string `protobuf:"bytes,1,opt,name=header_lsa_type,json=headerLsaType,proto3" json:"header_lsa_type,omitempty"`
HeaderLsaAge uint32 `protobuf:"varint,2,opt,name=header_lsa_age,json=headerLsaAge,proto3" json:"header_lsa_age,omitempty"`
HeaderLsId string `protobuf:"bytes,3,opt,name=header_ls_id,json=headerLsId,proto3" json:"header_ls_id,omitempty"`
HeaderAdvertisingRouter string `protobuf:"bytes,4,opt,name=header_advertising_router,json=headerAdvertisingRouter,proto3" json:"header_advertising_router,omitempty"`
HeaderSequenceNumber uint32 `protobuf:"varint,5,opt,name=header_sequence_number,json=headerSequenceNumber,proto3" json:"header_sequence_number,omitempty"`
HeaderLsaChecksum uint32 `protobuf:"varint,6,opt,name=header_lsa_checksum,json=headerLsaChecksum,proto3" json:"header_lsa_checksum,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *OspfShLsaSum) Reset() { *m = OspfShLsaSum{} }
func (m *OspfShLsaSum) String() string { return proto.CompactTextString(m) }
func (*OspfShLsaSum) ProtoMessage() {}
func (*OspfShLsaSum) Descriptor() ([]byte, []int) {
return fileDescriptor_e4609c816fd64cee, []int{1}
}
func (m *OspfShLsaSum) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_OspfShLsaSum.Unmarshal(m, b)
}
func (m *OspfShLsaSum) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_OspfShLsaSum.Marshal(b, m, deterministic)
}
func (m *OspfShLsaSum) XXX_Merge(src proto.Message) {
xxx_messageInfo_OspfShLsaSum.Merge(m, src)
}
func (m *OspfShLsaSum) XXX_Size() int {
return xxx_messageInfo_OspfShLsaSum.Size(m)
}
func (m *OspfShLsaSum) XXX_DiscardUnknown() {
xxx_messageInfo_OspfShLsaSum.DiscardUnknown(m)
}
var xxx_messageInfo_OspfShLsaSum proto.InternalMessageInfo
func (m *OspfShLsaSum) GetHeaderLsaType() string {
if m != nil {
return m.HeaderLsaType
}
return ""
}
func (m *OspfShLsaSum) GetHeaderLsaAge() uint32 {
if m != nil {
return m.HeaderLsaAge
}
return 0
}
func (m *OspfShLsaSum) GetHeaderLsId() string {
if m != nil {
return m.HeaderLsId
}
return ""
}
func (m *OspfShLsaSum) GetHeaderAdvertisingRouter() string {
if m != nil {
return m.HeaderAdvertisingRouter
}
return ""
}
func (m *OspfShLsaSum) GetHeaderSequenceNumber() uint32 {
if m != nil {
return m.HeaderSequenceNumber
}
return 0
}
func (m *OspfShLsaSum) GetHeaderLsaChecksum() uint32 {
if m != nil {
return m.HeaderLsaChecksum
}
return 0
}
type OspfShRequestList struct {
RequestNeighborId string `protobuf:"bytes,50,opt,name=request_neighbor_id,json=requestNeighborId,proto3" json:"request_neighbor_id,omitempty"`
RequestNeighborAddress string `protobuf:"bytes,51,opt,name=request_neighbor_address,json=requestNeighborAddress,proto3" json:"request_neighbor_address,omitempty"`
RequestInterfaceName string `protobuf:"bytes,52,opt,name=request_interface_name,json=requestInterfaceName,proto3" json:"request_interface_name,omitempty"`
Request []*OspfShLsaSum `protobuf:"bytes,53,rep,name=request,proto3" json:"request,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *OspfShRequestList) Reset() { *m = OspfShRequestList{} }
func (m *OspfShRequestList) String() string { return proto.CompactTextString(m) }
func (*OspfShRequestList) ProtoMessage() {}
func (*OspfShRequestList) Descriptor() ([]byte, []int) {
return fileDescriptor_e4609c816fd64cee, []int{2}
}
func (m *OspfShRequestList) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_OspfShRequestList.Unmarshal(m, b)
}
func (m *OspfShRequestList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_OspfShRequestList.Marshal(b, m, deterministic)
}
func (m *OspfShRequestList) XXX_Merge(src proto.Message) {
xxx_messageInfo_OspfShRequestList.Merge(m, src)
}
func (m *OspfShRequestList) XXX_Size() int {
return xxx_messageInfo_OspfShRequestList.Size(m)
}
func (m *OspfShRequestList) XXX_DiscardUnknown() {
xxx_messageInfo_OspfShRequestList.DiscardUnknown(m)
}
var xxx_messageInfo_OspfShRequestList proto.InternalMessageInfo
func (m *OspfShRequestList) GetRequestNeighborId() string {
if m != nil {
return m.RequestNeighborId
}
return ""
}
func (m *OspfShRequestList) GetRequestNeighborAddress() string {
if m != nil {
return m.RequestNeighborAddress
}
return ""
}
func (m *OspfShRequestList) GetRequestInterfaceName() string {
if m != nil {
return m.RequestInterfaceName
}
return ""
}
func (m *OspfShRequestList) GetRequest() []*OspfShLsaSum {
if m != nil {
return m.Request
}
return nil
}
func init() {
proto.RegisterType((*OspfShRequestList_KEYS)(nil), "cisco_ios_xr_ipv4_ospf_oper.ospf.processes.process.vrfs.vrf.adjacency_information.requests.request.ospf_sh_request_list_KEYS")
proto.RegisterType((*OspfShLsaSum)(nil), "cisco_ios_xr_ipv4_ospf_oper.ospf.processes.process.vrfs.vrf.adjacency_information.requests.request.ospf_sh_lsa_sum")
proto.RegisterType((*OspfShRequestList)(nil), "cisco_ios_xr_ipv4_ospf_oper.ospf.processes.process.vrfs.vrf.adjacency_information.requests.request.ospf_sh_request_list")
}
func init() { proto.RegisterFile("ospf_sh_request_list.proto", fileDescriptor_e4609c816fd64cee) }
var fileDescriptor_e4609c816fd64cee = []byte{
// 451 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x93, 0x4f, 0x6f, 0xd3, 0x30,
0x18, 0x87, 0xd5, 0x0e, 0x36, 0x78, 0xd7, 0xae, 0xcc, 0x54, 0x23, 0xe5, 0x54, 0x2a, 0x40, 0xe3,
0x92, 0xc3, 0x56, 0x24, 0xc4, 0xad, 0x42, 0x1c, 0x2a, 0x50, 0x0f, 0x1d, 0x17, 0x4e, 0x96, 0x1b,
0xbf, 0x69, 0x0c, 0x4b, 0x1c, 0xfc,
|
{
return m.VrfName
}
|
conditional_block
|
|
ospf_sh_request_list.pb.go
|
RequestList) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_OspfShRequestList.Unmarshal(m, b)
}
func (m *OspfShRequestList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_OspfShRequestList.Marshal(b, m, deterministic)
}
func (m *OspfShRequestList) XXX_Merge(src proto.Message) {
xxx_messageInfo_OspfShRequestList.Merge(m, src)
}
func (m *OspfShRequestList) XXX_Size() int {
return xxx_messageInfo_OspfShRequestList.Size(m)
}
func (m *OspfShRequestList) XXX_DiscardUnknown() {
xxx_messageInfo_OspfShRequestList.DiscardUnknown(m)
}
var xxx_messageInfo_OspfShRequestList proto.InternalMessageInfo
func (m *OspfShRequestList) GetRequestNeighborId() string {
if m != nil {
return m.RequestNeighborId
}
return ""
}
func (m *OspfShRequestList) GetRequestNeighborAddress() string {
if m != nil {
return m.RequestNeighborAddress
}
return ""
}
func (m *OspfShRequestList) GetRequestInterfaceName() string {
if m != nil {
return m.RequestInterfaceName
}
return ""
}
func (m *OspfShRequestList) GetRequest() []*OspfShLsaSum {
if m != nil {
return m.Request
}
return nil
}
func init() {
proto.RegisterType((*OspfShRequestList_KEYS)(nil), "cisco_ios_xr_ipv4_ospf_oper.ospf.processes.process.vrfs.vrf.adjacency_information.requests.request.ospf_sh_request_list_KEYS")
proto.RegisterType((*OspfShLsaSum)(nil), "cisco_ios_xr_ipv4_ospf_oper.ospf.processes.process.vrfs.vrf.adjacency_information.requests.request.ospf_sh_lsa_sum")
proto.RegisterType((*OspfShRequestList)(nil), "cisco_ios_xr_ipv4_ospf_oper.ospf.processes.process.vrfs.vrf.adjacency_information.requests.request.ospf_sh_request_list")
}
func init() { proto.RegisterFile("ospf_sh_request_list.proto", fileDescriptor_e4609c816fd64cee) }
var fileDescriptor_e4609c816fd64cee = []byte{
// 451 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x93, 0x4f, 0x6f, 0xd3, 0x30,
0x18, 0x87, 0xd5, 0x0e, 0x36, 0x78, 0xd7, 0xae, 0xcc, 0x54, 0x23, 0xe5, 0x54, 0x2a, 0x40, 0xe3,
0x92, 0xc3, 0x56, 0x24, 0xc4, 0xad, 0x42, 0x1c, 0x2a, 0x50, 0x0f, 0x1d, 0x17, 0x4e, 0x96, 0x1b,
0xbf, 0x69, 0x0c, 0x4b, 0x1c, 0xfc, 0x3a, 0x11, 0xfd, 0x10, 0x7c, 0x0a, 0x8e, 0x7c, 0x0f, 0x3e,
0x17, 0x8a, 0x63, 0x87, 0x51, 0x76, 0xdd, 0x25, 0x72, 0xde, 0xe7, 0x79, 0x9d, 0x5f, 0xfc, 0x07,
0x9e, 0x6a, 0x2a, 0x53, 0x4e, 0x19, 0x37, 0xf8, 0xad, 0x42, 0xb2, 0xfc, 0x5a, 0x91, 0x8d, 0x4b,
0xa3, 0xad, 0x66, 0x9b, 0x44, 0x51, 0xa2, 0xb9, 0xd2, 0xc4, 0xbf, 0x1b, 0xae, 0xca, 0x7a, 0xce,
0x9d, 0xad, 0x4b, 0x34, 0x71, 0x33, 0x6a, 0xbc, 0x04, 0x89, 0x90, 0xc2, 0x28, 0xae, 0x4d, 0xea,
0x1e, 0xb1, 0x90, 0x5f, 0x44, 0x82, 0x45, 0xb2, 0xe3, 0xaa, 0x48, 0xb5, 0xc9, 0x85, 0x55, 0xba,
0x88, 0xfd, 0x57, 0x28, 0x0c, 0x66, 0xbf, 0x7a, 0x30, 0xb9, 0x2d, 0x02, 0xff, 0xf0, 0xfe, 0xf3,
0x15, 0x7b, 0x06, 0x03, 0x3f, 0x31, 0x2f, 0x44, 0x8e, 0x51, 0x6f, 0xda, 0x3b, 0x7f, 0xb8, 0x3e,
0xf6, 0xb5, 0x95, 0xc8, 0x91, 0x4d, 0xe0, 0x41, 0x6d, 0xd2, 0x16, 0xf7, 0x1d, 0x3e, 0xaa, 0x4d,
0xea, 0xd0, 0x0b, 0x38, 0x51, 0x85, 0x45, 0x93, 0x8a, 0x04, 0x5b, 0xe1, 0xc0, 0x09, 0xc3, 0xae,
0xea, 0xb4, 0x57, 0xf0, 0xa8, 0x40, 0xb5, 0xcd, 0x36, 0xda, 0x70, 0x21, 0xa5, 0x41, 0xa2, 0xe8,
|
0x9e, 0x13, 0x47, 0xa1, 0xbe, 0x68, 0xcb, 0xb3, 0x9f, 0x7d, 0x18, 0x85, 0xb4, 0xd7, 0x24, 0x38,
0x55, 0x39, 0x7b, 0x09, 0xa3, 0x0c, 0x85, 0x44, 0xe3, 0x2a, 0x76, 0x57, 0x86, 0x98, 0xc3, 0xb6,
0xfc, 0x91, 0xc4, 0xa7, 0x5d, 0x89, 0xec, 0x39, 0x9c, 0xdc, 0xf0, 0xc4, 0xb6, 0x8d, 0x3b, 0x5c,
|
random_line_split
|