file_name (large_string, lengths 4-140) | prefix (large_string, lengths 0-12.1k) | suffix (large_string, lengths 0-12k) | middle (large_string, lengths 0-7.51k) | fim_type (large_string, 4 classes) |
---|---|---|---|---|
fixture.go
|
Vars: []string{
customCrOSUsername,
customCrOSPassword,
KeepStateVar,
},
SetUpTimeout: 10*time.Minute + BugReportDuration,
ResetTimeout: resetTimeout,
TearDownTimeout: resetTimeout,
PreTestTimeout: resetTimeout,
PostTestTimeout: postTestTimeout,
})
testing.AddFixture(&testing.Fixture{
Name: "crossdeviceNoSignIn",
Desc: "User is not signed in (with GAIA) to CrOS but fixture requires control of an Android phone. Does not skip OOBE",
Contacts: []string{
"kyleshima@chromium.org",
"chromeos-sw-engprod@google.com",
},
Parent: "crossdeviceAndroidSetupPhoneHub",
Impl: NewCrossDeviceOnboarded(FixtureOptions{false, false, true, true}, func(ctx context.Context, s *testing.FixtState) ([]chrome.Option, error) {
return nil, nil
}),
Vars: []string{
customCrOSUsername,
customCrOSPassword,
KeepStateVar,
SignInProfileTestExtensionManifestKey,
},
SetUpTimeout: 10*time.Minute + BugReportDuration,
ResetTimeout: resetTimeout,
TearDownTimeout: resetTimeout,
PreTestTimeout: resetTimeout,
PostTestTimeout: postTestTimeout,
})
testing.AddFixture(&testing.Fixture{
Name: "crossdeviceOnboardedNoLock",
Desc: "User is signed in (with GAIA) to CrOS and paired with an Android phone with default Cross Device features enabled. Doesn't lock the fixture before starting the test",
Contacts: []string{
"kyleshima@chromium.org",
"chromeos-sw-engprod@google.com",
},
Parent: "crossdeviceAndroidSetupSmartLockLogin",
Impl: NewCrossDeviceOnboarded(FixtureOptions{false, false, false, false}, func(ctx context.Context, s *testing.FixtState) ([]chrome.Option, error) {
return nil, nil
}),
Vars: []string{
customCrOSUsername,
customCrOSPassword,
KeepStateVar,
},
SetUpTimeout: 10*time.Minute + BugReportDuration,
ResetTimeout: resetTimeout,
TearDownTimeout: resetTimeout,
PreTestTimeout: resetTimeout,
PostTestTimeout: postTestTimeout,
})
// lacros fixtures
testing.AddFixture(&testing.Fixture{
Name: "lacrosCrossdeviceOnboardedAllFeatures",
Desc: "User is signed in (with GAIA) to CrOS and paired with an Android phone with all Cross Device features enabled with lacros enabled",
Contacts: []string{
"kyleshima@chromium.org",
"chromeos-sw-engprod@google.com",
},
Parent: "crossdeviceAndroidSetupPhoneHub",
Impl: NewCrossDeviceOnboarded(FixtureOptions{true, true, true, false}, func(ctx context.Context, s *testing.FixtState) ([]chrome.Option, error) {
return lacrosfixt.NewConfig().Opts()
}),
Vars: []string{
customCrOSUsername,
customCrOSPassword,
KeepStateVar,
},
SetUpTimeout: 10*time.Minute + BugReportDuration,
ResetTimeout: resetTimeout,
TearDownTimeout: resetTimeout,
PreTestTimeout: resetTimeout,
PostTestTimeout: postTestTimeout,
})
}
type crossdeviceFixture struct {
fOpt chrome.OptionsCallback // Function to generate Chrome Options
cr *chrome.Chrome
tconn *chrome.TestConn
kb *input.KeyboardEventWriter
androidDevice *AndroidDevice
androidAttributes *AndroidAttributes
crosAttributes *crossdevicecommon.CrosAttributes
btsnoopCmd *testexec.Cmd
logMarker *logsaver.Marker // Marker for per-test log.
allFeatures bool
saveAndroidScreenRecordingOnError func(context.Context, func() bool) error
saveScreenRecording bool
lockFixture bool
noSignIn bool
logcatStartTime adb.LogcatTimestamp
downloadsPath string
}
// FixtData holds information made available to tests that specify this Fixture.
type FixtData struct {
// Chrome is the running chrome instance.
Chrome *chrome.Chrome
// TestConn is a connection to the test extension.
TestConn *chrome.TestConn
// Connection to the lock screen test extension.
LoginConn *chrome.TestConn
// AndroidDevice is an object for interacting with the connected Android device's Multidevice Snippet.
AndroidDevice *AndroidDevice
// The credentials to be used on both chromebook and phone.
Username string
Password string
// The options used to start Chrome sessions.
ChromeOptions []chrome.Option
}
func (f *crossdeviceFixture) SetUp(ctx context.Context, s *testing.FixtState) interface{}
|
defer androidDevice.DumpLogs(cleanupCtx, s.OutDir(), "fixture_setup_persistent_logcat.txt")
// Set default chrome options.
opts, err := f.fOpt(ctx, s)
if err != nil {
s.Fatal("Failed to obtain Chrome options: ", err)
}
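// vmodule patterns that raise Chrome log verbosity for the cross-device stack
// (Nearby, CryptAuth, Phone Hub, Bluetooth) so onboarding failures are debuggable.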
tags := []string{
"*nearby*=3",
"*cryptauth*=3",
"*device_sync*=3",
"*multidevice*=3",
"*secure_channel*=3",
"*phonehub*=3",
"*blue*=3",
"ble_*=3",
}
opts = append(opts, chrome.ExtraArgs("--enable-logging", "--vmodule="+strings.Join(tags, ",")))
opts = append(opts, chrome.EnableFeatures("PhoneHubCameraRoll", "SmartLockUIRevamp", "OobeQuickStart"))
customUser, userOk := s.Var(customCrOSUsername)
customPass, passOk := s.Var(customCrOSPassword)
if userOk && passOk {
s.Log("Logging in with user-provided credentials")
crosUsername = customUser
crosPassword = customPass
} else {
s.Log("Logging in with default GAIA credentials")
}
if f.noSignIn {
opts = append(opts, chrome.DontSkipOOBEAfterLogin())
} else {
opts = append(opts, chrome.GAIALogin(chrome.Creds{User: crosUsername, Pass: crosPassword}))
}
if val, ok := s.Var(KeepStateVar); ok {
b, err := strconv.ParseBool(val)
if err != nil {
s.Fatalf("Unable to convert %v var to bool: %v", KeepStateVar, err)
}
if b {
opts = append(opts, chrome.KeepState())
}
}
cr, err := chrome.New(
ctx,
opts...,
)
if err != nil {
s.Fatal("Failed to start Chrome: ", err)
}
f.cr = cr
tconn, err := cr.TestAPIConn(ctx)
if err != nil {
s.Fatal("Creating test API connection failed: ", err)
}
f.tconn = tconn
defer faillog.DumpUITreeWithScreenshotOnError(cleanupCtx, s.OutDir(), s.HasError, cr, "fixture")
// Capture a bug report on the Android phone if any onboarding/setup fails.
defer func() {
if s.HasError() {
if err := BugReport(ctx, androidDevice.Device, s.OutDir()); err != nil {
s.Log("Failed to save Android bug report: ", err)
}
}
}()
// Capture btsnoop logs during fixture setup to have adequate logging during the onboarding phase.
btsnoopCmd := bluetooth.StartBTSnoopLogging(ctx, filepath.Join(s.OutDir(), "crossdevice-fixture-btsnoop.log"))
if err := btsnoopCmd.Start(); err != nil {
s.Fatal("Failed to start btsnoop logging: ", err)
}
defer btsnoopCmd.Wait()
defer btsnoopCmd.Kill()
// Enable bluetooth debug logging.
levels := bluetooth.LogVerbosity{
Bluez: true,
Kernel: true,
}
if err := bluetooth.SetDebugLogLevels(ctx, levels); err != nil {
|
{
// Android device from parent fixture.
androidDevice := s.ParentValue().(*FixtData).AndroidDevice
f.androidDevice = androidDevice
// Credentials to use (same as Android).
crosUsername := s.ParentValue().(*FixtData).Username
crosPassword := s.ParentValue().(*FixtData).Password
// Allocate time for logging and saving a screenshot and bugreport in case of failure.
cleanupCtx := ctx
ctx, cancel := ctxutil.Shorten(ctx, 10*time.Second+BugReportDuration)
defer cancel()
// Save logcat so we have Android logs even if fixture setup fails.
startTime, err := androidDevice.Device.LatestLogcatTimestamp(ctx)
if err != nil {
s.Fatal("Failed to get latest logcat timestamp: ", err)
}
defer androidDevice.Device.DumpLogcatFromTimestamp(cleanupCtx, filepath.Join(s.OutDir(), "fixture_setup_logcat.txt"), startTime)
|
identifier_body
|
process.go
|
() []*Goroutine {
return p.goroutines
}
// Stats returns a breakdown of the program's memory use by category.
func (p *Process) Stats() *Stats {
return p.stats
}
// BuildVersion returns the Go version that was used to build the inferior binary.
func (p *Process) BuildVersion() string {
return p.buildVersion
}
func (p *Process) Globals() []*Root {
return p.globals
}
// FindFunc returns the function which contains the code at address pc, if any.
func (p *Process) FindFunc(pc core.Address) *Func {
return p.funcTab.find(pc)
}
func (p *Process) findType(name string) *Type {
s := p.runtimeNameMap[name]
if len(s) == 0 {
panic("can't find type " + name)
}
return s[0]
}
// Core takes a loaded core file and extracts Go information from it.
func Core(proc *core.Process) (p *Process, err error) {
// Make sure we have DWARF info.
if _, err := proc.DWARF(); err != nil {
return nil, fmt.Errorf("error reading dwarf: %w", err)
}
// Guard against failures of proc.Read* routines.
/*
defer func() {
e := recover()
if e == nil {
return
}
p = nil
if x, ok := e.(error); ok {
err = x
return
}
panic(e) // Not an error, re-panic it.
}()
*/
p = &Process{
proc: proc,
runtimeMap: map[core.Address]*Type{},
dwarfMap: map[dwarf.Type]*Type{},
}
// Initialize everything that just depends on DWARF.
p.readDWARFTypes()
p.readRuntimeConstants()
p.readGlobals()
// Find runtime globals we care about. Initialize regions for them.
p.rtGlobals = map[string]region{}
for _, g := range p.globals {
if strings.HasPrefix(g.Name, "runtime.") {
p.rtGlobals[g.Name[8:]] = region{p: p, a: g.Addr, typ: g.Type}
}
}
// Read all the data that depend on runtime globals.
p.buildVersion = p.rtGlobals["buildVersion"].String()
// runtime._type varint name length encoding, and mheap curArena
// counting changed behavior in 1.17 without explicitly related type
// changes, making the difference difficult to detect. As a workaround,
// we check on the version explicitly.
//
// Go 1.17 added runtime._func.flag, so use that as a sentinel for this
// version.
p.is117OrGreater = p.findType("runtime._func").HasField("flag")
p.readModules()
p.readHeap()
p.readGs()
p.readStackVars() // needs to be after readGs.
p.markObjects() // needs to be after readGlobals, readStackVars.
return p, nil
}
type arena struct {
heapMin core.Address
heapMax core.Address
bitmapMin core.Address
bitmapMax core.Address
spanTableMin core.Address
spanTableMax core.Address
}
func (p *Process) getArenaBaseOffset() int64 {
if x, ok := p.rtConstants["arenaBaseOffsetUintptr"]; ok { // go1.15+
// arenaBaseOffset changed sign in 1.15. Callers treat this
// value as it was specified in 1.14, so we negate it here.
return -x
}
return p.rtConstants["arenaBaseOffset"]
}
func (p *Process) readHeap() {
ptrSize := p.proc.PtrSize()
logPtrSize := p.proc.LogPtrSize()
p.pageTable = map[core.Address]*pageTableEntry{}
mheap := p.rtGlobals["mheap_"]
var arenas []arena
if mheap.HasField("spans") {
// go 1.9 or 1.10. There is a single arena.
arenaStart := core.Address(mheap.Field("arena_start").Uintptr())
arenaUsed := core.Address(mheap.Field("arena_used").Uintptr())
arenaEnd := core.Address(mheap.Field("arena_end").Uintptr())
bitmapEnd := core.Address(mheap.Field("bitmap").Uintptr())
bitmapStart := bitmapEnd.Add(-int64(mheap.Field("bitmap_mapped").Uintptr()))
spanTableStart := mheap.Field("spans").SlicePtr().Address()
spanTableEnd := spanTableStart.Add(mheap.Field("spans").SliceCap() * ptrSize)
arenas = append(arenas, arena{
heapMin: arenaStart,
heapMax: arenaEnd,
bitmapMin: bitmapStart,
bitmapMax: bitmapEnd,
spanTableMin: spanTableStart,
spanTableMax: spanTableEnd,
})
// Copy pointer bits to heap info.
// Note that the pointer bits are stored backwards.
for a := arenaStart; a < arenaUsed; a = a.Add(ptrSize) {
off := a.Sub(arenaStart) >> logPtrSize
if p.proc.ReadUint8(bitmapEnd.Add(-(off>>2)-1))>>uint(off&3)&1 != 0 {
p.setHeapPtr(a)
}
}
} else {
// go 1.11+. Has multiple arenas.
arenaSize := p.rtConstants["heapArenaBytes"]
if arenaSize%heapInfoSize != 0 {
panic("arenaSize not a multiple of heapInfoSize")
}
arenaBaseOffset := p.getArenaBaseOffset()
if ptrSize == 4 && arenaBaseOffset != 0 {
panic("arenaBaseOffset must be 0 for 32-bit inferior")
}
level1Table := mheap.Field("arenas")
level1size := level1Table.ArrayLen()
for level1 := int64(0); level1 < level1size; level1++ {
ptr := level1Table.ArrayIndex(level1)
if ptr.Address() == 0 {
continue
}
level2table := ptr.Deref()
level2size := level2table.ArrayLen()
for level2 := int64(0); level2 < level2size; level2++ {
ptr = level2table.ArrayIndex(level2)
if ptr.Address() == 0 {
continue
}
a := ptr.Deref()
min := core.Address(arenaSize*(level2+level1*level2size) - arenaBaseOffset)
max := min.Add(arenaSize)
bitmap := a.Field("bitmap")
oneBitBitmap := a.HasField("noMorePtrs") // Starting in 1.20.
spans := a.Field("spans")
arenas = append(arenas, arena{
heapMin: min,
heapMax: max,
bitmapMin: bitmap.a,
bitmapMax: bitmap.a.Add(bitmap.ArrayLen()),
spanTableMin: spans.a,
spanTableMax: spans.a.Add(spans.ArrayLen() * ptrSize),
})
// Copy out ptr/nonptr bits
n := bitmap.ArrayLen()
for i := int64(0); i < n; i++ {
if oneBitBitmap {
// The array uses 1 bit per word of heap. See mbitmap.go for
// more information.
m := bitmap.ArrayIndex(i).Uintptr()
bits := 8 * ptrSize
for j := int64(0); j < bits; j++ {
if m>>uint(j)&1 != 0 {
p.setHeapPtr(min.Add((i*bits + j) * ptrSize))
}
}
} else {
// The nth byte is composed of 4 object bits and 4 live/dead
// bits. We ignore the 4 live/dead bits, which are on the
// high order side of the byte.
//
// See mbitmap.go for more information on the format of
// the bitmap field of heapArena.
m := bitmap.ArrayIndex(i).Uint8()
for j := int64(0); j < 4; j++ {
if m>>uint(j)&1 != 0 {
p.setHeapPtr(min.Add((i*4 + j) * ptrSize))
}
}
}
}
}
}
}
p.readSpans(mheap, arenas)
}
func (p *Process) readSpans(mheap region, arenas []arena) {
var all int64
var text int64
var readOnly int64
var heap int64
var spanTable int64
var bitmap int64
var data int64
var bss int64 // also includes mmap'd regions
for _, m := range p.proc.Mappings() {
size := m.Size()
all += size
switch m.Perm() {
case core.Read:
readOnly += size
case core.Read | core.Exec:
text += size
case core.Read | core.Write:
if m
|
Goroutines
|
identifier_name
|
|
process.go
|
()))
spanTableStart := mheap.Field("spans").SlicePtr().Address()
spanTableEnd := spanTableStart.Add(mheap.Field("spans").SliceCap() * ptrSize)
arenas = append(arenas, arena{
heapMin: arenaStart,
heapMax: arenaEnd,
bitmapMin: bitmapStart,
bitmapMax: bitmapEnd,
spanTableMin: spanTableStart,
spanTableMax: spanTableEnd,
})
// Copy pointer bits to heap info.
// Note that the pointer bits are stored backwards.
for a := arenaStart; a < arenaUsed; a = a.Add(ptrSize) {
off := a.Sub(arenaStart) >> logPtrSize
if p.proc.ReadUint8(bitmapEnd.Add(-(off>>2)-1))>>uint(off&3)&1 != 0 {
p.setHeapPtr(a)
}
}
} else {
// go 1.11+. Has multiple arenas.
arenaSize := p.rtConstants["heapArenaBytes"]
if arenaSize%heapInfoSize != 0 {
panic("arenaSize not a multiple of heapInfoSize")
}
arenaBaseOffset := p.getArenaBaseOffset()
if ptrSize == 4 && arenaBaseOffset != 0 {
panic("arenaBaseOffset must be 0 for 32-bit inferior")
}
level1Table := mheap.Field("arenas")
level1size := level1Table.ArrayLen()
for level1 := int64(0); level1 < level1size; level1++
|
arenas = append(arenas, arena{
heapMin: min,
heapMax: max,
bitmapMin: bitmap.a,
bitmapMax: bitmap.a.Add(bitmap.ArrayLen()),
spanTableMin: spans.a,
spanTableMax: spans.a.Add(spans.ArrayLen() * ptrSize),
})
// Copy out ptr/nonptr bits
n := bitmap.ArrayLen()
for i := int64(0); i < n; i++ {
if oneBitBitmap {
// The array uses 1 bit per word of heap. See mbitmap.go for
// more information.
m := bitmap.ArrayIndex(i).Uintptr()
bits := 8 * ptrSize
for j := int64(0); j < bits; j++ {
if m>>uint(j)&1 != 0 {
p.setHeapPtr(min.Add((i*bits + j) * ptrSize))
}
}
} else {
// The nth byte is composed of 4 object bits and 4 live/dead
// bits. We ignore the 4 live/dead bits, which are on the
// high order side of the byte.
//
// See mbitmap.go for more information on the format of
// the bitmap field of heapArena.
m := bitmap.ArrayIndex(i).Uint8()
for j := int64(0); j < 4; j++ {
if m>>uint(j)&1 != 0 {
p.setHeapPtr(min.Add((i*4 + j) * ptrSize))
}
}
}
}
}
}
}
p.readSpans(mheap, arenas)
}
func (p *Process) readSpans(mheap region, arenas []arena) {
var all int64
var text int64
var readOnly int64
var heap int64
var spanTable int64
var bitmap int64
var data int64
var bss int64 // also includes mmap'd regions
for _, m := range p.proc.Mappings() {
size := m.Size()
all += size
switch m.Perm() {
case core.Read:
readOnly += size
case core.Read | core.Exec:
text += size
case core.Read | core.Write:
if m.CopyOnWrite() {
// Check if m.file == text's file? That could distinguish
// data segment from mmapped file.
data += size
break
}
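// attribute adds the overlap of [x, y) with this mapping to the counter *p
// and subtracts it from the portion of the mapping still to be classified.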
attribute := func(x, y core.Address, p *int64) {
a := x.Max(m.Min())
b := y.Min(m.Max())
if a < b {
*p += b.Sub(a)
size -= b.Sub(a)
}
}
for _, a := range arenas {
attribute(a.heapMin, a.heapMax, &heap)
attribute(a.bitmapMin, a.bitmapMax, &bitmap)
attribute(a.spanTableMin, a.spanTableMax, &spanTable)
}
// Any other anonymous mapping is bss.
// TODO: how to distinguish original bss from anonymous mmap?
bss += size
default:
panic("weird mapping " + m.Perm().String())
}
}
if !p.is117OrGreater && mheap.HasField("curArena") {
// 1.13.3 and up have curArena. Subtract unallocated space in
// the current arena from the heap.
//
// As of 1.17, the runtime does this automatically
// (https://go.dev/cl/270537).
ca := mheap.Field("curArena")
unused := int64(ca.Field("end").Uintptr() - ca.Field("base").Uintptr())
heap -= unused
all -= unused
}
pageSize := p.rtConstants["_PageSize"]
// Span types
spanInUse := uint8(p.rtConstants["_MSpanInUse"])
spanManual := uint8(p.rtConstants["_MSpanManual"])
spanDead := uint8(p.rtConstants["_MSpanDead"])
spanFree := uint8(p.rtConstants["_MSpanFree"])
// Process spans.
if pageSize%heapInfoSize != 0 {
panic(fmt.Sprintf("page size not a multiple of %d", heapInfoSize))
}
allspans := mheap.Field("allspans")
var freeSpanSize int64
var releasedSpanSize int64
var manualSpanSize int64
var inUseSpanSize int64
var allocSize int64
var freeSize int64
var spanRoundSize int64
var manualAllocSize int64
var manualFreeSize int64
n := allspans.SliceLen()
for i := int64(0); i < n; i++ {
s := allspans.SliceIndex(i).Deref()
min := core.Address(s.Field("startAddr").Uintptr())
elemSize := int64(s.Field("elemsize").Uintptr())
nPages := int64(s.Field("npages").Uintptr())
spanSize := nPages * pageSize
max := min.Add(spanSize)
for a := min; a != max; a = a.Add(pageSize) {
if !p.proc.Readable(a) {
// Sometimes allocated but not yet touched pages or
// MADV_DONTNEEDed pages are not written
// to the core file. Don't count these pages toward
// space usage (otherwise it can look like the heap
// is larger than the total memory used).
spanSize -= pageSize
}
}
st := s.Field("state")
if st.IsStruct() && st.HasField("s") { // go1.14+
st = st.Field("s")
}
if st.IsStruct() && st.HasField("value") { // go1.20+
st = st.Field("value")
}
switch st.Uint8() {
case spanInUse:
inUseSpanSize += spanSize
n := int64(s.Field("nelems").Uintptr())
// An object is allocated if it is marked as
// allocated or it is below freeindex.
x := s.Field("allocBits").Address()
alloc := make([]bool, n)
for i := int64(0); i < n; i++ {
alloc[i] = p.proc.ReadUint8(x.Add(i/8))>>uint(i%8)&1 != 0
}
k := int64(s.Field("freeindex").Uintptr())
for i := int64(0); i
|
{
ptr := level1Table.ArrayIndex(level1)
if ptr.Address() == 0 {
continue
}
level2table := ptr.Deref()
level2size := level2table.ArrayLen()
for level2 := int64(0); level2 < level2size; level2++ {
ptr = level2table.ArrayIndex(level2)
if ptr.Address() == 0 {
continue
}
a := ptr.Deref()
min := core.Address(arenaSize*(level2+level1*level2size) - arenaBaseOffset)
max := min.Add(arenaSize)
bitmap := a.Field("bitmap")
oneBitBitmap := a.HasField("noMorePtrs") // Starting in 1.20.
spans := a.Field("spans")
|
conditional_block
|
process.go
|
, nil},
&Stats{"released", releasedSpanSize, nil},
}},
}},
&Stats{"ptr bitmap", bitmap, nil},
&Stats{"span table", spanTable, nil},
}}
var check func(*Stats)
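// check verifies the Stats invariant: a node's Size must equal the sum of its
// children's Sizes, recursively.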
check = func(s *Stats) {
if len(s.Children) == 0 {
return
}
var sum int64
for _, c := range s.Children {
sum += c.Size
}
if sum != s.Size {
panic(fmt.Sprintf("check failed for %s: %d vs %d", s.Name, s.Size, sum))
}
for _, c := range s.Children {
check(c)
}
}
check(p.stats)
}
func (p *Process) readGs() {
// TODO: figure out how to "flush" running Gs.
allgs := p.rtGlobals["allgs"]
n := allgs.SliceLen()
for i := int64(0); i < n; i++ {
r := allgs.SliceIndex(i).Deref()
g := p.readG(r)
if g == nil {
continue
}
p.goroutines = append(p.goroutines, g)
}
}
func (p *Process) readG(r region) *Goroutine {
g := &Goroutine{r: r}
stk := r.Field("stack")
g.stackSize = int64(stk.Field("hi").Uintptr() - stk.Field("lo").Uintptr())
var osT *core.Thread // os thread working on behalf of this G (if any).
mp := r.Field("m")
if mp.Address() != 0 {
m := mp.Deref()
pid := m.Field("procid").Uint64()
// TODO check that m.curg points to g?
for _, t := range p.proc.Threads() {
if t.Pid() == pid {
osT = t
}
}
}
st := r.Field("atomicstatus")
if st.IsStruct() && st.HasField("value") { // go1.20+
st = st.Field("value")
}
status := st.Uint32()
status &^= uint32(p.rtConstants["_Gscan"])
var sp, pc core.Address
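// Choose the stack pointer and program counter to start the backtrace from,
// based on the goroutine's scheduling status.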
switch status {
case uint32(p.rtConstants["_Gidle"]):
return g
case uint32(p.rtConstants["_Grunnable"]), uint32(p.rtConstants["_Gwaiting"]):
sched := r.Field("sched")
sp = core.Address(sched.Field("sp").Uintptr())
pc = core.Address(sched.Field("pc").Uintptr())
case uint32(p.rtConstants["_Grunning"]):
sp = osT.SP()
pc = osT.PC()
// TODO: back up to the calling frame?
case uint32(p.rtConstants["_Gsyscall"]):
sp = core.Address(r.Field("syscallsp").Uintptr())
pc = core.Address(r.Field("syscallpc").Uintptr())
// TODO: or should we use the osT registers?
case uint32(p.rtConstants["_Gdead"]):
return nil
// TODO: copystack, others?
default:
// Unknown state. We can't read the frames, so just bail now.
// TODO: make this switch complete and then panic here.
// TODO: or just return nil?
return g
}
for {
f, err := p.readFrame(sp, pc)
if err != nil {
fmt.Printf("warning: giving up on backtrace: %v\n", err)
break
}
if f.f.name == "runtime.goexit" {
break
}
if len(g.frames) > 0 {
g.frames[len(g.frames)-1].parent = f
}
g.frames = append(g.frames, f)
if f.f.name == "runtime.sigtrampgo" {
// Continue traceback at location where the signal
// interrupted normal execution.
ctxt := p.proc.ReadPtr(sp.Add(16)) // 3rd arg
//ctxt is a *ucontext
mctxt := ctxt.Add(5 * 8)
// mctxt is a *mcontext
sp = p.proc.ReadPtr(mctxt.Add(15 * 8))
pc = p.proc.ReadPtr(mctxt.Add(16 * 8))
// TODO: totally arch-dependent!
} else {
sp = f.max
pc = core.Address(p.proc.ReadUintptr(sp - 8)) // TODO:amd64 only
}
if pc == 0 {
// TODO: when would this happen?
break
}
if f.f.name == "runtime.systemstack" {
// switch over to goroutine stack
sched := r.Field("sched")
sp = core.Address(sched.Field("sp").Uintptr())
pc = core.Address(sched.Field("pc").Uintptr())
}
}
return g
}
func (p *Process) readFrame(sp, pc core.Address) (*Frame, error) {
f := p.funcTab.find(pc)
if f == nil {
return nil, fmt.Errorf("cannot find func for pc=%#x", pc)
}
off := pc.Sub(f.entry)
size, err := f.frameSize.find(off)
if err != nil {
return nil, fmt.Errorf("cannot read frame size at pc=%#x: %v", pc, err)
}
size += p.proc.PtrSize() // TODO: on amd64, the pushed return address
frame := &Frame{f: f, pc: pc, min: sp, max: sp.Add(size)}
// Find live ptrs in locals
live := map[core.Address]bool{}
if x := int(p.rtConstants["_FUNCDATA_LocalsPointerMaps"]); x < len(f.funcdata) {
addr := f.funcdata[x]
// TODO: Ideally we should have the same frame size check as
// runtime.getStackSize to detect errors when we are missing
// the stackmap.
if addr != 0 {
locals := region{p: p, a: addr, typ: p.findType("runtime.stackmap")}
n := locals.Field("n").Int32() // # of bitmaps
nbit := locals.Field("nbit").Int32() // # of bits per bitmap
idx, err := f.stackMap.find(off)
if err != nil {
return nil, fmt.Errorf("cannot read stack map at pc=%#x: %v", pc, err)
}
if idx < 0 {
idx = 0
}
if idx < int64(n) {
bits := locals.Field("bytedata").a.Add(int64(nbit+7) / 8 * idx)
base := frame.max.Add(-16).Add(-int64(nbit) * p.proc.PtrSize())
// TODO: -16 for amd64. Return address and parent's frame pointer
for i := int64(0); i < int64(nbit); i++ {
if p.proc.ReadUint8(bits.Add(i/8))>>uint(i&7)&1 != 0 {
live[base.Add(i*p.proc.PtrSize())] = true
}
}
}
}
}
// Same for args
if x := int(p.rtConstants["_FUNCDATA_ArgsPointerMaps"]); x < len(f.funcdata) {
addr := f.funcdata[x]
if addr != 0 {
args := region{p: p, a: addr, typ: p.findType("runtime.stackmap")}
n := args.Field("n").Int32() // # of bitmaps
nbit := args.Field("nbit").Int32() // # of bits per bitmap
idx, err := f.stackMap.find(off)
if err != nil {
return nil, fmt.Errorf("cannot read stack map at pc=%#x: %v", pc, err)
}
if idx < 0 {
idx = 0
}
if idx < int64(n) {
bits := args.Field("bytedata").a.Add(int64(nbit+7) / 8 * idx)
base := frame.max
// TODO: add to base for LR archs.
for i := int64(0); i < int64(nbit); i++ {
if p.proc.ReadUint8(bits.Add(i/8))>>uint(i&7)&1 != 0 {
live[base.Add(i*p.proc.PtrSize())] = true
}
}
}
}
}
frame.Live = live
return frame, nil
}
// A Stats struct is the node of a tree representing the entire memory
// usage of the Go program. Children of a node break its usage down
// by category.
// We maintain the invariant that, if there are children,
// Size == sum(c.Size for c in Children).
type Stats struct {
Name string
Size int64
Children []*Stats
}
func (s *Stats) Child(name string) *Stats
|
{
for _, c := range s.Children {
if c.Name == name {
return c
}
}
return nil
}
|
identifier_body
|
|
process.go
|
bitmapStart,
bitmapMax: bitmapEnd,
spanTableMin: spanTableStart,
spanTableMax: spanTableEnd,
})
// Copy pointer bits to heap info.
// Note that the pointer bits are stored backwards.
for a := arenaStart; a < arenaUsed; a = a.Add(ptrSize) {
off := a.Sub(arenaStart) >> logPtrSize
if p.proc.ReadUint8(bitmapEnd.Add(-(off>>2)-1))>>uint(off&3)&1 != 0 {
p.setHeapPtr(a)
}
}
} else {
// go 1.11+. Has multiple arenas.
arenaSize := p.rtConstants["heapArenaBytes"]
if arenaSize%heapInfoSize != 0 {
panic("arenaSize not a multiple of heapInfoSize")
}
arenaBaseOffset := p.getArenaBaseOffset()
if ptrSize == 4 && arenaBaseOffset != 0 {
panic("arenaBaseOffset must be 0 for 32-bit inferior")
}
level1Table := mheap.Field("arenas")
level1size := level1Table.ArrayLen()
for level1 := int64(0); level1 < level1size; level1++ {
ptr := level1Table.ArrayIndex(level1)
if ptr.Address() == 0 {
continue
}
level2table := ptr.Deref()
level2size := level2table.ArrayLen()
for level2 := int64(0); level2 < level2size; level2++ {
ptr = level2table.ArrayIndex(level2)
if ptr.Address() == 0 {
continue
}
a := ptr.Deref()
min := core.Address(arenaSize*(level2+level1*level2size) - arenaBaseOffset)
max := min.Add(arenaSize)
bitmap := a.Field("bitmap")
oneBitBitmap := a.HasField("noMorePtrs") // Starting in 1.20.
spans := a.Field("spans")
arenas = append(arenas, arena{
heapMin: min,
heapMax: max,
bitmapMin: bitmap.a,
bitmapMax: bitmap.a.Add(bitmap.ArrayLen()),
spanTableMin: spans.a,
spanTableMax: spans.a.Add(spans.ArrayLen() * ptrSize),
})
// Copy out ptr/nonptr bits
n := bitmap.ArrayLen()
for i := int64(0); i < n; i++ {
if oneBitBitmap {
// The array uses 1 bit per word of heap. See mbitmap.go for
// more information.
m := bitmap.ArrayIndex(i).Uintptr()
bits := 8 * ptrSize
for j := int64(0); j < bits; j++ {
if m>>uint(j)&1 != 0 {
p.setHeapPtr(min.Add((i*bits + j) * ptrSize))
}
}
} else {
// The nth byte is composed of 4 object bits and 4 live/dead
// bits. We ignore the 4 live/dead bits, which are on the
// high order side of the byte.
//
// See mbitmap.go for more information on the format of
// the bitmap field of heapArena.
m := bitmap.ArrayIndex(i).Uint8()
for j := int64(0); j < 4; j++ {
if m>>uint(j)&1 != 0 {
p.setHeapPtr(min.Add((i*4 + j) * ptrSize))
}
}
}
}
}
}
}
p.readSpans(mheap, arenas)
}
func (p *Process) readSpans(mheap region, arenas []arena) {
var all int64
var text int64
var readOnly int64
var heap int64
var spanTable int64
var bitmap int64
var data int64
var bss int64 // also includes mmap'd regions
for _, m := range p.proc.Mappings() {
size := m.Size()
all += size
switch m.Perm() {
case core.Read:
readOnly += size
case core.Read | core.Exec:
text += size
case core.Read | core.Write:
if m.CopyOnWrite() {
// Check if m.file == text's file? That could distinguish
// data segment from mmapped file.
data += size
break
}
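// attribute adds the overlap of [x, y) with this mapping to the counter *p
// and subtracts it from the portion of the mapping still to be classified.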
attribute := func(x, y core.Address, p *int64) {
a := x.Max(m.Min())
b := y.Min(m.Max())
if a < b {
*p += b.Sub(a)
size -= b.Sub(a)
}
}
for _, a := range arenas {
attribute(a.heapMin, a.heapMax, &heap)
attribute(a.bitmapMin, a.bitmapMax, &bitmap)
attribute(a.spanTableMin, a.spanTableMax, &spanTable)
}
// Any other anonymous mapping is bss.
// TODO: how to distinguish original bss from anonymous mmap?
bss += size
default:
panic("weird mapping " + m.Perm().String())
}
}
if !p.is117OrGreater && mheap.HasField("curArena") {
// 1.13.3 and up have curArena. Subtract unallocated space in
// the current arena from the heap.
//
// As of 1.17, the runtime does this automatically
// (https://go.dev/cl/270537).
ca := mheap.Field("curArena")
unused := int64(ca.Field("end").Uintptr() - ca.Field("base").Uintptr())
heap -= unused
all -= unused
}
pageSize := p.rtConstants["_PageSize"]
// Span types
spanInUse := uint8(p.rtConstants["_MSpanInUse"])
spanManual := uint8(p.rtConstants["_MSpanManual"])
spanDead := uint8(p.rtConstants["_MSpanDead"])
spanFree := uint8(p.rtConstants["_MSpanFree"])
// Process spans.
if pageSize%heapInfoSize != 0 {
panic(fmt.Sprintf("page size not a multiple of %d", heapInfoSize))
}
allspans := mheap.Field("allspans")
var freeSpanSize int64
var releasedSpanSize int64
var manualSpanSize int64
var inUseSpanSize int64
var allocSize int64
var freeSize int64
var spanRoundSize int64
var manualAllocSize int64
var manualFreeSize int64
n := allspans.SliceLen()
for i := int64(0); i < n; i++ {
s := allspans.SliceIndex(i).Deref()
min := core.Address(s.Field("startAddr").Uintptr())
elemSize := int64(s.Field("elemsize").Uintptr())
nPages := int64(s.Field("npages").Uintptr())
spanSize := nPages * pageSize
max := min.Add(spanSize)
for a := min; a != max; a = a.Add(pageSize) {
if !p.proc.Readable(a) {
// Sometimes allocated but not yet touched pages or
// MADV_DONTNEEDed pages are not written
// to the core file. Don't count these pages toward
// space usage (otherwise it can look like the heap
// is larger than the total memory used).
spanSize -= pageSize
}
}
st := s.Field("state")
if st.IsStruct() && st.HasField("s") { // go1.14+
st = st.Field("s")
}
if st.IsStruct() && st.HasField("value") { // go1.20+
st = st.Field("value")
}
switch st.Uint8() {
case spanInUse:
inUseSpanSize += spanSize
n := int64(s.Field("nelems").Uintptr())
// An object is allocated if it is marked as
// allocated or it is below freeindex.
x := s.Field("allocBits").Address()
alloc := make([]bool, n)
for i := int64(0); i < n; i++ {
alloc[i] = p.proc.ReadUint8(x.Add(i/8))>>uint(i%8)&1 != 0
}
k := int64(s.Field("freeindex").Uintptr())
for i := int64(0); i < k; i++ {
alloc[i] = true
}
for i := int64(0); i < n; i++ {
if alloc[i] {
allocSize += elemSize
} else {
freeSize += elemSize
}
|
}
spanRoundSize += spanSize - n*elemSize
|
random_line_split
|
|
server.js
|
// Port number: 3306
var connect = mysql.createConnection({
host: 'sql9.freesqldatabase.com',
user:'sql9203547',
password:'hhldFiMrKp',
database:'sql9203547'
});
// --------------------------Data base side----------------------------------------
// ---------------------create tables and connection--------------------------------
connect.connect(function () {
var userTable = 'CREATE TABLE IF NOT EXISTS users( \
id INT AUTO_INCREMENT PRIMARY KEY, \
username varchar(255) NOT NULL UNIQUE,\
password varchar(255),\
Nationallity varchar(60),\
Birthday varchar(60) ,\
status varchar(255) ,\
imag longtext,\
Location varchar(60))';
// check it tomorrow??
var commentTable = 'CREATE TABLE IF NOT EXISTS comments( \
id INT AUTO_INCREMENT PRIMARY KEY, \
comment varchar(255) ,\
username varchar(255) ,\
roomID int ,\
FOREIGN KEY (roomID) REFERENCES rooms(id))';
// FOREIGN KEY (usernmae) REFERENCES users(id) ,\
var roomTable = 'CREATE TABLE IF NOT EXISTS rooms(id INT AUTO_INCREMENT PRIMARY KEY,location varchar(60),image longtext,discribtion varchar(255),contactInfo varchar(100),userID int,userName varchar(60),FOREIGN KEY (userID) REFERENCES users(id))';
connect.query(userTable);
connect.query(commentTable);
connect.query(roomTable);
});
// -----------------Sign Up ----and ------Login------------------------------------
// ----------------------sign up----------------------------------------
app.post('/signup',function (req,res) {
var password='';
var username= req.body.username;
var Image=req.body.image;
bcrypt.hash(req.body.password,3,function (err,hash) {
password=hash;
})
var Nationallity=req.body.nationality;
var Birthday=req.body.birthday;
var location=req.body.location;
var signup = 'SELECT * FROM users WHERE username=\''+username+'\'';
connect.query(signup,function (err,checkeduser) {
if(checkeduser.length<1){// user not exist
var data = 'INSERT INTO users (username,password,Nationallity,Birthday,location,imag) VALUES (\''+username+'\',\''+password+'\',\''+Nationallity+'\',\''+Birthday+'\',\''+location +'\',\''+Image+'\')';
connect.query(data);
res.send('true');
}else{
res.send('false');
}
});
});
// ---------------------login-----------------------------------------
var users=[];
var flag='false';
var x;
app.post('/login',function(req,res){
var results;
connect.query('SELECT * FROM users WHERE username=\''+req.body.username+'\'', function (err,result) {
console.log('hhhh',result,req.body.username )
if(result[0]!==undefined){
results=result;
compare();
}else{
flag=false;
res.send(flag)
}
});
function compare()
|
})
}
var createSession = function(req, responce, newUser) {
return req.session.regenerate(function() {
//newuser>>>> { id: 2, username: 'hananmajali', password: 'hananmajali' }
bcrypt.hash(req.body.password,3,function (err,hash) {
console.log(hash)
// x={'infog':['u',username,'p',hash]}
})
req.session.user = newUser;
users.push(req.session.user.username)
// console.log('after login ',req.session.user.username)
// console.log('true from server')
// console.log('flag is ',flag);
// console.log('hhhhh',flag)
res.send(flag)
});
};
});
//--------------------logout-----------------------------------
//Logout function destroys the open session.
app.get('/logout',function (req,res) {
users.splice(users.indexOf(req.session.user.username),1)
flag = 'false';
req.session.destroy();
res.clearCookie('info');
res.send(flag);
});
app.get('/show',function(req,res){
res.send(flag)
})
//----------------create and save inside roomtable---------------
app.post('/post',function(req,res) {
console.log('in post ',req.session.user.username,req.session.user.id)
var location = req.body.location;
var discribtion = req.body.discribtion;
var contactInfo = req.body.contactInfo;
var Image = req.body.image
var post = 'INSERT INTO rooms (location,discribtion,contactInfo,userID,userName,image) VALUES (\''+location+'\',\''+discribtion+'\',\''+contactInfo+'\',\''+req.session.user.id+'\',\''+req.session.user.username+'\',\''+Image+'\')';
connect.query(post);
res.send(req.session.user.username);
});
//-----return all roomdata to the client side in the main page for all users-------
app.get('/main',function(req,res) {
var rooms = 'SELECT rooms.id,rooms.location,rooms.image,rooms.discribtion,rooms.contactInfo,rooms.userName,users.imag FROM users INNER JOIN rooms ON rooms.userID = users.id';
connect.query(rooms,function (err,result) {
res.send(result)
})
});
//-----return all roomdata to the client side in the profile page for one user-------
app.get('/profile',function(req,res) {
var userroom = 'SELECT * FROM rooms WHERE userName=\''+req.session.user.username+'\'';
var userinfo= 'SELECT * FROM users WHERE userName=\''+req.session.user.username+'\'';
var userinformation1;
connect.query(userinfo,function(err,userinfomation){
userinfomation1=userinfomation
})
connect.query(userroom,function (err,info) {
var total=[];
var str = info
total.push(str);
total.push(userinfomation1)
res.send(total);
});
});
//-------------------clicked on specific name to take me to that profile---------
app.post('/Userprofile',function(req,res) {
var userroom = 'SELECT * FROM rooms WHERE userName=\''+req.body.username+'\'';
var userinfo= 'SELECT * FROM users WHERE userName=\''+req.body.username+'\'';
var userinformation1;
connect.query(userinfo,function(err,userinfomation){
userinfomation1=userinfomation
})
connect.query(userroom,function (err,info) {
var total=[];
var str = info
total.push(str);
total.push(userinfomation1)
res.send(total);
});
});
// -----------------delete room -----------------------------------------------
app.post('/deleteroom',function(req,res){
var roomId=req.body.id // I will receive it from client side
var deleteroom= 'DELETE FROM rooms WHERE id=\''+roomId +'\'';
connect.query(deleteroom);
})
// --------------post comment and send all the comment-------------------------
app.post('/postcomment',function(req,res){
var roomId= req.body.roomid;
var Comment=req.body.commet;
var Comment2='INSERT INTO comments (comment,username,roomID) VALUES (\''+Comment+'\',\''+req.session.user.username+'\',\''+roomId+'\')';
connect.query(Comment2);
var allcomments='SELECT comments.username,comments.comment,users.imag FROM comments INNER JOIN users ON comments.username=users.username AND comments.roomID=\''+roomId+'\' ORDER BY comments.id';
connect.query(allcomments,function(err,allcommentss){
res.send(allcommentss)
});
});
//---------language-----------------------------
app.post('/translate',function(req,response){
var value=req.body;
translate(req.body.text, {from:req.body.languageFrom+'', to: req.body.languageTo+'' })
.then(res => {
console.log(res.text);
//=> I speak English
//console.log(res.from.language.iso);
//=> nl
response.send(JSON.stringify(res.text))
})
.catch(err => {
console.error(err);
});
})
//------------status of the users in their profiles------------
app.put('/status',function(req,res){
var Status='UPDATE users SET status=\''+req.body.status+'\' WHERE username=\''+req.session.user.username+'\'';
connect.query(Status);
})
app.get('/Chat', function(req,res){
console.log('hanan',req.session.user.username)
res.send(req.session.user.username)
})
//_-_-_-_-_-_-_-_-_-_-_-_-_-_-----------------------------------
var numUsers = 0;
console.log('numUsers',numUsers);
var lans=[];
var lan;
io.on('connection', function (socket) {
console.log('connected');
var addedUser = false;
// when the client emits 'new message', this listens and executes
socket.on('new message', function (data) {
if(lans.indexOf(data.lan)===-1){
lans.push(data.lan+'')
console.log(data.l
|
{
bcrypt.compare(req.body.password,results[0].password,function (err,match) {
if(err){
console.log(err)
}
if(match){
console.log('this user is correct')
flag = 'true';
console.log('flag now is true')
createSession(req,res,results[0]);
}else{
console.log('this user is very bad')
console.log('flag now is false in else')
flag='false';
res.send(flag)
}
|
identifier_body
|
server.js
|
// Port number: 3306
var connect = mysql.createConnection({
host: 'sql9.freesqldatabase.com',
user:'sql9203547',
password:'hhldFiMrKp',
database:'sql9203547'
});
// --------------------------Data base side----------------------------------------
// ---------------------create tables and connection--------------------------------
connect.connect(function () {
var userTable = 'CREATE TABLE IF NOT EXISTS users( \
id INT AUTO_INCREMENT PRIMARY KEY, \
username varchar(255) NOT NULL UNIQUE,\
password varchar(255),\
Nationallity varchar(60),\
Birthday varchar(60) ,\
status varchar(255) ,\
imag longtext,\
Location varchar(60))';
// check it tomorrow??
var commentTable = 'CREATE TABLE IF NOT EXISTS comments( \
id INT AUTO_INCREMENT PRIMARY KEY, \
comment varchar(255) ,\
username varchar(255) ,\
roomID int ,\
FOREIGN KEY (roomID) REFERENCES rooms(id))';
// FOREIGN KEY (usernmae) REFERENCES users(id) ,\
var roomTable = 'CREATE TABLE IF NOT EXISTS rooms(id INT AUTO_INCREMENT PRIMARY KEY,location varchar(60),image longtext,discribtion varchar(255),contactInfo varchar(100),userID int,userName varchar(60),FOREIGN KEY (userID) REFERENCES users(id))';
connect.query(userTable);
connect.query(commentTable);
connect.query(roomTable);
});
// -----------------Sign Up ----and ------Login------------------------------------
// ----------------------sign up----------------------------------------
app.post('/signup',function (req,res) {
var password='';
var username= req.body.username;
var Image=req.body.image;
bcrypt.hash(req.body.password,3,function (err,hash) {
password=hash;
})
var Nationallity=req.body.nationality;
var Birthday=req.body.birthday;
var location=req.body.location;
var signup = 'SELECT * FROM users WHERE username=\''+username+'\'';
connect.query(signup,function (err,checkeduser) {
if(checkeduser.length<1){// user not exist
var data = 'INSERT INTO users (username,password,Nationallity,Birthday,location,imag) VALUES (\''+username+'\',\''+password+'\',\''+Nationallity+'\',\''+Birthday+'\',\''+location +'\',\''+Image+'\')';
connect.query(data);
res.send('true');
}else{
res.send('false');
}
});
});
// ---------------------login-----------------------------------------
var users=[];
var flag='false';
var x;
app.post('/login',function(req,res){
var results;
connect.query('SELECT * FROM users WHERE username=\''+req.body.username+'\'', function (err,result) {
console.log('hhhh',result,req.body.username )
if(result[0]!==undefined){
results=result;
compare();
}else{
flag=false;
res.send(flag)
}
});
function compare() {
bcrypt.compare(req.body.password,results[0].password,function (err,match) {
if(err){
console.log(err)
}
if(match){
console.log('this user is correct')
flag = 'true';
console.log('flag now is true')
createSession(req,res,results[0]);
}else{
console.log('this user is very bad')
console.log('flag now is false in else')
flag='false';
res.send(flag)
}
})
}
var createSession = function(req, responce, newUser) {
return req.session.regenerate(function() {
//newuser>>>> { id: 2, username: 'hananmajali', password: 'hananmajali' }
bcrypt.hash(req.body.password,3,function (err,hash) {
console.log(hash)
// x={'infog':['u',username,'p',hash]}
})
req.session.user = newUser;
users.push(req.session.user.username)
// console.log('after login ',req.session.user.username)
// console.log('true from server')
// console.log('flag is ',flag);
// console.log('hhhhh',flag)
res.send(flag)
});
};
});
//--------------------logout-----------------------------------
//Logout function destroys the open session.
app.get('/logout',function (req,res) {
users.splice(users.indexOf(req.session.user.username),1)
flag = 'false';
req.session.destroy();
res.clearCookie('info');
res.send(flag);
});
app.get('/show',function(req,res){
res.send(flag)
})
//----------------create and save inside roomtable---------------
app.post('/post',function(req,res) {
console.log('in post ',req.session.user.username,req.session.user.id)
var location = req.body.location;
var discribtion = req.body.discribtion;
var contactInfo = req.body.contactInfo;
var Image = req.body.image
var post = 'INSERT INTO rooms (location,discribtion,contactInfo,userID,userName,image) VALUES (\''+location+'\',\''+discribtion+'\',\''+contactInfo+'\',\''+req.session.user.id+'\',\''+req.session.user.username+'\',\''+Image+'\')';
connect.query(post);
res.send(req.session.user.username);
});
//-----return all roomdata to the client side in the main page for all users-------
app.get('/main',function(req,res) {
var rooms = 'SELECT rooms.id,rooms.location,rooms.image,rooms.discribtion,rooms.contactInfo,rooms.userName,users.imag FROM users INNER JOIN rooms ON rooms.userID = users.id';
connect.query(rooms,function (err,result) {
res.send(result)
})
});
//-----return all roomdata to the client side in the profile page for one user-------
app.get('/profile',function(req,res) {
var userroom = 'SELECT * FROM rooms WHERE userName=\''+req.session.user.username+'\'';
var userinfo= 'SELECT * FROM users WHERE userName=\''+req.session.user.username+'\'';
var userinformation1;
connect.query(userinfo,function(err,userinfomation){
userinfomation1=userinfomation
})
connect.query(userroom,function (err,info) {
var total=[];
var str = info
total.push(str);
total.push(userinfomation1)
res.send(total);
});
});
//-------------------clicked on specific name to take me to that profile---------
app.post('/Userprofile',function(req,res) {
var userroom = 'SELECT * FROM rooms WHERE userName=\''+req.body.username+'\'';
var userinfo= 'SELECT * FROM users WHERE userName=\''+req.body.username+'\'';
var userinformation1;
connect.query(userinfo,function(err,userinfomation){
userinfomation1=userinfomation
})
connect.query(userroom,function (err,info) {
var total=[];
var str = info
total.push(str);
total.push(userinfomation1)
res.send(total);
});
});
// -----------------delete room -----------------------------------------------
app.post('/deleteroom',function(req,res){
var roomId=req.body.id // I will receive it from client side
var deleteroom= 'DELETE FROM rooms WHERE id=\''+roomId +'\'';
connect.query(deleteroom);
})
// --------------post comment and send all the comment-------------------------
app.post('/postcomment',function(req,res){
var roomId= req.body.roomid;
var Comment=req.body.commet;
var Comment2='INSERT INTO comments (comment,username,roomID) VALUES (\''+Comment+'\',\''+req.session.user.username+'\',\''+roomId+'\')';
connect.query(Comment2);
var allcomments='SELECT comments.username,comments.comment,users.imag FROM comments INNER JOIN users ON comments.username=users.username AND comments.roomID=\''+roomId+'\' ORDER BY comments.id';
connect.query(allcomments,function(err,allcommentss){
res.send(allcommentss)
});
|
});
//---------language-----------------------------
app.post('/translate',function(req,response){
var value=req.body;
translate(req.body.text, {from:req.body.languageFrom+'', to: req.body.languageTo+'' })
.then(res => {
console.log(res.text);
//=> I speak English
//console.log(res.from.language.iso);
//=> nl
response.send(JSON.stringify(res.text))
})
.catch(err => {
console.error(err);
});
})
//------------status of the users in their profiles------------
app.put('/status',function(req,res){
var Status='UPDATE users SET status=\''+req.body.status+'\' WHERE username=\''+req.session.user.username+'\'';
connect.query(Status);
})
app.get('/Chat', function(req,res){
console.log('hanan',req.session.user.username)
res.send(req.session.user.username)
})
//_-_-_-_-_-_-_-_-_-_-_-_-_-_-----------------------------------
var numUsers = 0;
console.log('numUsers',numUsers);
var lans=[];
var lan;
io.on('connection', function (socket) {
console.log('connected');
var addedUser = false;
// when the client emits 'new message', this listens and executes
socket.on('new message', function (data) {
if(lans.indexOf(data.lan)===-1){
lans.push(data.lan+'')
console.log(data.l
|
random_line_split
|
|
server.js
|
exist
var data = 'INSERT INTO users (username,password,Nationallity,Birthday,location,imag) VALUES (\''+username+'\',\''+password+'\',\''+Nationallity+'\',\''+Birthday+'\',\''+location +'\',\''+Image+'\')';
connect.query(data);
res.send('true');
}else{
res.send('false');
}
});
});
// ---------------------login-----------------------------------------
var users=[];
var flag='false';
var x;
app.post('/login',function(req,res){
var results;
connect.query('SELECT * FROM users WHERE username=\''+req.body.username+'\'', function (err,result) {
console.log('hhhh',result,req.body.username )
if(result[0]!==undefined){
results=result;
compare();
}else{
flag=false;
res.send(flag)
}
});
function compare() {
bcrypt.compare(req.body.password,results[0].password,function (err,match) {
if(err){
console.log(err)
}
if(match){
console.log('this user is correct')
flag = 'true';
console.log('flag now is true')
createSession(req,res,results[0]);
}else{
console.log('this user is very bad')
console.log('flag now is false in else')
flag='false';
res.send(flag)
}
})
}
var createSession = function(req, responce, newUser) {
return req.session.regenerate(function() {
//newuser>>>> { id: 2, username: 'hananmajali', password: 'hananmajali' }
bcrypt.hash(req.body.password,3,function (err,hash) {
console.log(hash)
// x={'infog':['u',username,'p',hash]}
})
req.session.user = newUser;
users.push(req.session.user.username)
// console.log('after login ',req.session.user.username)
// console.log('true from server')
// console.log('flag is ',flag);
// console.log('hhhhh',flag)
res.send(flag)
});
};
});
//--------------------logout-----------------------------------
//Logout function destroys the open session.
app.get('/logout',function (req,res) {
users.splice(users.indexOf(req.session.user.username),1)
flag = 'false';
req.session.destroy();
res.clearCookie('info');
res.send(flag);
});
app.get('/show',function(req,res){
res.send(flag)
})
//----------------create and save inside roomtable---------------
app.post('/post',function(req,res) {
console.log('in post ',req.session.user.username,req.session.user.id)
var location = req.body.location;
var discribtion = req.body.discribtion;
var contactInfo = req.body.contactInfo;
var Image = req.body.image
var post = 'INSERT INTO rooms (location,discribtion,contactInfo,userID,userName,image) VALUES (\''+location+'\',\''+discribtion+'\',\''+contactInfo+'\',\''+req.session.user.id+'\',\''+req.session.user.username+'\',\''+Image+'\')';
connect.query(post);
res.send(req.session.user.username);
});
//-----return all roomdata to the client side in the main page for all users-------
app.get('/main',function(req,res) {
var rooms = 'SELECT rooms.id,rooms.location,rooms.image,rooms.discribtion,rooms.contactInfo,rooms.userName,users.imag FROM users INNER JOIN rooms ON rooms.userID = users.id';
connect.query(rooms,function (err,result) {
res.send(result)
})
});
//-----return all roomdata to the client side in the profile page for one user-------
app.get('/profile',function(req,res) {
var userroom = 'SELECT * FROM rooms WHERE userName=\''+req.session.user.username+'\'';
var userinfo= 'SELECT * FROM users WHERE userName=\''+req.session.user.username+'\'';
var userinformation1;
connect.query(userinfo,function(err,userinfomation){
userinfomation1=userinfomation
})
connect.query(userroom,function (err,info) {
var total=[];
var str = info
total.push(str);
total.push(userinfomation1)
res.send(total);
});
});
//-------------------clicked on specific name to take me to that profile---------
app.post('/Userprofile',function(req,res) {
var userroom = 'SELECT * FROM rooms WHERE userName=\''+req.body.username+'\'';
var userinfo= 'SELECT * FROM users WHERE userName=\''+req.body.username+'\'';
var userinformation1;
connect.query(userinfo,function(err,userinfomation){
userinfomation1=userinfomation
})
connect.query(userroom,function (err,info) {
var total=[];
var str = info
total.push(str);
total.push(userinfomation1)
res.send(total);
});
});
// -----------------delete room -----------------------------------------------
app.post('/deleteroom',function(req,res){
var roomId=req.body.id // I will receive it from client side
var deleteroom= 'DELETE FROM rooms WHERE id=\''+roomId +'\'';
connect.query(deleteroom);
})
// --------------post comment and send all the comment-------------------------
app.post('/postcomment',function(req,res){
var roomId= req.body.roomid;
var Comment=req.body.commet;
var Comment2='INSERT INTO comments (comment,username,roomID) VALUES (\''+Comment+'\',\''+req.session.user.username+'\',\''+roomId+'\')';
connect.query(Comment2);
var allcomments='SELECT comments.username,comments.comment,users.imag FROM comments INNER JOIN users ON comments.username=users.username AND comments.roomID=\''+roomId+'\' ORDER BY comments.id';
connect.query(allcomments,function(err,allcommentss){
res.send(allcommentss)
});
});
//---------language-----------------------------
app.post('/translate',function(req,response){
var value=req.body;
translate(req.body.text, {from:req.body.languageFrom+'', to: req.body.languageTo+'' })
.then(res => {
console.log(res.text);
//=> I speak English
//console.log(res.from.language.iso);
//=> nl
response.send(JSON.stringify(res.text))
})
.catch(err => {
console.error(err);
});
})
//------------status of the users in their profiles------------
app.put('/status',function(req,res){
var Status='UPDATE users SET status=\''+req.body.status+'\' WHERE username=\''+req.session.user.username+'\'';
connect.query(Status);
})
app.get('/Chat', function(req,res){
console.log('hanan',req.session.user.username)
res.send(req.session.user.username)
})
//_-_-_-_-_-_-_-_-_-_-_-_-_-_-----------------------------------
var numUsers = 0;
console.log('numUsers',numUsers);
var lans=[];
var lan;
io.on('connection', function (socket) {
console.log('connected');
var addedUser = false;
// when the client emits 'new message', this listens and executes
socket.on('new message', function (data) {
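// Track the languages used by connected clients; each incoming message is
// translated into the other tracked language before being broadcast.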
if(lans.indexOf(data.lan)===-1){
lans.push(data.lan+'')
console.log(data.lan)
}
// for(var i=0;i>lans.length;i++){
// if(data.lan+''!==lans[i]){
// data.lan=lans[i]
// }
// }
if(lans[0]===data.lan+''){
data.lan=lans[1];
}else{
data.lan=lans[0]
}
console.log('array ',lans);
console.log('lan ',data.lan);
translate(data.message, { to:data.lan})
.then(res => {
console.log('hanan',res.text)
// we tell the client to execute 'new message'
socket.broadcast.emit('new message', {
username: socket.username,
message: res.text,
lan:data.lan
});
console.log('hanan',res.text)
})
});
// when the client emits 'add user', this listens and executes
socket.on('add user', function (username) {
if (addedUser) return;
// we store the username in the socket session for this client
socket.username = username.username;
++numUsers;
addedUser = true;
socket.emit('login', {
numUsers: numUsers
});
// echo globally (all clients) that a person has connected
socket.broadcast.emit('user joined', {
username: socket.username,
numUsers: numUsers
});
});
// when the client emits 'typing', we broadcast it to others
socket.on('typing', function () {
socket.broadcast.emit('typing', {
username: socket.username
});
});
// when the client emits 'stop typing', we broadcast it to others
socket.on('stop typing', function () {
socket.broadcast.emit('stop typing', {
username: socket.username
});
});
// when the user disconnects.. perform this
socket.on('disconnect', function () {
if (addedUser)
|
{
--numUsers;
// echo globally that this client has left
socket.broadcast.emit('user left', {
username: socket.username,
numUsers: numUsers
});
}
|
conditional_block
|
|
server.js
|
// Port number: 3306
var connect = mysql.createConnection({
host: 'sql9.freesqldatabase.com',
user:'sql9203547',
password:'hhldFiMrKp',
database:'sql9203547'
});
// --------------------------Data base side----------------------------------------
// ---------------------create tables and connection--------------------------------
connect.connect(function () {
var userTable = 'CREATE TABLE IF NOT EXISTS users( \
id INT AUTO_INCREMENT PRIMARY KEY, \
username varchar(255) NOT NULL UNIQUE,\
password varchar(255),\
Nationallity varchar(60),\
Birthday varchar(60) ,\
status varchar(255) ,\
imag longtext,\
Location varchar(60))';
// check it tomorrow??
var commentTable = 'CREATE TABLE IF NOT EXISTS comments( \
id INT AUTO_INCREMENT PRIMARY KEY, \
comment varchar(255) ,\
username varchar(255) ,\
roomID int ,\
FOREIGN KEY (roomID) REFERENCES rooms(id))';
// FOREIGN KEY (usernmae) REFERENCES users(id) ,\
var roomTable = 'CREATE TABLE IF NOT EXISTS rooms(id INT AUTO_INCREMENT PRIMARY KEY,location varchar(60),image longtext,discribtion varchar(255),contactInfo varchar(100),userID int,userName varchar(60),FOREIGN KEY (userID) REFERENCES users(id))';
connect.query(userTable);
connect.query(commentTable);
connect.query(roomTable);
});
// -----------------Sign Up ----and ------Login------------------------------------
// ----------------------sign up----------------------------------------
app.post('/signup',function (req,res) {
var password='';
var username= req.body.username;
var Image=req.body.image;
bcrypt.hash(req.body.password,3,function (err,hash) {
password=hash;
})
var Nationallity=req.body.nationality;
var Birthday=req.body.birthday;
var location=req.body.location;
var signup = 'SELECT * FROM users WHERE username=\''+username+'\'';
connect.query(signup,function (err,checkeduser) {
if(checkeduser.length<1){// user not exist
var data = 'INSERT INTO users (username,password,Nationallity,Birthday,location,imag) VALUES (\''+username+'\',\''+password+'\',\''+Nationallity+'\',\''+Birthday+'\',\''+location +'\',\''+Image+'\')';
connect.query(data);
res.send('true');
}else{
res.send('false');
}
});
});
// ---------------------login-----------------------------------------
var users=[];
var flag='false';
var x;
app.post('/login',function(req,res){
var results;
connect.query('SELECT * FROM users WHERE username=\''+req.body.username+'\'', function (err,result) {
console.log('hhhh',result,req.body.username )
if(result[0]!==undefined){
results=result;
compare();
}else{
flag=false;
res.send(flag)
}
});
function
|
() {
bcrypt.compare(req.body.password,results[0].password,function (err,match) {
if(err){
console.log(err)
}
if(match){
console.log('this user is correct')
flag = 'true';
console.log('flag now is true')
createSession(req,res,results[0]);
}else{
console.log('this user is very bad')
console.log('flag now is false in else')
flag='false';
res.send(flag)
}
})
}
var createSession = function(req, response, newUser) {
return req.session.regenerate(function() {
//newuser>>>> { id: 2, username: 'hananmajali', password: 'hananmajali' }
bcrypt.hash(req.body.password,3,function (err,hash) {
console.log(hash)
// x={'infog':['u',username,'p',hash]}
})
req.session.user = newUser;
users.push(req.session.user.username)
// console.log('after login ',req.session.user.username)
// console.log('true from server')
// console.log('flag is ',flag);
// console.log('hhhhh',flag)
res.send(flag)
});
};
});
//--------------------logout-----------------------------------
//Logout function destroys the open session.
app.get('/logout',function (req,res) {
users.splice(users.indexOf(req.session.user.username),1)
flag = 'false';
req.session.destroy();
res.clearCookie('info');
res.send(flag);
});
app.get('/show',function(req,res){
res.send(flag)
})
//----------------create and save inside roomtable---------------
app.post('/post',function(req,res) {
console.log('in post ',req.session.user.username,req.session.user.id)
var location = req.body.location;
var discribtion = req.body.discribtion;
var contactInfo = req.body.contactInfo;
var Image = req.body.image
var post = 'INSERT INTO rooms (location,discribtion,contactInfo,userID,userName,image) VALUES (\''+location+'\',\''+discribtion+'\',\''+contactInfo+'\',\''+req.session.user.id+'\',\''+req.session.user.username+'\',\''+Image+'\')';
connect.query(post);
res.send(req.session.user.username);
});
//-----return all roomdata to the client side in the main page for all users-------
app.get('/main',function(req,res) {
var rooms = 'SELECT rooms.id,rooms.location,rooms.image,rooms.discribtion,rooms.contactInfo,rooms.userName,users.imag FROM users INNER JOIN rooms ON rooms.userID = users.id';
connect.query(rooms,function (err,result) {
res.send(result)
})
});
//-----return all roomdata to the client side in the profile page for one user-------
app.get('/profile',function(req,res) {
var userroom = 'SELECT * FROM rooms WHERE userName=\''+req.session.user.username+'\'';
var userinfo= 'SELECT * FROM users WHERE userName=\''+req.session.user.username+'\'';
// Run the two queries in sequence so the user info is ready before responding.
connect.query(userinfo,function(err,userinformation){
connect.query(userroom,function (err,info) {
var total=[];
total.push(info);
total.push(userinformation);
res.send(total);
});
});
});
//-------------------clicked on specific name to take me to that profile---------
app.post('/Userprofile',function(req,res) {
var userroom = 'SELECT * FROM rooms WHERE userName=\''+req.body.username+'\'';
var userinfo= 'SELECT * FROM users WHERE userName=\''+req.body.username+'\'';
// Run the two queries in sequence so the user info is ready before responding.
connect.query(userinfo,function(err,userinformation){
connect.query(userroom,function (err,info) {
var total=[];
total.push(info);
total.push(userinformation);
res.send(total);
});
});
});
// -----------------delete room -----------------------------------------------
app.post('/deleteroom',function(req,res){
var roomId=req.body.id // received from the client side
var deleteroom= 'DELETE FROM rooms WHERE id=\''+roomId +'\'';
connect.query(deleteroom);
res.send('true'); // respond so the client request doesn't hang
})
// --------------post comment and send all the comment-------------------------
app.post('/postcomment',function(req,res){
var roomId= req.body.roomid;
var Comment=req.body.commet;
var Comment2='INSERT INTO comments (comment,username,roomID) VALUES (\''+Comment+'\',\''+req.session.user.username+'\',\''+roomId+'\')';
connect.query(Comment2);
var allcomments='SELECT comments.username,comments.comment,users.imag FROM comments INNER JOIN users ON comments.username=users.username AND comments.roomID=\''+roomId+'\' ORDER BY comments.id';
connect.query(allcomments,function(err,allcommentss){
res.send(allcommentss)
});
});
//---------languge-----------------------------
app.post('/translate',function(req,response){
var value=req.body;
translate(req.body.text, {from:req.body.languageFrom+'', to: req.body.languageTo+'' })
.then(res => {
console.log(res.text);
//=> I speak English
//console.log(res.from.language.iso);
//=> nl
response.send(JSON.stringify(res.text))
})
.catch(err => {
console.error(err);
});
})
//------------status of the users in their profiles------------
app.put('/status',function(req,res){
var Status='UPDATE users SET status=\''+req.body.status+'\' WHERE username=\''+req.session.user.username+'\'';
connect.query(Status);
})
app.get('/Chat', function(req,res){
console.log('hanan',req.session.user.username)
res.send(req.session.user.username)
})
//_-_-_-_-_-_-_-_-_-_-_-_-_-_-----------------------------------
var numUsers = 0;
console.log('numUsers',numUsers);
var lans=[];
var lan;
io.on('connection', function (socket) {
console.log('connected');
var addedUser = false;
// when the client emits 'new message', this listens and executes
socket.on('new message', function (data) {
if(lans.indexOf(data.lan)===-1){
lans.push(data.lan+'')
console.log(data.l
|
compare
|
identifier_name
|
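The server.js rows above assemble every SQL statement by concatenating request values straight into the query string. As a minimal illustration of the parameterized alternative, here is a sketch in Python (the language used for the other sketches in this set), using the standard sqlite3 module purely for illustration; the signup function, its table layout, and the in-memory database are assumptions, not the original Node/MySQL code.

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE users (id INTEGER PRIMARY KEY, username TEXT UNIQUE, password TEXT)"
)

def signup(username, password_hash):
    # Placeholders let the driver escape values instead of string concatenation.
    existing = conn.execute(
        "SELECT 1 FROM users WHERE username = ?", (username,)
    ).fetchall()
    if existing:
        return False
    conn.execute(
        "INSERT INTO users (username, password) VALUES (?, ?)",
        (username, password_hash),
    )
    return True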
msc_chart.py
|
elif github_token:
g = Github(github_token)
else:
raise Exception(
"Either pygithub or github_token must be set when initializing MSCChart"
)
# Create a Github instance. The token only needs read:public_repo
self.repository = g.get_repo("matrix-org/matrix-doc")
def generate(self, type: ChartType, filepath: str):
"""Generate the chart
Args:
type: The type of chart to generate
filepath: Where to place the generated chart
"""
# Choose which chart type to generate
if type == ChartType.PIE:
self._generate_msc_pie_chart(filepath)
elif type == ChartType.STACKED_AREA:
self._generate_stacked_area_chart(filepath)
def _generate_stacked_area_chart(self, filepath: str):
"""Generates a historical stacked area chart of msc status"""
# Get time of the earliest issue
mscs = list(
self.repository.get_issues(
sort="created", state="all", direction="asc", labels=["proposal"],
)
)
# There are some MSCs that date all the way back to 2014. These skew the chart a bit,
# so lop those off
outlier_threshold = datetime.fromisoformat("2018-04-29T00:00:00")
# Generate list of weeks since the first msc
weeks = []
t = mscs[0].created_at
while t < datetime.now():
if t > outlier_threshold:
# Add t to our list of weeks
weeks.append(t)
# Move forward by one week
t = t + timedelta(weeks=1)
# And calculate it for today
weeks.append(datetime.now())
# Extract MSC event data beforehand so we don't do so again every week
msc_events = []
bar = Bar("Grabbing list of events for each MSC...", max=len(mscs))
for msc in mscs:
# TODO: We could theoretically optimize this by saving a list of events per
# MSC in a DB between runs. If the count of events for a given MSC number
# hasn't changed, then don't update the events
# This would prevent us from needing to fetch the label for each event
# Also try the GraphQL API
# Loop until we succeed in getting the events for this MSC
while True:
try:
# Pre-request the event labels. This apparently takes another API call
event_label_tuples = []
for event in msc.get_events():
event_label_tuples.append(
(event, event.label if event.event == "labeled" else None)
)
# Events retrieved, break out of the inner loop
msc_events.append(event_label_tuples)
break
except RateLimitExceededException:
# Wait a bit and retry
if self.print_progress:
print("\nHit Ratelimit. Waiting 1 minute...")
sleep(60)
if self.print_progress:
bar.next()
if self.print_progress:
bar.finish()
if self.print_progress:
print("Got", sum((len(events) for events in msc_events)), "total events")
# Get the count of each MSC type at a given week
new_mscs = []
fcp_mscs = []
closed_mscs = []
merged_mscs = []
bar = Bar("Processing MSC state snapshots...", max=len(weeks))
for week in weeks:
new_msc_count = 0
fcp_msc_count = 0
closed_msc_count = 0
merged_msc_count = 0
for index, msc in enumerate(mscs):
msc_state = self._get_msc_state_at_time(msc, msc_events[index], week)
if msc_state == MSCState.NEW:
new_msc_count += 1
elif msc_state == MSCState.FCP:
fcp_msc_count += 1
elif msc_state == MSCState.CLOSED:
closed_msc_count += 1
elif msc_state == MSCState.MERGED:
merged_msc_count += 1
# Note down all counts for this week
new_mscs.append(new_msc_count)
fcp_mscs.append(fcp_msc_count)
closed_mscs.append(closed_msc_count)
merged_mscs.append(merged_msc_count)
if self.print_progress:
bar.next()
if self.print_progress:
bar.finish()
str_weeks = [dt.strftime("%d-%m-%Y") for dt in weeks]
fig = go.Figure()
fig.add_trace(
go.Scatter(
x=str_weeks,
y=merged_mscs,
hoverinfo="x+y",
mode="lines",
name="Merged",
line=dict(width=0.5, color="#6f42c1"),
stackgroup="one",
)
)
fig.add_trace(
go.Scatter(
x=str_weeks,
y=closed_mscs,
hoverinfo="x+y",
mode="lines",
name="Closed",
line=dict(width=0.5, color="#ce303d"),
stackgroup="one",
)
)
fig.add_trace(
go.Scatter(
x=str_weeks,
y=fcp_mscs,
hoverinfo="x+y",
mode="lines",
name="FCP",
line=dict(width=0.5, color="yellow"),
stackgroup="one",
)
)
fig.add_trace(
go.Scatter(
x=str_weeks,
y=new_mscs,
hoverinfo="x+y",
mode="lines",
name="New",
line=dict(width=0.5, color="#28a745"),
stackgroup="one",
)
)
# Add a nice title
fig.update_layout(
title={
"text": "Matrix Spec Change Proposals",
"y": 0.95,
"x": 0.5,
"xanchor": "center",
"yanchor": "top",
},
font=dict(family="Arial", size=18, color="#222222",),
)
fig.write_image(filepath)
def _get_msc_state_at_time(
self,
msc: Issue,
msc_events: List[Tuple[IssueEvent, Optional[Label]]],
dt: datetime,
) -> MSCState:
"""Given a datetime, get the state of an MSC at that time
Args:
msc: The MSC to target,
msc_events: A cached List of github issue events to process, as well as cached label
information if the issue event relates to labels. We cache all this information
up front and pass it in, as otherwise we'd have to do separate API requests for
each of them.
dt: The threshold at which to stop processing issue events, thus giving you the
state of an MSC at this given time.
"""
# Iterate through MSC events and calculate the current state of the issue at a given
# time
# Initially assume it doesn't exist. Change the state as we iterate through events
state = {
"prev_state": None,
"state": None,
} # type: Dict[str, Optional[MSCState]]
finished_fcp = False
def update_state(new_state: MSCState):
state["prev_state"] = state["state"]
state["state"] = new_state
disposition_state = None
is_closed = False
has_label_merged = False
rejected_or_abandoned = False
for event, label in msc_events:
if event.created_at > dt:
# We've reached our datetime threshold
break
# Classify the event
if label:
label_name = label.name
# This is a label event
if label_name == "proposal":
update_state(MSCState.NEW)
elif label_name == "final-comment-period":
update_state(MSCState.FCP)
elif label_name == "disposition-merge":
disposition_state = MSCState.MERGED
elif label_name == "disposition-close":
disposition_state = MSCState.CLOSED
elif label_name == "disposition-postpone":
disposition_state = MSCState.POSTPONED
# Some issues have this silly label
# i.e https://github.com/matrix-org/matrix-doc/issues/1466
elif label_name == "merged":
update_state(MSCState.MERGED)
has_label_merged = True
elif label_name == "finished-final-comment-period":
# Prevent issues that have finished FCP, but whose associated PRs have not
# merged yet, from getting stuck in the FCP state forever.
# i.e https://github.com/matrix-org/matrix-doc/issues/1219
update_state(
disposition_state if disposition_state else MSCState.NEW
)
finished_fcp = True
elif label_name == "abandoned" or label_name == "rejected":
update_state(MSCState.CLOSED)
elif event.event == "reopened":
# TODO: What does mscbot-python do in this case? New or previous state?
update_state
|
g = pygithub
|
random_line_split
|
|
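The msc_chart.py row above wraps each PyGithub call in a loop that sleeps and retries when the API rate limit is hit. A minimal sketch of that pattern on its own; fetch_events and fetch_with_retry are hypothetical names standing in for any callable that may raise RateLimitExceededException.

from time import sleep
from github import RateLimitExceededException

def fetch_with_retry(fetch_events, wait_seconds=60):
    # Keep calling until the call stops raising a rate-limit error.
    while True:
        try:
            return fetch_events()
        except RateLimitExceededException:
            # Back off, then retry the same call.
            sleep(wait_seconds)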
msc_chart.py
|
_token)
else:
raise Exception(
"Either pygithub or github_token must be set when initializing MSCChart"
)
# Create a Github instance. The token only needs read:public_repo
self.repository = g.get_repo("matrix-org/matrix-doc")
def generate(self, type: ChartType, filepath: str):
"""Generate the chart
Args:
type: The type of chart to generate
filepath: Where to place the generated chart
"""
# Choose which chart type to generate
if type == ChartType.PIE:
self._generate_msc_pie_chart(filepath)
elif type == ChartType.STACKED_AREA:
self._generate_stacked_area_chart(filepath)
def _generate_stacked_area_chart(self, filepath: str):
"""Generates a historical stacked area chart of msc status"""
# Get time of the earliest issue
mscs = list(
self.repository.get_issues(
sort="created", state="all", direction="asc", labels=["proposal"],
)
)
# There are some MSCs that date all the way back to 2014. These skew the chart a bit,
# so lop those off
outlier_threshold = datetime.fromisoformat("2018-04-29T00:00:00")
# Generate list of weeks since the first msc
weeks = []
t = mscs[0].created_at
while t < datetime.now():
if t > outlier_threshold:
# Add t to our list of weeks
weeks.append(t)
# Move forward by one week
t = t + timedelta(weeks=1)
# And calculate it for today
weeks.append(datetime.now())
# Extract MSC event data beforehand so we don't do so again every week
msc_events = []
bar = Bar("Grabbing list of events for each MSC...", max=len(mscs))
for msc in mscs:
# TODO: We could theoretically optimize this by saving a list of events per
# MSC in a DB between runs. If the count of events for a given MSC number
# hasn't changed, then don't update the events
# This would prevent us from needing to fetch the label for each event
# Also try the GraphQL API
# Loop until we succeed in getting the events for this MSC
while True:
try:
# Pre-request the event labels. This apparently takes another API call
event_label_tuples = []
for event in msc.get_events():
event_label_tuples.append(
(event, event.label if event.event == "labeled" else None)
)
# Events retrieved, break out of the inner loop
msc_events.append(event_label_tuples)
break
except RateLimitExceededException:
# Wait a bit and retry
if self.print_progress:
print("\nHit Ratelimit. Waiting 1 minute...")
sleep(60)
if self.print_progress:
bar.next()
if self.print_progress:
bar.finish()
if self.print_progress:
print("Got", sum((len(events) for events in msc_events)), "total events")
# Get the count of each MSC type at a given week
new_mscs = []
fcp_mscs = []
closed_mscs = []
merged_mscs = []
bar = Bar("Processing MSC state snapshots...", max=len(weeks))
for week in weeks:
new_msc_count = 0
fcp_msc_count = 0
closed_msc_count = 0
merged_msc_count = 0
for index, msc in enumerate(mscs):
msc_state = self._get_msc_state_at_time(msc, msc_events[index], week)
if msc_state == MSCState.NEW:
new_msc_count += 1
elif msc_state == MSCState.FCP:
fcp_msc_count += 1
elif msc_state == MSCState.CLOSED:
closed_msc_count += 1
elif msc_state == MSCState.MERGED:
merged_msc_count += 1
# Note down all counts for this week
new_mscs.append(new_msc_count)
fcp_mscs.append(fcp_msc_count)
closed_mscs.append(closed_msc_count)
merged_mscs.append(merged_msc_count)
if self.print_progress:
bar.next()
if self.print_progress:
bar.finish()
str_weeks = [dt.strftime("%d-%m-%Y") for dt in weeks]
fig = go.Figure()
fig.add_trace(
go.Scatter(
x=str_weeks,
y=merged_mscs,
hoverinfo="x+y",
mode="lines",
name="Merged",
line=dict(width=0.5, color="#6f42c1"),
stackgroup="one",
)
)
fig.add_trace(
go.Scatter(
x=str_weeks,
y=closed_mscs,
hoverinfo="x+y",
mode="lines",
name="Closed",
line=dict(width=0.5, color="#ce303d"),
stackgroup="one",
)
)
fig.add_trace(
go.Scatter(
x=str_weeks,
y=fcp_mscs,
hoverinfo="x+y",
mode="lines",
name="FCP",
line=dict(width=0.5, color="yellow"),
stackgroup="one",
)
)
fig.add_trace(
go.Scatter(
x=str_weeks,
y=new_mscs,
hoverinfo="x+y",
mode="lines",
name="New",
line=dict(width=0.5, color="#28a745"),
stackgroup="one",
)
)
# Add a nice title
fig.update_layout(
title={
"text": "Matrix Spec Change Proposals",
"y": 0.95,
"x": 0.5,
"xanchor": "center",
"yanchor": "top",
},
font=dict(family="Arial", size=18, color="#222222",),
)
fig.write_image(filepath)
def
|
(
self,
msc: Issue,
msc_events: List[Tuple[IssueEvent, Optional[Label]]],
dt: datetime,
) -> MSCState:
"""Given a datetime, get the state of an MSC at that time
Args:
msc: The MSC to target,
msc_events: A cached List of github issue events to process, as well as cached label
information if the issue event relates to labels. We cache all this information
up front and pass it in, as otherwise we'd have to do separate API requests for
each of them.
dt: The threshold at which to stop processing issue events, thus giving you the
state of an MSC at this given time.
"""
# Iterate through MSC events and calculate the current state of the issue at a given
# time
# Initially assume it doesn't exist. Change the state as we iterate through events
state = {
"prev_state": None,
"state": None,
} # type: Dict[str, Optional[MSCState]]
finished_fcp = False
def update_state(new_state: MSCState):
state["prev_state"] = state["state"]
state["state"] = new_state
disposition_state = None
is_closed = False
has_label_merged = False
rejected_or_abandoned = False
for event, label in msc_events:
if event.created_at > dt:
# We've reached our datetime threshold
break
# Classify the event
if label:
label_name = label.name
# This is a label event
if label_name == "proposal":
update_state(MSCState.NEW)
elif label_name == "final-comment-period":
update_state(MSCState.FCP)
elif label_name == "disposition-merge":
disposition_state = MSCState.MERGED
elif label_name == "disposition-close":
disposition_state = MSCState.CLOSED
elif label_name == "disposition-postpone":
disposition_state = MSCState.POSTPONED
# Some issues have this silly label
# i.e https://github.com/matrix-org/matrix-doc/issues/1466
elif label_name == "merged":
update_state(MSCState.MERGED)
has_label_merged = True
elif label_name == "finished-final-comment-period":
# Prevent issues that have finished FCP, but whose associated PRs have not
# merged yet, from getting stuck in the FCP state forever.
# i.e https://github.com/matrix-org/matrix-doc/issues/1219
update_state(
disposition_state if disposition_state else MSCState.NEW
)
finished_fcp = True
elif label_name == "abandoned" or label_name == "rejected":
update_state(MSCState.CLOSED)
elif event.event == "reopened":
# TODO: What does mscbot-python do in this case? New or previous state?
update_state(state["prev_state"])
is_closed = False
elif event.event ==
|
_get_msc_state_at_time
|
identifier_name
|
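The masked identifier in the row above is _get_msc_state_at_time, which replays an MSC's label events in order and stops at a cutoff datetime. A simplified, standalone sketch of that idea; it uses hypothetical (created_at, label_name) tuples and plain strings instead of PyGithub objects and the MSCState enum.

from datetime import datetime

def state_at(events, cutoff: datetime, initial=None):
    # Replay label events in time order, stopping at the cutoff.
    state = initial
    for created_at, label_name in sorted(events):
        if created_at > cutoff:
            break
        if label_name == "proposal":
            state = "new"
        elif label_name == "final-comment-period":
            state = "fcp"
        elif label_name in ("abandoned", "rejected"):
            state = "closed"
        elif label_name == "merged":
            state = "merged"
    return state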
msc_chart.py
|
then don't update the events
# This would prevent us from needing to fetch the label for each event
# Also try the GraphQL API
# Loop until we succeed in getting the events for this MSC
while True:
try:
# Pre-request the event labels. This apparently takes another API call
event_label_tuples = []
for event in msc.get_events():
event_label_tuples.append(
(event, event.label if event.event == "labeled" else None)
)
# Events retrieved, break out of the inner loop
msc_events.append(event_label_tuples)
break
except RateLimitExceededException:
# Wait a bit and retry
if self.print_progress:
print("\nHit Ratelimit. Waiting 1 minute...")
sleep(60)
if self.print_progress:
bar.next()
if self.print_progress:
bar.finish()
if self.print_progress:
print("Got", sum((len(events) for events in msc_events)), "total events")
# Get the count of each MSC type at a given week
new_mscs = []
fcp_mscs = []
closed_mscs = []
merged_mscs = []
bar = Bar("Processing MSC state snapshots...", max=len(weeks))
for week in weeks:
new_msc_count = 0
fcp_msc_count = 0
closed_msc_count = 0
merged_msc_count = 0
for index, msc in enumerate(mscs):
msc_state = self._get_msc_state_at_time(msc, msc_events[index], week)
if msc_state == MSCState.NEW:
new_msc_count += 1
elif msc_state == MSCState.FCP:
fcp_msc_count += 1
elif msc_state == MSCState.CLOSED:
closed_msc_count += 1
elif msc_state == MSCState.MERGED:
merged_msc_count += 1
# Note down all counts for this week
new_mscs.append(new_msc_count)
fcp_mscs.append(fcp_msc_count)
closed_mscs.append(closed_msc_count)
merged_mscs.append(merged_msc_count)
if self.print_progress:
bar.next()
if self.print_progress:
bar.finish()
str_weeks = [dt.strftime("%d-%m-%Y") for dt in weeks]
fig = go.Figure()
fig.add_trace(
go.Scatter(
x=str_weeks,
y=merged_mscs,
hoverinfo="x+y",
mode="lines",
name="Merged",
line=dict(width=0.5, color="#6f42c1"),
stackgroup="one",
)
)
fig.add_trace(
go.Scatter(
x=str_weeks,
y=closed_mscs,
hoverinfo="x+y",
mode="lines",
name="Closed",
line=dict(width=0.5, color="#ce303d"),
stackgroup="one",
)
)
fig.add_trace(
go.Scatter(
x=str_weeks,
y=fcp_mscs,
hoverinfo="x+y",
mode="lines",
name="FCP",
line=dict(width=0.5, color="yellow"),
stackgroup="one",
)
)
fig.add_trace(
go.Scatter(
x=str_weeks,
y=new_mscs,
hoverinfo="x+y",
mode="lines",
name="New",
line=dict(width=0.5, color="#28a745"),
stackgroup="one",
)
)
# Add a nice title
fig.update_layout(
title={
"text": "Matrix Spec Change Proposals",
"y": 0.95,
"x": 0.5,
"xanchor": "center",
"yanchor": "top",
},
font=dict(family="Arial", size=18, color="#222222",),
)
fig.write_image(filepath)
def _get_msc_state_at_time(
self,
msc: Issue,
msc_events: List[Tuple[IssueEvent, Optional[Label]]],
dt: datetime,
) -> MSCState:
"""Given a datetime, get the state of an MSC at that time
Args:
msc: The MSC to target,
msc_events: A cached List of github issue events to process, as well as cached label
information if the issue event relates to labels. We cache all this information
up front and pass it in, as otherwise we'd have to do separate API requests for
each of them.
dt: The threshold at which to stop processing issue events, thus giving you the
state of an MSC at this given time.
"""
# Iterate through MSC events and calculate the current state of the issue at a given
# time
# Initially assume it doesn't exist. Change the state as we iterate through events
state = {
"prev_state": None,
"state": None,
} # type: Dict[str, Optional[MSCState]]
finished_fcp = False
def update_state(new_state: MSCState):
state["prev_state"] = state["state"]
state["state"] = new_state
disposition_state = None
is_closed = False
has_label_merged = False
rejected_or_abandoned = False
for event, label in msc_events:
if event.created_at > dt:
# We've reached our datetime threshold
break
# Classify the event
if label:
label_name = label.name
# This is a label event
if label_name == "proposal":
update_state(MSCState.NEW)
elif label_name == "final-comment-period":
update_state(MSCState.FCP)
elif label_name == "disposition-merge":
disposition_state = MSCState.MERGED
elif label_name == "disposition-close":
disposition_state = MSCState.CLOSED
elif label_name == "disposition-postpone":
disposition_state = MSCState.POSTPONED
# Some issues have this silly label
# i.e https://github.com/matrix-org/matrix-doc/issues/1466
elif label_name == "merged":
update_state(MSCState.MERGED)
has_label_merged = True
elif label_name == "finished-final-comment-period":
# Prevent issues that have finished FCP, but whose associated PRs have not
# merged yet, from getting stuck in the FCP state forever.
# i.e https://github.com/matrix-org/matrix-doc/issues/1219
update_state(
disposition_state if disposition_state else MSCState.NEW
)
finished_fcp = True
elif label_name == "abandoned" or label_name == "rejected":
update_state(MSCState.CLOSED)
elif event.event == "reopened":
# TODO: What does mscbot-python do in this case? New or previous state?
update_state(state["prev_state"])
is_closed = False
elif event.event == "closed":
# The MSC was closed
if msc.pull_request:
if state["state"] != MSCState.MERGED:
update_state(MSCState.CLOSED)
# Issues that are closed count as closed MSCs
else:
if has_label_merged:
update_state(MSCState.MERGED)
else:
update_state(MSCState.CLOSED)
elif event.event == "merged":
# The MSC was merged
if finished_fcp:
update_state(MSCState.MERGED)
if is_closed and rejected_or_abandoned:
update_state(MSCState.CLOSED)
return state["state"]
def _generate_msc_pie_chart(self, filepath: str):
# Get total number of {closed, open, merged, postponed, fcp} MSCs
fcp_mscs = self.repository.get_issues(
state="open", labels=["proposal", "final-comment-period"],
).totalCount
open_mscs = (
self.repository.get_issues(state="open", labels=["proposal"]).totalCount
- fcp_mscs
)
closed_mscs = self.repository.get_issues(
state="closed", labels=["proposal", "rejected"],
).totalCount
postponed_mscs = self.repository.get_issues(
state="open",
labels=[
"proposal",
"finished-final-comment-period",
"disposition-postpone",
],
).totalCount
merged_mscs = (
self.repository.get_issues(state="closed", labels=["proposal"],).totalCount
- closed_mscs
- postponed_mscs
)
# Create the pie chart
labels = ["Open", "Merged", "Closed", "FCP", "Postponed"]
colors = ["#28a745", "#6f42c1", "#ce303d", "yellow", "grey"]
values = [open_mscs, merged_mscs, closed_mscs, fcp_mscs, postponed_mscs]
# Add the respective count to each label
for idx, label in enumerate(labels):
|
labels[idx] = f"{label} ({values[idx]})"
|
conditional_block
|
|
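The conditional block above appends each count to its pie-chart label. A small sketch of how such a pie chart could be assembled with plotly's go.Pie; the counts here are made-up examples, whereas the real _generate_msc_pie_chart pulls them from the GitHub API.

import plotly.graph_objects as go

labels = ["Open", "Merged", "Closed", "FCP", "Postponed"]
colors = ["#28a745", "#6f42c1", "#ce303d", "yellow", "grey"]
values = [120, 80, 40, 10, 5]  # example counts only
# Append each count to its label, as the snippet above does.
labels = [f"{label} ({value})" for label, value in zip(labels, values)]
fig = go.Figure(data=[go.Pie(labels=labels, values=values, marker_colors=colors)])
# Like the write_image calls above, exporting requires the kaleido package.
fig.write_image("msc_pie.png")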
msc_chart.py
|
_token)
else:
raise Exception(
"Either pygithub or github_token must be set when initializing MSCChart"
)
# Create a Github instance. The token only needs read:public_repo
self.repository = g.get_repo("matrix-org/matrix-doc")
def generate(self, type: ChartType, filepath: str):
|
def _generate_stacked_area_chart(self, filepath: str):
"""Generates a historical stacked area chart of msc status"""
# Get time of the earliest issue
mscs = list(
self.repository.get_issues(
sort="created", state="all", direction="asc", labels=["proposal"],
)
)
# There are some MSCs that date all the way back to 2014. These skew the chart a bit,
# so lop those off
outlier_threshold = datetime.fromisoformat("2018-04-29T00:00:00")
# Generate list of weeks since the first msc
weeks = []
t = mscs[0].created_at
while t < datetime.now():
if t > outlier_threshold:
# Add t to our list of weeks
weeks.append(t)
# Move forward by one week
t = t + timedelta(weeks=1)
# And calculate it for today
weeks.append(datetime.now())
# Extract MSC event data beforehand so we don't do so again every week
msc_events = []
bar = Bar("Grabbing list of events for each MSC...", max=len(mscs))
for msc in mscs:
# TODO: We could theoretically optimize this by saving a list of events per
# MSC in a DB between runs. If the count of events for a given MSC number
# hasn't changed, then don't update the events
# This would prevent us from needing to fetch the label for each event
# Also try the GraphQL API
# Loop until we succeed in getting the events for this MSC
while True:
try:
# Pre-request the event labels. This apparently takes another API call
event_label_tuples = []
for event in msc.get_events():
event_label_tuples.append(
(event, event.label if event.event == "labeled" else None)
)
# Events retrieved, break out of the inner loop
msc_events.append(event_label_tuples)
break
except RateLimitExceededException:
# Wait a bit and retry
if self.print_progress:
print("\nHit Ratelimit. Waiting 1 minute...")
sleep(60)
if self.print_progress:
bar.next()
if self.print_progress:
bar.finish()
if self.print_progress:
print("Got", sum((len(events) for events in msc_events)), "total events")
# Get the count of each MSC type at a given week
new_mscs = []
fcp_mscs = []
closed_mscs = []
merged_mscs = []
bar = Bar("Processing MSC state snapshots...", max=len(weeks))
for week in weeks:
new_msc_count = 0
fcp_msc_count = 0
closed_msc_count = 0
merged_msc_count = 0
for index, msc in enumerate(mscs):
msc_state = self._get_msc_state_at_time(msc, msc_events[index], week)
if msc_state == MSCState.NEW:
new_msc_count += 1
elif msc_state == MSCState.FCP:
fcp_msc_count += 1
elif msc_state == MSCState.CLOSED:
closed_msc_count += 1
elif msc_state == MSCState.MERGED:
merged_msc_count += 1
# Note down all counts for this week
new_mscs.append(new_msc_count)
fcp_mscs.append(fcp_msc_count)
closed_mscs.append(closed_msc_count)
merged_mscs.append(merged_msc_count)
if self.print_progress:
bar.next()
if self.print_progress:
bar.finish()
str_weeks = [dt.strftime("%d-%m-%Y") for dt in weeks]
fig = go.Figure()
fig.add_trace(
go.Scatter(
x=str_weeks,
y=merged_mscs,
hoverinfo="x+y",
mode="lines",
name="Merged",
line=dict(width=0.5, color="#6f42c1"),
stackgroup="one",
)
)
fig.add_trace(
go.Scatter(
x=str_weeks,
y=closed_mscs,
hoverinfo="x+y",
mode="lines",
name="Closed",
line=dict(width=0.5, color="#ce303d"),
stackgroup="one",
)
)
fig.add_trace(
go.Scatter(
x=str_weeks,
y=fcp_mscs,
hoverinfo="x+y",
mode="lines",
name="FCP",
line=dict(width=0.5, color="yellow"),
stackgroup="one",
)
)
fig.add_trace(
go.Scatter(
x=str_weeks,
y=new_mscs,
hoverinfo="x+y",
mode="lines",
name="New",
line=dict(width=0.5, color="#28a745"),
stackgroup="one",
)
)
# Add a nice title
fig.update_layout(
title={
"text": "Matrix Spec Change Proposals",
"y": 0.95,
"x": 0.5,
"xanchor": "center",
"yanchor": "top",
},
font=dict(family="Arial", size=18, color="#222222",),
)
fig.write_image(filepath)
def _get_msc_state_at_time(
self,
msc: Issue,
msc_events: List[Tuple[IssueEvent, Optional[Label]]],
dt: datetime,
) -> MSCState:
"""Given a datetime, get the state of an MSC at that time
Args:
msc: The MSC to target,
msc_events: A cached List of github issue events to process, as well as cached label
information if the issue event relates to labels. We cache all this information
up front and pass it in, as otherwise we'd have to do separate API requests for
each of them.
dt: The threshold at which to stop processing issue events, thus giving you the
state of an MSC at this given time.
"""
# Iterate through MSC events and calculate the current state of the issue at a given
# time
# Initially assume it doesn't exist. Change the state as we iterate through events
state = {
"prev_state": None,
"state": None,
} # type: Dict[str, Optional[MSCState]]
finished_fcp = False
def update_state(new_state: MSCState):
state["prev_state"] = state["state"]
state["state"] = new_state
disposition_state = None
is_closed = False
has_label_merged = False
rejected_or_abandoned = False
for event, label in msc_events:
if event.created_at > dt:
# We've reached our datetime threshold
break
# Classify the event
if label:
label_name = label.name
# This is a label event
if label_name == "proposal":
update_state(MSCState.NEW)
elif label_name == "final-comment-period":
update_state(MSCState.FCP)
elif label_name == "disposition-merge":
disposition_state = MSCState.MERGED
elif label_name == "disposition-close":
disposition_state = MSCState.CLOSED
elif label_name == "disposition-postpone":
disposition_state = MSCState.POSTPONED
# Some issues have this silly label
# i.e https://github.com/matrix-org/matrix-doc/issues/1466
elif label_name == "merged":
update_state(MSCState.MERGED)
has_label_merged = True
elif label_name == "finished-final-comment-period":
# Prevent issues that have finished FCP, but whose associated PRs have not
# merged yet, from getting stuck in the FCP state forever.
# i.e https://github.com/matrix-org/matrix-doc/issues/1219
update_state(
disposition_state if disposition_state else MSCState.NEW
)
finished_fcp = True
elif label_name == "abandoned" or label_name == "rejected":
update_state(MSCState.CLOSED)
elif event.event == "reopened":
# TODO: What does mscbot-python do in this case? New or previous state?
update_state(state["prev_state"])
is_closed = False
elif event.event
|
"""Generate the chart
Args:
type: The type of chart to generate
filepath: Where to place the generated chart
"""
# Choose which chart type to generate
if type == ChartType.PIE:
self._generate_msc_pie_chart(filepath)
elif type == ChartType.STACKED_AREA:
self._generate_stacked_area_chart(filepath)
|
identifier_body
|
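The stacked-area rows above build their x-axis by stepping week by week from the first MSC's creation date to today, skipping pre-2018 outliers. A small sketch of that bucketing on its own; weekly_buckets is a hypothetical helper name.

from datetime import datetime, timedelta

def weekly_buckets(start: datetime, outlier_threshold: datetime):
    # Walk from the start date to now in one-week steps.
    weeks = []
    t = start
    while t < datetime.now():
        if t > outlier_threshold:
            weeks.append(t)
        t = t + timedelta(weeks=1)
    # Include a final snapshot for "today", as the code above does.
    weeks.append(datetime.now())
    return weeks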
main.rs
|
cfg(not(feature = "serde_type"))]
extern crate rustc_serialize;
extern crate mount;
extern crate staticfile;
extern crate reqwest;
extern crate serde_json;
extern crate iron_sessionstorage;
extern crate urlencoded;
use iron::prelude::*;
use iron::headers::ContentType;
use iron::modifiers::Redirect;
use iron::{Url, status};
use hbs::{Template, HandlebarsEngine, DirectorySource};
use rustc_serialize::json::{Json};
use staticfile::Static;
use mount::Mount;
use serde_json::Value;
use iron_sessionstorage::traits::*;
use iron_sessionstorage::SessionStorage;
use iron_sessionstorage::backends::SignedCookieBackend;
use urlencoded::UrlEncodedQuery;
use dotenv::dotenv;
use std::env;
use std::io::Read;
use std::collections::BTreeMap;
use std::path::Path;
use std::collections::HashMap;
static INSTAGRAM_OAUTH_URI: &'static str = "https://api.instagram.com/oauth/authorize/";
static GRANT_TYPE: &'static str = "authorization_code";
fn value_to_json(x: Value) -> Json {
match x {
Value::Number(ref x) if x.is_i64() => Json::I64(x.as_i64().unwrap()),
Value::Number(ref x) if x.is_u64() => Json::U64(x.as_u64().unwrap()),
Value::Number(ref x) if x.is_f64() => Json::F64(x.as_f64().unwrap()),
Value::String(x) => Json::String(x),
Value::Array(x) => Json::Array(x
.into_iter()
.map(|x| value_to_json(x))
.collect::<Vec<Json>>()
),
Value::Object(x) => {
let mut buf = BTreeMap::<String, Json>::new();
for (key, value) in x.into_iter() {
buf.insert(key, value_to_json(value));
}
Json::Object(buf)
},
Value::Bool(x) => Json::Boolean(x),
_ => Json::Null,
}
}
#[derive(Debug)]
struct AccessToken(String);
impl iron_sessionstorage::Value for AccessToken {
fn get_key() -> &'static str { "access_token" }
fn into_raw(self) -> String { self.0 }
fn from_raw(value: String) -> Option<Self> {
Some(AccessToken(value))
}
}
fn main() {
dotenv().ok();
let port = match env::var("PORT") {
Ok(p) => p,
Err(_) => "3000".to_string(),
};
let redirect_url = env::var("REDIRECT_URL").expect("lack of redirect url.");
let client_id = env::var("INSTAGRAM_CLIENT_ID").expect("lack of instagram client id.");
let client_secret = env::var("INSTAGRAM_CLIENT_SECRET").expect("lack of instagram client secret.");
let authorization_uri = format!("{}?client_id={}&redirect_uri={}&response_type=code&scope={}",
INSTAGRAM_OAUTH_URI,
client_id,
redirect_url,
"public_content".to_string());
let router = router!(
index: get "/" => move |req: &mut Request| {
match req.url.clone().query() {
Some(query) => {
let code = query.split("=").last().expect("query parsing is failed").to_string();
let params = [
("client_id", client_id.clone()),
("client_secret", client_secret.clone()),
("grant_type", GRANT_TYPE.clone().to_string()),
("redirect_uri", redirect_url.clone()),
("code", code.to_string())
];
let http_client = reqwest::Client::new().expect("Create HTTP client is failed");
let mut result = http_client.post("https://api.instagram.com/oauth/access_token")
.form(&params)
.send()
.expect("send Request failed");
let result_json = result.json::<HashMap<String, Value>>().expect("Parse JSON failed");
let data = match result_json.get("access_token") {
Some(at) => {
let access_token = at.as_str().unwrap();
req.session().set(AccessToken(access_token.to_string())).unwrap();
let url = format!("https://api.instagram.com/v1/tags/nofilter/media/recent?access_token={}", access_token);
http_client
.get(url.as_str())
.send()
.expect("send Request failed")
.json::<HashMap<String, Value>>()
.expect("Parse JSON failed")
.into_iter()
.fold(HashMap::<String, Json>::new(), |mut acc, (key, value)| {
acc.insert(key, value_to_json(value));
acc
|
let mut resp = Response::new();
resp.set_mut(Template::new("index", data)).set_mut(status::Ok);
Ok(Response::with((status::Found, Redirect(
Url::parse(redirect_url.as_str()).expect("parse url failed")
))))
},
None => {
let mut resp = Response::new();
let data = BTreeMap::<String, Json>::new();
resp.set_mut(Template::new("index", data)).set_mut(status::Ok);
Ok(resp)
},
}
},
oauth: get "/oauth" => move |_: &mut Request| {
Ok(Response::with((status::Found, Redirect(
Url::parse(authorization_uri.as_str()).expect(format!("authorization_uri is invalid => {}", authorization_uri).as_str())
))))
},
api_username: get "/api/username" => move |req: &mut Request| {
let username = match req.url.clone().query() {
Some(query) => query.split("=").last().expect("query parsing is failed"),
_ => ""
}.to_string();
let access_token = match try!(req.session().get::<AccessToken>()) {
Some(y) => y.0,
None => "Access token is Not Found".to_string(),
};
if access_token.len() == 0 {
return Ok(Response::with((ContentType::json().0, status::Ok, "{}")))
};
let url = format!("https://api.instagram.com/v1/users/search?q={}&access_token={}", username, access_token.to_string());
let http_client = reqwest::Client::new().expect("Create HTTP client is failed");
let mut buffer = String::new();
http_client
.get(url.as_str())
.send()
.expect("send Request failed")
.read_to_string(&mut buffer)
.expect("read JSON string failed")
;
Ok(Response::with((ContentType::json().0, status::Ok, buffer)))
},
api_hashtag: get "/api/hashtag" => move |req: &mut Request| {
fn get_query(x: Option<&Vec<String>>) -> &str {
match x {
Some(y) => match y.first() {
Some(z) => z.as_str(),
None => "",
},
None => "",
}
}
let access_token = match try!(req.session().get::<AccessToken>()) {
Some(y) => y.0,
None => "Access token is Not Found".to_string(),
};
let (user_id, hashtag) = match req.get_ref::<UrlEncodedQuery>() {
Ok(queries) => (get_query(queries.get("user_id")), get_query(queries.get("hashtag"))),
_ => ("", "")
};
let url = format!(
"https://api.instagram.com/v1/users/{}/media/recent/?access_token={}",
user_id.to_string(),
access_token.to_string()
);
let http_client = reqwest::Client::new().expect("Create HTTP client is failed");
let response = http_client
.get(url.as_str())
.send()
.expect("send Request failed")
.json::<HashMap<String, Value>>()
.expect("Parse JSON failed")
.into_iter()
.filter(|x| { (&x.0).as_str() == "data" })
.map(|x| {
match x.1 {
Value::Array(ys) => {
ys
.into_iter()
.filter(|media| {
if let &Value::Object(ref m) = media {
if let &Value::Array(ref tags) = m.get("tags").unwrap() {
tags.contains(&Value::String(hashtag.to_string()))
} else { false }
} else { false }
})
.map(value_to_json)
.collect::<Vec<Json>>()
},
_ => vec![],
}
})
.fold(vec![], |mut acc, mut xs| {
acc.append(&mut xs);
acc
})
;
Ok(Response::with((ContentType::json().0, status::Ok, Json::Array(response).to_string())))
}
);
let mut hbse = HandlebarsEngine::new();
hbse.add(Box::new(DirectorySource::new("./templates/", ".hbs")));
hbse.reload().expect("template can't reload correctly.");
let mut mount = Mount::new();
mount
.mount("/css", Static::new(Path::new("assets/css")))
.mount("/js", Static::new(Path::new("assets/js")))
.mount("/", router);
let mut chain = Chain::new(mount);
|
})
},
None => HashMap::<String, Json>::new(),
};
|
random_line_split
|
main.rs
|
cfg(not(feature = "serde_type"))]
extern crate rustc_serialize;
extern crate mount;
extern crate staticfile;
extern crate reqwest;
extern crate serde_json;
extern crate iron_sessionstorage;
extern crate urlencoded;
use iron::prelude::*;
use iron::headers::ContentType;
use iron::modifiers::Redirect;
use iron::{Url, status};
use hbs::{Template, HandlebarsEngine, DirectorySource};
use rustc_serialize::json::{Json};
use staticfile::Static;
use mount::Mount;
use serde_json::Value;
use iron_sessionstorage::traits::*;
use iron_sessionstorage::SessionStorage;
use iron_sessionstorage::backends::SignedCookieBackend;
use urlencoded::UrlEncodedQuery;
use dotenv::dotenv;
use std::env;
use std::io::Read;
use std::collections::BTreeMap;
use std::path::Path;
use std::collections::HashMap;
static INSTAGRAM_OAUTH_URI: &'static str = "https://api.instagram.com/oauth/authorize/";
static GRANT_TYPE: &'static str = "authorization_code";
fn value_to_json(x: Value) -> Json
|
}
}
#[derive(Debug)]
struct AccessToken(String);
impl iron_sessionstorage::Value for AccessToken {
fn get_key() -> &'static str { "access_token" }
fn into_raw(self) -> String { self.0 }
fn from_raw(value: String) -> Option<Self> {
Some(AccessToken(value))
}
}
fn main() {
dotenv().ok();
let port = match env::var("PORT") {
Ok(p) => p,
Err(_) => "3000".to_string(),
};
let redirect_url = env::var("REDIRECT_URL").expect("lack of redirect url.");
let client_id = env::var("INSTAGRAM_CLIENT_ID").expect("lack of instagram client id.");
let client_secret = env::var("INSTAGRAM_CLIENT_SECRET").expect("lack of instagram client secret.");
let authorization_uri = format!("{}?client_id={}&redirect_uri={}&response_type=code&scope={}",
INSTAGRAM_OAUTH_URI,
client_id,
redirect_url,
"public_content".to_string());
let router = router!(
index: get "/" => move |req: &mut Request| {
match req.url.clone().query() {
Some(query) => {
let code = query.split("=").last().expect("query parsing is failed").to_string();
let params = [
("client_id", client_id.clone()),
("client_secret", client_secret.clone()),
("grant_type", GRANT_TYPE.clone().to_string()),
("redirect_uri", redirect_url.clone()),
("code", code.to_string())
];
let http_client = reqwest::Client::new().expect("Create HTTP client is failed");
let mut result = http_client.post("https://api.instagram.com/oauth/access_token")
.form(&params)
.send()
.expect("send Request failed");
let result_json = result.json::<HashMap<String, Value>>().expect("Parse JSON failed");
let data = match result_json.get("access_token") {
Some(at) => {
let access_token = at.as_str().unwrap();
req.session().set(AccessToken(access_token.to_string())).unwrap();
let url = format!("https://api.instagram.com/v1/tags/nofilter/media/recent?access_token={}", access_token);
http_client
.get(url.as_str())
.send()
.expect("send Request failed")
.json::<HashMap<String, Value>>()
.expect("Parse JSON failed")
.into_iter()
.fold(HashMap::<String, Json>::new(), |mut acc, (key, value)| {
acc.insert(key, value_to_json(value));
acc
})
},
None => HashMap::<String, Json>::new(),
};
let mut resp = Response::new();
resp.set_mut(Template::new("index", data)).set_mut(status::Ok);
Ok(Response::with((status::Found, Redirect(
Url::parse(redirect_url.as_str()).expect("parse url failed")
))))
},
None => {
let mut resp = Response::new();
let data = BTreeMap::<String, Json>::new();
resp.set_mut(Template::new("index", data)).set_mut(status::Ok);
Ok(resp)
},
}
},
oauth: get "/oauth" => move |_: &mut Request| {
Ok(Response::with((status::Found, Redirect(
Url::parse(authorization_uri.as_str()).expect(format!("authorization_uri is invalid => {}", authorization_uri).as_str())
))))
},
api_username: get "/api/username" => move |req: &mut Request| {
let username = match req.url.clone().query() {
Some(query) => query.split("=").last().expect("query parsing is failed"),
_ => ""
}.to_string();
let access_token = match try!(req.session().get::<AccessToken>()) {
Some(y) => y.0,
None => "Access token is Not Found".to_string(),
};
if access_token.len() == 0 {
return Ok(Response::with((ContentType::json().0, status::Ok, "{}")))
};
let url = format!("https://api.instagram.com/v1/users/search?q={}&access_token={}", username, access_token.to_string());
let http_client = reqwest::Client::new().expect("Create HTTP client is failed");
let mut buffer = String::new();
http_client
.get(url.as_str())
.send()
.expect("send Request failed")
.read_to_string(&mut buffer)
.expect("read JSON string failed")
;
Ok(Response::with((ContentType::json().0, status::Ok, buffer)))
},
api_hashtag: get "/api/hashtag" => move |req: &mut Request| {
fn get_query(x: Option<&Vec<String>>) -> &str {
match x {
Some(y) => match y.first() {
Some(z) => z.as_str(),
None => "",
},
None => "",
}
}
let access_token = match try!(req.session().get::<AccessToken>()) {
Some(y) => y.0,
None => "Access token is Not Found".to_string(),
};
let (user_id, hashtag) = match req.get_ref::<UrlEncodedQuery>() {
Ok(queries) => (get_query(queries.get("user_id")), get_query(queries.get("hashtag"))),
_ => ("", "")
};
let url = format!(
"https://api.instagram.com/v1/users/{}/media/recent/?access_token={}",
user_id.to_string(),
access_token.to_string()
);
let http_client = reqwest::Client::new().expect("Create HTTP client is failed");
let response = http_client
.get(url.as_str())
.send()
.expect("send Request failed")
.json::<HashMap<String, Value>>()
.expect("Parse JSON failed")
.into_iter()
.filter(|x| { (&x.0).as_str() == "data" })
.map(|x| {
match x.1 {
Value::Array(ys) => {
ys
.into_iter()
.filter(|media| {
if let &Value::Object(ref m) = media {
if let &Value::Array(ref tags) = m.get("tags").unwrap() {
tags.contains(&Value::String(hashtag.to_string()))
} else { false }
} else { false }
})
.map(value_to_json)
.collect::<Vec<Json>>()
},
_ => vec![],
}
})
.fold(vec![], |mut acc, mut xs| {
acc.append(&mut xs);
acc
})
;
Ok(Response::with((ContentType::json().0, status::Ok, Json::Array(response).to_string())))
}
);
let mut hbse = HandlebarsEngine::new();
hbse.add(Box::new(DirectorySource::new("./templates/", ".hbs")));
hbse.reload().expect("template can't reload correctly.");
let mut mount = Mount::new();
mount
.mount("/css", Static::new(Path::new("assets/css")))
.mount("/js", Static::new(Path::new("assets/js")))
.mount("/", router);
let mut chain = Chain::new(mount
|
{
match x {
Value::Number(ref x) if x.is_i64() => Json::I64(x.as_i64().unwrap()),
Value::Number(ref x) if x.is_u64() => Json::U64(x.as_u64().unwrap()),
Value::Number(ref x) if x.is_f64() => Json::F64(x.as_f64().unwrap()),
Value::String(x) => Json::String(x),
Value::Array(x) => Json::Array(x
.into_iter()
.map(|x| value_to_json(x))
.collect::<Vec<Json>>()
),
Value::Object(x) => {
let mut buf = BTreeMap::<String, Json>::new();
for (key, value) in x.into_iter() {
buf.insert(key, value_to_json(value));
}
Json::Object(buf)
},
Value::Bool(x) => Json::Boolean(x),
_ => Json::Null,
|
identifier_body
|
main.rs
|
cfg(not(feature = "serde_type"))]
extern crate rustc_serialize;
extern crate mount;
extern crate staticfile;
extern crate reqwest;
extern crate serde_json;
extern crate iron_sessionstorage;
extern crate urlencoded;
use iron::prelude::*;
use iron::headers::ContentType;
use iron::modifiers::Redirect;
use iron::{Url, status};
use hbs::{Template, HandlebarsEngine, DirectorySource};
use rustc_serialize::json::{Json};
use staticfile::Static;
use mount::Mount;
use serde_json::Value;
use iron_sessionstorage::traits::*;
use iron_sessionstorage::SessionStorage;
use iron_sessionstorage::backends::SignedCookieBackend;
use urlencoded::UrlEncodedQuery;
use dotenv::dotenv;
use std::env;
use std::io::Read;
use std::collections::BTreeMap;
use std::path::Path;
use std::collections::HashMap;
static INSTAGRAM_OAUTH_URI: &'static str = "https://api.instagram.com/oauth/authorize/";
static GRANT_TYPE: &'static str = "authorization_code";
fn value_to_json(x: Value) -> Json {
match x {
Value::Number(ref x) if x.is_i64() => Json::I64(x.as_i64().unwrap()),
Value::Number(ref x) if x.is_u64() => Json::U64(x.as_u64().unwrap()),
Value::Number(ref x) if x.is_f64() => Json::F64(x.as_f64().unwrap()),
Value::String(x) => Json::String(x),
Value::Array(x) => Json::Array(x
.into_iter()
.map(|x| value_to_json(x))
.collect::<Vec<Json>>()
),
Value::Object(x) => {
let mut buf = BTreeMap::<String, Json>::new();
for (key, value) in x.into_iter() {
buf.insert(key, value_to_json(value));
}
Json::Object(buf)
},
Value::Bool(x) => Json::Boolean(x),
_ => Json::Null,
}
}
#[derive(Debug)]
struct
|
(String);
impl iron_sessionstorage::Value for AccessToken {
fn get_key() -> &'static str { "access_token" }
fn into_raw(self) -> String { self.0 }
fn from_raw(value: String) -> Option<Self> {
Some(AccessToken(value))
}
}
fn main() {
dotenv().ok();
let port = match env::var("PORT") {
Ok(p) => p,
Err(_) => "3000".to_string(),
};
let redirect_url = env::var("REDIRECT_URL").expect("lack of redirect url.");
let client_id = env::var("INSTAGRAM_CLIENT_ID").expect("lack of instagram client id.");
let client_secret = env::var("INSTAGRAM_CLIENT_SECRET").expect("lack of instagram client secret.");
let authorization_uri = format!("{}?client_id={}&redirect_uri={}&response_type=code&scope={}",
INSTAGRAM_OAUTH_URI,
client_id,
redirect_url,
"public_content".to_string());
let router = router!(
index: get "/" => move |req: &mut Request| {
match req.url.clone().query() {
Some(query) => {
let code = query.split("=").last().expect("query parsing is failed").to_string();
let params = [
("client_id", client_id.clone()),
("client_secret", client_secret.clone()),
("grant_type", GRANT_TYPE.clone().to_string()),
("redirect_uri", redirect_url.clone()),
("code", code.to_string())
];
let http_client = reqwest::Client::new().expect("Create HTTP client is failed");
let mut result = http_client.post("https://api.instagram.com/oauth/access_token")
.form(&params)
.send()
.expect("send Request failed");
let result_json = result.json::<HashMap<String, Value>>().expect("Parse JSON failed");
let data = match result_json.get("access_token") {
Some(at) => {
let access_token = at.as_str().unwrap();
req.session().set(AccessToken(access_token.to_string())).unwrap();
let url = format!("https://api.instagram.com/v1/tags/nofilter/media/recent?access_token={}", access_token);
http_client
.get(url.as_str())
.send()
.expect("send Request failed")
.json::<HashMap<String, Value>>()
.expect("Parse JSON failed")
.into_iter()
.fold(HashMap::<String, Json>::new(), |mut acc, (key, value)| {
acc.insert(key, value_to_json(value));
acc
})
},
None => HashMap::<String, Json>::new(),
};
let mut resp = Response::new();
resp.set_mut(Template::new("index", data)).set_mut(status::Ok);
Ok(Response::with((status::Found, Redirect(
Url::parse(redirect_url.as_str()).expect("parse url failed")
))))
},
None => {
let mut resp = Response::new();
let data = BTreeMap::<String, Json>::new();
resp.set_mut(Template::new("index", data)).set_mut(status::Ok);
Ok(resp)
},
}
},
oauth: get "/oauth" => move |_: &mut Request| {
Ok(Response::with((status::Found, Redirect(
Url::parse(authorization_uri.as_str()).expect(format!("authorization_uri is invalid => {}", authorization_uri).as_str())
))))
},
api_username: get "/api/username" => move |req: &mut Request| {
let username = match req.url.clone().query() {
Some(query) => query.split("=").last().expect("query parsing is failed"),
_ => ""
}.to_string();
let access_token = match try!(req.session().get::<AccessToken>()) {
Some(y) => y.0,
None => "Access token is Not Found".to_string(),
};
if access_token.len() == 0 {
return Ok(Response::with((ContentType::json().0, status::Ok, "{}")))
};
let url = format!("https://api.instagram.com/v1/users/search?q={}&access_token={}", username, access_token.to_string());
let http_client = reqwest::Client::new().expect("Create HTTP client is failed");
let mut buffer = String::new();
http_client
.get(url.as_str())
.send()
.expect("send Request failed")
.read_to_string(&mut buffer)
.expect("read JSON string failed")
;
Ok(Response::with((ContentType::json().0, status::Ok, buffer)))
},
api_hashtag: get "/api/hashtag" => move |req: &mut Request| {
fn get_query(x: Option<&Vec<String>>) -> &str {
match x {
Some(y) => match y.first() {
Some(z) => z.as_str(),
None => "",
},
None => "",
}
}
let access_token = match try!(req.session().get::<AccessToken>()) {
Some(y) => y.0,
None => "Access token is Not Found".to_string(),
};
let (user_id, hashtag) = match req.get_ref::<UrlEncodedQuery>() {
Ok(queries) => (get_query(queries.get("user_id")), get_query(queries.get("hashtag"))),
_ => ("", "")
};
let url = format!(
"https://api.instagram.com/v1/users/{}/media/recent/?access_token={}",
user_id.to_string(),
access_token.to_string()
);
let http_client = reqwest::Client::new().expect("Create HTTP client is failed");
let response = http_client
.get(url.as_str())
.send()
.expect("send Request failed")
.json::<HashMap<String, Value>>()
.expect("Parse JSON failed")
.into_iter()
.filter(|x| { (&x.0).as_str() == "data" })
.map(|x| {
match x.1 {
Value::Array(ys) => {
ys
.into_iter()
.filter(|media| {
if let &Value::Object(ref m) = media {
if let &Value::Array(ref tags) = m.get("tags").unwrap() {
tags.contains(&Value::String(hashtag.to_string()))
} else { false }
} else { false }
})
.map(value_to_json)
.collect::<Vec<Json>>()
},
_ => vec![],
}
})
.fold(vec![], |mut acc, mut xs| {
acc.append(&mut xs);
acc
})
;
Ok(Response::with((ContentType::json().0, status::Ok, Json::Array(response).to_string())))
}
);
let mut hbse = HandlebarsEngine::new();
hbse.add(Box::new(DirectorySource::new("./templates/", ".hbs")));
hbse.reload().expect("template can't reload correctly.");
let mut mount = Mount::new();
mount
.mount("/css", Static::new(Path::new("assets/css")))
.mount("/js", Static::new(Path::new("assets/js")))
.mount("/", router);
let mut chain = Chain::new(mount
|
AccessToken
|
identifier_name
|
MySVN.py
|
(message, raise_exception = True):
"""
Display error message, then terminate.
"""
print "Error:", message
print
if raise_exception:
raise ExternalCommandFailed
else:
sys.exit(1)
# Windows compatibility code by Bill Baxter
if os.name == "nt":
def find_program(name):
"""
Find the name of the program for Popen.
Windows is finicky about having the complete file name. Popen
won't search the %PATH% for you automatically.
(Adapted from ctypes.find_library)
"""
# See MSDN for the REAL search order.
base, ext = os.path.splitext(name)
if ext:
exts = [ext]
else:
exts = ['.bat', '.exe']
for directory in os.environ['PATH'].split(os.pathsep):
for e in exts:
fname = os.path.join(directory, base + e)
if os.path.exists(fname):
return fname
return None
else:
def find_program(name):
"""
Find the name of the program for Popen.
On Unix, popen isn't picky about having absolute paths.
"""
return name
def shell_quote(s):
if os.name == "nt":
q = '"'
else:
q = "'"
return q + s.replace('\\', '\\\\').replace("'", "'\"'\"'") + q
locale_encoding = locale.getpreferredencoding()
def run_svn(args, fail_if_stderr=False, encoding="utf-8"):
"""
Run svn cmd in PIPE
exit if svn cmd failed
"""
def _transform_arg(a):
if isinstance(a, unicode):
a = a.encode(encoding or locale_encoding)
elif not isinstance(a, str):
a = str(a)
return a
t_args = map(_transform_arg, args)
cmd = find_program("svn")
cmd_string = str(" ".join(map(shell_quote, [cmd] + t_args)))
print "*", cmd_string
pipe = Popen([cmd] + t_args, executable=cmd, stdout=PIPE, stderr=PIPE)
out, err = pipe.communicate()
if pipe.returncode != 0 or (fail_if_stderr and err.strip()):
display_error("External program failed (return code %d): %s\n%s"
% (pipe.returncode, cmd_string, err))
return out
def svn_date_to_timestamp(svn_date):
"""
Parse an SVN date as read from the XML output and
return the corresponding timestamp.
"""
# Strip microseconds and timezone (always UTC, hopefully)
# XXX there are various ISO datetime parsing routines out there,
# cf. http://seehuhn.de/comp/pdate
date = svn_date.split('.', 2)[0]
time_tuple = time.strptime(date, "%Y-%m-%dT%H:%M:%S")
return calendar.timegm(time_tuple)
def parse_svn_info_xml(xml_string):
"""
Parse the XML output from an "svn info" command and extract
useful information as a dict.
"""
d = {}
tree = ET.fromstring(xml_string)
entry = tree.find('.//entry')
if entry:
d['url'] = entry.find('url').text
d['revision'] = int(entry.get('revision'))
d['repos_url'] = tree.find('.//repository/root').text
d['last_changed_rev'] = int(tree.find('.//commit').get('revision'))
d['kind'] = entry.get('kind')
return d
def parse_svn_log_xml(xml_string):
"""
Parse the XML output from an "svn log" command and extract
useful information as a list of dicts (one per log changeset).
"""
l = []
tree = ET.fromstring(xml_string)
for entry in tree.findall('logentry'):
d = {}
d['revision'] = int(entry.get('revision'))
# Some revisions don't have authors, most notably
# the first revision in a repository.
author = entry.find('author')
d['author'] = author is not None and author.text or None
d['date'] = svn_date_to_timestamp(entry.find('date').text)
# Some revisions may have empty commit message
message = entry.find('msg')
message = message is not None and message.text is not None \
and message.text.strip() or ""
# Replace DOS return '\r\n' and MacOS return '\r' with unix return '\n'
d['message'] = message.replace('\r\n', '\n').replace('\n\r', '\n'). \
replace('\r', '\n')
paths = d['changed_paths'] = []
for path in entry.findall('.//path'):
copyfrom_rev = path.get('copyfrom-rev')
if copyfrom_rev:
copyfrom_rev = int(copyfrom_rev)
paths.append({
'path': path.text,
'action': path.get('action'),
'copyfrom_path': path.get('copyfrom-path'),
'copyfrom_revision': copyfrom_rev,
})
l.append(d)
return l
def parse_svn_status_xml(xml_string, base_dir=None):
"""
Parse the XML output from an "svn status" command and extract
useful info as a list of dicts (one per status entry).
"""
l = []
tree = ET.fromstring(xml_string)
for entry in tree.findall('.//entry'):
d = {}
path = entry.get('path')
if base_dir is not None:
assert path.startswith(base_dir)
path = path[len(base_dir):].lstrip('/\\')
d['path'] = path
wc_status = entry.find('wc-status')
if wc_status.get('item') == 'external':
d['type'] = 'external'
elif wc_status.get('revision') is not None:
d['type'] = 'normal'
else:
d['type'] = 'unversioned'
l.append(d)
return l
def get_svn_info(svn_url_or_wc, rev_number=None):
"""
Get SVN information for the given URL or working copy,
with an optionally specified revision number.
Returns a dict as created by parse_svn_info_xml().
"""
if rev_number is not None:
args = [svn_url_or_wc + "@" + str(rev_number)]
else:
args = [svn_url_or_wc]
xml_string = run_svn(svn_info_args + args,
fail_if_stderr=True)
return parse_svn_info_xml(xml_string)
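# Illustrative usage sketch (not part of the original script): the repository URL
# is hypothetical, and svn_info_args is assumed to be defined earlier in this
# script (e.g. ["info", "--xml"]) since get_svn_info() relies on it.
def _example_get_svn_info():
    info = get_svn_info("svn://example.org/repo/branches/xmpp", rev_number=42)
    return info['url'], info['repos_url'], info['last_changed_rev']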
def svn_checkout(svn_url, checkout_dir, rev_number=None):
"""
Checkout the given URL at an optional revision number.
"""
args = []
if rev_number is not None:
args += ['-r', rev_number]
args += [svn_url, checkout_dir]
return run_svn(svn_checkout_args + args)
def run_svn_log(svn_url_or_wc, rev_start, rev_end, limit, stop_on_copy=False):
"""
Fetch up to 'limit' SVN log entries between the given revisions.
"""
if stop_on_copy:
args = ['--stop-on-copy']
else:
args = []
args += ['-r', '%s:%s' % (rev_start, rev_end), '--limit',
str(limit), svn_url_or_wc]
xml_string = run_svn(svn_log_args + args)
return parse_svn_log_xml(xml_string)
def get_svn_status(svn_wc):
"""
Get SVN status information about the given working copy.
"""
# Ensure proper stripping by canonicalizing the path
svn_wc = os.path.abspath(svn_wc)
args = [svn_wc]
xml_string = run_svn(svn_status_args + args)
return parse_svn_status_xml(xml_string, svn_wc)
def get_one_svn_log_entry(svn_url, rev_start, rev_end, stop_on_copy=False):
"""
Get the first SVN log entry in the requested revision range.
"""
entries = run_svn_log(svn_url, rev_start, rev_end, 1, stop_on_copy)
if not entries:
display_error("No SVN log for %s between revisions %s and %s" %
(svn_url, rev_start, rev_end))
return entries[0]
def get_first_svn_log_entry(svn_url, rev_start, rev_end):
"""
Get the first log entry after/at the given revision number in an SVN branch.
By default the revision number is set to 0, which will give you the log
entry corresponding to the branch creation.
NOTE: to know whether the branch creation corresponds to an SVN import or
a copy from another branch, inspect elements of the 'changed_paths' entry
in the returned dictionary.
"""
return get_one_svn_log_entry(svn_url, rev_start, rev_end, stop_on_copy=True)
def get_last_svn_log_entry(svn_url, rev_start, rev_end):
"""
Get the last log entry before/at the given revision number in an SVN branch.
By default the revision number is set to HEAD, which will give you the log
entry corresponding to the latest commit in branch.
"""
return get_one_svn_log_entry(svn_url, rev_end, rev_start, stop_on_copy=True)
log_duration_threshold = 10.0
log_min_chunk_length = 10
def iter_svn_log_entries(svn_url, first
|
display_error
|
identifier_name
|
|
MySVN.py
|
< 100:
commit_paths.append(p)
# Detect special cases
old_p = d['copyfrom_path']
if old_p and old_p.startswith(svn_path + "/"):
old_p = old_p[len(svn_path):].strip("/")
# Both paths can be identical if copied from an old rev.
# We treat it like a normal change.
if old_p != p:
if not os.path.exists(p + os.sep + '.svn'):
svn_add_dir(os.path.dirname(p))
run_svn(["up", old_p])
run_svn(["copy", old_p, p])
if os.path.isfile(p):
shutil.copy(original_wc + os.sep + p, p)
if action == 'R':
removed_paths.append(old_p)
if len (commit_paths) < 100:
commit_paths.append(old_p)
continue
if action == 'A':
if os.path.isdir(original_wc + os.sep + p):
svn_add_dir(p)
else:
p_path = os.path.dirname(p).strip() or '.'
svn_add_dir(p_path)
shutil.copy(original_wc + os.sep + p, p)
run_svn(["add", p])
elif action == 'D':
removed_paths.append(p)
else: # action == 'M'
merged_paths.append(p)
if removed_paths:
for r in removed_paths:
run_svn(["up", r])
run_svn(["remove", "--force", r])
if merged_paths:
for m in merged_paths:
run_svn(["up", m])
m_url = svn_url + "/" + m
out = run_svn(["merge", "-c", str(svn_rev), "--non-recursive",
m_url+"@"+str(svn_rev), m])
# if conflicts, use the copy from original_wc
if out and out.split()[0] == 'C':
print "\n### Conflicts ignored: %s, in revision: %s\n" \
% (m, svn_rev)
run_svn(["revert", "--recursive", m])
if os.path.isfile(m):
shutil.copy(original_wc + os.sep + m, m)
if unrelated_paths:
print "Unrelated paths: "
print "*", unrelated_paths
## too many files
if len (commit_paths) > 99:
commit_paths = []
try:
commit_from_svn_log_entry(log_entry, commit_paths,
keep_author=keep_author)
except ExternalCommandFailed:
# try to ignore property conflicts on files and dirs
# use the copy from original_wc
has_Conflict = False
for d in log_entry['changed_paths']:
p = d['path']
p = p[len(svn_path):].strip("/")
if os.path.isfile(p):
if os.path.isfile(p + ".prej"):
has_Conflict = True
shutil.copy(original_wc + os.sep + p, p)
p2=os.sep + p.replace('_', '__').replace('/', '_') \
+ ".prej-" + str(svn_rev)
shutil.move(p + ".prej", os.path.dirname(original_wc) + p2)
w="\n### Properties conflicts ignored:"
print "%s %s, in revision: %s\n" % (w, p, svn_rev)
elif os.path.isdir(p):
if os.path.isfile(p + os.sep + "dir_conflicts.prej"):
has_Conflict = True
p2=os.sep + p.replace('_', '__').replace('/', '_') \
+ "_dir__conflicts.prej-" + str(svn_rev)
shutil.move(p + os.sep + "dir_conflicts.prej",
os.path.dirname(original_wc) + p2)
w="\n### Properties conflicts ignored:"
print "%s %s, in revision: %s\n" % (w, p, svn_rev)
out = run_svn(["propget", "svn:ignore",
original_wc + os.sep + p])
if out:
run_svn(["propset", "svn:ignore", out.strip(), p])
out = run_svn(["propget", "svn:externel",
original_wc + os.sep + p])
if out:
run_svn(["propset", "svn:external", out.strip(), p])
# try again
if has_Conflict:
commit_from_svn_log_entry(log_entry, commit_paths,
keep_author=keep_author)
else:
raise ExternalCommandFailed
def main():
usage = "Usage: %prog [-a] [-c] [-r SVN rev] <Source SVN URL> <Target SVN URL>"
parser = OptionParser(usage)
parser.add_option("-a", "--keep-author", action="store_true",
dest="keep_author", help="Keep revision Author or not")
parser.add_option("-c", "--continue-from-break", action="store_true",
dest="cont_from_break",
help="Continue from previous break")
parser.add_option("-r", "--svn-rev", type="int", dest="svn_rev",
help="SVN revision to checkout from")
(options, args) = parser.parse_args()
if len(args) != 2:
display_error("incorrect number of arguments\n\nTry: svn2svn.py --help",
False)
source_url = args.pop(0).rstrip("/")
target_url = args.pop(0).rstrip("/")
if options.keep_author:
keep_author = True
else:
keep_author = False
# Find the greatest_rev
# don't use 'svn info' to get greatest_rev, it doesn't work sometimes
svn_log = get_one_svn_log_entry(source_url, "HEAD", "HEAD")
greatest_rev = svn_log['revision']
original_wc = "_original_wc"
dup_wc = "_dup_wc"
## old working copy does not exist, disable continue mode
if not os.path.exists(dup_wc):
options.cont_from_break = False
if not options.cont_from_break:
# Warn if the target SVN URL already exists
cmd = find_program("svn")
pipe = Popen([cmd] + ["list"] + [target_url], executable=cmd,
stdout=PIPE, stderr=PIPE)
out, err = pipe.communicate()
if pipe.returncode == 0:
print "Target SVN URL: %s existed!" % target_url
if out:
print out
print "Press 'Enter' to Continue, 'Ctrl + C' to Cancel..."
print "(Timeout in 5 seconds)"
rfds, wfds, efds = select.select([sys.stdin], [], [], 5)
# Get log entry for the SVN revision we will check out
if options.svn_rev:
# If specify a rev, get log entry just before or at rev
svn_start_log = get_last_svn_log_entry(source_url, 1,
options.svn_rev)
else:
# Otherwise, get log entry of branch creation
svn_start_log = get_first_svn_log_entry(source_url, 1,
greatest_rev)
# This is the revision we will checkout from
svn_rev = svn_start_log['revision']
# Check out first revision (changeset) from Source SVN URL
if os.path.exists(original_wc):
shutil.rmtree(original_wc)
svn_checkout(source_url, original_wc, svn_rev)
# Import first revision (changeset) into Target SVN URL
timestamp = int(svn_start_log['date'])
svn_date = str(datetime.fromtimestamp(timestamp))
if keep_author:
run_svn(["import", original_wc, target_url, "-m",
svn_start_log['message'] + "\nDate: " + svn_date,
"--username", svn_start_log['author']])
else:
run_svn(["import", original_wc, target_url, "-m",
svn_start_log['message'] + "\nDate: " + svn_date +
"\nAuthor: " + svn_start_log['author']])
# Check out a working copy
if os.path.exists(dup_wc):
shutil.rmtree(dup_wc)
svn_checkout(target_url, dup_wc)
original_wc = os.path.abspath(original_wc)
dup_wc = os.path.abspath(dup_wc)
os.chdir(dup_wc)
# Get SVN info
svn_info = get_svn_info(original_wc)
# e.g. u'svn://svn.twistedmatrix.com/svn/Twisted'
repos_url = svn_info['repos_url']
# e.g. u'svn://svn.twistedmatrix.com/svn/Twisted/branches/xmpp'
svn_url = svn_info['url']
assert svn_url.startswith(repos_url)
# e.g. u'/branches/xmpp'
svn_path = svn_url[len(repos_url):]
# e.g. 'xmpp'
svn_branch = svn_url.split("/")[-1]
if options.cont_from_break:
svn_rev = svn_info['revision'] - 1
if svn_rev < 1:
svn_rev = 1
# Load SVN log starting from svn_rev + 1
it_log_entries = iter_svn_log_entries(svn_url, svn_rev + 1, greatest_rev)
try:
for log_entry in it_log_entries:
|
pull_svn_rev(log_entry, svn_url, target_url, svn_path,
original_wc, keep_author)
|
conditional_block
|
|
MySVN.py
|
locale_encoding = locale.getpreferredencoding()
def run_svn(args, fail_if_stderr=False, encoding="utf-8"):
"""
Run svn cmd in PIPE
exit if svn cmd failed
"""
def _transform_arg(a):
if isinstance(a, unicode):
a = a.encode(encoding or locale_encoding)
elif not isinstance(a, str):
a = str(a)
return a
t_args = map(_transform_arg, args)
cmd = find_program("svn")
cmd_string = str(" ".join(map(shell_quote, [cmd] + t_args)))
print "*", cmd_string
pipe = Popen([cmd] + t_args, executable=cmd, stdout=PIPE, stderr=PIPE)
out, err = pipe.communicate()
if pipe.returncode != 0 or (fail_if_stderr and err.strip()):
display_error("External program failed (return code %d): %s\n%s"
% (pipe.returncode, cmd_string, err))
return out
def svn_date_to_timestamp(svn_date):
"""
Parse an SVN date as read from the XML output and
return the corresponding timestamp.
"""
# Strip microseconds and timezone (always UTC, hopefully)
# XXX there are various ISO datetime parsing routines out there,
# cf. http://seehuhn.de/comp/pdate
date = svn_date.split('.', 2)[0]
time_tuple = time.strptime(date, "%Y-%m-%dT%H:%M:%S")
return calendar.timegm(time_tuple)
def parse_svn_info_xml(xml_string):
"""
Parse the XML output from an "svn info" command and extract
useful information as a dict.
"""
d = {}
tree = ET.fromstring(xml_string)
entry = tree.find('.//entry')
if entry:
d['url'] = entry.find('url').text
d['revision'] = int(entry.get('revision'))
d['repos_url'] = tree.find('.//repository/root').text
d['last_changed_rev'] = int(tree.find('.//commit').get('revision'))
d['kind'] = entry.get('kind')
return d
def parse_svn_log_xml(xml_string):
"""
Parse the XML output from an "svn log" command and extract
useful information as a list of dicts (one per log changeset).
"""
l = []
tree = ET.fromstring(xml_string)
for entry in tree.findall('logentry'):
d = {}
d['revision'] = int(entry.get('revision'))
# Some revisions don't have authors, most notably
# the first revision in a repository.
author = entry.find('author')
d['author'] = author is not None and author.text or None
d['date'] = svn_date_to_timestamp(entry.find('date').text)
# Some revisions may have empty commit message
message = entry.find('msg')
message = message is not None and message.text is not None \
and message.text.strip() or ""
# Replace DOS return '\r\n' and MacOS return '\r' with unix return '\n'
d['message'] = message.replace('\r\n', '\n').replace('\n\r', '\n'). \
replace('\r', '\n')
paths = d['changed_paths'] = []
for path in entry.findall('.//path'):
copyfrom_rev = path.get('copyfrom-rev')
if copyfrom_rev:
copyfrom_rev = int(copyfrom_rev)
paths.append({
'path': path.text,
'action': path.get('action'),
'copyfrom_path': path.get('copyfrom-path'),
'copyfrom_revision': copyfrom_rev,
})
l.append(d)
return l
def parse_svn_status_xml(xml_string, base_dir=None):
"""
Parse the XML output from an "svn status" command and extract
useful info as a list of dicts (one per status entry).
"""
l = []
tree = ET.fromstring(xml_string)
for entry in tree.findall('.//entry'):
d = {}
path = entry.get('path')
if base_dir is not None:
assert path.startswith(base_dir)
path = path[len(base_dir):].lstrip('/\\')
d['path'] = path
wc_status = entry.find('wc-status')
if wc_status.get('item') == 'external':
d['type'] = 'external'
elif wc_status.get('revision') is not None:
d['type'] = 'normal'
else:
d['type'] = 'unversioned'
l.append(d)
return l
def get_svn_info(svn_url_or_wc, rev_number=None):
"""
Get SVN information for the given URL or working copy,
with an optionally specified revision number.
Returns a dict as created by parse_svn_info_xml().
"""
if rev_number is not None:
args = [svn_url_or_wc + "@" + str(rev_number)]
else:
args = [svn_url_or_wc]
xml_string = run_svn(svn_info_args + args,
fail_if_stderr=True)
return parse_svn_info_xml(xml_string)
def svn_checkout(svn_url, checkout_dir, rev_number=None):
"""
Checkout the given URL at an optional revision number.
"""
args = []
if rev_number is not None:
args += ['-r', rev_number]
args += [svn_url, checkout_dir]
return run_svn(svn_checkout_args + args)
def run_svn_log(svn_url_or_wc, rev_start, rev_end, limit, stop_on_copy=False):
"""
Fetch up to 'limit' SVN log entries between the given revisions.
"""
if stop_on_copy:
args = ['--stop-on-copy']
else:
args = []
args += ['-r', '%s:%s' % (rev_start, rev_end), '--limit',
str(limit), svn_url_or_wc]
xml_string = run_svn(svn_log_args + args)
return parse_svn_log_xml(xml_string)
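# Illustrative usage sketch (not part of the original script): with the arguments
# below, run_svn_log() builds roughly
#   svn log --xml -v -r 100:200 --limit 10 <url>
# assuming svn_log_args is defined earlier in this script as something like
# ["log", "--xml", "-v"]; the repository URL is hypothetical.
def _example_run_svn_log():
    entries = run_svn_log("svn://example.org/repo/trunk", 100, 200, 10)
    return [e['revision'] for e in entries]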
def get_svn_status(svn_wc):
"""
Get SVN status information about the given working copy.
"""
# Ensure proper stripping by canonicalizing the path
svn_wc = os.path.abspath(svn_wc)
args = [svn_wc]
xml_string = run_svn(svn_status_args + args)
return parse_svn_status_xml(xml_string, svn_wc)
def get_one_svn_log_entry(svn_url, rev_start, rev_end, stop_on_copy=False):
"""
Get the first SVN log entry in the requested revision range.
"""
entries = run_svn_log(svn_url, rev_start, rev_end, 1, stop_on_copy)
if not entries:
display_error("No SVN log for %s between revisions %s and %s" %
(svn_url, rev_start, rev_end))
return entries[0]
def get_first_svn_log_entry(svn_url, rev_start, rev_end):
"""
Get the first log entry after/at the given revision number in an SVN branch.
By default the revision number is set to 0, which will give you the log
entry corresponding to the branch creation.
NOTE: to know whether the branch creation corresponds to an SVN import or
a copy from another branch, inspect elements of the 'changed_paths' entry
in the returned dictionary.
"""
return get_one_svn_log_entry(svn_url, rev_start, rev_end, stop_on_copy=True)
def get_last_svn_log_entry(svn_url, rev_start, rev_end):
"""
Get the last log entry before/at the given revision number in an SVN branch.
By default the revision number is set to HEAD, which will give you the log
entry corresponding to the latest commit in branch.
"""
return get_one_svn_log_entry(svn_url, rev_end, rev_start, stop_on_copy=True)
log_duration_threshold = 10.0
log_min_chunk_length = 10
def iter_svn_log_entries(svn_url, first_rev, last_rev):
"""
Iterate over SVN log entries between first_rev and last_rev.
This function features chunked log fetching so that it isn't too nasty
to the SVN server if many entries are requested.
"""
cur_rev = first_rev
chunk_length = log_min_chunk_length
chunk_interval_factor = 1.0
while last_rev == "HEAD" or cur_rev <= last_rev:
start_t = time.time()
stop_rev = min(last_rev, cur_rev + int(chunk_length * chunk_interval_factor))
entries = run_svn_log(svn_url, cur_rev, stop_rev, chunk_length)
duration = time.time() - start_t
if not entries:
if stop_rev == last_rev:
break
cur_rev = stop_rev + 1
chunk_interval_factor *= 2.0
continue
for e in entries:
yield e
cur_rev = e['revision'] + 1
# Adapt chunk length based on measured request duration
if duration < log_duration_threshold:
chunk_length = int(chunk_length * 2.0)
elif duration > log_duration_threshold * 2:
chunk_length = max(log_min_chunk_length, int(chunk_length / 2
|
if os.name == "nt":
q = '"'
else:
q = "'"
return q + s.replace('\\', '\\\\').replace("'", "'\"'\"'") + q
|
identifier_body
|
|
MySVN.py
|
vn_url_or_wc, rev_number=None):
"""
Get SVN information for the given URL or working copy,
with an optionally specified revision number.
Returns a dict as created by parse_svn_info_xml().
"""
if rev_number is not None:
args = [svn_url_or_wc + "@" + str(rev_number)]
else:
args = [svn_url_or_wc]
xml_string = run_svn(svn_info_args + args,
fail_if_stderr=True)
return parse_svn_info_xml(xml_string)
def svn_checkout(svn_url, checkout_dir, rev_number=None):
"""
Checkout the given URL at an optional revision number.
"""
args = []
if rev_number is not None:
args += ['-r', rev_number]
args += [svn_url, checkout_dir]
return run_svn(svn_checkout_args + args)
def run_svn_log(svn_url_or_wc, rev_start, rev_end, limit, stop_on_copy=False):
"""
Fetch up to 'limit' SVN log entries between the given revisions.
"""
if stop_on_copy:
args = ['--stop-on-copy']
else:
args = []
args += ['-r', '%s:%s' % (rev_start, rev_end), '--limit',
str(limit), svn_url_or_wc]
xml_string = run_svn(svn_log_args + args)
return parse_svn_log_xml(xml_string)
def get_svn_status(svn_wc):
"""
Get SVN status information about the given working copy.
"""
# Ensure proper stripping by canonicalizing the path
svn_wc = os.path.abspath(svn_wc)
args = [svn_wc]
xml_string = run_svn(svn_status_args + args)
return parse_svn_status_xml(xml_string, svn_wc)
def get_one_svn_log_entry(svn_url, rev_start, rev_end, stop_on_copy=False):
"""
Get the first SVN log entry in the requested revision range.
"""
entries = run_svn_log(svn_url, rev_start, rev_end, 1, stop_on_copy)
if not entries:
display_error("No SVN log for %s between revisions %s and %s" %
(svn_url, rev_start, rev_end))
return entries[0]
def get_first_svn_log_entry(svn_url, rev_start, rev_end):
"""
Get the first log entry after/at the given revision number in an SVN branch.
By default the revision number is set to 0, which will give you the log
entry corresponding to the branch creation.
NOTE: to know whether the branch creation corresponds to an SVN import or
a copy from another branch, inspect elements of the 'changed_paths' entry
in the returned dictionary.
"""
return get_one_svn_log_entry(svn_url, rev_start, rev_end, stop_on_copy=True)
def get_last_svn_log_entry(svn_url, rev_start, rev_end):
"""
Get the last log entry before/at the given revision number in an SVN branch.
By default the revision number is set to HEAD, which will give you the log
entry corresponding to the latest commit in branch.
"""
return get_one_svn_log_entry(svn_url, rev_end, rev_start, stop_on_copy=True)
log_duration_threshold = 10.0
log_min_chunk_length = 10
def iter_svn_log_entries(svn_url, first_rev, last_rev):
"""
Iterate over SVN log entries between first_rev and last_rev.
This function features chunked log fetching so that it isn't too nasty
to the SVN server if many entries are requested.
"""
cur_rev = first_rev
chunk_length = log_min_chunk_length
chunk_interval_factor = 1.0
while last_rev == "HEAD" or cur_rev <= last_rev:
start_t = time.time()
stop_rev = min(last_rev, cur_rev + int(chunk_length * chunk_interval_factor))
entries = run_svn_log(svn_url, cur_rev, stop_rev, chunk_length)
duration = time.time() - start_t
if not entries:
if stop_rev == last_rev:
break
cur_rev = stop_rev + 1
chunk_interval_factor *= 2.0
continue
for e in entries:
yield e
cur_rev = e['revision'] + 1
# Adapt chunk length based on measured request duration
if duration < log_duration_threshold:
chunk_length = int(chunk_length * 2.0)
elif duration > log_duration_threshold * 2:
chunk_length = max(log_min_chunk_length, int(chunk_length / 2.0))
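# Illustrative usage sketch (not part of the original script): iterates over a
# hypothetical repository URL; the chunk size adapts automatically as the
# generator above measures each request's duration.
def _example_iter_svn_log_entries():
    revisions = []
    for entry in iter_svn_log_entries("svn://example.org/repo/trunk", 1, "HEAD"):
        revisions.append(entry['revision'])
        if len(revisions) >= 25:   # stop early in this sketch
            break
    return revisions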
def commit_from_svn_log_entry(entry, files=None, keep_author=False):
"""
Given an SVN log entry and an optional sequence of files, do an svn commit.
"""
# This will use the local timezone for displaying commit times
timestamp = int(entry['date'])
svn_date = str(datetime.fromtimestamp(timestamp))
# Uncomment this one if you prefer UTC commit times
#svn_date = "%d 0" % timestamp
if keep_author:
options = ["ci", "--force-log", "-m", entry['message'] + "\nDate: " + svn_date, "--username", entry['author']]
else:
options = ["ci", "--force-log", "-m", entry['message'] + "\nDate: " + svn_date + "\nAuthor: " + entry['author']]
if files:
options += list(files)
run_svn(options)
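# Illustrative usage sketch (not part of the original script): replays one
# changeset's commit message, date and (optionally) author onto files already
# prepared in the current working copy; the path below is hypothetical.
def _example_commit_from_svn_log_entry(log_entry):
    # log_entry is a dict as produced by run_svn_log()/parse_svn_log_xml()
    commit_from_svn_log_entry(log_entry,
                              files=["twisted/words/test/test.py"],
                              keep_author=True)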
def svn_add_dir(p):
# set p = "." when p = ""
#p = p.strip() or "."
if p.strip() and not os.path.exists(p + os.sep + ".svn"):
svn_add_dir(os.path.dirname(p))
if not os.path.exists(p):
os.makedirs(p)
run_svn(["add", p])
def pull_svn_rev(log_entry, svn_url, target_url, svn_path, original_wc, keep_author=False):
"""
Pull SVN changes from the given log entry.
Returns the new SVN revision.
If an exception occurs, it will rollback to revision 'svn_rev - 1'.
"""
svn_rev = log_entry['revision']
run_svn(["up", "--ignore-externals", "-r", svn_rev, original_wc])
removed_paths = []
merged_paths = []
unrelated_paths = []
commit_paths = []
for d in log_entry['changed_paths']:
# e.g. u'/branches/xmpp/twisted/words/test/test.py'
p = d['path']
if not p.startswith(svn_path + "/"):
# Ignore changed files that are not part of this subdir
if p != svn_path:
unrelated_paths.append(p)
continue
# e.g. u'twisted/words/test/test.py'
p = p[len(svn_path):].strip("/")
# Record for commit
action = d['action']
if action not in 'MARD':
display_error("In SVN rev. %d: action '%s' not supported. \
Please report a bug!" % (svn_rev, action))
if len (commit_paths) < 100:
commit_paths.append(p)
# Detect special cases
old_p = d['copyfrom_path']
if old_p and old_p.startswith(svn_path + "/"):
old_p = old_p[len(svn_path):].strip("/")
# Both paths can be identical if copied from an old rev.
# We treat it like a normal change.
if old_p != p:
if not os.path.exists(p + os.sep + '.svn'):
svn_add_dir(os.path.dirname(p))
run_svn(["up", old_p])
run_svn(["copy", old_p, p])
if os.path.isfile(p):
shutil.copy(original_wc + os.sep + p, p)
if action == 'R':
removed_paths.append(old_p)
if len (commit_paths) < 100:
commit_paths.append(old_p)
continue
if action == 'A':
if os.path.isdir(original_wc + os.sep + p):
svn_add_dir(p)
else:
p_path = os.path.dirname(p).strip() or '.'
svn_add_dir(p_path)
shutil.copy(original_wc + os.sep + p, p)
run_svn(["add", p])
elif action == 'D':
removed_paths.append(p)
else: # action == 'M'
merged_paths.append(p)
if removed_paths:
for r in removed_paths:
run_svn(["up", r])
run_svn(["remove", "--force", r])
if merged_paths:
for m in merged_paths:
run_svn(["up", m])
m_url = svn_url + "/" + m
out = run_svn(["merge", "-c", str(svn_rev), "--non-recursive",
m_url+"@"+str(svn_rev), m])
# if conflicts, use the copy from original_wc
if out and out.split()[0] == 'C':
print "\n### Conflicts ignored: %s, in revision: %s\n" \
% (m, svn_rev)
run_svn(["revert", "--recursive", m])
if os.path.isfile(m):
shutil.copy(original_wc + os.sep + m, m)
if unrelated_paths:
print "Unrelated paths: "
|
random_line_split
|
||
base.js
|
AlertAge:path+"/web/healthAlert/getAlertAge",// get the list of alert ages
riskNewHealthAlert:path+"/web/healthAlert/newHealthAlert",// create a new risk alert
riskGetHealthAlert:path+"/web/healthAlert/getHealthAlert",// get a single health alert
riskUpdateHealthAlert:path+"/web/healthAlert/updateHealthAlert",// update a health alert
riskDeleteHealthAlert:path+"/web/healthAlert/deleteHealthAlert",// delete a health alert
// health information
healthGetExamDateList:path+"/web/healthInfo/getExamDateList",// get the exam date list for a class
healthGetClassHealthInfo:path+"/web/healthInfo/getClassHealthInfo",// get class health information
healthGetChildListOfClass:path+"/web/healthInfo/getChildListOfClass",// get the list of children in a class
healthGetBirthdaySex:path+"/web/healthInfo/getBirthdaySex",// get a child's birthday and sex
healthCalculateAge:path+"/web/healthInfo/calculateAge",// calculate age from birthday and exam date
healthHPValue:path+"/web/healthInfo/HPValue",// calculate the height percentile value
healthWPValue:path+"/web/healthInfo/WPValue",// calculate the weight percentile value
healthFatnessValue:path+"/web/healthInfo/FatnessValue",// calculate the obesity value
healthNewHealthInfo:path+"/web/healthInfo/newHealthInfo",// add a health record
healthGetSingleHI:path+"/web/healthInfo/getSingleHI",// get a single health record
healthUpdateHealthInfo:path+"/web/healthInfo/updateHealthInfo",// update a health record
healthDeleteHealthInfo:path+"/web/healthInfo/deleteHealthInfo",// delete a health record
// elective courses / theater activities
GetSchoolIds:path+"/web/activity/TSCourse_GetSchoolIds",// featured courses: get school course ids
GetSchoolJYIds:path+"/web/activity/TSCourse_GetSchoolJYIds",// theater activity ids
GetSchoolCourses:path+"/web/activity/TSCourse_GetSchoolCourses",// featured courses: get school courses
AddCourse:path+"/web/activity/TSCourse_AddCourse",// featured courses: add a course
GetCourseDetails:path+"/web/activity/TSCourse_GetCourseDetails",// get school course details
tsDelCourse:path+"/web/activity/TSCourse_DelCourse",// delete a school course
tsGetBookedChildren:path+"/web/activity/TSCourse_GetBookedChildren",// list of students for check-in
tsCallRoll:path+"/web/activity/TSCourse_CallRoll",// check in
tsCancelRoll:path+"/web/activity/TSCourse_CancelRoll",// cancel check-in
tsTempBookCourse:path+"/web/activity/TSCourse_tempBookCourse",// add extra booking slots
getCourseSimpleTJ:path+"/web/activity/TSCourse_getCourseSimpleTJ",// elective activities: activity statistics
getCourseClassTJ:path+"/web/activity/TSCourse_getCourseClassTJ",// elective activities: class statistics
getCourseStudentTJ:path+"/web/activity/TSCourse_getCourseStudentTJ",// elective activities: student statistics
getCourseStudentDetailTJ:path+"/web/activity/TSCourse_getCourseStudentDetailTJ",// elective activities: activity statistics details
getCourseAllTJ:path+"/web/activity/TSCourse_getCourseAllTJ2",// elective activities: activity statistics 01
// file center
fileGetRoot:path+"/web/fileCenter/getRoot",// get the root directory
fileGetChildFileInfo:path+"/web/fileCenter/getChildFileInfo",// get all child files of a file
fileGetSingleFileInfo:path+"/web/fileCenter/getSingleFileInfo",// get a single file's info
fileAddFileInfo:path+"/web/fileCenter/addFileInfo",// add a file record
fileDeleteFileInfo:path+"/web/fileCenter/deleteFileInfo",// delete a file record
fileUpdateFileName:path+"/web/fileCenter/updateFileName",// update a file name
// 08 settings
setting:''
};
function initAjax(url,param,callback,callback01,callback02) {
$.ajax({
type:"POST",
url:url,
data:param,
dataType:"json",
statusCode:{
404:function(){
console.log("The requested URL does not exist or the parameters are invalid (error 404)");
},
500:function(){
console.log("The server could not complete the request due to an unexpected condition (error 500)");
// window.location.href=httpUrl.loginHttp;
},
405:function(){
console.log("The resource is forbidden (error 405)");
}
},
beforeSend:function () {
// loadingIn();// show the loading overlay
},
success:function(result){
callback(result,callback01,callback02);
// loadingOut(); // hide the loading overlay
},
error:function(result){
console.log("Request failed: ajax error!");
// window.location.href=httpUrl.loginHttp;
}
});
};
// show the loading overlay
function loadingIn() {
$("#page-loader").removeClass('hide');
$("#page-loader").css("z-index","999999");
};
function loadingOut(argument) {
$("#page-loader").addClass('hide');
};
Date.prototype.Format = function (fmt) {
var o = {
"M+": this.getMonth() + 1, //月份
"d+": this.getDate(), //日
"h+": this.getHours(), //小时
"m+": this.getMinutes(), //分
"s+": this.getSeconds(), //秒
"q+": Math.floor((this.getMonth() + 3) / 3), //季度
"S": this.getMilliseconds() //毫秒
};
if (/(y+)/.test(fmt)) fmt = fmt.replace(RegExp.$1, (this.getFullYear() + "").substr(4 - RegExp.$1.length));
for (var k in o)
if (new RegExp("(" + k + ")").test(fmt)) fmt = fmt.replace(RegExp.$1, (RegExp.$1.length == 1) ? (o[k]) : (("00" + o[k]).substr(("" + o[k]).length)));
return fmt;
};
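// Illustrative usage sketch (not part of the original file): formats the current
// time with the Format() helper defined above.
function exampleFormatNow() {
    // e.g. "2019-05-06 09:30:05" (actual output depends on the current time)
    return new Date().Format("yyyy-MM-dd hh:mm:ss");
}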
// helper to read a parameter from the URL query string
function GetQueryString(name){
var reg = new RegExp("(^|&)"+ name +"=([^&]*)(&|$)");
var result = window.location.search.substr(1).match(reg);
return result?decodeURIComponent(result[2]):null;
}
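// Illustrative usage sketch (not part of the original file): for a URL such as
// page.html?classId=12&name=Tom (hypothetical parameters), GetQueryString reads
// a single query parameter or returns null when it is absent.
function exampleReadClassId() {
    var classId = GetQueryString("classId");   // "12" for the example URL above
    return classId !== null ? classId : "unknown";
}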
// set a cookie; expiry format: s20 = 20 seconds, h12 = 12 hours, d30 = 30 days
function setCookie(name,value,time){
var strsec = getsec(time);
var exp = new Date();
exp.setTime(exp.getTime() + strsec*1);
// document.cookie = name + "="+ escape (value) + ";expires=" + exp.toGMTString()+"path=/; domain="+domain;
document.cookie = name + "="+ escape (value) + ";expires=" + exp.toGMTString();
};
function getsec(str){
var str1=str.substring(1,str.length)*1;
var str2=str.substring(0,1);
if (str2=="s"){
return str1*1000;
}
else if (str2=="h")
{
return str1*60*60*1000;
}
else if (str2=="d")
{
return str1*24*60*60*1000;
}
};
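// Illustrative usage sketch (not part of the original file): the cookie name and
// value are made up; "h12" follows the expiry format documented above
// (s = seconds, h = hours, d = days).
function exampleRememberLogin() {
    setCookie("loginToken", "abc123", "h12");      // expires in 12 hours
    return getCookie("loginToken");                // "abc123" until expiry/removal
}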
// get a cookie
function getCookie(name){
var arr,reg=new RegExp("(^| )"+name+"=([^;]*)(;|$)");
if(arr=document.cookie.match(reg)){
return unescape(arr[2]);
}
else{
return null;
}
};
// delete a cookie
function delCookie(name){
var exp = new Date();
exp.setTime(exp.getTime() - 1);
var cval=getCookie(name);
if(cval!=null){
document.cookie= name + "="+cval+";expires="+exp.toGMTString();
};
};
// niceScroll scrollbar
function chooseNiceScroll(AA,color) {
$(AA).niceScroll({
cursorcolor: color || "#ccc",// #CC0071 cursor color
cursoropacitymax: 1, // cursor opacity while active (scrollbar "visible" state), range 1 to 0
touchbehavior: true, // enable drag scrolling as on touch devices
cursorwidth: "5px", // cursor width in pixels
cursorborder: "0", // cursor border CSS definition
cursorborderradius: "5px",// cursor border radius in pixels
autohidemode: true // whether to auto-hide the scrollbar
});
};
// toast notification helper
function toastTip(heading,text,hideAfter,afterHidden) {
$.toast({
heading: heading,
text: text,
showHideTransition: 'slide',
icon: 'success',
hideAfter: hideAfter || 1500,
loaderBg: '#edd42e',
position: 'bottom-right',
afterHidden: afterHidden
});
};
|
identifier_body
|
||
base.js
|
/TJ/TJ_GCJL_GetClassAbilibySimple",// 班级领域发展水平--数量统计
getCourseAbilibyCount:path+"/web/sample/TJ/TJ_GCJL_GetCourseAbilibyCount",// 课程发展水平--数量统计
// 成长档案
recordStudent:path+"/web/mbtrack/dan/student",// 获取学生列表(含档案信息)
recordList:path+"/web/mbtrack/danbook/list",// 获取档案册列表
recordMonthList:path+"/web/mbtrack/danbook/danList",// 获取档案册档案页详情
recordNewDanbook:path+"/web/mbtrack/danbook/save",// 新建档案册
recordDownload:path+"/file/patch/download",//图片批量下载(档案页)
recordDanbookUpdate:path+"/web/mbtrack/danbook/update",// 档案册名更新
recordTeacherStat:path+"/web/mbtrack/report/teacher",// 教师成长档案统计
recordParentStat:path+"/web/mbtrack/report/parent",// 家长成长档案统计
// 考勤
attendGetChildOfClass:path+"/web/attendance/teacher/getChildOfClass",// 获得班级所有幼儿信息
attendGetAttendanceRecord:path+"/web/attendance/teacher/getAttendanceRecord",// 获得考勤记录
attendCheckConfirm:path+"/web/attendance/teacher/checkConfirm",// 教师端检查确认
attendDisPlayAttendDays:path+"/web/attendance/teacher/disPlayAttendDays",// 查看已设置的考勤天数
attendUpdateAttendDays:path+"/web/attendance/teacher/updateAttendDays",// 修改考勤天数设置
attendResetAttendDays:path+"/web/attendance/teacher/resetAttendDays",// 复位考勤天数设置
attendGetClassAttendanceInfo:path+"/web/attendance/teacher/getClassAttendanceInfo",// 获得班级考勤
attendGetPersonalAttendance:path+"/web/attendance/parent/getPersonalAttendance",// 获得个人考勤
// 公告
getMyClassInfo:path+"/web/basic/getMyClassInfo",// 获取我的班级信息
getClassStuAndTeachers:path+"/web/basic/getClassStuAndTeachers",// 获取班级所有学生和老师
noticeGetDesc:path+"/web/notice/getNoticeDesc",// 获取公告描述
noticeReaded:path+"/web/notice/markNoticeReaded",// 公告置为已读
noticeAddNew:path+"/web/notice/addNewNotice",// 新增新的公告内容
noticeGetContentList:path+"/web/notice/getNoticeContent",// 获取某个公告内容列表
noticeGetReadDetail:path+"/web/notice/getReadDetail",// 获取某条公告内容阅读详情
noticeDelNoticeContent:path+"/web/notice/delNoticeContent",// 删除某条公告内容
noticeUpdateNoticeContent:path+"/web/notice/updateNoticeContent",// 更新某条公告内容
// 每周菜谱
menuSaveTable:path+"/web/cookbook/saveTable",// 保存表格
menuDeleteTable:path+"/web/cookbook/deleteTable",// 删除整张表
menuUpdateTitle:path+"/web/cookbook/updateTitle",// 更新菜谱标题
menuSelectCell:path+"/web/cookbook/selectCell",// 获得某个单元
menuGetTitleList:path+"/web/cookbook/getTitleList",// 获得菜谱标题列表
menuStructuringTableCell:path+"/web/cookbook/structuringTableCell",// 通过开始日期获取表单
// 风险预警
riskGetCompanyHealthAlert:path+"/web/healthAlert/getCompanyHealthAlert",// 获取登录人所在学校的所有预警
riskGetAlertType:path+"/web/healthAlert/getAlertType",// 获取预警类型列表
riskGetAlertAge:path+"/web/healthAlert/getAlertAge",// 获得预警年龄列表
riskNewHealthAlert:path+"/web/healthAlert/newHealthAlert",// 新增风险预警
riskGetHealthAlert:path+"/web/healthAlert/getHealthAlert",// 获取单条健康预警
riskUpdateHealthAlert:path+"/web/healthAlert/updateHealthAlert",// 更改健康预警
riskDeleteHealthAlert:path+"/web/healthAlert/deleteHealthAlert",// 删除健康预警
// 健康信息
healthGetExamDateList:path+"/web/healthInfo/getExamDateList",// 根据班级获得检查日期列表
healthGetClassHealthInfo:path+"/web/healthInfo/getClassHealthInfo",// 获得班级健康信息
healthGetChildListOfClass:path+"/web/healthInfo/getChildListOfClass",// 获得班级幼儿列表
healthGetBirthdaySex:path+"/web/healthInfo/getBirthdaySex",// 获得幼儿生日及性别
healthCalculateAge:path+"/web/healthInfo/calculateAge",// 根据生日,体检日期,计算年龄
healthHPValue:path+"/web/healthInfo/HPValue",// 计算身高p值
healthWPValue:path+"/web/healthInfo/WPValue",// 计算体重p值
healthFatnessValue:path+"/web/healthInfo/FatnessValue",// 计算肥胖值
healthNewHealthInfo:path+"/web/healthInfo/newHealthInfo",// 新增健康信息
healthGetSingleHI:path+"/web/healthInfo/getSingleHI",// 获得单条健康信息
healthUpdateHealthInfo:path+"/web/healthInfo/updateHealthInfo",// 更新健康信息
healthDeleteHealthInfo:path+"/web/healthInfo/deleteHealthInfo",// 删除健康信息
// 自选课程 剧场活动
GetSchoolIds:path+"/web/activity/TSCourse_GetSchoolIds",//特色课程 获取学校课程id
GetSchoolJYIds:path+"/web/activity/TSCourse_GetSchoolJYIds",//剧场活动 id
GetSchoolCourses:path+"/web/activity/TSCourse_GetSchoolCourses",//特色课程 获取学校课程
AddCourse:path+"/web/activity/TSCourse_AddCourse",//特色课程 新增
GetCourseDetails:path+"/web/activity/TSCourse_GetCourseDetails",//获取学校课程详情
tsDelCourse:path+"/web/activity/TSCourse_DelCourse",// 删除学校课程
tsGetBookedChildren:path+"/web/activity/TSCourse_GetBookedChildren",// 签到学生列表
tsCallRoll:path+"/web/activity/TSCourse_CallRoll",// 签到
tsCancelRoll:path+"/web/activity/TSCourse_CancelRoll",// 取消签到
tsTempBookCourse:path+"/web/activity/TSCourse_tempBookCourse",// 补加预约人数
getCourseSimpleTJ:path+"/web/activity/TSCourse_getCourseSimpleTJ",// 自选活动 活动统计
getCourseClassTJ:path+"/web/activity/TSCourse_getCourseClassTJ",// 自选活动 班级统计
getCourseStudentTJ:path+"/web/activity/TSCourse_getCourseStudentTJ",// 自选活动 学生统计
getCourseStudentDetailTJ:path+"/web/activity/TSCourse_getCourseStudentDetailTJ",// 自选活动 活动统计详情
getCourseAllTJ:path+"/web/activity/TSCourse_getCourseAllTJ2",// 自选活动 活动统计01
// 文件中心
fileGetRoot:path+"/web/fileCenter/getRoot",// 获取根目录
fileGetChildFileInfo:path+"/web/fileCenter/getChildFileInfo",// 获取文件的所有子级文件
fileGetSingleFileInfo:path+"/web/fileCenter/getSingleFileInfo",// 获取单项文件信息
fileAddFileInfo:path+"/web/fileCenter/addFileInfo",// 增加一项文件信息
fileDeleteFileInfo:path+"/web/fileCenter/deleteFileInfo",// 删除文件信息
fileUpdateFileName:path+"/web/fileCenter/updateFileName",// 更新文件名
// 08设置
setting:''
};
function initAjax(url,param,callback,callback01,callback02) {
$.ajax({
type:"POST",
url:url,
data:param,
dataType:"json",
statusCode:{
404:function(){
console.log("The requested URL does not exist or the parameters are invalid (error 404)");
},
500:function(){
console.log("The server could not complete the request due to an unexpected condition (error 500)");
// window.location.href=httpUrl.loginHttp;
},
405:function(){
console.log("The resource is forbidden (error 405)");
}
},
beforeSend:function () {
// loadingIn();// show the loading overlay
},
success:function(result){
callback(result,callback01,callback02);
// loadingOut(); // hide the loading overlay
},
error:function(result){
console.log("Request failed: ajax error!");
// window.location.href=httpUrl.loginHttp;
}
});
};
// show the loading overlay
function loadingIn() {
$("#page-loader").removeClass('hide');
$("#page-loader").css("z-index","999999");
};
function loadingOut(argument) {
$("#page-loader").addClass('hide');
|
};
|
random_line_split
|
|
base.js
|
AlertAge:path+"/web/healthAlert/getAlertAge",// get the list of alert ages
riskNewHealthAlert:path+"/web/healthAlert/newHealthAlert",// create a new risk alert
riskGetHealthAlert:path+"/web/healthAlert/getHealthAlert",// get a single health alert
riskUpdateHealthAlert:path+"/web/healthAlert/updateHealthAlert",// update a health alert
riskDeleteHealthAlert:path+"/web/healthAlert/deleteHealthAlert",// delete a health alert
// health information
healthGetExamDateList:path+"/web/healthInfo/getExamDateList",// get the exam date list for a class
healthGetClassHealthInfo:path+"/web/healthInfo/getClassHealthInfo",// get class health information
healthGetChildListOfClass:path+"/web/healthInfo/getChildListOfClass",// get the list of children in a class
healthGetBirthdaySex:path+"/web/healthInfo/getBirthdaySex",// get a child's birthday and sex
healthCalculateAge:path+"/web/healthInfo/calculateAge",// calculate age from birthday and exam date
healthHPValue:path+"/web/healthInfo/HPValue",// calculate the height percentile value
healthWPValue:path+"/web/healthInfo/WPValue",// calculate the weight percentile value
healthFatnessValue:path+"/web/healthInfo/FatnessValue",// calculate the obesity value
healthNewHealthInfo:path+"/web/healthInfo/newHealthInfo",// add a health record
healthGetSingleHI:path+"/web/healthInfo/getSingleHI",// get a single health record
healthUpdateHealthInfo:path+"/web/healthInfo/updateHealthInfo",// update a health record
healthDeleteHealthInfo:path+"/web/healthInfo/deleteHealthInfo",// delete a health record
// elective courses / theater activities
GetSchoolIds:path+"/web/activity/TSCourse_GetSchoolIds",// featured courses: get school course ids
GetSchoolJYIds:path+"/web/activity/TSCourse_GetSchoolJYIds",// theater activity ids
GetSchoolCourses:path+"/web/activity/TSCourse_GetSchoolCourses",// featured courses: get school courses
AddCourse:path+"/web/activity/TSCourse_AddCourse",// featured courses: add a course
GetCourseDetails:path+"/web/activity/TSCourse_GetCourseDetails",// get school course details
tsDelCourse:path+"/web/activity/TSCourse_DelCourse",// delete a school course
tsGetBookedChildren:path+"/web/activity/TSCourse_GetBookedChildren",// list of students for check-in
tsCallRoll:path+"/web/activity/TSCourse_CallRoll",// check in
tsCancelRoll:path+"/web/activity/TSCourse_CancelRoll",// cancel check-in
tsTempBookCourse:path+"/web/activity/TSCourse_tempBookCourse",// add extra booking slots
getCourseSimpleTJ:path+"/web/activity/TSCourse_getCourseSimpleTJ",// elective activities: activity statistics
getCourseClassTJ:path+"/web/activity/TSCourse_getCourseClassTJ",// elective activities: class statistics
getCourseStudentTJ:path+"/web/activity/TSCourse_getCourseStudentTJ",// elective activities: student statistics
getCourseStudentDetailTJ:path+"/web/activity/TSCourse_getCourseStudentDetailTJ",// elective activities: activity statistics details
getCourseAllTJ:path+"/web/activity/TSCourse_getCourseAllTJ2",// elective activities: activity statistics 01
// file center
fileGetRoot:path+"/web/fileCenter/getRoot",// get the root directory
fileGetChildFileInfo:path+"/web/fileCenter/getChildFileInfo",// get all child files of a file
fileGetSingleFileInfo:path+"/web/fileCenter/getSingleFileInfo",// get a single file's info
fileAddFileInfo:path+"/web/fileCenter/addFileInfo",// add a file record
fileDeleteFileInfo:path+"/web/fileCenter/deleteFileInfo",// delete a file record
fileUpdateFileName:path+"/web/fileCenter/updateFileName",// update a file name
// 08 settings
setting:''
};
function initAjax(url,param,callback,callback01,callback02) {
$.ajax({
type:"POST",
url:url,
data:param,
dataType:"json",
statusCode:{
404:function(){
console.log("The requested URL does not exist or the parameters are invalid (error 404)");
},
500:function(){
console.log("The server could not complete the request due to an unexpected condition (error 500)");
// window.location.href=httpUrl.loginHttp;
},
405:function(){
console.log("The resource is forbidden (error 405)");
}
},
beforeSend:function () {
// loadingIn();// show the loading overlay
},
success:function(result){
callback(result,callback01,callback02);
// loadingOut(); // hide the loading overlay
},
error:function(result){
console.log("Request failed: ajax error!");
// window.location.href=httpUrl.loginHttp;
}
});
};
// show the loading overlay
function loadingIn() {
$("#page-loader").removeClass('hide');
$("#page-loader").css("z-index","999999");
};
function loadingOut(argument) {
$("#page-loader").addClass('hide');
};
Date.prototype.Format = function (fmt) {
var o = {
"M+": this.getMonth() + 1, //月份
"d+": this.getDate(), //日
"h+": this.getHours(), //小时
"m+": this.getMinutes(), //分
"s+": this.getSeconds(), //秒
"q+": Math.floor((this.getMonth() + 3) / 3), //季度
"S": this.getMilliseconds() //毫秒
};
if (/(y+)/.test(fmt)) fmt = fmt.replace(RegExp.$1, (this.getFullYear() + "").substr(4 - RegExp.$1.length));
for (var k in o)
if (new RegExp("(" + k + ")").test(fmt)) fmt = fmt.replace(RegExp.$1, (RegExp.$1.length == 1) ? (o[k]) : (("00" + o[k]).substr(("" + o[k]).length)));
return fmt;
};
// helper to read a parameter from the URL query string
function GetQueryString(name){
var reg = new RegExp("(^|&)"+ name +"=([^&]*)(&|$)");
var result = window.location.search.substr(1).match(reg);
return result?decodeURIComponent(result[2]):null;
}
// set a cookie; expiry format: s20 = 20 seconds, h12 = 12 hours, d30 = 30 days
function setCookie(name,value,time){
var strsec = getsec(time);
var exp = new Date();
exp.setTime(exp.getTime() + strsec*1);
// document.cookie = name + "="+ escape (value) + ";expires=" + exp.toGMTString()+"path=/; domain="+domain;
document.cookie = name + "="+ escape (value) + ";expires=" + exp.toGMTString();
};
function getsec(str){
var str1=str.substring(1,str.length)*1;
var str2=str.substring(0,1);
if (str2=="s"){
return str1*1000;
}
else if (str2=="h")
{
return str1*60*60*1000;
}
else if (str2=="d")
{
return str1*24*60*60*1000;
}
};
// get a cookie
function getCookie(name){
var arr,reg=new RegExp("(^| )"+name+"=([^;]*)(;|$)");
if(arr=document.cookie.match(reg)){
return unescape(arr[2]);
}
else{
return null;
}
};
// delete a cookie
function delCookie(name){
var exp = new Date();
exp.setTime(exp.getTime() - 1);
var cval=getCookie(name);
if(cval!=null){
document.cookie= name + "="+cval+";expires="+exp.toGMTString();
};
};
// niceScroll scrollbar
function chooseNiceScroll(AA,color) {
$(AA).niceScroll({
cursorcolor: color || "#ccc",// #CC0071 cursor color
cursoropacitymax: 1, // cursor opacity while active (scrollbar "visible" state), range 1 to 0
touchbehavior: true, // enable drag scrolling as on touch devices
cursorwidth: "5px", // cursor width in pixels
cursorborder: "0", // cursor border CSS definition
cursorborderradius: "5px",// cursor border radius in pixels
autohidemode: true // whether to auto-hide the scrollbar
});
};
// toast notification helper
function toastTip(heading,text,hideAfter,afterHidden) {
$.toast({
heading: heading,
text: text,
showHideTransition: 'slide',
icon: 'success',
hideAfter: hideAfter || 1500,
loaderBg: '#edd42e',
position: 'bottom-right',
afterHidden: afterHidden
});
};
|
identifier_name
|
||
base.js
|
AlertAge:path+"/web/healthAlert/getAlertAge",// get the list of alert ages
riskNewHealthAlert:path+"/web/healthAlert/newHealthAlert",// create a new risk alert
riskGetHealthAlert:path+"/web/healthAlert/getHealthAlert",// get a single health alert
riskUpdateHealthAlert:path+"/web/healthAlert/updateHealthAlert",// update a health alert
riskDeleteHealthAlert:path+"/web/healthAlert/deleteHealthAlert",// delete a health alert
// health information
healthGetExamDateList:path+"/web/healthInfo/getExamDateList",// get the exam date list for a class
healthGetClassHealthInfo:path+"/web/healthInfo/getClassHealthInfo",// get class health information
healthGetChildListOfClass:path+"/web/healthInfo/getChildListOfClass",// get the list of children in a class
healthGetBirthdaySex:path+"/web/healthInfo/getBirthdaySex",// get a child's birthday and sex
healthCalculateAge:path+"/web/healthInfo/calculateAge",// calculate age from birthday and exam date
healthHPValue:path+"/web/healthInfo/HPValue",// calculate the height percentile value
healthWPValue:path+"/web/healthInfo/WPValue",// calculate the weight percentile value
healthFatnessValue:path+"/web/healthInfo/FatnessValue",// calculate the obesity value
healthNewHealthInfo:path+"/web/healthInfo/newHealthInfo",// add a health record
healthGetSingleHI:path+"/web/healthInfo/getSingleHI",// get a single health record
healthUpdateHealthInfo:path+"/web/healthInfo/updateHealthInfo",// update a health record
healthDeleteHealthInfo:path+"/web/healthInfo/deleteHealthInfo",// delete a health record
// elective courses / theater activities
GetSchoolIds:path+"/web/activity/TSCourse_GetSchoolIds",// featured courses: get school course ids
GetSchoolJYIds:path+"/web/activity/TSCourse_GetSchoolJYIds",// theater activity ids
GetSchoolCourses:path+"/web/activity/TSCourse_GetSchoolCourses",// featured courses: get school courses
AddCourse:path+"/web/activity/TSCourse_AddCourse",// featured courses: add a course
GetCourseDetails:path+"/web/activity/TSCourse_GetCourseDetails",// get school course details
tsDelCourse:path+"/web/activity/TSCourse_DelCourse",// delete a school course
tsGetBookedChildren:path+"/web/activity/TSCourse_GetBookedChildren",// list of students for check-in
tsCallRoll:path+"/web/activity/TSCourse_CallRoll",// check in
tsCancelRoll:path+"/web/activity/TSCourse_CancelRoll",// cancel check-in
tsTempBookCourse:path+"/web/activity/TSCourse_tempBookCourse",// add extra booking slots
getCourseSimpleTJ:path+"/web/activity/TSCourse_getCourseSimpleTJ",// elective activities: activity statistics
getCourseClassTJ:path+"/web/activity/TSCourse_getCourseClassTJ",// elective activities: class statistics
getCourseStudentTJ:path+"/web/activity/TSCourse_getCourseStudentTJ",// elective activities: student statistics
getCourseStudentDetailTJ:path+"/web/activity/TSCourse_getCourseStudentDetailTJ",// elective activities: activity statistics details
getCourseAllTJ:path+"/web/activity/TSCourse_getCourseAllTJ2",// elective activities: activity statistics 01
// file center
fileGetRoot:path+"/web/fileCenter/getRoot",// get the root directory
fileGetChildFileInfo:path+"/web/fileCenter/getChildFileInfo",// get all child files of a file
fileGetSingleFileInfo:path+"/web/fileCenter/getSingleFileInfo",// get a single file's info
fileAddFileInfo:path+"/web/fileCenter/addFileInfo",// add a file record
fileDeleteFileInfo:path+"/web/fileCenter/deleteFileInfo",// delete a file record
fileUpdateFileName:path+"/web/fileCenter/updateFileName",// update a file name
// 08 settings
setting:''
};
function initAjax(url,param,callback,callback01,callback02) {
$.ajax({
type:"POST",
url:url,
data:param,
dataType:"json",
statusCode:{
404:function(){
console.log("The requested URL does not exist or the parameters are invalid (error 404)");
},
500:function(){
console.log("The server could not complete the request due to an unexpected condition (error 500)");
// window.location.href=httpUrl.loginHttp;
},
405:function(){
console.log("The resource is forbidden (error 405)");
}
},
beforeSend:function () {
// loadingIn();// show the loading overlay
},
success:function(result){
callback(result,callback01,callback02);
// loadingOut(); // hide the loading overlay
},
error:function(result){
console.log("Request failed: ajax error!");
// window.location.href=httpUrl.loginHttp;
}
});
};
// show the loading overlay
function loadingIn() {
$("#page-loader").removeClass('hide');
$("#page-loader").css("z-index","999999");
};
function loadingOut(argument) {
$("#page-loader").addClass('hide');
};
Date.prototype.Format = function (fmt) {
var o = {
"M+": this.getMonth() + 1, //月份
"d+": this.getDate(), //日
"h+": this.getHours(), //小时
"m+": this.getMinutes(), //分
"s+": this.getSeconds(), //秒
"q+": Math.floor((this.getMonth() + 3) / 3), //季度
"S": this.getMilliseconds() //毫秒
};
if (/(y+)/.test(fmt)) fmt = fmt.replace(RegExp.$1, (this.getFullYear() + "").substr(4 - RegExp.$1.length));
for (var k in o)
if (new RegExp("(" + k + ")").test(fmt)) fmt = fmt.replace(RegExp.$1, (RegExp.$1.length == 1) ? (o[k]) : (("00" + o[k]).substr(("" + o[k]).length)));
return fmt;
};
// helper to read a parameter from the URL query string
function GetQueryString(name){
var reg = new RegExp("(^|&)"+ name +"=([^&]*)(&|$)");
var result = window.location.search.substr(1).match(reg);
return result?decodeURIComponent(result[2]):null;
}
// set a cookie; expiry format: s20 = 20 seconds, h12 = 12 hours, d30 = 30 days
function setCookie(name,value,time){
var strsec = getsec(time);
var exp = new Date();
exp.setTime(exp.getTime() + strsec*1);
// document.cookie = name + "="+ escape (value) + ";expires=" + exp.toGMTString()+"path=/; domain="+domain;
document.cookie = name + "="+ escape (value) + ";expires=" + exp.toGMTString();
};
function getsec(str){
var str1=str.substring(1,str.length)*1;
var str2=str.substring(0,1);
if (str2=="s"){
return str1*1000;
}
else if (str2=="h")
{
return str1*60*60*1000;
}
else if (str2=="d")
{
return str1*24*60*60*1000;
}
};
// get a cookie
function getCookie(name){
var arr,reg=new RegExp("(^| )"+name+"=([^;]*)(;|$)");
if(arr=document.cookie.match(reg)){
return unescape(arr[2]);
}
else{
return null;
}
};
// delete a cookie
function delCookie(name){
var exp = new Date();
exp.setTime(exp.getTime() - 1);
var cval=getCookie(name);
if(cval!=null){
document.cookie= name + "="+cval+";expires="+exp.toGMTString();
};
};
// niceScroll scrollbar
function chooseNiceScroll(AA,color) {
$(AA).niceScroll({
cursorcolor: color || "#ccc",// #CC0071 cursor color
cursoropacitymax: 1, // cursor opacity while active (scrollbar "visible" state), range 1 to 0
touchbehavior: true, // enable drag scrolling as on touch devices
cursorwidth: "5px", // cursor width in pixels
cursorborder: "0", // cursor border CSS definition
cursorborderradius: "5px",// cursor border radius in pixels
autohidemode: true // whether to auto-hide the scrollbar
});
};
// toast notification helper
function toastTip(heading,text,hideAfter,afterHidden) {
$.toast({
heading: heading,
text: text,
showHideTransition: 'slide',
icon: 'success',
hideAfter: hideAfter || 1500,
loaderBg: '#edd42e',
position: 'bottom-right',
afterHidden: afterHidden
});
};
|
conditional_block
|
||
pouch-db-singleton.ts
|
'../storage-provider-base.js';
import {SerializedModelEntry, ModelValue} from '../crdt-collection-model.js';
import {PouchDbStorageProvider} from './pouch-db-storage-provider.js';
import {PouchDbStorage} from './pouch-db-storage.js';
import {upsert, UpsertDoc, UpsertMutatorFn} from './pouch-db-upsert.js';
/**
* A representation of a Singleton in Pouch storage.
*/
interface SingletonStorage extends UpsertDoc {
value: ModelValue;
/** ReferenceMode state for this data */
referenceMode: boolean;
/** Monotonically increasing version number */
version: number;
}
/**
* The PouchDB-based implementation of a Singleton.
*/
export class PouchDbSingleton extends PouchDbStorageProvider implements SingletonStorageProvider {
private localKeyId = 0;
/**
* Create a new PouchDbSingleton.
*
* @param type the underlying type for this singleton.
* @param storageEngine a reference back to the PouchDbStorage, used for baseStorageKey calls.
* @param name appears unused.
* @param id see base class.
* @param key the storage key for this singleton.
*/
constructor(type: Type, storageEngine: PouchDbStorage, name: string, id: string, key: string, refMode: boolean) {
super(type, storageEngine, name, id, key, refMode);
this._version = 0;
// See if the value has been set
this.upsert(async doc => doc).then((doc) => {
this.resolveInitialized();
// value has been written
}).catch((err) => {
console.warn('Error init ' + this.storageKey, err);
// TODO(lindner) error out the initialized Promise
throw err;
});
}
/** @inheritDoc */
backingType(): Type {
return this.type;
}
async clone(): Promise<PouchDbSingleton> {
const singleton = new PouchDbSingleton(this.type, this.storageEngine, this.name, this.id, null, this.referenceMode);
await singleton.cloneFrom(this);
return singleton;
}
async cloneFrom(handle): Promise<void> {
const literal = await handle.serializeContents();
await this.initialized;
this.referenceMode = handle.referenceMode;
if (handle.referenceMode && literal.model.length > 0) {
// cloneFrom the backing store data by reading the model and writing it out.
const [backingStore, handleBackingStore] = await Promise.all(
[this.ensureBackingStore(), handle.ensureBackingStore()]);
literal.model = literal.model.map(({id, value}) => ({id, value: {id: value.id, storageKey: backingStore.storageKey}}));
const underlying = await handleBackingStore.getMultiple(literal.model.map(({id}) => id));
await backingStore.storeMultiple(underlying, [this.storageKey]);
}
await this.fromLiteral(literal);
if (literal && literal.model && literal.model.length === 1) {
const newvalue = literal.model[0].value;
if (newvalue) {
await this.upsert(async doc => {
doc.value = newvalue;
doc.referenceMode = this.referenceMode;
doc.version = Math.max(this._version, doc.version) + 1;
return doc;
});
}
await this._fire(new ChangeEvent({data: newvalue, version: this._version}));
}
}
/**
* Returns the model data in a format suitable for transport over
* the API channel (i.e. between execution host and context).
*/
async modelForSynchronization() {
await this.initialized;
const doc = await this.upsert(async doc => doc);
const value = doc.value;
if (this.referenceMode && value !== null) {
const backingStore = await this.ensureBackingStore();
const result = await backingStore.get(value.id);
return {
version: this._version,
model: [{id: value.id, value: result}]
};
}
return super.modelForSynchronization();
}
/**
* Returns the state of this singleton as an object of the form
* {version, model: [{id, value}]}
*/
async
|
(): Promise<{version: number; model: SerializedModelEntry[]}> {
await this.initialized;
const doc = await this.upsert(async doc => doc);
const value = doc.value;
let model: SerializedModelEntry[] = [];
if (value != null) {
model = [
{
id: value.id,
keys: [],
value
}
];
}
return {
version: this._version,
model
};
}
/**
* Updates the internal state of this singleton with the supplied data.
*/
async fromLiteral({version, model}): Promise<void> {
await this.initialized;
const value = model.length === 0 ? null : model[0].value;
if (this.referenceMode && value && value.rawData) {
assert(false, `shouldn't have rawData ${JSON.stringify(value.rawData)} here`);
}
assert(value !== undefined);
const newDoc = await this.upsert(async (doc) => {
// modify document
doc.value = value;
doc.referenceMode = this.referenceMode;
doc.version = Math.max(version, doc.version) + 1;
return doc;
});
this._version = newDoc.version;
}
/**
* @return a promise containing the singleton value or null if it does not exist.
*/
async get(): Promise<ModelValue> {
await this.initialized;
try {
const doc = await this.upsert(async doc => doc);
let value = doc.value;
if (value == null) {
//console.warn('value is null and refmode=' + this.referenceMode);
}
if (this.referenceMode && value) {
const backingStore = await this.ensureBackingStore();
value = await backingStore.get(value.id);
}
// logging goes here
return value;
} catch (err) {
// TODO(plindner): caught for compatibility: pouchdb layer can throw, firebase layer never does
console.warn('PouchDbSingleton.get err=', err);
return null;
}
}
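  // Illustrative usage sketch (not part of the original class), kept as a comment
  // so the class body stays intact; `singleton` stands for any PouchDbSingleton
  // instance and the ids/values are hypothetical:
  //
  //   const value = await singleton.get();                           // null if nothing stored yet
  //   await singleton.set({id: 'entity-1', rawData: {txt: 'hello'}});
  //   await singleton.clear();                                       // equivalent to set(null)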
/**
* Set the value for this singleton.
* @param value the value we want to set. If null remove the singleton from storage
* @param originatorId TBD
* @param barrier TBD
*/
async set(value, originatorId: string = null, barrier: string|null = null): Promise<void> {
assert(value !== undefined);
let stored: SingletonStorage;
if (this.referenceMode && value) {
// Even if this value is identical to the previously written one,
// we can't suppress an event here because we don't actually have
// the previous value for comparison (that's down in the backing store).
// TODO(shans): should we fetch and compare in the case of the ids matching?
const referredType = this.type;
const storageKey = this.storageEngine.baseStorageKey(referredType, this.storageKey);
const backingStore = await this.ensureBackingStore();
// TODO(shans): mutating the storageKey here to provide unique keys is
// a hack that can be removed once entity mutation is distinct from collection
// updates. Once entity mutation exists, it shouldn't ever be possible to write
// different values with the same id.
await backingStore.store(value, [this.storageKey + this.localKeyId++]);
// Store the indirect pointer to the storageKey
// Do this *after* the write to backing store, otherwise null responses could occur
stored = await this.upsert(async doc => {
doc.referenceMode = this.referenceMode;
doc.version = this._version;
doc.value = {id: value['id'], storageKey};
return doc;
});
} else {
// Update the Pouch-stored value: if value is null, delete the key; otherwise store it.
if (value == null) {
try {
const doc = await this.db.get(this.pouchDbKey.location);
await this.db.remove(doc);
} catch (err) {
// Deleting an already deleted item is acceptable.
if (err.name !== 'not_found') {
console.warn('PouchDbSingleton.remove err=', err);
throw err;
}
}
} else {
stored = await this.upsert(async doc => {
doc.referenceMode = this.referenceMode;
doc.version = this._version;
doc.value = value;
return doc;
});
}
}
this.bumpVersion();
const data = this.referenceMode ? value : stored.value;
await this._fire(new ChangeEvent({data, version: this._version, originatorId, barrier}));
}
/**
* Clear a singleton from storage.
* @param originatorId TBD
* @param barrier TBD
*/
async clear(originatorId: string = null, barrier: string = null): Promise<void> {
await this.set(null, originatorId, barrier);
}
/**
* Triggered when the storage key has been modified or deleted.
*/
async onRemoteStateSynced(doc: PouchDB.Core.ExistingDocument<SingletonStorage>) {
// TODO(lindner): reimplement as simple fires when we have replication working again
// TODO(lindner): consider using doc._deleted to
|
serializeContents
|
identifier_name
|
pouch-db-singleton.ts
|
'../storage-provider-base.js';
import {SerializedModelEntry, ModelValue} from '../crdt-collection-model.js';
import {PouchDbStorageProvider} from './pouch-db-storage-provider.js';
import {PouchDbStorage} from './pouch-db-storage.js';
import {upsert, UpsertDoc, UpsertMutatorFn} from './pouch-db-upsert.js';
/**
* A representation of a Singleton in Pouch storage.
*/
interface SingletonStorage extends UpsertDoc {
value: ModelValue;
/** ReferenceMode state for this data */
referenceMode: boolean;
/** Monotonically increasing version number */
version: number;
}
/**
* The PouchDB-based implementation of a Singleton.
*/
export class PouchDbSingleton extends PouchDbStorageProvider implements SingletonStorageProvider {
private localKeyId = 0;
/**
* Create a new PouchDbSingleton.
*
* @param type the underlying type for this singleton.
* @param storageEngine a reference back to the PouchDbStorage, used for baseStorageKey calls.
* @param name appears unused.
* @param id see base class.
* @param key the storage key for this singleton.
*/
constructor(type: Type, storageEngine: PouchDbStorage, name: string, id: string, key: string, refMode: boolean) {
super(type, storageEngine, name, id, key, refMode);
this._version = 0;
// See if the value has been set
this.upsert(async doc => doc).then((doc) => {
this.resolveInitialized();
// value has been written
}).catch((err) => {
console.warn('Error init ' + this.storageKey, err);
// TODO(lindner) error out the initialized Promise
throw err;
});
}
/** @inheritDoc */
backingType(): Type {
return this.type;
}
async clone(): Promise<PouchDbSingleton> {
const singleton = new PouchDbSingleton(this.type, this.storageEngine, this.name, this.id, null, this.referenceMode);
await singleton.cloneFrom(this);
return singleton;
}
async cloneFrom(handle): Promise<void> {
const literal = await handle.serializeContents();
await this.initialized;
this.referenceMode = handle.referenceMode;
if (handle.referenceMode && literal.model.length > 0) {
// cloneFrom the backing store data by reading the model and writing it out.
const [backingStore, handleBackingStore] = await Promise.all(
[this.ensureBackingStore(), handle.ensureBackingStore()]);
literal.model = literal.model.map(({id, value}) => ({id, value: {id: value.id, storageKey: backingStore.storageKey}}));
const underlying = await handleBackingStore.getMultiple(literal.model.map(({id}) => id));
await backingStore.storeMultiple(underlying, [this.storageKey]);
}
await this.fromLiteral(literal);
if (literal && literal.model && literal.model.length === 1) {
const newvalue = literal.model[0].value;
if (newvalue) {
await this.upsert(async doc => {
doc.value = newvalue;
|
}
await this._fire(new ChangeEvent({data: newvalue, version: this._version}));
}
}
/**
* Returns the model data in a format suitable for transport over
* the API channel (i.e. between execution host and context).
*/
async modelForSynchronization() {
await this.initialized;
const doc = await this.upsert(async doc => doc);
const value = doc.value;
if (this.referenceMode && value !== null) {
const backingStore = await this.ensureBackingStore();
const result = await backingStore.get(value.id);
return {
version: this._version,
model: [{id: value.id, value: result}]
};
}
return super.modelForSynchronization();
}
/**
   * Returns the state of this singleton as an object of the form
* {version, model: [{id, value}]}
*/
async serializeContents(): Promise<{version: number; model: SerializedModelEntry[]}> {
await this.initialized;
const doc = await this.upsert(async doc => doc);
const value = doc.value;
let model: SerializedModelEntry[] = [];
if (value != null) {
model = [
{
id: value.id,
keys: [],
value
}
];
}
return {
version: this._version,
model
};
}
/**
* Updates the internal state of this singleton with the supplied data.
*/
async fromLiteral({version, model}): Promise<void> {
await this.initialized;
const value = model.length === 0 ? null : model[0].value;
if (this.referenceMode && value && value.rawData) {
assert(false, `shouldn't have rawData ${JSON.stringify(value.rawData)} here`);
}
assert(value !== undefined);
const newDoc = await this.upsert(async (doc) => {
// modify document
doc.value = value;
doc.referenceMode = this.referenceMode;
doc.version = Math.max(version, doc.version) + 1;
return doc;
});
this._version = newDoc.version;
}
/**
* @return a promise containing the singleton value or null if it does not exist.
*/
async get(): Promise<ModelValue> {
await this.initialized;
try {
const doc = await this.upsert(async doc => doc);
let value = doc.value;
if (value == null) {
//console.warn('value is null and refmode=' + this.referenceMode);
}
if (this.referenceMode && value) {
const backingStore = await this.ensureBackingStore();
value = await backingStore.get(value.id);
}
// logging goes here
return value;
} catch (err) {
// TODO(plindner): caught for compatibility: pouchdb layer can throw, firebase layer never does
console.warn('PouchDbSingleton.get err=', err);
return null;
}
}
/**
* Set the value for this singleton.
* @param value the value we want to set. If null remove the singleton from storage
* @param originatorId TBD
* @param barrier TBD
*/
async set(value, originatorId: string = null, barrier: string|null = null): Promise<void> {
assert(value !== undefined);
let stored: SingletonStorage;
if (this.referenceMode && value) {
// Even if this value is identical to the previously written one,
// we can't suppress an event here because we don't actually have
// the previous value for comparison (that's down in the backing store).
// TODO(shans): should we fetch and compare in the case of the ids matching?
const referredType = this.type;
const storageKey = this.storageEngine.baseStorageKey(referredType, this.storageKey);
const backingStore = await this.ensureBackingStore();
// TODO(shans): mutating the storageKey here to provide unique keys is
// a hack that can be removed once entity mutation is distinct from collection
// updates. Once entity mutation exists, it shouldn't ever be possible to write
// different values with the same id.
await backingStore.store(value, [this.storageKey + this.localKeyId++]);
// Store the indirect pointer to the storageKey
// Do this *after* the write to backing store, otherwise null responses could occur
stored = await this.upsert(async doc => {
doc.referenceMode = this.referenceMode;
doc.version = this._version;
doc.value = {id: value['id'], storageKey};
return doc;
});
} else {
      // Update Pouch/_stored. If value is null, delete the key; otherwise store it.
if (value == null) {
try {
const doc = await this.db.get(this.pouchDbKey.location);
await this.db.remove(doc);
} catch (err) {
// Deleting an already deleted item is acceptable.
if (err.name !== 'not_found') {
console.warn('PouchDbSingleton.remove err=', err);
throw err;
}
}
} else {
stored = await this.upsert(async doc => {
doc.referenceMode = this.referenceMode;
doc.version = this._version;
doc.value = value;
return doc;
});
}
}
this.bumpVersion();
const data = this.referenceMode ? value : stored.value;
await this._fire(new ChangeEvent({data, version: this._version, originatorId, barrier}));
}
/**
* Clear a singleton from storage.
* @param originatorId TBD
* @param barrier TBD
*/
async clear(originatorId: string = null, barrier: string = null): Promise<void> {
await this.set(null, originatorId, barrier);
}
/**
* Triggered when the storage key has been modified or deleted.
*/
async onRemoteStateSynced(doc: PouchDB.Core.ExistingDocument<SingletonStorage>) {
// TODO(lindner): reimplement as simple fires when we have replication working again
// TODO(lindner): consider using doc._deleted to special
|
doc.referenceMode = this.referenceMode;
doc.version = Math.max(this._version, doc.version) + 1;
return doc;
});
|
random_line_split
|
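In reference mode, set() above writes the full entity to the backing store before publishing the {id, storageKey} pointer in the singleton's own document, so a reader that follows the pointer never arrives before the data has landed. A minimal sketch of that ordering, using stand-in interfaces rather than the real storage classes:
// Stand-in shapes for this sketch; the real BackingStore/upsert come from the storage layer.
interface BackingStoreLike { storageKey: string; store(value: {id: string}, keys: string[]): Promise<void>; }
type UpsertLike = (mutate: (doc: any) => any) => Promise<any>;

async function setInReferenceMode(value: {id: string}, backing: BackingStoreLike, upsert: UpsertLike) {
  // 1. Land the full entity in the backing store first.
  await backing.store(value, [backing.storageKey + '/' + value.id]);
  // 2. Only then publish the indirect pointer that readers will dereference.
  await upsert(doc => ({...doc, value: {id: value.id, storageKey: backing.storageKey}}));
}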
pouch-db-singleton.ts
|
= 0;
/**
* Create a new PouchDbSingleton.
*
* @param type the underlying type for this singleton.
* @param storageEngine a reference back to the PouchDbStorage, used for baseStorageKey calls.
* @param name appears unused.
* @param id see base class.
* @param key the storage key for this collection.
*/
constructor(type: Type, storageEngine: PouchDbStorage, name: string, id: string, key: string, refMode: boolean) {
super(type, storageEngine, name, id, key, refMode);
this._version = 0;
// See if the value has been set
this.upsert(async doc => doc).then((doc) => {
this.resolveInitialized();
// value has been written
}).catch((err) => {
console.warn('Error init ' + this.storageKey, err);
// TODO(lindner) error out the initialized Promise
throw err;
});
}
/** @inheritDoc */
backingType(): Type {
return this.type;
}
async clone(): Promise<PouchDbSingleton> {
const singleton = new PouchDbSingleton(this.type, this.storageEngine, this.name, this.id, null, this.referenceMode);
await singleton.cloneFrom(this);
return singleton;
}
async cloneFrom(handle): Promise<void> {
const literal = await handle.serializeContents();
await this.initialized;
this.referenceMode = handle.referenceMode;
if (handle.referenceMode && literal.model.length > 0) {
// cloneFrom the backing store data by reading the model and writing it out.
const [backingStore, handleBackingStore] = await Promise.all(
[this.ensureBackingStore(), handle.ensureBackingStore()]);
literal.model = literal.model.map(({id, value}) => ({id, value: {id: value.id, storageKey: backingStore.storageKey}}));
const underlying = await handleBackingStore.getMultiple(literal.model.map(({id}) => id));
await backingStore.storeMultiple(underlying, [this.storageKey]);
}
await this.fromLiteral(literal);
if (literal && literal.model && literal.model.length === 1) {
const newvalue = literal.model[0].value;
if (newvalue) {
await this.upsert(async doc => {
doc.value = newvalue;
doc.referenceMode = this.referenceMode;
doc.version = Math.max(this._version, doc.version) + 1;
return doc;
});
}
await this._fire(new ChangeEvent({data: newvalue, version: this._version}));
}
}
/**
* Returns the model data in a format suitable for transport over
* the API channel (i.e. between execution host and context).
*/
async modelForSynchronization() {
await this.initialized;
const doc = await this.upsert(async doc => doc);
const value = doc.value;
if (this.referenceMode && value !== null) {
const backingStore = await this.ensureBackingStore();
const result = await backingStore.get(value.id);
return {
version: this._version,
model: [{id: value.id, value: result}]
};
}
return super.modelForSynchronization();
}
/**
   * Returns the state of this singleton as an object of the form
* {version, model: [{id, value}]}
*/
async serializeContents(): Promise<{version: number; model: SerializedModelEntry[]}> {
await this.initialized;
const doc = await this.upsert(async doc => doc);
const value = doc.value;
let model: SerializedModelEntry[] = [];
if (value != null) {
model = [
{
id: value.id,
keys: [],
value
}
];
}
return {
version: this._version,
model
};
}
/**
* Updates the internal state of this singleton with the supplied data.
*/
async fromLiteral({version, model}): Promise<void> {
await this.initialized;
const value = model.length === 0 ? null : model[0].value;
if (this.referenceMode && value && value.rawData) {
assert(false, `shouldn't have rawData ${JSON.stringify(value.rawData)} here`);
}
assert(value !== undefined);
const newDoc = await this.upsert(async (doc) => {
// modify document
doc.value = value;
doc.referenceMode = this.referenceMode;
doc.version = Math.max(version, doc.version) + 1;
return doc;
});
this._version = newDoc.version;
}
/**
* @return a promise containing the singleton value or null if it does not exist.
*/
async get(): Promise<ModelValue> {
await this.initialized;
try {
const doc = await this.upsert(async doc => doc);
let value = doc.value;
if (value == null) {
//console.warn('value is null and refmode=' + this.referenceMode);
}
if (this.referenceMode && value) {
const backingStore = await this.ensureBackingStore();
value = await backingStore.get(value.id);
}
// logging goes here
return value;
} catch (err) {
// TODO(plindner): caught for compatibility: pouchdb layer can throw, firebase layer never does
console.warn('PouchDbSingleton.get err=', err);
return null;
}
}
/**
* Set the value for this singleton.
* @param value the value we want to set. If null remove the singleton from storage
* @param originatorId TBD
* @param barrier TBD
*/
async set(value, originatorId: string = null, barrier: string|null = null): Promise<void> {
assert(value !== undefined);
let stored: SingletonStorage;
if (this.referenceMode && value) {
// Even if this value is identical to the previously written one,
// we can't suppress an event here because we don't actually have
// the previous value for comparison (that's down in the backing store).
// TODO(shans): should we fetch and compare in the case of the ids matching?
const referredType = this.type;
const storageKey = this.storageEngine.baseStorageKey(referredType, this.storageKey);
const backingStore = await this.ensureBackingStore();
// TODO(shans): mutating the storageKey here to provide unique keys is
// a hack that can be removed once entity mutation is distinct from collection
// updates. Once entity mutation exists, it shouldn't ever be possible to write
// different values with the same id.
await backingStore.store(value, [this.storageKey + this.localKeyId++]);
// Store the indirect pointer to the storageKey
// Do this *after* the write to backing store, otherwise null responses could occur
stored = await this.upsert(async doc => {
doc.referenceMode = this.referenceMode;
doc.version = this._version;
doc.value = {id: value['id'], storageKey};
return doc;
});
} else {
      // Update Pouch/_stored. If value is null, delete the key; otherwise store it.
if (value == null) {
try {
const doc = await this.db.get(this.pouchDbKey.location);
await this.db.remove(doc);
} catch (err) {
// Deleting an already deleted item is acceptable.
if (err.name !== 'not_found') {
console.warn('PouchDbSingleton.remove err=', err);
throw err;
}
}
} else {
stored = await this.upsert(async doc => {
doc.referenceMode = this.referenceMode;
doc.version = this._version;
doc.value = value;
return doc;
});
}
}
this.bumpVersion();
const data = this.referenceMode ? value : stored.value;
await this._fire(new ChangeEvent({data, version: this._version, originatorId, barrier}));
}
/**
* Clear a singleton from storage.
* @param originatorId TBD
* @param barrier TBD
*/
async clear(originatorId: string = null, barrier: string = null): Promise<void> {
await this.set(null, originatorId, barrier);
}
/**
* Triggered when the storage key has been modified or deleted.
*/
async onRemoteStateSynced(doc: PouchDB.Core.ExistingDocument<SingletonStorage>) {
// TODO(lindner): reimplement as simple fires when we have replication working again
// TODO(lindner): consider using doc._deleted to special case.
const value = doc.value;
// Store locally
this.bumpVersion(doc.version);
    // Skip if value == null, which is what happens when docs are deleted.
if (value) {
await this.ensureBackingStore().then(async store => {
const data = await store.get(value.id);
if (!data) {
// TODO(lindner): data referred to by this data is missing.
console.log('PouchDbSingleton.onRemoteSynced: possible race condition for id=' + value.id);
return;
}
await this._fire(new ChangeEvent({data, version: this._version}));
});
} else {
if (value != null)
|
{
await this._fire(new ChangeEvent({data: value, version: this._version}));
}
|
conditional_block
|
|
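Both cloneFrom() and fromLiteral() above advance the persisted version with Math.max(incoming, stored) + 1, which keeps the counter monotonic no matter which side is ahead. Purely as an illustration of that merge rule (not code taken from the sources):
// Version-merge rule: take the larger of the two counters, then bump once for this write.
const nextVersion = (incoming: number, stored: number): number => Math.max(incoming, stored) + 1;
// nextVersion(3, 5) === 6 and nextVersion(7, 5) === 8; the result always moves forward.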
world.py
|
]
self.legalstate = filter(lambda x:not ( x in self.traps or x in self.targets or x in self.walls ) , [ i for i in range(self.rows * self.cols)])
# print(self.legalstate)
self.actionmap = {
0:'up',
1:'down',
2:'left',
3:'right'
}
self.targetReward = 1.0
self.trapReward = -1.0
self.wallReward = -0.5
self.norReward = -0.1
self.map_to_state , self.state_to_map = self.init()
self.stateNumber = len(self.map_to_state)
self.legalstate = map(lambda x:self.map_to_state[x],self.legalstate)
# print(self.legalstate)
def init(self):
map_to_state = {}
state_to_map = {}
mapindex = 0
stateindex = 0
for i in range(self.rows) :
for j in range(self.cols) :
if self.map[i][j] != 'b' :
map_to_state[mapindex] = stateindex
state_to_map[stateindex] = mapindex
stateindex += 1
mapindex += 1
return map_to_state , state_to_map
print(len(state_to_map))
print(map_to_state)
print(state_to_map)
def simulation(self,state,action):
if state not in self.state_to_map :
print("exception: illegal state")
exit(0)
m_state = self.state_to_map[state]
nextstate = self.__execute(m_state,action)
c = 0
r = 0.0
if self.isLegalState(nextstate):
c , r = self.getReward(nextstate)
else:
r = self.norReward
c = 0
nextstate = m_state
if nextstate in self.walls :
nextstate = m_state
return [state,action,self.map_to_state[nextstate],r,c]
def __execute(self,state,action):
if action == 0 :
nextstate = state - self.rows
elif action == 1 :
nextstate = state + self.rows
elif action == 2 :
nextstate = state - 1
elif action == 3 :
nextstate = state + 1
else:
nextstate = state
return nextstate
def isTarget(self,state):
if state < 0 or state >= self.rows * self.cols:
return False
state = self.state_to_map[state]
row = state / self.cols
col = state - row * self.cols
if self.map[row][col] == 'g':
return True
return False
def getReward(self,state):
|
return (-1,self.targetReward)
elif state in self.traps :
return (-1,self.trapReward)
elif state in self.walls :
return (0,self.wallReward)
else:
return (0,self.norReward)
def isLegalState(self,state):
if state >= 0 and state <= self.rows * self.cols - 1 :
return True
else:
return False
def state_to_coordinate(self,state):
if self.isLegalState(state) :
row = state / self.cols
col = state - self.cols * row
return (row+1,col+1)
else:
return (None,None)
def setMap(self,map):
self.Map = map
self.Map.gridsize = self.Map.gridsize * self.Map.grids
self.rows = int(self.Map.height / self.Map.grids) + 1
self.cols = int(self.Map.width / self.Map.grids) + 1
self.stateNumber = self.cols * self.rows
print(self.Map.height,self.Map.width)
print(self.rows,self.cols)
print(self.stateNumber)
print(self.Map.turtlebotPosition)
print(self.getStateNum(self.Map.getCell(self.Map.turtlebotPosition)))
def getPosition(self):
if self.Map == None :
return None
else:
return self.getStateNum(self.Map.getCell(self.Map.turtlebotPosition))
def doAction(self,action):
if action == 0 :
x = 0
y = 1
elif action == 1 :
x = 0
y = -1
elif action == 2 :
x = -1
y = 0
elif action == 3 :
x = 1
y = 0
else:
x = y = 0
return x , y
    # Convert the action sent by the robot into the next goal position on the map, then send it to move_base
    # Also receive the returned data (reward, current position, etc.), score the final reward with the reward scheme, and return it to the robot
def doSimulation(self,action):
x , y = self.doAction(action)
print("action ",self.actionmap[action])
state = self.Map.sendGoal(x,y)
if state != None :
state.Print()
# time.sleep(1)
state1 = self.getStateNum(state.curCell)
state2 = self.getStateNum(state.realCell)
realstate = self.getStateNum(state.realCell)
targetCell = self.getCellFromStateNum(self.Target)
diff1 = math.fabs(targetCell.X-state.curCell.X) + math.fabs(targetCell.Y-state.curCell.Y)
diff2 = math.fabs(targetCell.X-state.realCell.X) + math.fabs(targetCell.Y-state.realCell.Y)
addreward = 0
extraReward = 0.0
if diff2 >= diff1 :
extraReward += 10
        # No movement: apply a penalty, since an obstacle was probably encountered
if state1 == realstate :
extraReward += 10
        # The closer to the target, the higher the reward
if math.fabs( targetCell.X - state.curCell.X ) > math.fabs(targetCell.X - state.realCell.X) :
addreward += 20
if math.fabs(targetCell.X - state.realCell.X) <= 2 :
addreward += 11
self.changeRewardOnPath(40)
elif math.fabs(targetCell.X - state.realCell.X) <= 3 :
addreward += 9
self.changeRewardOnPath(30)
elif math.fabs(targetCell.X - state.realCell.X) <= 4 :
addreward += 7
self.changeRewardOnPath(20)
elif math.fabs(targetCell.X - state.realCell.X) <= 5 :
addreward += 5
self.changeRewardOnPath(15)
elif math.fabs(targetCell.X - state.realCell.X) <= 6 :
addreward += 5
self.changeRewardOnPath(10)
if math.fabs( targetCell.X - state.curCell.X ) < math.fabs(targetCell.X - state.realCell.X) :
addreward -= 50
if math.fabs( targetCell.Y - state.curCell.Y ) > math.fabs(targetCell.Y - state.realCell.Y) :
addreward += 20
if math.fabs(targetCell.Y - state.realCell.Y) <= 1 :
addreward += 5
if math.fabs( targetCell.Y - state.curCell.Y ) < math.fabs(targetCell.Y - state.realCell.Y) :
            addreward -= 40
state.reward -= extraReward
state.reward += addreward
print("a reward ",state.reward)
self.path.append([state1,action,state2,state.reward,state.c])
flag = self.checkGoal(state2)
if flag :
for i in range(len(self.path)):
self.path[i][3] += 10
if flag :
return realstate , False , self.path , state.reward , action
else:
return realstate , True , self.path , state.reward , action
def changeRewardOnPath(self,reward,rate=0.5,axis=0):
for i in range(len(self.path)-1,-1,-1):
cell1 = self.getCellFromStateNum(self.path[i][0])
cell2 = self.getCellFromStateNum(self.path[i][2])
targetCell = self.getCellFromStateNum(self.Target)
if math.fabs( targetCell.X - cell1.X ) > math.fabs(targetCell.X - cell2.X) :
self.path[i][3] += reward
if reward > 5 :
reward = reward * rate
else:
reward = 5
def checkGoal(self,state):
if state == self.Target :
print("get target ",self.getCellFromStateNum(self.Target))
return True
return False
def clearPath(self):
self.path = []
    # Get the state number from a cell on the map
def getStateNum(self,cell):
num = cell.Y * self.cols + cell.X
return num
    # Get the map cell from a state number
def getCellFromStateNum(self,num):
y = num / self.cols
x = num - self.cols * y
cell = Cell(x,y)
return cell
def getPositionStateNum
|
if state in self.targets :
|
random_line_split
|
world.py
|
]
self.legalstate = filter(lambda x:not ( x in self.traps or x in self.targets or x in self.walls ) , [ i for i in range(self.rows * self.cols)])
# print(self.legalstate)
self.actionmap = {
0:'up',
1:'down',
2:'left',
3:'right'
}
self.targetReward = 1.0
self.trapReward = -1.0
self.wallReward = -0.5
self.norReward = -0.1
self.map_to_state , self.state_to_map = self.init()
self.stateNumber = len(self.map_to_state)
self.legalstate = map(lambda x:self.map_to_state[x],self.legalstate)
# print(self.legalstate)
def init(self):
map_to_state = {}
state_to_map = {}
mapindex = 0
stateindex = 0
for i in range(self.rows) :
for j in range(self.cols) :
if self.map[i][j] != 'b' :
map_to_state[mapindex] = stateindex
state_to_map[stateindex] = mapindex
stateindex += 1
mapindex += 1
return map_to_state , state_to_map
print(len(state_to_map))
print(map_to_state)
print(state_to_map)
def simulation(self,state,action):
if state not in self.state_to_map :
print("exception: illegal state")
exit(0)
m_state = self.state_to_map[state]
nextstate = self.__execute(m_state,action)
c = 0
r = 0.0
if self.isLegalState(nextstate):
c , r = self.getReward(nextstate)
else:
r = self.norReward
c = 0
nextstate = m_state
if nextstate in self.walls :
nextstate = m_state
return [state,action,self.map_to_state[nextstate],r,c]
def __execute(self,state,action):
if action == 0 :
nextstate = state - self.rows
elif action == 1 :
nextstate = state + self.rows
elif action == 2 :
nextstate = state - 1
elif action == 3 :
nextstate = state + 1
else:
nextstate = state
return nextstate
def
|
(self,state):
if state < 0 or state >= self.rows * self.cols:
return False
state = self.state_to_map[state]
row = state / self.cols
col = state - row * self.cols
if self.map[row][col] == 'g':
return True
return False
def getReward(self,state):
if state in self.targets :
return (-1,self.targetReward)
elif state in self.traps :
return (-1,self.trapReward)
elif state in self.walls :
return (0,self.wallReward)
else:
return (0,self.norReward)
def isLegalState(self,state):
if state >= 0 and state <= self.rows * self.cols - 1 :
return True
else:
return False
def state_to_coordinate(self,state):
if self.isLegalState(state) :
row = state / self.cols
col = state - self.cols * row
return (row+1,col+1)
else:
return (None,None)
def setMap(self,map):
self.Map = map
self.Map.gridsize = self.Map.gridsize * self.Map.grids
self.rows = int(self.Map.height / self.Map.grids) + 1
self.cols = int(self.Map.width / self.Map.grids) + 1
self.stateNumber = self.cols * self.rows
print(self.Map.height,self.Map.width)
print(self.rows,self.cols)
print(self.stateNumber)
print(self.Map.turtlebotPosition)
print(self.getStateNum(self.Map.getCell(self.Map.turtlebotPosition)))
def getPosition(self):
if self.Map == None :
return None
else:
return self.getStateNum(self.Map.getCell(self.Map.turtlebotPosition))
def doAction(self,action):
if action == 0 :
x = 0
y = 1
elif action == 1 :
x = 0
y = -1
elif action == 2 :
x = -1
y = 0
elif action == 3 :
x = 1
y = 0
else:
x = y = 0
return x , y
    # Convert the action sent by the robot into the next goal position on the map, then send it to move_base
    # Also receive the returned data (reward, current position, etc.), score the final reward with the reward scheme, and return it to the robot
def doSimulation(self,action):
x , y = self.doAction(action)
print("action ",self.actionmap[action])
state = self.Map.sendGoal(x,y)
if state != None :
state.Print()
# time.sleep(1)
state1 = self.getStateNum(state.curCell)
state2 = self.getStateNum(state.realCell)
realstate = self.getStateNum(state.realCell)
targetCell = self.getCellFromStateNum(self.Target)
diff1 = math.fabs(targetCell.X-state.curCell.X) + math.fabs(targetCell.Y-state.curCell.Y)
diff2 = math.fabs(targetCell.X-state.realCell.X) + math.fabs(targetCell.Y-state.realCell.Y)
addreward = 0
extraReward = 0.0
if diff2 >= diff1 :
extraReward += 10
        # No movement: apply a penalty, since an obstacle was probably encountered
if state1 == realstate :
extraReward += 10
        # The closer to the target, the higher the reward
if math.fabs( targetCell.X - state.curCell.X ) > math.fabs(targetCell.X - state.realCell.X) :
addreward += 20
if math.fabs(targetCell.X - state.realCell.X) <= 2 :
addreward += 11
self.changeRewardOnPath(40)
elif math.fabs(targetCell.X - state.realCell.X) <= 3 :
addreward += 9
self.changeRewardOnPath(30)
elif math.fabs(targetCell.X - state.realCell.X) <= 4 :
addreward += 7
self.changeRewardOnPath(20)
elif math.fabs(targetCell.X - state.realCell.X) <= 5 :
addreward += 5
self.changeRewardOnPath(15)
elif math.fabs(targetCell.X - state.realCell.X) <= 6 :
addreward += 5
self.changeRewardOnPath(10)
if math.fabs( targetCell.X - state.curCell.X ) < math.fabs(targetCell.X - state.realCell.X) :
addreward -= 50
if math.fabs( targetCell.Y - state.curCell.Y ) > math.fabs(targetCell.Y - state.realCell.Y) :
addreward += 20
if math.fabs(targetCell.Y - state.realCell.Y) <= 1 :
addreward += 5
if math.fabs( targetCell.Y - state.curCell.Y ) < math.fabs(targetCell.Y - state.realCell.Y) :
            addreward -= 40
state.reward -= extraReward
state.reward += addreward
print("a reward ",state.reward)
self.path.append([state1,action,state2,state.reward,state.c])
flag = self.checkGoal(state2)
if flag :
for i in range(len(self.path)):
self.path[i][3] += 10
if flag :
return realstate , False , self.path , state.reward , action
else:
return realstate , True , self.path , state.reward , action
def changeRewardOnPath(self,reward,rate=0.5,axis=0):
for i in range(len(self.path)-1,-1,-1):
cell1 = self.getCellFromStateNum(self.path[i][0])
cell2 = self.getCellFromStateNum(self.path[i][2])
targetCell = self.getCellFromStateNum(self.Target)
if math.fabs( targetCell.X - cell1.X ) > math.fabs(targetCell.X - cell2.X) :
self.path[i][3] += reward
if reward > 5 :
reward = reward * rate
else:
reward = 5
def checkGoal(self,state):
if state == self.Target :
print("get target ",self.getCellFromStateNum(self.Target))
return True
return False
def clearPath(self):
self.path = []
    # Get the state number from a cell on the map
def getStateNum(self,cell):
num = cell.Y * self.cols + cell.X
return num
    # Get the map cell from a state number
def getCellFromStateNum(self,num):
y = num / self.cols
x = num - self.cols * y
cell = Cell(x,y)
return cell
def getPositionStateNum(self
|
isTarget
|
identifier_name
|
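The world.py rows above and below build a dense state numbering in init() that skips blocked ('b') cells, and convert between flat map indices and states in getStateNum()/getCellFromStateNum(). The same bookkeeping, sketched in TypeScript with a made-up grid (illustrative only, not the project's code):
// Dense state numbering over a grid that skips blocked cells ('b'),
// mirroring init()/getStateNum()/getCellFromStateNum() in world.py.
function buildStateMaps(grid: string[][]): {mapToState: Map<number, number>; stateToMap: Map<number, number>} {
  const mapToState = new Map<number, number>();
  const stateToMap = new Map<number, number>();
  let state = 0;
  grid.forEach((row, i) => row.forEach((cell, j) => {
    const mapIndex = i * row.length + j;   // flat, row-major map index
    if (cell !== 'b') {                    // only free cells receive a state number
      mapToState.set(mapIndex, state);
      stateToMap.set(state, mapIndex);
      state++;
    }
  }));
  return {mapToState, stateToMap};
}
// Hypothetical grid: '.' free, 'b' blocked, 'g' goal.
const {mapToState} = buildStateMaps([['.', 'b'], ['.', 'g']]);
// mapToState: 0 -> 0, 2 -> 1, 3 -> 2; the blocked cell at map index 1 gets no state.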
world.py
|
]
self.legalstate = filter(lambda x:not ( x in self.traps or x in self.targets or x in self.walls ) , [ i for i in range(self.rows * self.cols)])
# print(self.legalstate)
self.actionmap = {
0:'up',
1:'down',
2:'left',
3:'right'
}
self.targetReward = 1.0
self.trapReward = -1.0
self.wallReward = -0.5
self.norReward = -0.1
self.map_to_state , self.state_to_map = self.init()
self.stateNumber = len(self.map_to_state)
self.legalstate = map(lambda x:self.map_to_state[x],self.legalstate)
# print(self.legalstate)
def init(self):
map_to_state = {}
state_to_map = {}
mapindex = 0
stateindex = 0
for i in range(self.rows) :
for j in range(self.cols) :
if self.map[i][j] != 'b' :
map_to_state[mapindex] = stateindex
state_to_map[stateindex] = mapindex
stateindex += 1
mapindex += 1
return map_to_state , state_to_map
print(len(state_to_map))
print(map_to_state)
print(state_to_map)
def simulation(self,state,action):
if state not in self.state_to_map :
print("exception: illegal state")
exit(0)
m_state = self.state_to_map[state]
nextstate = self.__execute(m_state,action)
c = 0
r = 0.0
if self.isLegalState(nextstate):
c , r = self.getReward(nextstate)
else:
r = self.norReward
c = 0
nextstate = m_state
if nextstate in self.walls :
nextstate = m_state
return [state,action,self.map_to_state[nextstate],r,c]
def __execute(self,state,action):
if action == 0 :
nextstate = state - self.rows
elif action == 1 :
nextstate = state + self.rows
elif action == 2 :
nextstate = state - 1
elif action == 3 :
nextstate = state + 1
else:
nextstate = state
return nextstate
def isTarget(self,state):
if state < 0 or state >= self.rows * self.cols:
return False
state = self.state_to_map[state]
row = state / self.cols
col = state - row * self.cols
if self.map[row][col] == 'g':
return True
return False
def getReward(self,state):
if state in self.targets :
return (-1,self.targetReward)
elif state in self.traps :
return (-1,self.trapReward)
elif state in self.walls :
return (0,self.wallReward)
else:
return (0,self.norReward)
def isLegalState(self,state):
if state >= 0 and state <= self.rows * self.cols - 1 :
return True
else:
return False
def state_to_coordinate(self,state):
if self.isLegalState(state) :
row = state / self.cols
col = state - self.cols * row
return (row+1,col+1)
else:
return (None,None)
def setMap(self,map):
self.Map = map
self.Map.gridsize = self.Map.gridsize * self.Map.grids
self.rows = int(self.Map.height / self.Map.grids) + 1
self.cols = int(self.Map.width / self.Map.grids) + 1
self.stateNumber = self.cols * self.rows
print(self.Map.height,self.Map.width)
print(self.rows,self.cols)
print(self.stateNumber)
print(self.Map.turtlebotPosition)
print(self.getStateNum(self.Map.getCell(self.Map.turtlebotPosition)))
def getPosition(self):
if self.Map == None :
return None
else:
return self.getStateNum(self.Map.getCell(self.Map.turtlebotPosition))
def doAction(self,action):
if action == 0 :
x = 0
y = 1
elif action == 1 :
x = 0
y = -1
elif action == 2 :
x = -1
y = 0
elif action == 3 :
x = 1
y = 0
else:
x = y = 0
return x , y
    # Convert the action sent by the robot into the next goal position on the map, then send it to move_base
    # Also receive the returned data (reward, current position, etc.), score the final reward with the reward scheme, and return it to the robot
def doSimulation(self,action):
x , y = self.doAction(action)
print("action ",self.actionmap[action])
state = self.Map.sendGoal(x,y)
if state != None :
state.Print()
# time.sleep(1)
state1 = self.getStateNum(state.curCell)
state2 = self.getStateNum(state.realCell)
realstate = self.getStateNum(state.realCell)
targetCell = self.getCellFromStateNum(self.Target)
diff1 = math.fabs(targetCell.X-state.curCell.X) + math.fabs(targetCell.Y-state.curCell.Y)
diff2 = math.fabs(targetCell.X-state.realCell.X) + math.fabs(targetCell.Y-state.realCell.Y)
addreward = 0
extraReward = 0.0
if diff2 >= diff1 :
extraReward += 10
        # No movement: apply a penalty, since an obstacle was probably encountered
if state1 == realstate :
extraReward += 10
        # The closer to the target, the higher the reward
if math.fabs( targetCell.X - state.curCell.X ) > math.fabs(targetCell.X - state.realCell.X) :
addreward += 20
if math.fabs(targetCell.X - state.realCell.X) <= 2 :
addreward += 11
self.changeRewardOnPath(40)
elif math.fabs(targetCell.X - state.realCell.X) <= 3 :
addreward += 9
self.changeRewardOnPath(30)
elif math.fabs(targetCell.X - state.realCell.X) <= 4 :
addreward += 7
self.changeRewardOnPath(20)
elif math.fabs(targetCell.X - state.realCell.X) <= 5 :
addreward += 5
self.changeRewardOnPath(15)
elif math.fabs(targetCell.X - state.realCell.X) <= 6 :
addreward += 5
self.changeRewardOnPath(10)
if math.fabs( targetCell.X - state.curCell.X ) < math.fabs(targetCell.X - state.realCell.X) :
addrewar
|
ll.Y ) > math.fabs(targetCell.Y - state.realCell.Y) :
addreward += 20
if math.fabs(targetCell.Y - state.realCell.Y) <= 1 :
addreward += 5
if math.fabs( targetCell.Y - state.curCell.Y ) < math.fabs(targetCell.Y - state.realCell.Y) :
            addreward -= 40
state.reward -= extraReward
state.reward += addreward
print("a reward ",state.reward)
self.path.append([state1,action,state2,state.reward,state.c])
flag = self.checkGoal(state2)
if flag :
for i in range(len(self.path)):
self.path[i][3] += 10
if flag :
return realstate , False , self.path , state.reward , action
else:
return realstate , True , self.path , state.reward , action
def changeRewardOnPath(self,reward,rate=0.5,axis=0):
for i in range(len(self.path)-1,-1,-1):
cell1 = self.getCellFromStateNum(self.path[i][0])
cell2 = self.getCellFromStateNum(self.path[i][2])
targetCell = self.getCellFromStateNum(self.Target)
if math.fabs( targetCell.X - cell1.X ) > math.fabs(targetCell.X - cell2.X) :
self.path[i][3] += reward
if reward > 5 :
reward = reward * rate
else:
reward = 5
def checkGoal(self,state):
if state == self.Target :
print("get target ",self.getCellFromStateNum(self.Target))
return True
return False
def clearPath(self):
self.path = []
    # Get the state number from a cell on the map
def getStateNum(self,cell):
num = cell.Y * self.cols + cell.X
return num
    # Get the map cell from a state number
def getCellFromStateNum(self,num):
y = num / self.cols
x = num - self.cols * y
cell = Cell(x,y)
return cell
def getPositionState
|
d -= 50
if math.fabs( targetCell.Y - state.curCe
|
conditional_block
|
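doSimulation() above shapes the returned reward around the Manhattan distance to the target cell: moves that shrink |dx| + |dy| earn bonuses, moves that grow it are penalized, and staying put is treated as a probable collision. A stripped-down sketch of that idea (the constants and per-axis tiers here are illustrative, not the exact values used above):
// Simplified sketch of the Manhattan-distance reward shaping in doSimulation().
interface Cell { x: number; y: number; }

function shapeReward(base: number, prev: Cell, next: Cell, target: Cell): number {
  const dist = (p: Cell) => Math.abs(target.x - p.x) + Math.abs(target.y - p.y);
  let reward = base;
  if (dist(next) < dist(prev)) reward += 20;                // moved closer to the goal
  if (dist(next) > dist(prev)) reward -= 50;                // moved away from the goal
  if (next.x === prev.x && next.y === prev.y) reward -= 10; // stayed put: likely an obstacle
  return reward;
}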
world.py
|
.legalstate = filter(lambda x:not ( x in self.traps or x in self.targets or x in self.walls ) , [ i for i in range(self.rows * self.cols)])
# print(self.legalstate)
self.actionmap = {
0:'up',
1:'down',
2:'left',
3:'right'
}
self.targetReward = 1.0
self.trapReward = -1.0
self.wallReward = -0.5
self.norReward = -0.1
self.map_to_state , self.state_to_map = self.init()
self.stateNumber = len(self.map_to_state)
self.legalstate = map(lambda x:self.map_to_state[x],self.legalstate)
# print(self.legalstate)
def init(self):
map_to_state = {}
state_to_map = {}
mapindex = 0
stateindex = 0
for i in range(self.rows) :
for j in range(self.cols) :
if self.map[i][j] != 'b' :
map_to_state[mapindex] = stateindex
state_to_map[stateindex] = mapindex
stateindex += 1
mapindex += 1
return map_to_state , state_to_map
print(len(state_to_map))
print(map_to_state)
print(state_to_map)
def simulation(self,state,action):
if state not in self.state_to_map :
print("exception: illegal state")
exit(0)
m_state = self.state_to_map[state]
nextstate = self.__execute(m_state,action)
c = 0
r = 0.0
if self.isLegalState(nextstate):
c , r = self.getReward(nextstate)
else:
r = self.norReward
c = 0
nextstate = m_state
if nextstate in self.walls :
nextstate = m_state
return [state,action,self.map_to_state[nextstate],r,c]
def __execute(self,state,action):
if action == 0 :
nextstate = state - self.rows
elif action == 1 :
nextstate = state + self.rows
elif action == 2 :
nextstate = state - 1
elif action == 3 :
nextstate = state + 1
else:
nextstate = state
return nextstate
def isTarget(self,state):
if state < 0 or state >= self.rows * self.cols:
return False
state = self.state_to_map[state]
row = state / self.cols
col = state - row * self.cols
if self.map[row][col] == 'g':
return True
return False
def getReward(self,state):
if state in self.targets :
return (-1,self.targetReward)
elif state in self.traps :
return (-1,self.trapReward)
elif state in self.walls :
return (0,self.wallReward)
else:
return (0,self.norReward)
def isLegalState(self,state):
if state >= 0 and state <= self.rows * self.cols - 1 :
return True
else:
return False
def state_to_coordinate(self,state):
if self.isLegalState(state) :
row = state / self.cols
col = state - self.cols * row
return (row+1,col+1)
else:
return (None,None)
def setMap(self,map):
self.Map = map
self.Map.gridsize = self.Map.gridsize * self.Map.grids
self.rows = int(self.Map.height / self.Map.grids) + 1
self.cols = int(self.Map.width / self.Map.grids) + 1
self.stateNumber = self.cols * self.rows
print(self.Map.height,self.Map.width)
print(self.rows,self.cols)
print(self.stateNumber)
print(self.Map.turtlebotPosition)
print(self.getStateNum(self.Map.getCell(self.Map.turtlebotPosition)))
def getPosition(self):
if self.Map == None :
return None
else:
return self.getStateNum(self.Map.getCell(self.Map.turtlebotPosition))
def doAction(self,action):
if action == 0 :
x = 0
y = 1
elif action == 1 :
x = 0
y = -1
elif action == 2 :
x = -1
y = 0
elif action == 3 :
x = 1
y = 0
else:
x = y = 0
return x , y
    # Convert the action sent by the robot into the next goal position on the map, then send it to move_base
    # Also receive the returned data (reward, current position, etc.), score the final reward with the reward scheme, and return it to the robot
def doSimulation(self,action):
x , y = self.doAction(action)
print("action ",self.actionmap[action])
state = self.Map.sendGoal(x,y)
if state != None :
state.Print()
# time.sleep(1)
state1 = self.getStateNum(state.curCell)
state2 = self.getStateNum(state.realCell)
realstate = self.getStateNum(state.realCell)
targetCell = self.getCellFromStateNum(self.Target)
diff1 = math.fabs(targetCell.X-state.curCell.X) + math.fabs(targetCell.Y-state.curCell.Y)
diff2 = math.fabs(targetCell.X-state.realCell.X) + math.fabs(targetCell.Y-state.realCell.Y)
addreward = 0
extraReward = 0.0
if diff2 >= diff1 :
extraReward += 10
        # No movement: apply a penalty, since an obstacle was probably encountered
if state1 == realstate :
extraReward += 10
        # The closer to the target, the higher the reward
if math.fabs( targetCell.X - state.curCell.X ) > math.fabs(targetCell.X - state.realCell.X) :
addreward += 20
if math.fabs(targetCell.X - state.realCell.X) <= 2 :
addreward += 11
self.changeRewardOnPath(40)
elif math.fabs(targetCell.X - state.realCell.X) <= 3 :
addreward += 9
self.changeRewardOnPath(30)
elif math.fabs(targetCell.X - state.realCell.X) <= 4 :
addreward += 7
self.changeRewardOnPath(20)
elif math.fabs(targetCell.X - state.realCell.X) <= 5 :
addreward += 5
self.changeRewardOnPath(15)
elif math.fabs(targetCell.X - state.realCell.X) <= 6 :
addreward += 5
self.changeRewardOnPath(10)
if math.fabs( targetCell.X - state.curCell.X ) < math.fabs(targetCell.X - state.realCell.X) :
addreward -= 50
if math.fabs( targetCell.Y - state.curCell.Y ) > math.fabs(targetCell.Y - state.realCell.Y) :
addreward += 20
if math.fabs(targetCell.Y - state.realCell.Y) <= 1 :
addreward += 5
if math.fabs( targetCell.Y - state.curCell.Y ) < math.fabs(targetCell.Y - state.realCell.Y) :
            addreward -= 40
state.reward -= extraReward
state.reward += addreward
print("a reward ",state.reward)
self.path.append([state1,action,state2,state.reward,state.c])
flag = self.checkGoal(state2)
if flag :
for i in range(len(self.path)):
self.path[i][3] += 10
if flag :
return realstate , False , self.path , state.reward , action
else:
return realstate , True , self.path , state.reward , action
def changeRewardOnPath(self,reward,rate=0.5,axis=0):
for i in range(len(self.path)-1,-1,-1):
cell1 = self.getCellFromStateNum(self.path[i][0])
cell2 = self.getCellFromStateNum(self.path[i][2])
targetCell = self.getCellFromStateNum(self.Target)
if math.fabs( targetCell.X - cell1.X ) > math.fabs(targetCell.X - cell2.X) :
self.path[i][3] += reward
if reward > 5 :
reward = reward * rate
else:
reward = 5
def checkGoal(self,state):
if state == self.Target :
print("get target ",self.getCellFromStateNum(self.Target))
return True
return False
def clearPath(self):
self.path = []
    # Get the state number from a cell on the map
def getStateNum(self,cell):
num = cell.Y * self.cols + cell.X
return num
    # Get the map cell from a state number
def getCellFromStateNum(self,num):
y = num / self.cols
x = num - self.cols * y
cell = Cell(x,y)
ret
|
urn cell
def getPositionStateNum(self,pos):
|
identifier_body
|
|
sheetDocument.ts
|
arrays);
class SheetDocument {
pages: SheetPage[];
constructor (fields: Partial<SheetDocument>, {initialize = true} = {}) {
Object.assign(this, fields);
if (initialize)
this.updateTokenIndex();
}
get systems (): SheetSystem[] {
return [].concat(...this.pages.map(page => page.systems));
}
// DEPRECATED
get rows (): SheetSystem[] {
return this.systems;
}
get trackCount (): number{
return Math.max(...this.systems.map(system => system.staves.length), 0);
}
get pageSize (): {width: number, height: number} {
const page = this.pages && this.pages[0];
if (!page)
return null;
return {
width: parseUnitExp(page.width),
height: parseUnitExp(page.height),
};
}
updateTokenIndex () {
// remove null pages for broken document
this.pages = this.pages.filter(page => page);
this.pages.forEach((page, index) => page.systems.forEach(system => system.pageIndex = index));
let rowMeasureIndex = 1;
this.systems.forEach((system, index) => {
system.index = index;
system.width = system.tokens.concat(...system.staves.map(staff => staff.tokens))
.reduce((max, token) => Math.max(max, token.x), 0);
system.measureIndices = [];
system.staves = system.staves.filter(s => s);
system.staves.forEach((staff, t) => {
staff.measures.forEach((measure, i) => {
measure.index = rowMeasureIndex + i;
measure.class = {};
measure.tokens.forEach(token => {
token.system = index;
token.measure = measure.index;
token.endX = measure.noteRange.end;
});
measure.lineX = measure.lineX || 0;
if (i < staff.measures.length - 1)
staff.measures[i + 1].lineX = measure.noteRange.end;
if (t === 0)
system.measureIndices.push([measure.noteRange.end, measure.index]);
});
staff.markings = [];
staff.yRoundOffset = 0;
const line = staff.tokens.find(token => token.is("STAFF_LINE"));
if (line)
staff.yRoundOffset = line.y - line.ry;
});
rowMeasureIndex += Math.max(...system.staves.map(staff => staff.measures.length));
});
}
updateMatchedTokens (matchedIds: Set<string>) {
this.systems.forEach(system => {
system.staves.forEach(staff =>
staff.measures.forEach(measure => {
measure.matchedTokens = measure.tokens.filter(token => token.href && matchedIds.has(token.href));
if (!staff.yRoundOffset) {
const token = measure.matchedTokens[0];
if (token)
staff.yRoundOffset = token.y - token.ry;
}
}));
});
}
addMarking (systemIndex: number, staffIndex: number, data: Partial<SheetMarkingData>): SheetMarking {
const system = this.systems[systemIndex];
if (!system) {
console.warn("system index out of range:", systemIndex, this.systems.length);
return;
}
const staff = system.staves[staffIndex];
if (!staff) {
console.warn("staff index out of range:", staffIndex, system.staves.length);
return;
}
const marking = new SheetMarking(data);
staff.markings.push(marking);
return marking;
}
removeMarking (id: string) {
this.systems.forEach(system => system.staves.forEach(staff =>
staff.markings = staff.markings.filter(marking => marking.id !== id)));
}
clearMarkings () {
this.systems.forEach(system => system.staves.forEach(staff => staff.markings = []));
}
toJSON (): object {
return {
__prototype: "SheetDocument",
pages: this.pages,
};
}
getLocationTable (): MeasureLocationTable {
const table = {};
this.systems.forEach(system => system.staves.forEach(staff => staff.measures.forEach(measure => {
measure.tokens.forEach(token => {
if (token.href) {
const location = token.href.match(/\d+/g);
if (location) {
const [line, column] = location.map(Number);
table[line] = table[line] || {};
table[line][column] = Number.isFinite(table[line][column]) ? Math.min(table[line][column], measure.index) : measure.index;
}
else
console.warn("invalid href:", token.href);
}
});
})));
return table;
}
lookupMeasureIndex (systemIndex: number, x: number): number {
const system = this.systems[systemIndex];
if (!system || !system.measureIndices)
return null;
const [_, index] = system.measureIndices.find(([end]) => x < end) || [null, null];
return index;
}
tokensInSystem (systemIndex: number): StaffToken[] {
const system = this.systems[systemIndex];
if (!system)
return null;
return system.staves.reduce((tokens, staff) => {
const translate = token => token.translate({x: staff.x, y: staff.y});
tokens.push(...staff.tokens.map(translate));
staff.measures.forEach(measure => tokens.push(...measure.tokens.map(translate)));
return tokens;
}, [...system.tokens]);
}
tokensInPage (pageIndex: number, {withPageTokens = false} = {}): StaffToken[] {
const page = this.pages[pageIndex];
if (!page)
return null;
return page.systems.reduce((tokens, system) => {
tokens.push(...this.tokensInSystem(system.index).map(token => token.translate({x: system.x, y: system.y})));
return tokens;
}, withPageTokens ? [...page.tokens] : []);
}
fitPageViewbox ({margin = 5, verticalCropOnly = false, pageTokens = false} = {}) {
if (!this.pages || !this.pages.length)
return;
const svgScale = this.pageSize.width / this.pages[0].viewBox.width;
this.pages.forEach((page, i) => {
const rects = page.systems.filter(system => Number.isFinite(system.x + system.width + system.y + system.top + system.bottom))
.map(system => [system.x, system.x + system.width, system.y + system.top, system.y + system.bottom ]);
const tokens = this.tokensInPage(i, {withPageTokens: pageTokens}) || [];
const tokenXs = tokens.map(token => token.x).filter(Number.isFinite);
const tokenYs = tokens.map(token => token.y).filter(Number.isFinite);
//console.debug("tokens:", i, tokens, tokenXs, tokenYs);
if (!rects.length)
return;
|
const bottom = Math.max(...rects.map(rect => rect[3]), ...tokenYs);
const x = verticalCropOnly ? page.viewBox.x : left - margin;
const y = (verticalCropOnly && i === 0) ? page.viewBox.y : top - margin;
const width = verticalCropOnly ? page.viewBox.width : right - left + margin * 2;
const height = (verticalCropOnly && i === 0) ? bottom + margin - y : bottom - top + margin * 2;
page.viewBox = {x, y, width, height};
page.width = (page.viewBox.width * svgScale).toString();
page.height = (page.viewBox.height * svgScale).toString();
});
}
getTokensOf (symbol: string): StaffToken[] {
return this.systems.reduce((tokens, system) => {
system.staves.forEach(staff => staff.measures.forEach(measure =>
tokens.push(...measure.tokens.filter(token => token.is(symbol)))));
return tokens;
}, []);
}
getNoteHeads (): StaffToken[] {
return this.getTokensOf("NOTEHEAD");
}
getNotes (): StaffToken[] {
return this.getTokensOf("NOTE");
}
getTokenMap (): Map<string, StaffToken> {
return this.systems.reduce((tokenMap, system) => {
system.staves.forEach(staff => staff.measures.forEach(measure => measure.tokens
.filter(token => token.href)
.forEach(token => tokenMap.set(token.href, token))));
return tokenMap;
}, new Map<string, StaffToken>());
}
findTokensAround (token: StaffToken, indices: number[]): StaffToken[] {
const system = this.systems[token.system];
if (system) {
const tokens = [
...system.tokens,
...cc(system.staves.map(staff => [
...staff.tokens,
...cc(staff.measures.map(measure => measure.tokens)),
])),
];
return tokens.filter(token => indices.includes(token.index));
}
return null;
}
findTokenAround (token: StaffToken, index: number
|
const left = Math.min(...rects.map(rect => rect[0]), ...tokenXs);
const right = Math.max(...rects.map(rect => rect[1]), ...tokenXs);
const top = Math.min(...rects.map(rect => rect[2]), ...tokenYs);
|
random_line_split
|
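fitPageViewbox() in the sheetDocument.ts rows shrinks each page's viewBox to the bounding box of its systems and tokens plus a margin, optionally cropping only vertically. The core rectangle arithmetic, pulled out as a sketch with simplified shapes (not the real SheetPage types):
interface Rect { left: number; right: number; top: number; bottom: number; }

// Fit a viewBox to a set of rectangles plus a uniform margin,
// mirroring the left/right/top/bottom reduction in fitPageViewbox().
function fitViewBox(rects: Rect[], margin = 5): {x: number; y: number; width: number; height: number} | null {
  if (!rects.length) return null;
  const left   = Math.min(...rects.map(r => r.left));
  const right  = Math.max(...rects.map(r => r.right));
  const top    = Math.min(...rects.map(r => r.top));
  const bottom = Math.max(...rects.map(r => r.bottom));
  return {x: left - margin, y: top - margin, width: right - left + margin * 2, height: bottom - top + margin * 2};
}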
sheetDocument.ts
|
(...arrays);
class SheetDocument {
pages: SheetPage[];
constructor (fields: Partial<SheetDocument>, {initialize = true} = {}) {
Object.assign(this, fields);
if (initialize)
this.updateTokenIndex();
}
get systems (): SheetSystem[] {
return [].concat(...this.pages.map(page => page.systems));
}
// DEPRECATED
get rows (): SheetSystem[] {
return this.systems;
}
get
|
(): number{
return Math.max(...this.systems.map(system => system.staves.length), 0);
}
get pageSize (): {width: number, height: number} {
const page = this.pages && this.pages[0];
if (!page)
return null;
return {
width: parseUnitExp(page.width),
height: parseUnitExp(page.height),
};
}
updateTokenIndex () {
// remove null pages for broken document
this.pages = this.pages.filter(page => page);
this.pages.forEach((page, index) => page.systems.forEach(system => system.pageIndex = index));
let rowMeasureIndex = 1;
this.systems.forEach((system, index) => {
system.index = index;
system.width = system.tokens.concat(...system.staves.map(staff => staff.tokens))
.reduce((max, token) => Math.max(max, token.x), 0);
system.measureIndices = [];
system.staves = system.staves.filter(s => s);
system.staves.forEach((staff, t) => {
staff.measures.forEach((measure, i) => {
measure.index = rowMeasureIndex + i;
measure.class = {};
measure.tokens.forEach(token => {
token.system = index;
token.measure = measure.index;
token.endX = measure.noteRange.end;
});
measure.lineX = measure.lineX || 0;
if (i < staff.measures.length - 1)
staff.measures[i + 1].lineX = measure.noteRange.end;
if (t === 0)
system.measureIndices.push([measure.noteRange.end, measure.index]);
});
staff.markings = [];
staff.yRoundOffset = 0;
const line = staff.tokens.find(token => token.is("STAFF_LINE"));
if (line)
staff.yRoundOffset = line.y - line.ry;
});
rowMeasureIndex += Math.max(...system.staves.map(staff => staff.measures.length));
});
}
updateMatchedTokens (matchedIds: Set<string>) {
this.systems.forEach(system => {
system.staves.forEach(staff =>
staff.measures.forEach(measure => {
measure.matchedTokens = measure.tokens.filter(token => token.href && matchedIds.has(token.href));
if (!staff.yRoundOffset) {
const token = measure.matchedTokens[0];
if (token)
staff.yRoundOffset = token.y - token.ry;
}
}));
});
}
addMarking (systemIndex: number, staffIndex: number, data: Partial<SheetMarkingData>): SheetMarking {
const system = this.systems[systemIndex];
if (!system) {
console.warn("system index out of range:", systemIndex, this.systems.length);
return;
}
const staff = system.staves[staffIndex];
if (!staff) {
console.warn("staff index out of range:", staffIndex, system.staves.length);
return;
}
const marking = new SheetMarking(data);
staff.markings.push(marking);
return marking;
}
removeMarking (id: string) {
this.systems.forEach(system => system.staves.forEach(staff =>
staff.markings = staff.markings.filter(marking => marking.id !== id)));
}
clearMarkings () {
this.systems.forEach(system => system.staves.forEach(staff => staff.markings = []));
}
toJSON (): object {
return {
__prototype: "SheetDocument",
pages: this.pages,
};
}
getLocationTable (): MeasureLocationTable {
const table = {};
this.systems.forEach(system => system.staves.forEach(staff => staff.measures.forEach(measure => {
measure.tokens.forEach(token => {
if (token.href) {
const location = token.href.match(/\d+/g);
if (location) {
const [line, column] = location.map(Number);
table[line] = table[line] || {};
table[line][column] = Number.isFinite(table[line][column]) ? Math.min(table[line][column], measure.index) : measure.index;
}
else
console.warn("invalid href:", token.href);
}
});
})));
return table;
}
lookupMeasureIndex (systemIndex: number, x: number): number {
const system = this.systems[systemIndex];
if (!system || !system.measureIndices)
return null;
const [_, index] = system.measureIndices.find(([end]) => x < end) || [null, null];
return index;
}
tokensInSystem (systemIndex: number): StaffToken[] {
const system = this.systems[systemIndex];
if (!system)
return null;
return system.staves.reduce((tokens, staff) => {
const translate = token => token.translate({x: staff.x, y: staff.y});
tokens.push(...staff.tokens.map(translate));
staff.measures.forEach(measure => tokens.push(...measure.tokens.map(translate)));
return tokens;
}, [...system.tokens]);
}
tokensInPage (pageIndex: number, {withPageTokens = false} = {}): StaffToken[] {
const page = this.pages[pageIndex];
if (!page)
return null;
return page.systems.reduce((tokens, system) => {
tokens.push(...this.tokensInSystem(system.index).map(token => token.translate({x: system.x, y: system.y})));
return tokens;
}, withPageTokens ? [...page.tokens] : []);
}
fitPageViewbox ({margin = 5, verticalCropOnly = false, pageTokens = false} = {}) {
if (!this.pages || !this.pages.length)
return;
const svgScale = this.pageSize.width / this.pages[0].viewBox.width;
this.pages.forEach((page, i) => {
const rects = page.systems.filter(system => Number.isFinite(system.x + system.width + system.y + system.top + system.bottom))
.map(system => [system.x, system.x + system.width, system.y + system.top, system.y + system.bottom ]);
const tokens = this.tokensInPage(i, {withPageTokens: pageTokens}) || [];
const tokenXs = tokens.map(token => token.x).filter(Number.isFinite);
const tokenYs = tokens.map(token => token.y).filter(Number.isFinite);
//console.debug("tokens:", i, tokens, tokenXs, tokenYs);
if (!rects.length)
return;
const left = Math.min(...rects.map(rect => rect[0]), ...tokenXs);
const right = Math.max(...rects.map(rect => rect[1]), ...tokenXs);
const top = Math.min(...rects.map(rect => rect[2]), ...tokenYs);
const bottom = Math.max(...rects.map(rect => rect[3]), ...tokenYs);
const x = verticalCropOnly ? page.viewBox.x : left - margin;
const y = (verticalCropOnly && i === 0) ? page.viewBox.y : top - margin;
const width = verticalCropOnly ? page.viewBox.width : right - left + margin * 2;
const height = (verticalCropOnly && i === 0) ? bottom + margin - y : bottom - top + margin * 2;
page.viewBox = {x, y, width, height};
page.width = (page.viewBox.width * svgScale).toString();
page.height = (page.viewBox.height * svgScale).toString();
});
}
getTokensOf (symbol: string): StaffToken[] {
return this.systems.reduce((tokens, system) => {
system.staves.forEach(staff => staff.measures.forEach(measure =>
tokens.push(...measure.tokens.filter(token => token.is(symbol)))));
return tokens;
}, []);
}
getNoteHeads (): StaffToken[] {
return this.getTokensOf("NOTEHEAD");
}
getNotes (): StaffToken[] {
return this.getTokensOf("NOTE");
}
getTokenMap (): Map<string, StaffToken> {
return this.systems.reduce((tokenMap, system) => {
system.staves.forEach(staff => staff.measures.forEach(measure => measure.tokens
.filter(token => token.href)
.forEach(token => tokenMap.set(token.href, token))));
return tokenMap;
}, new Map<string, StaffToken>());
}
findTokensAround (token: StaffToken, indices: number[]): StaffToken[] {
const system = this.systems[token.system];
if (system) {
const tokens = [
...system.tokens,
...cc(system.staves.map(staff => [
...staff.tokens,
...cc(staff.measures.map(measure => measure.tokens)),
])),
];
return tokens.filter(token => indices.includes(token.index));
}
return null;
}
findTokenAround (token: StaffToken, index
|
trackCount
|
identifier_name
|
sw_06_cv_functions.py
|
eresis thresholding using some bitwise magic.
"""
print("BEFORE HYSTERISIS THRESHOLDING:", image)
print("gradients:", image_gradients)
largest_gradient_value = np.max(image_gradients)
while largest_gradient_value < max_val:
print("Largest gradient value:", largest_gradient_value)
warnings.warn(UserWarning("Image has no edge gradients above upper threshold, increasing all gradients values!"))
# return np.zeros_like(image)
image_gradients *= 1.5
largest_gradient_value = np.max(image_gradients)
# print("Largest gradient value:", largest_gradient_value)
# the set of all 'strong' indices.
strong_indices = indices_where(image_gradients >= max_val)
off_indices = indices_where(image_gradients < min_val)
weak_indices = indices_where((min_val <= image_gradients) & (image_gradients < max_val))
image_height = image.shape[0]
image_width = image.shape[1]
# get the neighbours of all strong edges.
# convert their neighbours with weak edges to strong edges.
to_explore = np.zeros_like(image_gradients, dtype=bool)
to_explore[index_with(strong_indices)] = True
explored = np.zeros_like(image_gradients, dtype=bool)
strong = np.zeros_like(image_gradients, dtype=bool)
strong[index_with(strong_indices)] = True
# print("strong:", strong)
weak = np.zeros_like(image_gradients, dtype=bool)
weak[index_with(weak_indices)] = True
unexplored_indices = aggregate(np.nonzero(to_explore))
# print("unexplored (initial):", [str(v) for v in unexplored])
# print("weak indices (initial):", [str(v) for v in weak_indices])
# print("off indices (initial):", [str(v) for v in off_indices])
already_explored = np.zeros_like(to_explore)
while len(unexplored_indices) > 0:
# print("exploring indices ", [str(v) for v in indices])
# print(indices)
neighbours = neighbourhood(unexplored_indices, image_width, image_height)
is_neighbour = np.zeros_like(weak)
is_neighbour[index_with(neighbours)] = True
is_weak_neighbour = is_neighbour & weak
weak_neighbours = aggregate(np.nonzero(is_weak_neighbour))
# weak_neighbours = common_rows_between(neighbours, weak_indices)
# print("The neighbours of (", ",".join(str(pixel) for pixel in indices), ") are ", neighbours)
# print("weak neighbours:", [str(v) for v in weak_neighbours])
strong[index_with(weak_neighbours)] = True
weak[index_with(weak_neighbours)] = False
# mark that we need to explore these:
already_explored[index_with(unexplored_indices)] = True
# explore the indices of the weak neighbours, if they haven't been explored already.
to_explore[index_with(weak_neighbours)] = True
# do not re-explore already explored indices.
to_explore &= ~already_explored
unexplored_indices = aggregate(np.nonzero(to_explore))
out = np.zeros_like(image_gradients)
out[~strong] = 0
out[strong] = 255
print("AFTER HYSTERISIS THRESHOLDING:", out)
return out
def aggregate(list_of_indices):
|
def indices_where(condition):
return np.concatenate(np.dstack(np.where(condition)))
def index_with(list_of_indices):
return list_of_indices[:, 0], list_of_indices[:, 1]
def neighbourhood(index, image_width, image_height):
"""Returns the coordinates of the neighbours of a given coordinate or list of coordinates.
Arguments:
index {np.ndarray} -- either a list of coordinates (as an ndarray) or a coordinate itself, in the form (i, j)
    NOTE: neighbours that fall outside the image (negative indices, or at/above image_height / image_width) are dropped.
Returns:
np.ndarray -- ndarray of shape [?, 2], which contains the indices of the neighbouring pixels
"""
neighbourhoods = np.concatenate(np.dstack((np.indices([3,3]) - 1)))
if len(index.shape) == 2:
neighbourhoods = neighbourhoods[:, np.newaxis, :]
neighbours_and_itself = index + neighbourhoods
keep = np.ones(9, dtype=bool)
keep[4] = False # drop the point itself, but keep the neighbours.
neighbours = neighbours_and_itself[keep]
if len(index.shape) == 2:
neighbours = np.stack(neighbours, axis=1)
mask = np.ones_like(neighbours, dtype=bool)
# remove all neighbours that have either a negative value in them
negative = np.where(neighbours < 0)
mask[negative] = False
# or a value equal to image_height in x
greater_than_image_height = np.where(neighbours[..., 0] >= image_height)
mask[greater_than_image_height] = False
    # or a column index at or above image_width
    greater_than_image_width = np.where(neighbours[..., 1] >= image_width)
mask[greater_than_image_width] = False
# or that correspond to an index in 'index'
tiled = np.expand_dims(index, 1)
tiled = np.tile(tiled, (1, neighbours.shape[1], 1))
equal_to_index = np.equal(neighbours, tiled)
equal_to_index = np.all(equal_to_index, axis=-1)
mask[equal_to_index] = False
mask = np.all(mask, axis=-1)
# print(mask)
# for i, (m, n) in enumerate(zip(mask, neighbours)):
# if len(index.shape) == 2:
# for keep, (i, j) in zip(m, n):
# print("point", i, j, "is good:", keep)
# else:
# keep = m
# i, j = n
# print("point", i, j, "is good:", keep)
neighbours = neighbours[mask]
# get rid of duplicates:
neighbours = np.unique(neighbours, axis=0)
return neighbours
# # print(image[row, col])
# min_x = max(i-1, 0)
# max_x = min(i+1, image_w-1)
# min_y = max(j-1, 0)
# max_y = min(j+1, image_h-1)
# indices = set(
# (x, y)
# for x in range(min_x, max_x + 1)
# for y in range(min_y, max_y + 1)
# )
# print(indices)
# indices.discard((i, j))
# return indices
# # return np.array(indices)
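# A compact, self-contained sketch of the broadcasting trick neighbourhood() relies on:
# add the 3x3 offset grid to each point, drop the centre and anything out of bounds
# (toy 4x4 image; assumed coordinates):
import numpy as np

points = np.array([[0, 0], [2, 3]])  # (i, j) coordinates in a 4x4 image
h, w = 4, 4

offsets = np.concatenate(np.dstack(np.indices([3, 3]) - 1))  # the 9 offsets in {-1, 0, 1}^2
offsets = offsets[np.any(offsets != 0, axis=1)]              # drop (0, 0), the point itself

candidates = (points[:, np.newaxis, :] + offsets).reshape(-1, 2)  # (num_points * 8, 2)
in_bounds = (candidates[:, 0] >= 0) & (candidates[:, 0] < h) & \
            (candidates[:, 1] >= 0) & (candidates[:, 1] < w)
print(np.unique(candidates[in_bounds], axis=0))
# (0,0) keeps 3 in-bounds neighbours; (2,3) keeps 5: (1,2), (1,3), (2,2), (3,2), (3,3).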
def common_rows_between(array_1, array_2):
"""TAKEN FROM https://stackoverflow.com/questions/8317022/get-intersecting-rows-across-two-2d-numpy-arrays
Arguments:
array_1 {np.ndarray} -- a 2d array
array_2 {np.ndarray} -- another 2d array
Returns:
np.ndarray -- a 2d array containing the common rows in both array_1 and array_2.
"""
nrows, ncols = array_1.shape
dtype={
'names': ['f{}'.format(i) for i in range(ncols)],
'formats': ncols * [array_1.dtype]
}
C = np.intersect1d(array_1.view(dtype), array_2.view(dtype))
# This last bit is optional if you're okay with "C" being a structured array...
C = C.view(array_1.dtype).reshape(-1, ncols)
return C
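# A tiny self-contained demo of the structured-view trick common_rows_between() uses to
# intersect the rows of two 2D integer arrays (toy data):
import numpy as np

a = np.array([[1, 2], [3, 4], [5, 6]])
b = np.array([[3, 4], [7, 8], [1, 2]])

dtype = {'names': ['f0', 'f1'], 'formats': [a.dtype, a.dtype]}
common = np.intersect1d(a.view(dtype), b.view(dtype))
common = common.view(a.dtype).reshape(-1, a.shape[1])
print(common)  # [[1 2]
               #  [3 4]]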
def non_maximum_suppression(image, image_gradients, gradient_directions):
"""Non-maximum suppression
To be honest, I'm very proud of this piece of code. No for-loops were needed.
Arguments:
image {np.ndarray} -- the image to perform non-maximum suppression on.
gradient_directions {np.ndarray} -- the gradient directions
"""
print("Before non-maximum suppression:", image)
# Get where to check depending on the "direction"
direction_offset_x = np.round(np.cos(gradient_directions)).astype(int)
direction_offset_y = np.round(np.sin(gradient_directions)).astype(int)
direction_offset = np.dstack((direction_offset_x, direction_offset_y))
# the (i, j) indices of all points in the image.
row, col = np.indices(image.shape)
# in order not to cause any indexing errors, we create a
# padded version of the image with the edge values duplicated.
# a pixel at (row, col) in the image is located at (row+1, col+1) in the padded image.
image_ = np.pad(image, 1, mode="edge")
row_, col_ = row + 1, col + 1
# get the image pixels before and after each pixel in the image.
pixel_middle = image[row, col]
pixel_forward = image_[row_ + direction_offset_x, col_ + direction_offset_y]
pixel_backward = image_[row_ - direction_offset_x, col_ -
|
return np.concatenate(np.dstack(list_of_indices))
|
identifier_body
|
sw_06_cv_functions.py
|
eresis thresholding using some bitwise magic.
"""
print("BEFORE HYSTERISIS THRESHOLDING:", image)
print("gradients:", image_gradients)
largest_gradient_value = np.max(image_gradients)
while largest_gradient_value < max_val:
print("Largest gradient value:", largest_gradient_value)
warnings.warn(UserWarning("Image has no edge gradients above upper threshold, increasing all gradients values!"))
# return np.zeros_like(image)
image_gradients *= 1.5
largest_gradient_value = np.max(image_gradients)
# print("Largest gradient value:", largest_gradient_value)
# the set of all 'strong' indices.
strong_indices = indices_where(image_gradients >= max_val)
off_indices = indices_where(image_gradients < min_val)
weak_indices = indices_where((min_val <= image_gradients) & (image_gradients < max_val))
image_height = image.shape[0]
image_width = image.shape[1]
# get the neighbours of all strong edges.
# convert their neighbours with weak edges to strong edges.
to_explore = np.zeros_like(image_gradients, dtype=bool)
to_explore[index_with(strong_indices)] = True
explored = np.zeros_like(image_gradients, dtype=bool)
strong = np.zeros_like(image_gradients, dtype=bool)
strong[index_with(strong_indices)] = True
# print("strong:", strong)
weak = np.zeros_like(image_gradients, dtype=bool)
weak[index_with(weak_indices)] = True
unexplored_indices = aggregate(np.nonzero(to_explore))
# print("unexplored (initial):", [str(v) for v in unexplored])
# print("weak indices (initial):", [str(v) for v in weak_indices])
# print("off indices (initial):", [str(v) for v in off_indices])
already_explored = np.zeros_like(to_explore)
while len(unexplored_indices) > 0:
# print("exploring indices ", [str(v) for v in indices])
# print(indices)
neighbours = neighbourhood(unexplored_indices, image_width, image_height)
is_neighbour = np.zeros_like(weak)
is_neighbour[index_with(neighbours)] = True
is_weak_neighbour = is_neighbour & weak
weak_neighbours = aggregate(np.nonzero(is_weak_neighbour))
# weak_neighbours = common_rows_between(neighbours, weak_indices)
# print("The neighbours of (", ",".join(str(pixel) for pixel in indices), ") are ", neighbours)
# print("weak neighbours:", [str(v) for v in weak_neighbours])
strong[index_with(weak_neighbours)] = True
weak[index_with(weak_neighbours)] = False
# mark that we need to explore these:
already_explored[index_with(unexplored_indices)] = True
# explore the indices of the weak neighbours, if they haven't been explored already.
to_explore[index_with(weak_neighbours)] = True
# do not re-explore already explored indices.
to_explore &= ~already_explored
unexplored_indices = aggregate(np.nonzero(to_explore))
out = np.zeros_like(image_gradients)
out[~strong] = 0
out[strong] = 255
print("AFTER HYSTERISIS THRESHOLDING:", out)
return out
def
|
(list_of_indices):
return np.concatenate(np.dstack(list_of_indices))
def indices_where(condition):
return np.concatenate(np.dstack(np.where(condition)))
def index_with(list_of_indices):
return list_of_indices[:, 0], list_of_indices[:, 1]
def neighbourhood(index, image_width, image_height):
"""Returns the coordinates of the neighbours of a given coordinate or list of coordinates.
Arguments:
index {np.ndarray} -- either a list of coordinates (as an ndarray) or a coordinate itself, in the form (i, j)
NOTE: neighbours that fall outside the image bounds (negative indices, rows >= image_height, or columns >= image_width), or that coincide with the input indices, are removed.
Returns:
np.ndarray -- ndarray of shape [?, 2], which contains the indices of the neighbouring pixels
"""
neighbourhoods = np.concatenate(np.dstack((np.indices([3,3]) - 1)))
if len(index.shape) == 2:
neighbourhoods = neighbourhoods[:, np.newaxis, :]
neighbours_and_itself = index + neighbourhoods
keep = np.ones(9, dtype=bool)
keep[4] = False # drop the point itself, but keep the neighbours.
neighbours = neighbours_and_itself[keep]
if len(index.shape) == 2:
neighbours = np.stack(neighbours, axis=1)
mask = np.ones_like(neighbours, dtype=bool)
# remove all neighbours that have either a negative value in them
negative = np.where(neighbours < 0)
mask[negative] = False
# or a row index >= image_height
greater_than_image_height = np.where(neighbours[..., 0] >= image_height)
mask[greater_than_image_height] = False
# or a column index >= image_width
greater_than_image_width = np.where(neighbours[..., 1] >= image_width)
mask[greater_than_image_width] = False
# or that correspond to an index in 'index'
tiled = np.expand_dims(index, 1)
tiled = np.tile(tiled, (1, neighbours.shape[1], 1))
equal_to_index = np.equal(neighbours, tiled)
equal_to_index = np.all(equal_to_index, axis=-1)
mask[equal_to_index] = False
mask = np.all(mask, axis=-1)
# print(mask)
# for i, (m, n) in enumerate(zip(mask, neighbours)):
# if len(index.shape) == 2:
# for keep, (i, j) in zip(m, n):
# print("point", i, j, "is good:", keep)
# else:
# keep = m
# i, j = n
# print("point", i, j, "is good:", keep)
neighbours = neighbours[mask]
# get rid of duplicates:
neighbours = np.unique(neighbours, axis=0)
return neighbours
# # print(image[row, col])
# min_x = max(i-1, 0)
# max_x = min(i+1, image_w-1)
# min_y = max(j-1, 0)
# max_y = min(j+1, image_h-1)
# indices = set(
# (x, y)
# for x in range(min_x, max_x + 1)
# for y in range(min_y, max_y + 1)
# )
# print(indices)
# indices.discard((i, j))
# return indices
# # return np.array(indices)
def common_rows_between(array_1, array_2):
"""TAKEN FROM https://stackoverflow.com/questions/8317022/get-intersecting-rows-across-two-2d-numpy-arrays
Arguments:
array_1 {np.ndarray} -- a 2d array
array_2 {np.ndarray} -- another 2d array
Returns:
np.ndarray -- a 2d array containing the common rows in both array_1 and array_2.
"""
nrows, ncols = array_1.shape
dtype={
'names': ['f{}'.format(i) for i in range(ncols)],
'formats': ncols * [array_1.dtype]
}
C = np.intersect1d(array_1.view(dtype), array_2.view(dtype))
# This last bit is optional if you're okay with "C" being a structured array...
C = C.view(array_1.dtype).reshape(-1, ncols)
return C
def non_maximum_suppression(image, image_gradients, gradient_directions):
"""Non-maximum suppression
To be honest, I'm very proud of this piece of code. No for-loops were needed.
Arguments:
image {np.ndarray} -- the image to perform non-maximum suppression on.
gradient_directions {np.ndarray} -- the gradient directions
"""
print("Before non-maximum suppression:", image)
# Get where to check depending on the "direction"
direction_offset_x = np.round(np.cos(gradient_directions)).astype(int)
direction_offset_y = np.round(np.sin(gradient_directions)).astype(int)
direction_offset = np.dstack((direction_offset_x, direction_offset_y))
# the (i, j) indices of all points in the image.
row, col = np.indices(image.shape)
# in order not to cause any indexing errors, we create a
# padded version of the image with the edge values duplicated.
# a pixel at (row, col) in the image is located at (row+1, col+1) in the padded image.
image_ = np.pad(image, 1, mode="edge")
row_, col_ = row + 1, col + 1
# get the image pixels before and after each pixel in the image.
pixel_middle = image[row, col]
pixel_forward = image_[row_ + direction_offset_x, col_ + direction_offset_y]
pixel_backward = image_[row_ - direction_offset_x, col_ - direction
|
aggregate
|
identifier_name
|
sw_06_cv_functions.py
|
# do not re-explore already explored indices.
to_explore &= ~already_explored
unexplored_indices = aggregate(np.nonzero(to_explore))
out = np.zeros_like(image_gradients)
out[~strong] = 0
out[strong] = 255
print("AFTER HYSTERISIS THRESHOLDING:", out)
return out
def aggregate(list_of_indices):
return np.concatenate(np.dstack(list_of_indices))
def indices_where(condition):
return np.concatenate(np.dstack(np.where(condition)))
def index_with(list_of_indices):
return list_of_indices[:, 0], list_of_indices[:, 1]
def neighbourhood(index, image_width, image_height):
"""Returns the coordinates of the neighbours of a given coordinate or list of coordinates.
Arguments:
index {np.ndarray} -- either a list of coordinates (as an ndarray) or a coordinate itself, in the form (i, j)
NOTE: neighbours that fall outside the image bounds (negative indices, rows >= image_height, or columns >= image_width), or that coincide with the input indices, are removed.
Returns:
np.ndarray -- ndarray of shape [?, 2], which contains the indices of the neighbouring pixels
"""
neighbourhoods = np.concatenate(np.dstack((np.indices([3,3]) - 1)))
if len(index.shape) == 2:
neighbourhoods = neighbourhoods[:, np.newaxis, :]
neighbours_and_itself = index + neighbourhoods
keep = np.ones(9, dtype=bool)
keep[4] = False # drop the point itself, but keep the neighbours.
neighbours = neighbours_and_itself[keep]
if len(index.shape) == 2:
neighbours = np.stack(neighbours, axis=1)
mask = np.ones_like(neighbours, dtype=bool)
# remove all neighbours that have either a negative value in them
negative = np.where(neighbours < 0)
mask[negative] = False
# or a row index >= image_height
greater_than_image_height = np.where(neighbours[..., 0] >= image_height)
mask[greater_than_image_height] = False
# or a column index >= image_width
greater_than_image_width = np.where(neighbours[..., 1] >= image_width)
mask[greater_than_image_width] = False
# or that correspond to an index in 'index'
tiled = np.expand_dims(index, 1)
tiled = np.tile(tiled, (1, neighbours.shape[1], 1))
equal_to_index = np.equal(neighbours, tiled)
equal_to_index = np.all(equal_to_index, axis=-1)
mask[equal_to_index] = False
mask = np.all(mask, axis=-1)
# print(mask)
# for i, (m, n) in enumerate(zip(mask, neighbours)):
# if len(index.shape) == 2:
# for keep, (i, j) in zip(m, n):
# print("point", i, j, "is good:", keep)
# else:
# keep = m
# i, j = n
# print("point", i, j, "is good:", keep)
neighbours = neighbours[mask]
# get rid of duplicates:
neighbours = np.unique(neighbours, axis=0)
return neighbours
# # print(image[row, col])
# min_x = max(i-1, 0)
# max_x = min(i+1, image_w-1)
# min_y = max(j-1, 0)
# max_y = min(j+1, image_h-1)
# indices = set(
# (x, y)
# for x in range(min_x, max_x + 1)
# for y in range(min_y, max_y + 1)
# )
# print(indices)
# indices.discard((i, j))
# return indices
# # return np.array(indices)
def common_rows_between(array_1, array_2):
"""TAKEN FROM https://stackoverflow.com/questions/8317022/get-intersecting-rows-across-two-2d-numpy-arrays
Arguments:
array_1 {np.ndarray} -- a 2d array
array_2 {np.ndarray} -- another 2d array
Returns:
np.ndarray -- a 2d array containing the common rows in both array_1 and array_2.
"""
nrows, ncols = array_1.shape
dtype={
'names': ['f{}'.format(i) for i in range(ncols)],
'formats': ncols * [array_1.dtype]
}
C = np.intersect1d(array_1.view(dtype), array_2.view(dtype))
# This last bit is optional if you're okay with "C" being a structured array...
C = C.view(array_1.dtype).reshape(-1, ncols)
return C
def non_maximum_suppression(image, image_gradients, gradient_directions):
"""Non-maximum suppression
To be honest, I'm very proud of this piece of code. No for-loops were needed.
Arguments:
image {np.ndarray} -- the image to perform non-maximum suppression on.
gradient_directions {np.ndarray} -- the gradient directions
"""
print("Before non-maximum suppression:", image)
# Get where to check depending on the "direction"
direction_offset_x = np.round(np.cos(gradient_directions)).astype(int)
direction_offset_y = np.round(np.sin(gradient_directions)).astype(int)
direction_offset = np.dstack((direction_offset_x, direction_offset_y))
# the (i, j) indices of all points in the image.
row, col = np.indices(image.shape)
# in order not to cause any indexing errors, we create a
# padded version of the image with the edge values duplicated.
# a pixel at (row, col) in the image is located at (row+1, col+1) in the padded image.
image_ = np.pad(image, 1, mode="edge")
row_, col_ = row + 1, col + 1
# get the image pixels before and after each pixel in the image.
pixel_middle = image[row, col]
pixel_forward = image_[row_ + direction_offset_x, col_ + direction_offset_y]
pixel_backward = image_[row_ - direction_offset_x, col_ - direction_offset_y]
higher_than_forward = pixel_middle > pixel_forward
higher_than_backward = pixel_middle > pixel_backward
is_local_maximum = higher_than_backward & higher_than_forward
out = np.copy(image)
out[~is_local_maximum] = 0
print("AFTER non-maximum suppression: ", out)
return out
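# A self-contained toy run of the same padded-lookup idea used above for non-maximum
# suppression: compare each pixel to its two neighbours along the (rounded) gradient
# direction. Data and directions here are assumed for illustration:
import numpy as np

mag = np.array([[1, 3, 2],
                [1, 5, 2],
                [1, 4, 2]], dtype=float)
theta = np.zeros_like(mag)  # gradient direction 0 everywhere (offsets (+1, 0) / (-1, 0))

di = np.round(np.cos(theta)).astype(int)
dj = np.round(np.sin(theta)).astype(int)

padded = np.pad(mag, 1, mode="edge")
r, c = np.indices(mag.shape)
forward = padded[r + 1 + di, c + 1 + dj]
backward = padded[r + 1 - di, c + 1 - dj]

keep = (mag > forward) & (mag > backward)
print(np.where(keep, mag, 0))  # only the ridge maximum (the 5) survives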
def snap_angles(angles):
"""Snaps a given set of angles to one of the horizontal, vertical, or one of the two diagonal orientations.
Arguments:
angles -- an array of angles in radians
"""
pi_over_four = np.pi / 4
return np.round(angles / pi_over_four) * pi_over_four
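# A quick check of snap_angles(): divide by pi/4, round, multiply back (assumed sample angles):
import numpy as np

angles = np.array([0.10, 0.60, 1.20, 2.00])  # radians
snapped = np.round(angles / (np.pi / 4)) * (np.pi / 4)
print(snapped / np.pi)  # [0.   0.25 0.5  0.75] -- every angle lands on a multiple of pi/4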
def image_derivatives(image):
""" Computes the Sobel X and Y operators for this image.
Loosely based on https://en.wikipedia.org/wiki/Sobel_operator
Arguments:
image {np.ndarray} -- the input image (a 2D array)
Returns:
(np.ndarray, np.ndarray) -- the image derivatives along x and y
"""
sobel_sign = np.array([[-1, 0, 1]])
sobel_mag = np.array([[1, 2, 1]])
temp1 = conv2d(image, sobel_sign)
image_dx = conv2d(temp1, sobel_mag.T)
temp2 = conv2d(image, sobel_mag)
image_dy = conv2d(temp2, -sobel_sign.T)
return image_dx, image_dy
# NOTE: the code below is unreachable (it follows the return above); it is kept as a
# reference implementation using the full 3x3 Sobel kernels, for comparison.
image_dx_1, image_dy_1 = image_dx, image_dy
# Slower alternative (from OpenCV docs):
sobel_x = np.array([
[-1, 0, 1],
[-2, 0, 2],
[-1, 0, 1],
])
image_dx = conv2d(image, sobel_x)
image_dy = conv2d(image, -sobel_x.T)
assert np.all(np.isclose(image_dy, image_dy_1))
assert np.all(np.isclose(image_dx, image_dx_1))
return image_dx, image_dy
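# A quick self-contained check of the separability claim behind image_derivatives(): the full
# 3x3 Sobel X kernel is the outer product of the smoothing column [1, 2, 1]^T and the
# derivative row [-1, 0, 1], which is why two 1D convolutions match one 2D convolution:
import numpy as np

sobel_sign = np.array([[-1, 0, 1]])  # derivative along x
sobel_mag = np.array([[1, 2, 1]])    # smoothing along y
sobel_x = np.array([[-1, 0, 1],
                    [-2, 0, 2],
                    [-1, 0, 1]])
print(np.array_equal(sobel_mag.T @ sobel_sign, sobel_x))  # True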
def conv2d(x, kernel, stride=1, padding="auto", padding_mode="constant"):
"""
TAKEN AND ADAPTED FROM https://stackoverflow.com/questions/54962004/implement-max-mean-poolingwith-stride-with-numpy
ALSO INSPIRED FROM https://cs231n.github.io/convolutional-networks/
2D convolution
Parameters:
x: input 2D array
kernel: 2D array, the convolution kernel (must be odd-sized)
stride: int, the stride of the window
padding: int or "auto", implicit padding on both sides of the input
padding_mode: str, the padding mode passed to np.pad
"""
# Padding
assert len(kernel.shape) == 2, "kernel should be 2d."
assert kernel.shape[0] % 2 == 1 and kernel.shape[1] % 2 == 1, "only odd-sized kernels are allowed"
kernel_size = kernel.shape[0]
if padding == "auto":
|
padding = np.array(kernel.shape) // 2
|
conditional_block
|
|
sw_06_cv_functions.py
|
ysteresis thresholding using some bitwise magic.
"""
print("BEFORE HYSTERISIS THRESHOLDING:", image)
print("gradients:", image_gradients)
largest_gradient_value = np.max(image_gradients)
while largest_gradient_value < max_val:
print("Largest gradient value:", largest_gradient_value)
warnings.warn(UserWarning("Image has no edge gradients above upper threshold, increasing all gradients values!"))
# return np.zeros_like(image)
image_gradients *= 1.5
largest_gradient_value = np.max(image_gradients)
# print("Largest gradient value:", largest_gradient_value)
# the set of all 'strong' indices.
strong_indices = indices_where(image_gradients >= max_val)
off_indices = indices_where(image_gradients < min_val)
weak_indices = indices_where((min_val <= image_gradients) & (image_gradients < max_val))
image_height = image.shape[0]
image_width = image.shape[1]
# get the neighbours of all strong edges.
# convert their neighbours with weak edges to strong edges.
to_explore = np.zeros_like(image_gradients, dtype=bool)
to_explore[index_with(strong_indices)] = True
explored = np.zeros_like(image_gradients, dtype=bool)
strong = np.zeros_like(image_gradients, dtype=bool)
strong[index_with(strong_indices)] = True
# print("strong:", strong)
weak = np.zeros_like(image_gradients, dtype=bool)
weak[index_with(weak_indices)] = True
unexplored_indices = aggregate(np.nonzero(to_explore))
# print("unexplored (initial):", [str(v) for v in unexplored])
# print("weak indices (initial):", [str(v) for v in weak_indices])
# print("off indices (initial):", [str(v) for v in off_indices])
already_explored = np.zeros_like(to_explore)
while len(unexplored_indices) > 0:
# print("exploring indices ", [str(v) for v in indices])
# print(indices)
neighbours = neighbourhood(unexplored_indices, image_width, image_height)
is_neighbour = np.zeros_like(weak)
is_neighbour[index_with(neighbours)] = True
is_weak_neighbour = is_neighbour & weak
weak_neighbours = aggregate(np.nonzero(is_weak_neighbour))
# weak_neighbours = common_rows_between(neighbours, weak_indices)
# print("The neighbours of (", ",".join(str(pixel) for pixel in indices), ") are ", neighbours)
# print("weak neighbours:", [str(v) for v in weak_neighbours])
strong[index_with(weak_neighbours)] = True
weak[index_with(weak_neighbours)] = False
# mark that we need to explore these:
already_explored[index_with(unexplored_indices)] = True
# explore the indices of the weak neighbours, if they haven't been explored already.
to_explore[index_with(weak_neighbours)] = True
# do not re-explore already explored indices.
to_explore &= ~already_explored
unexplored_indices = aggregate(np.nonzero(to_explore))
out = np.zeros_like(image_gradients)
out[~strong] = 0
out[strong] = 255
print("AFTER HYSTERISIS THRESHOLDING:", out)
return out
def aggregate(list_of_indices):
return np.concatenate(np.dstack(list_of_indices))
def indices_where(condition):
return np.concatenate(np.dstack(np.where(condition)))
def index_with(list_of_indices):
return list_of_indices[:, 0], list_of_indices[:, 1]
def neighbourhood(index, image_width, image_height):
"""Returns the coordinates of the neighbours of a given coordinate or list of coordinates.
Arguments:
index {np.ndarray} -- either a list of coordinates (as an ndarray) or a coordinate itself, in the form (i, j)
|
NOTE: neighbours that fall outside the image bounds (negative indices, rows >= image_height, or columns >= image_width), or that coincide with the input indices, are removed.
Returns:
np.ndarray -- ndarray of shape [?, 2], which contains the indices of the neighbouring pixels
"""
neighbourhoods = np.concatenate(np.dstack((np.indices([3,3]) - 1)))
if len(index.shape) == 2:
neighbourhoods = neighbourhoods[:, np.newaxis, :]
neighbours_and_itself = index + neighbourhoods
keep = np.ones(9, dtype=bool)
keep[4] = False # drop the point itself, but keep the neighbours.
neighbours = neighbours_and_itself[keep]
if len(index.shape) == 2:
neighbours = np.stack(neighbours, axis=1)
mask = np.ones_like(neighbours, dtype=bool)
# remove all neighbours that have either a negative value in them
negative = np.where(neighbours < 0)
mask[negative] = False
# or a row index >= image_height
greater_than_image_height = np.where(neighbours[..., 0] >= image_height)
mask[greater_than_image_height] = False
# or a column index >= image_width
greater_than_image_width = np.where(neighbours[..., 1] >= image_width)
mask[greater_than_image_width] = False
# or that correspond to an index in 'index'
tiled = np.expand_dims(index, 1)
tiled = np.tile(tiled, (1, neighbours.shape[1], 1))
equal_to_index = np.equal(neighbours, tiled)
equal_to_index = np.all(equal_to_index, axis=-1)
mask[equal_to_index] = False
mask = np.all(mask, axis=-1)
# print(mask)
# for i, (m, n) in enumerate(zip(mask, neighbours)):
# if len(index.shape) == 2:
# for keep, (i, j) in zip(m, n):
# print("point", i, j, "is good:", keep)
# else:
# keep = m
# i, j = n
# print("point", i, j, "is good:", keep)
neighbours = neighbours[mask]
# get rid of duplicates:
neighbours = np.unique(neighbours, axis=0)
return neighbours
# # print(image[row, col])
# min_x = max(i-1, 0)
# max_x = min(i+1, image_w-1)
# min_y = max(j-1, 0)
# max_y = min(j+1, image_h-1)
# indices = set(
# (x, y)
# for x in range(min_x, max_x + 1)
# for y in range(min_y, max_y + 1)
# )
# print(indices)
# indices.discard((i, j))
# return indices
# # return np.array(indices)
def common_rows_between(array_1, array_2):
"""TAKEN FROM https://stackoverflow.com/questions/8317022/get-intersecting-rows-across-two-2d-numpy-arrays
Arguments:
array_1 {np.ndarray} -- a 2d array
array_2 {np.ndarray} -- another 2d array
Returns:
np.ndarray -- a 2d array containing the common rows in both array_1 and array_2.
"""
nrows, ncols = array_1.shape
dtype={
'names': ['f{}'.format(i) for i in range(ncols)],
'formats': ncols * [array_1.dtype]
}
C = np.intersect1d(array_1.view(dtype), array_2.view(dtype))
# This last bit is optional if you're okay with "C" being a structured array...
C = C.view(array_1.dtype).reshape(-1, ncols)
return C
def non_maximum_suppression(image, image_gradients, gradient_directions):
"""Non-maximum suppression
To be honest, I'm very proud of this piece of code. No for-loops were needed.
Arguments:
image {np.ndarray} -- the image to perform non-maximum suppression on.
gradient_directions {np.ndarray} -- the gradient directions
"""
print("Before non-maximum suppression:", image)
# Get where to check depending on the "direction"
direction_offset_x = np.round(np.cos(gradient_directions)).astype(int)
direction_offset_y = np.round(np.sin(gradient_directions)).astype(int)
direction_offset = np.dstack((direction_offset_x, direction_offset_y))
# the (i, j) indices of all points in the image.
row, col = np.indices(image.shape)
# in order not to cause any indexing errors, we create a
# padded version of the image with the edge values duplicated.
# a pixel at (row, col) in the image is located at (row+1, col+1) in the padded image.
image_ = np.pad(image, 1, mode="edge")
row_, col_ = row + 1, col + 1
# get the image pixels before and after each pixel in the image.
pixel_middle = image[row, col]
pixel_forward = image_[row_ + direction_offset_x, col_ + direction_offset_y]
pixel_backward = image_[row_ - direction_offset_x, col_ - direction
|
random_line_split
|
|
apitest.go
|
query map[string]string
queryCollection map[string][]string
headers map[string]string
cookies map[string]string
basicAuth string
apiTest *APITest
}
type pair struct {
l string
r string
}
var DumpHttp Observe = func(res *http.Response, req *http.Request) {
requestDump, err := httputil.DumpRequest(req, true)
if err == nil {
fmt.Println("--> http request dump\n\n" + string(requestDump))
}
responseDump, err := httputil.DumpResponse(res, true)
if err == nil {
fmt.Println("<-- http response dump:\n\n" + string(responseDump))
}
}
// Observe is a builder method for setting the observer
func (r *Request) Observe(observer Observe) *Request {
r.apiTest.observer = observer
return r
}
// Method is a builder method for setting the http method of the request
func (r *Request) Method(method string) *Request {
r.method = method
return r
}
// URL is a builder method for setting the url of the request
func (r *Request) URL(url string) *Request {
r.url = url
return r
}
// Get is a convenience method for setting the request as http.MethodGet
func (r *Request) Get(url string) *Request {
r.method = http.MethodGet
r.url = url
return r
}
// Post is a convenience method for setting the request as http.MethodPost
func (r *Request) Post(url string) *Request {
r.method = http.MethodPost
r.url = url
return r
}
// Put is a convenience method for setting the request as http.MethodPut
func (r *Request) Put(url string) *Request {
r.method = http.MethodPut
r.url = url
return r
}
// Delete is a convenience method for setting the request as http.MethodDelete
func (r *Request) Delete(url string) *Request {
r.method = http.MethodDelete
r.url = url
return r
}
// Patch is a convenience method for setting the request as http.MethodPatch
func (r *Request) Patch(url string) *Request {
r.method = http.MethodPatch
r.url = url
return r
}
// Body is a builder method to set the request body
func (r *Request) Body(b string) *Request {
r.body = b
return r
}
// Query is a builder method to set the request query parameters.
// This can be used in combination with request.QueryCollection
func (r *Request) Query(q map[string]string) *Request {
r.query = q
return r
}
// QueryCollection is a builder method to set the request query parameters.
// This can be used in combination with request.Query
func (r *Request) QueryCollection(q map[string][]string) *Request {
r.queryCollection = q
return r
}
// Headers is a builder method to set the request headers
func (r *Request) Headers(h map[string]string) *Request {
r.headers = h
return r
}
// Cookies is a builder method to set the request cookies
func (r *Request) Cookies(c map[string]string) *Request {
r.cookies = c
return r
}
// BasicAuth is a builder method to set basic auth on the request.
// The credentials should be provided delimited by a colon, e.g. "username:password"
func (r *Request) BasicAuth(auth string) *Request {
r.basicAuth = auth
return r
}
// Expect marks the request spec as complete and following code will define the expected response
func (r *Request) Expect(t *testing.T) *Response {
r.apiTest.t = t
return r.apiTest.response
}
// Response is the user defined expected response from the application under test
type Response struct {
status int
body string
headers map[string]string
cookies map[string]string
cookiesPresent []string
cookiesNotPresent []string
httpCookies []http.Cookie
jsonPathExpression string
jsonPathAssert func(interface{})
apiTest *APITest
assert Assert
}
// Assert is a user defined custom assertion function
type Assert func(*http.Response, *http.Request) error
// Body is the expected response body
func (r *Response) Body(b string) *Response {
r.body = b
return r
}
// Cookies is the expected response cookies
func (r *Response) Cookies(cookies map[string]string) *Response {
r.cookies = cookies
return r
}
// HttpCookies is the expected response cookies
func (r *Response) HttpCookies(cookies []http.Cookie) *Response {
r.httpCookies = cookies
return r
}
// CookiePresent is used to assert that a cookie is present in the response,
// regardless of its value
func (r *Response) CookiePresent(cookieName string) *Response {
r.cookiesPresent = append(r.cookiesPresent, cookieName)
return r
}
|
func (r *Response) CookieNotPresent(cookieName string) *Response {
r.cookiesNotPresent = append(r.cookiesNotPresent, cookieName)
return r
}
// Headers is the expected response headers
func (r *Response) Headers(headers map[string]string) *Response {
r.headers = headers
return r
}
// Status is the expected response http status code
func (r *Response) Status(s int) *Response {
r.status = s
return r
}
// Assert allows the consumer to provide a user defined function containing their own
// custom assertions
func (r *Response) Assert(fn func(*http.Response, *http.Request) error) *Response {
r.assert = fn
return r.apiTest.response
}
// JSONPath provides support for jsonpath expectations as defined by https://goessner.net/articles/JsonPath/
func (r *Response) JSONPath(expression string, assert func(interface{})) *Response {
r.jsonPathExpression = expression
r.jsonPathAssert = assert
return r.apiTest.response
}
// End runs the test and all defined assertions
func (r *Response) End() {
r.apiTest.run()
}
func (a *APITest) run() {
res, req := a.runTest()
if a.observer != nil {
a.observer(res.Result(), req)
}
a.assertResponse(res)
a.assertHeaders(res)
a.assertCookies(res)
a.assertJSONPath(res)
if a.response.assert != nil {
err := a.response.assert(res.Result(), req)
if err != nil {
a.t.Fatal(err.Error())
}
}
}
func (a *APITest) runTest() (*httptest.ResponseRecorder, *http.Request) {
req := a.buildRequestFromTestCase()
res := httptest.NewRecorder()
a.handler.ServeHTTP(res, req)
return res, req
}
func (a *APITest) buildRequestFromTestCase() *http.Request {
req, _ := http.NewRequest(a.request.method, a.request.url, bytes.NewBufferString(a.request.body))
query := req.URL.Query()
if a.request.queryCollection != nil {
for _, param := range buildQueryCollection(a.request.queryCollection) {
query.Add(param.l, param.r)
}
}
if a.request.query != nil {
for k, v := range a.request.query {
query.Add(k, v)
}
}
if len(query) > 0 {
req.URL.RawQuery = query.Encode()
}
for k, v := range a.request.headers {
req.Header.Set(k, v)
}
for k, v := range a.request.cookies {
cookie := &http.Cookie{Name: k, Value: v}
req.AddCookie(cookie)
}
if a.request.basicAuth != "" {
parts := strings.Split(a.request.basicAuth, ":")
req.SetBasicAuth(parts[0], parts[1])
}
return req
}
func buildQueryCollection(params map[string][]string) []pair {
if len(params) == 0 {
return []pair{}
}
var pairs []pair
for k, v := range params {
for _, paramValue := range v {
pairs = append(pairs, pair{l: k, r: paramValue})
}
}
return pairs
}
func (a *APITest) assertResponse(res *httptest.ResponseRecorder) {
if a.response.status != 0 {
assert.Equal(a.t, a.response.status, res.Code)
}
if a.response.body != "" {
if isJSON(a.response.body) {
assert.JSONEq(a.t, a.response.body, res.Body.String())
} else {
assert.Equal(a.t, a.response.body, res.Body.String())
}
}
}
func (a *APITest) assertCookies(response *httptest.ResponseRecorder) {
if a.response.cookies != nil {
for name, value := range a.response.cookies {
foundCookie := false
for _, cookie := range getResponseCookies(response) {
if cookie.Name == name && cookie.Value == value {
foundCookie = true
}
}
assert.Equal(a.t, true, foundCookie, "Cookie not found - "+name)
}
}
if len(a.response.cookiesPresent) > 0 {
for _, cookieName := range a.response.cookiesPresent {
foundCookie := false
for _, cookie := range getResponseCookies(response) {
if cookie.Name == cookieName {
foundCookie = true
}
}
assert.True(a.t, foundCookie, "Cookie not found - "+cookieName)
}
}
if len(a.response.cookiesNotPresent) > 0
|
// CookieNotPresent is used to assert that a cookie is not present in the response
|
random_line_split
|
apitest.go
|
map[string]string
queryCollection map[string][]string
headers map[string]string
cookies map[string]string
basicAuth string
apiTest *APITest
}
type pair struct {
l string
r string
}
var DumpHttp Observe = func(res *http.Response, req *http.Request) {
requestDump, err := httputil.DumpRequest(req, true)
if err == nil {
fmt.Println("--> http request dump\n\n" + string(requestDump))
}
responseDump, err := httputil.DumpResponse(res, true)
if err == nil {
fmt.Println("<-- http response dump:\n\n" + string(responseDump))
}
}
// Observe is a builder method for setting the observer
func (r *Request) Observe(observer Observe) *Request {
r.apiTest.observer = observer
return r
}
// Method is a builder method for setting the http method of the request
func (r *Request) Method(method string) *Request {
r.method = method
return r
}
// URL is a builder method for setting the url of the request
func (r *Request) URL(url string) *Request {
r.url = url
return r
}
// Get is a convenience method for setting the request as http.MethodGet
func (r *Request) Get(url string) *Request {
r.method = http.MethodGet
r.url = url
return r
}
// Post is a convenience method for setting the request as http.MethodPost
func (r *Request)
|
(url string) *Request {
r.method = http.MethodPost
r.url = url
return r
}
// Put is a convenience method for setting the request as http.MethodPut
func (r *Request) Put(url string) *Request {
r.method = http.MethodPut
r.url = url
return r
}
// Delete is a convenience method for setting the request as http.MethodDelete
func (r *Request) Delete(url string) *Request {
r.method = http.MethodDelete
r.url = url
return r
}
// Patch is a convenience method for setting the request as http.MethodPatch
func (r *Request) Patch(url string) *Request {
r.method = http.MethodPatch
r.url = url
return r
}
// Body is a builder method to set the request body
func (r *Request) Body(b string) *Request {
r.body = b
return r
}
// Query is a builder method to set the request query parameters.
// This can be used in combination with request.QueryCollection
func (r *Request) Query(q map[string]string) *Request {
r.query = q
return r
}
// QueryCollection is a builder method to set the request query parameters.
// This can be used in combination with request.Query
func (r *Request) QueryCollection(q map[string][]string) *Request {
r.queryCollection = q
return r
}
// Headers is a builder method to set the request headers
func (r *Request) Headers(h map[string]string) *Request {
r.headers = h
return r
}
// Cookies is a builder method to set the request cookies
func (r *Request) Cookies(c map[string]string) *Request {
r.cookies = c
return r
}
// BasicAuth is a builder method to set basic auth on the request.
// The credentials should be provided delimited by a colon, e.g. "username:password"
func (r *Request) BasicAuth(auth string) *Request {
r.basicAuth = auth
return r
}
// Expect marks the request spec as complete and following code will define the expected response
func (r *Request) Expect(t *testing.T) *Response {
r.apiTest.t = t
return r.apiTest.response
}
// Response is the user defined expected response from the application under test
type Response struct {
status int
body string
headers map[string]string
cookies map[string]string
cookiesPresent []string
cookiesNotPresent []string
httpCookies []http.Cookie
jsonPathExpression string
jsonPathAssert func(interface{})
apiTest *APITest
assert Assert
}
// Assert is a user defined custom assertion function
type Assert func(*http.Response, *http.Request) error
// Body is the expected response body
func (r *Response) Body(b string) *Response {
r.body = b
return r
}
// Cookies is the expected response cookies
func (r *Response) Cookies(cookies map[string]string) *Response {
r.cookies = cookies
return r
}
// HttpCookies is the expected response cookies
func (r *Response) HttpCookies(cookies []http.Cookie) *Response {
r.httpCookies = cookies
return r
}
// CookiePresent is used to assert that a cookie is present in the response,
// regardless of its value
func (r *Response) CookiePresent(cookieName string) *Response {
r.cookiesPresent = append(r.cookiesPresent, cookieName)
return r
}
// CookieNotPresent is used to assert that a cookie is not present in the response
func (r *Response) CookieNotPresent(cookieName string) *Response {
r.cookiesNotPresent = append(r.cookiesNotPresent, cookieName)
return r
}
// Headers is the expected response headers
func (r *Response) Headers(headers map[string]string) *Response {
r.headers = headers
return r
}
// Status is the expected response http status code
func (r *Response) Status(s int) *Response {
r.status = s
return r
}
// Assert allows the consumer to provide a user defined function containing their own
// custom assertions
func (r *Response) Assert(fn func(*http.Response, *http.Request) error) *Response {
r.assert = fn
return r.apiTest.response
}
// JSONPath provides support for jsonpath expectations as defined by https://goessner.net/articles/JsonPath/
func (r *Response) JSONPath(expression string, assert func(interface{})) *Response {
r.jsonPathExpression = expression
r.jsonPathAssert = assert
return r.apiTest.response
}
// End runs the test and all defined assertions
func (r *Response) End() {
r.apiTest.run()
}
func (a *APITest) run() {
res, req := a.runTest()
if a.observer != nil {
a.observer(res.Result(), req)
}
a.assertResponse(res)
a.assertHeaders(res)
a.assertCookies(res)
a.assertJSONPath(res)
if a.response.assert != nil {
err := a.response.assert(res.Result(), req)
if err != nil {
a.t.Fatal(err.Error())
}
}
}
func (a *APITest) runTest() (*httptest.ResponseRecorder, *http.Request) {
req := a.buildRequestFromTestCase()
res := httptest.NewRecorder()
a.handler.ServeHTTP(res, req)
return res, req
}
func (a *APITest) buildRequestFromTestCase() *http.Request {
req, _ := http.NewRequest(a.request.method, a.request.url, bytes.NewBufferString(a.request.body))
query := req.URL.Query()
if a.request.queryCollection != nil {
for _, param := range buildQueryCollection(a.request.queryCollection) {
query.Add(param.l, param.r)
}
}
if a.request.query != nil {
for k, v := range a.request.query {
query.Add(k, v)
}
}
if len(query) > 0 {
req.URL.RawQuery = query.Encode()
}
for k, v := range a.request.headers {
req.Header.Set(k, v)
}
for k, v := range a.request.cookies {
cookie := &http.Cookie{Name: k, Value: v}
req.AddCookie(cookie)
}
if a.request.basicAuth != "" {
parts := strings.Split(a.request.basicAuth, ":")
req.SetBasicAuth(parts[0], parts[1])
}
return req
}
func buildQueryCollection(params map[string][]string) []pair {
if len(params) == 0 {
return []pair{}
}
var pairs []pair
for k, v := range params {
for _, paramValue := range v {
pairs = append(pairs, pair{l: k, r: paramValue})
}
}
return pairs
}
func (a *APITest) assertResponse(res *httptest.ResponseRecorder) {
if a.response.status != 0 {
assert.Equal(a.t, a.response.status, res.Code)
}
if a.response.body != "" {
if isJSON(a.response.body) {
assert.JSONEq(a.t, a.response.body, res.Body.String())
} else {
assert.Equal(a.t, a.response.body, res.Body.String())
}
}
}
func (a *APITest) assertCookies(response *httptest.ResponseRecorder) {
if a.response.cookies != nil {
for name, value := range a.response.cookies {
foundCookie := false
for _, cookie := range getResponseCookies(response) {
if cookie.Name == name && cookie.Value == value {
foundCookie = true
}
}
assert.Equal(a.t, true, foundCookie, "Cookie not found - "+name)
}
}
if len(a.response.cookiesPresent) > 0 {
for _, cookieName := range a.response.cookiesPresent {
foundCookie := false
for _, cookie := range getResponseCookies(response) {
if cookie.Name == cookieName {
foundCookie = true
}
}
assert.True(a.t, foundCookie, "Cookie not found - "+cookieName)
}
}
if len(a.response.cookiesNotPresent) >
|
Post
|
identifier_name
|
apitest.go
|
used in combination with request.QueryCollection
func (r *Request) Query(q map[string]string) *Request {
r.query = q
return r
}
// QueryCollection is a builder method to set the request query parameters.
// This can be used in combination with request.Query
func (r *Request) QueryCollection(q map[string][]string) *Request {
r.queryCollection = q
return r
}
// Headers is a builder method to set the request headers
func (r *Request) Headers(h map[string]string) *Request {
r.headers = h
return r
}
// Cookies is a builder method to set the request cookies
func (r *Request) Cookies(c map[string]string) *Request {
r.cookies = c
return r
}
// BasicAuth is a builder method to set basic auth on the request.
// The credentials should be provided delimited by a colon, e.g. "username:password"
func (r *Request) BasicAuth(auth string) *Request {
r.basicAuth = auth
return r
}
// Expect marks the request spec as complete and following code will define the expected response
func (r *Request) Expect(t *testing.T) *Response {
r.apiTest.t = t
return r.apiTest.response
}
// Response is the user defined expected response from the application under test
type Response struct {
status int
body string
headers map[string]string
cookies map[string]string
cookiesPresent []string
cookiesNotPresent []string
httpCookies []http.Cookie
jsonPathExpression string
jsonPathAssert func(interface{})
apiTest *APITest
assert Assert
}
// Assert is a user defined custom assertion function
type Assert func(*http.Response, *http.Request) error
// Body is the expected response body
func (r *Response) Body(b string) *Response {
r.body = b
return r
}
// Cookies is the expected response cookies
func (r *Response) Cookies(cookies map[string]string) *Response {
r.cookies = cookies
return r
}
// HttpCookies is the expected response cookies
func (r *Response) HttpCookies(cookies []http.Cookie) *Response {
r.httpCookies = cookies
return r
}
// CookiePresent is used to assert that a cookie is present in the response,
// regardless of its value
func (r *Response) CookiePresent(cookieName string) *Response {
r.cookiesPresent = append(r.cookiesPresent, cookieName)
return r
}
// CookieNotPresent is used to assert that a cookie is not present in the response
func (r *Response) CookieNotPresent(cookieName string) *Response {
r.cookiesNotPresent = append(r.cookiesNotPresent, cookieName)
return r
}
// Headers is the expected response headers
func (r *Response) Headers(headers map[string]string) *Response {
r.headers = headers
return r
}
// Status is the expected response http status code
func (r *Response) Status(s int) *Response {
r.status = s
return r
}
// Assert allows the consumer to provide a user defined function containing their own
// custom assertions
func (r *Response) Assert(fn func(*http.Response, *http.Request) error) *Response {
r.assert = fn
return r.apiTest.response
}
// JSONPath provides support for jsonpath expectations as defined by https://goessner.net/articles/JsonPath/
func (r *Response) JSONPath(expression string, assert func(interface{})) *Response {
r.jsonPathExpression = expression
r.jsonPathAssert = assert
return r.apiTest.response
}
// End runs the test and all defined assertions
func (r *Response) End() {
r.apiTest.run()
}
func (a *APITest) run() {
res, req := a.runTest()
if a.observer != nil {
a.observer(res.Result(), req)
}
a.assertResponse(res)
a.assertHeaders(res)
a.assertCookies(res)
a.assertJSONPath(res)
if a.response.assert != nil {
err := a.response.assert(res.Result(), req)
if err != nil {
a.t.Fatal(err.Error())
}
}
}
func (a *APITest) runTest() (*httptest.ResponseRecorder, *http.Request) {
req := a.buildRequestFromTestCase()
res := httptest.NewRecorder()
a.handler.ServeHTTP(res, req)
return res, req
}
func (a *APITest) buildRequestFromTestCase() *http.Request {
req, _ := http.NewRequest(a.request.method, a.request.url, bytes.NewBufferString(a.request.body))
query := req.URL.Query()
if a.request.queryCollection != nil {
for _, param := range buildQueryCollection(a.request.queryCollection) {
query.Add(param.l, param.r)
}
}
if a.request.query != nil {
for k, v := range a.request.query {
query.Add(k, v)
}
}
if len(query) > 0 {
req.URL.RawQuery = query.Encode()
}
for k, v := range a.request.headers {
req.Header.Set(k, v)
}
for k, v := range a.request.cookies {
cookie := &http.Cookie{Name: k, Value: v}
req.AddCookie(cookie)
}
if a.request.basicAuth != "" {
parts := strings.Split(a.request.basicAuth, ":")
req.SetBasicAuth(parts[0], parts[1])
}
return req
}
func buildQueryCollection(params map[string][]string) []pair {
if len(params) == 0 {
return []pair{}
}
var pairs []pair
for k, v := range params {
for _, paramValue := range v {
pairs = append(pairs, pair{l: k, r: paramValue})
}
}
return pairs
}
func (a *APITest) assertResponse(res *httptest.ResponseRecorder) {
if a.response.status != 0 {
assert.Equal(a.t, a.response.status, res.Code)
}
if a.response.body != "" {
if isJSON(a.response.body) {
assert.JSONEq(a.t, a.response.body, res.Body.String())
} else {
assert.Equal(a.t, a.response.body, res.Body.String())
}
}
}
func (a *APITest) assertCookies(response *httptest.ResponseRecorder) {
if a.response.cookies != nil {
for name, value := range a.response.cookies {
foundCookie := false
for _, cookie := range getResponseCookies(response) {
if cookie.Name == name && cookie.Value == value {
foundCookie = true
}
}
assert.Equal(a.t, true, foundCookie, "Cookie not found - "+name)
}
}
if len(a.response.cookiesPresent) > 0 {
for _, cookieName := range a.response.cookiesPresent {
foundCookie := false
for _, cookie := range getResponseCookies(response) {
if cookie.Name == cookieName {
foundCookie = true
}
}
assert.True(a.t, foundCookie, "Cookie not found - "+cookieName)
}
}
if len(a.response.cookiesNotPresent) > 0 {
for _, cookieName := range a.response.cookiesNotPresent {
foundCookie := false
for _, cookie := range getResponseCookies(response) {
if cookie.Name == cookieName {
foundCookie = true
}
}
assert.False(a.t, foundCookie, "Cookie found - "+cookieName)
}
}
if len(a.response.httpCookies) > 0 {
for _, httpCookie := range a.response.httpCookies {
foundCookie := false
for _, cookie := range getResponseCookies(response) {
if compareHttpCookies(cookie, &httpCookie) {
foundCookie = true
}
}
assert.True(a.t, foundCookie, "Cookie not found - "+httpCookie.Name)
}
}
}
// only compare a subset of fields for flexibility
func compareHttpCookies(l *http.Cookie, r *http.Cookie) bool {
return l.Name == r.Name &&
l.Value == r.Value &&
l.Domain == r.Domain &&
l.Expires == r.Expires &&
l.MaxAge == r.MaxAge &&
l.Secure == r.Secure &&
l.HttpOnly == r.HttpOnly &&
l.SameSite == r.SameSite
}
func getResponseCookies(response *httptest.ResponseRecorder) []*http.Cookie {
for _, rawCookieString := range response.Result().Header["Set-Cookie"] {
rawRequest := fmt.Sprintf("GET / HTTP/1.0\r\nCookie: %s\r\n\r\n", rawCookieString)
req, err := http.ReadRequest(bufio.NewReader(strings.NewReader(rawRequest)))
if err != nil {
panic("failed to parse response cookies. error: " + err.Error())
}
return req.Cookies()
}
return []*http.Cookie{}
}
func (a *APITest) assertHeaders(res *httptest.ResponseRecorder) {
if a.response.headers != nil {
for k, v := range a.response.headers {
header := res.Header().Get(k)
assert.Equal(a.t, v, header, fmt.Sprintf("'%s' header should be equal", k))
}
}
}
func (a *APITest) assertJSONPath(res *httptest.ResponseRecorder) {
if a.response.jsonPathExpression != "" {
v := interface{}(nil)
err := json.Unmarshal(res.Body.Bytes(), &v)
value, err := jsonpath.Get(a.response.jsonPathExpression, v)
if err != nil
|
{
assert.Nil(a.t, err)
}
|
conditional_block
|
|
apitest.go
|
map[string]string
queryCollection map[string][]string
headers map[string]string
cookies map[string]string
basicAuth string
apiTest *APITest
}
type pair struct {
l string
r string
}
var DumpHttp Observe = func(res *http.Response, req *http.Request) {
requestDump, err := httputil.DumpRequest(req, true)
if err == nil {
fmt.Println("--> http request dump\n\n" + string(requestDump))
}
responseDump, err := httputil.DumpResponse(res, true)
if err == nil {
fmt.Println("<-- http response dump:\n\n" + string(responseDump))
}
}
// Observe is a builder method for setting the observer
func (r *Request) Observe(observer Observe) *Request {
r.apiTest.observer = observer
return r
}
// Method is a builder method for setting the http method of the request
func (r *Request) Method(method string) *Request {
r.method = method
return r
}
// URL is a builder method for setting the url of the request
func (r *Request) URL(url string) *Request {
r.url = url
return r
}
// Get is a convenience method for setting the request as http.MethodGet
func (r *Request) Get(url string) *Request {
r.method = http.MethodGet
r.url = url
return r
}
// Post is a convenience method for setting the request as http.MethodPost
func (r *Request) Post(url string) *Request {
r.method = http.MethodPost
r.url = url
return r
}
// Put is a convenience method for setting the request as http.MethodPut
func (r *Request) Put(url string) *Request {
r.method = http.MethodPut
r.url = url
return r
}
// Delete is a convenience method for setting the request as http.MethodDelete
func (r *Request) Delete(url string) *Request {
r.method = http.MethodDelete
r.url = url
return r
}
// Patch is a convenience method for setting the request as http.MethodPatch
func (r *Request) Patch(url string) *Request {
r.method = http.MethodPatch
r.url = url
return r
}
// Body is a builder method to set the request body
func (r *Request) Body(b string) *Request {
r.body = b
return r
}
// Query is a builder method to set the request query parameters.
// This can be used in combination with request.QueryCollection
func (r *Request) Query(q map[string]string) *Request {
r.query = q
return r
}
// QueryCollection is a builder method to set the request query parameters.
// This can be used in combination with request.Query
func (r *Request) QueryCollection(q map[string][]string) *Request {
r.queryCollection = q
return r
}
// Headers is a builder method to set the request headers
func (r *Request) Headers(h map[string]string) *Request {
r.headers = h
return r
}
// Cookies is a builder method to set the request cookies
func (r *Request) Cookies(c map[string]string) *Request {
r.cookies = c
return r
}
// BasicAuth is a builder method to set basic auth on the request.
// The credentials should be provided delimited by a colon, e.g. "username:password"
func (r *Request) BasicAuth(auth string) *Request {
r.basicAuth = auth
return r
}
// Expect marks the request spec as complete and following code will define the expected response
func (r *Request) Expect(t *testing.T) *Response {
r.apiTest.t = t
return r.apiTest.response
}
// Response is the user defined expected response from the application under test
type Response struct {
status int
body string
headers map[string]string
cookies map[string]string
cookiesPresent []string
cookiesNotPresent []string
httpCookies []http.Cookie
jsonPathExpression string
jsonPathAssert func(interface{})
apiTest *APITest
assert Assert
}
// Assert is a user defined custom assertion function
type Assert func(*http.Response, *http.Request) error
// Body is the expected response body
func (r *Response) Body(b string) *Response {
r.body = b
return r
}
// Cookies is the expected response cookies
func (r *Response) Cookies(cookies map[string]string) *Response {
r.cookies = cookies
return r
}
// HttpCookies is the expected response cookies
func (r *Response) HttpCookies(cookies []http.Cookie) *Response {
r.httpCookies = cookies
return r
}
// CookiePresent is used to assert that a cookie is present in the response,
// regardless of its value
func (r *Response) CookiePresent(cookieName string) *Response {
r.cookiesPresent = append(r.cookiesPresent, cookieName)
return r
}
// CookieNotPresent is used to assert that a cookie is not present in the response
func (r *Response) CookieNotPresent(cookieName string) *Response {
r.cookiesNotPresent = append(r.cookiesNotPresent, cookieName)
return r
}
// Headers is the expected response headers
func (r *Response) Headers(headers map[string]string) *Response {
r.headers = headers
return r
}
// Status is the expected response http status code
func (r *Response) Status(s int) *Response {
r.status = s
return r
}
// Assert allows the consumer to provide a user defined function containing their own
// custom assertions
func (r *Response) Assert(fn func(*http.Response, *http.Request) error) *Response {
r.assert = fn
return r.apiTest.response
}
// JSONPath provides support for jsonpath expectations as defined by https://goessner.net/articles/JsonPath/
func (r *Response) JSONPath(expression string, assert func(interface{})) *Response {
r.jsonPathExpression = expression
r.jsonPathAssert = assert
return r.apiTest.response
}
// End runs the test and all defined assertions
func (r *Response) End() {
r.apiTest.run()
}
func (a *APITest) run() {
res, req := a.runTest()
if a.observer != nil {
a.observer(res.Result(), req)
}
a.assertResponse(res)
a.assertHeaders(res)
a.assertCookies(res)
a.assertJSONPath(res)
if a.response.assert != nil {
err := a.response.assert(res.Result(), req)
if err != nil {
a.t.Fatal(err.Error())
}
}
}
func (a *APITest) runTest() (*httptest.ResponseRecorder, *http.Request) {
req := a.buildRequestFromTestCase()
res := httptest.NewRecorder()
a.handler.ServeHTTP(res, req)
return res, req
}
func (a *APITest) buildRequestFromTestCase() *http.Request
|
for k, v := range a.request.headers {
req.Header.Set(k, v)
}
for k, v := range a.request.cookies {
cookie := &http.Cookie{Name: k, Value: v}
req.AddCookie(cookie)
}
if a.request.basicAuth != "" {
parts := strings.Split(a.request.basicAuth, ":")
req.SetBasicAuth(parts[0], parts[1])
}
return req
}
func buildQueryCollection(params map[string][]string) []pair {
if len(params) == 0 {
return []pair{}
}
var pairs []pair
for k, v := range params {
for _, paramValue := range v {
pairs = append(pairs, pair{l: k, r: paramValue})
}
}
return pairs
}
func (a *APITest) assertResponse(res *httptest.ResponseRecorder) {
if a.response.status != 0 {
assert.Equal(a.t, a.response.status, res.Code)
}
if a.response.body != "" {
if isJSON(a.response.body) {
assert.JSONEq(a.t, a.response.body, res.Body.String())
} else {
assert.Equal(a.t, a.response.body, res.Body.String())
}
}
}
func (a *APITest) assertCookies(response *httptest.ResponseRecorder) {
if a.response.cookies != nil {
for name, value := range a.response.cookies {
foundCookie := false
for _, cookie := range getResponseCookies(response) {
if cookie.Name == name && cookie.Value == value {
foundCookie = true
}
}
assert.Equal(a.t, true, foundCookie, "Cookie not found - "+name)
}
}
if len(a.response.cookiesPresent) > 0 {
for _, cookieName := range a.response.cookiesPresent {
foundCookie := false
for _, cookie := range getResponseCookies(response) {
if cookie.Name == cookieName {
foundCookie = true
}
}
assert.True(a.t, foundCookie, "Cookie not found - "+cookieName)
}
}
if len(a.response.cookiesNotPresent) >
|
{
req, _ := http.NewRequest(a.request.method, a.request.url, bytes.NewBufferString(a.request.body))
query := req.URL.Query()
if a.request.queryCollection != nil {
for _, param := range buildQueryCollection(a.request.queryCollection) {
query.Add(param.l, param.r)
}
}
if a.request.query != nil {
for k, v := range a.request.query {
query.Add(k, v)
}
}
if len(query) > 0 {
req.URL.RawQuery = query.Encode()
}
|
identifier_body
|
raft.go
|
}
}
r.state = candidate
r.CurrentTerm++
r.VotedFor = r.id
}
// StoreClientData allows a client to send data to the raft cluster via RPC for storage
// We fill the reply struct with "success = true" if we are leader and store the data successfully.
// If we are not leader, we will reply with the id of another node, and the client
// must detect this and retry at that node.
// If we do not know or do not yet have a leader, we will reply with leader = -1 and
// client may choose to retry at us or another random node.
// TODO - need a version of StoreClientData that ensures some form of commitment after leader responds to a message?
func (r *RaftNode) StoreClientData(cd ClientDataStruct, response *ClientResponse) error {
r.Lock()
defer r.Unlock()
if r.verbose {
log.Println("############ StoreClientData()")
}
// NOTE - if we do not yet know leader, client will see response.leader = -1.
// They should wait before recontact, and may recontact us or another random node
defer r.persistState()
defer r.executeLog()
response.Leader = r.currentLeader
response.Success = false // by default, assume we will fail
if r.state != leader {
return nil
}
// Try to short-circuit based on the client serial num
if haveNewer, prevReply := r.haveNewerSerialNum(cd.ClientID, cd.ClientSerialNum); haveNewer {
response.Success = prevReply.Success
// response.leader = prevReply.leader
// NOTE - we do not want to notify about the previous leader, because if it is not us, the client will
// just get confused and contact the wrong node next time
// this situation only arises if the client's previous attempt was partially successful, but leader crashed before replying
return nil
}
// We are the leader and this is a new entry. Attempt to replicate this to all peer logs
response.Success = true
entry := LogEntry{
Term: r.CurrentTerm,
ClientData: cd.Data,
ClientID: cd.ClientID,
ClientSerialNum: cd.ClientSerialNum,
ClientResponse: ClientResponse{
Success: response.Success,
Leader: r.id}}
r.append(entry)
go r.heartbeatAppendEntriesRPC()
return nil
}
// After sending updates to other nodes, we try to advance our commitIndex
// At the end, we try to execute log
func (r *RaftNode) updateCommitIndex() {
// If there exists an N such that:
// 1) N > commitIndex,
// 2) a majority of matchIndex[i] >= N, and
// 3) log[N].term == currentTerm
// Then:
// set commitIndex = N
for n := r.commitIndex + 1; n <= r.getLastLogIndex(); n++ {
if r.Log[n].Term != r.CurrentTerm {
if r.verbose {
log.Printf("commitIndex %d ineligible because of log entry %s", n, r.Log[n].String())
}
continue
}
peersAtThisLevel := make(map[HostID]bool)
for hostID := range r.hosts {
if hostID == r.id {
peersAtThisLevel[hostID] = true
} else {
peersAtThisLevel[hostID] = r.matchIndex[hostID] >= n
}
}
if haveMajority(peersAtThisLevel, "COMMIT IDX", r.verbose) {
r.commitIndex = n
}
}
}
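// Worked example (added for clarity; values are hypothetical): with 5 hosts,
// commitIndex=3, CurrentTerm=2 and leader-side matchIndex = {A:5, B:5, C:4, D:3},
// both N=4 (leader+A+B+C) and N=5 (leader+A+B) reach a majority, so commitIndex
// advances to 5, provided Log[4].Term and Log[5].Term both equal 2; entries from
// older terms are skipped by the term check above.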
// Based on our commit index, apply any log entries that are ready for commit
// This function should be idempotent and safe to apply often.
func (r *RaftNode) executeLog() {
for r.commitIndex > r.lastApplied {
r.lastApplied++
r.StateMachine.apply(r.Log[r.lastApplied])
}
}
// AppendEntries is called by RPC from the leader to modify the log of a follower.
// TODO - some amount of duplicated logic in AppendEntries() and Vote()
// Sets response.Success to false if the entries were rejected, or true if they were accepted
func (r *RaftNode)
|
(ae AppendEntriesStruct, response *RPCResponse) error {
r.Lock()
defer r.Unlock()
if r.verbose {
log.Printf("AppendEntries(). ae: %s", ae.String())
log.Printf("My log: %s", r.Log.String())
}
response.Term = r.CurrentTerm
if ae.LeaderID == r.currentLeader {
if r.verbose {
log.Println("AppendEntries from leader - reset tickers")
}
r.resetTickers()
}
// Reply false if term < currentTerm
if ae.Term < r.CurrentTerm {
if r.verbose {
log.Println("AE from stale term")
}
response.Term = r.CurrentTerm
response.Success = false
return nil
}
// NOTE - shifting to follower each time might sound misleading, but keeps things uniform
r.shiftToFollower(ae.Term, ae.LeaderID)
// Reply false if log doesn't contain an entry at prevLogIndex whose term matches prevLogTerm
if int(ae.PrevLogIndex) >= len(r.Log) || // index out-of-bounds
r.Log[ae.PrevLogIndex].Term != ae.PrevLogTerm {
if r.verbose {
log.Println("my PrevLogTerm does not match theirs")
}
response.Term = r.CurrentTerm
response.Success = false
return nil
}
// If an existing entry conflicts with a new one (same index, but different terms),
// delete the existing entry and all that follow it
if r.verbose {
log.Println("Applying entries...")
}
offset := int(ae.PrevLogIndex) + 1
for i, entry := range ae.Entries {
if i+offset >= len(r.Log) { // We certainly have no conflict
if r.verbose {
log.Printf("Apply without conflict: index=%d", i+offset)
}
r.append(entry)
} else {
if r.Log[i+offset].Term != ae.Entries[i].Term { // We have conflicting entry
if r.verbose {
log.Printf("Conflict - delete suffix! (we have term=%d, they have term=%d). Delete our log from index=%d onwards.", r.Log[i+offset].Term, ae.Entries[i].Term, i+offset)
}
r.Log = r.Log[:i+offset] // delete the existing entry and all that follow it
r.append(entry) // append the current entry
log.Printf("\n\nLog: %s\n\n", stringOneLog(r.Log))
} else if r.Log[i+offset] != entry {
log.Printf("\nOURS: %s\n\nTHEIRS: %s", r.Log[i+offset].String(), entry.String())
panic("log safety violation occurred somewhere")
}
}
}
response.Success = true
lastIndex := r.getLastLogIndex()
// Now we need to decide how to set our local commit index
if ae.LeaderCommit > r.commitIndex {
r.commitIndex = min(lastIndex, ae.LeaderCommit)
}
r.executeLog()
r.persistState()
return nil
}
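// Illustrative trace (added; not from the original source): if our log is
// [t1, t1, t2] and the leader sends PrevLogIndex=1, PrevLogTerm=1 with entries
// [t3, t3], the consistency check passes (Log[1].Term == 1), the t2 entry at
// index 2 conflicts with the incoming t3 entry, so the suffix from index 2 is
// truncated and both new entries are appended, leaving [t1, t1, t3, t3].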
// CandidateLooksEligible allows a raft node to decide whether another host's log is sufficiently up-to-date to become leader
// Returns true if the incoming RequestVote shows that the peer is at least as up-to-date as we are
// See paper section 5.4
func (r *RaftNode) CandidateLooksEligible(candLastLogIdx LogIndex, candLastLogTerm Term) bool {
ourLastLogTerm := r.getLastLogTerm()
ourLastLogIdx := r.getLastLogIndex()
if r.verbose {
log.Printf("We have: lastLogTerm=%d, lastLogIdx=%d. They have: lastLogTerm=%d, lastLogIdx=%d", ourLastLogTerm, ourLastLogIdx, candLastLogTerm, candLastLogIdx)
}
if ourLastLogTerm == candLastLogTerm {
return candLastLogIdx >= ourLastLogIdx
}
return candLastLogTerm >= ourLastLogTerm
}
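// Example (added; hypothetical values): a candidate with lastLogTerm=3,
// lastLogIdx=7 is eligible against our (term=3, idx=7) since equal is enough,
// and against our (term=2, idx=20) since a higher last term wins regardless of
// index, but not against our (term=3, idx=8).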
// Vote is called by RPC from a candidate. We can observe the following from the raft.github.io simulation:
// 1) If we get a requestVoteRPC from a future term, we immediately jump to that term and send our vote
// 2) If we are already collecting votes for the next election, and simultaneously get a request from another node to vote for them, we do NOT give them our vote
// (we've already voted for ourselves!)
// 3) if we've been offline, and wakeup and try to get votes: we get rejections, that also tell us the new term, and we immediately jump to that term as a follower
func (r *RaftNode) Vote(rv RequestVoteStruct, response *RPCResponse) error {
r.Lock()
defer r.Unlock()
if r.verbose {
log.Println("Vote()")
}
defer r.persistState()
response.Term = r.CurrentTerm
myLastLogTerm := r.getLastLogTerm()
myLastLogIdx := r.getLastLogIndex()
if r.verbose {
log.Printf("RequestVoteStruct: %s. \nMy node: term: %d, votedFor %d, last
|
AppendEntries
|
identifier_name
|
raft.go
|
false
}
}
r.state = candidate
r.CurrentTerm++
r.VotedFor = r.id
}
// StoreClientData allows a client to send data to the raft cluster via RPC for storage
// We fill the reply struct with "success = true" if we are leader and store the data successfully.
// If we are not leader, we will reply with the id of another node, and the client
// must detect this and retry at that node.
// If we do not know or do not yet have a leader, we will reply with leader = -1 and
// client may choose to retry at us or another random node.
// TODO - need a version of StoreClientData that ensures some form of commitment after leader responds to a message?
func (r *RaftNode) StoreClientData(cd ClientDataStruct, response *ClientResponse) error {
r.Lock()
defer r.Unlock()
if r.verbose {
log.Println("############ StoreClientData()")
}
// NOTE - if we do not yet know leader, client will see response.leader = -1.
// They should wait before recontact, and may recontact us or another random node
defer r.persistState()
defer r.executeLog()
response.Leader = r.currentLeader
response.Success = false // by default, assume we will fail
if r.state != leader {
return nil
}
// Try to short-circuit based on the client serial num
if haveNewer, prevReply := r.haveNewerSerialNum(cd.ClientID, cd.ClientSerialNum); haveNewer {
response.Success = prevReply.Success
// response.leader = prevReply.leader
// NOTE - we do not want to notify about the previous leader, because if it is not us, the client will
// just get confused and contact the wrong node next time
// this situation only arises if the client's previous attempt was partially successful, but leader crashed before replying
return nil
}
// We are the leader and this is a new entry. Attempt to replicate this to all peer logs
response.Success = true
entry := LogEntry{
Term: r.CurrentTerm,
ClientData: cd.Data,
ClientID: cd.ClientID,
ClientSerialNum: cd.ClientSerialNum,
ClientResponse: ClientResponse{
Success: response.Success,
Leader: r.id}}
r.append(entry)
go r.heartbeatAppendEntriesRPC()
return nil
}
// After sending updates to other nodes, we try to advance our commitIndex
// At the end, we try to execute log
func (r *RaftNode) updateCommitIndex() {
// If there exists an N such that:
// 1) N > commitIndex,
// 2) a majority of matchIndex[i] >= N, and
// 3) log[N].term == currentTerm
|
log.Printf("commitIndex %d ineligible because of log entry %s", n, r.Log[n].String())
}
continue
}
peersAtThisLevel := make(map[HostID]bool)
for hostID := range r.hosts {
if hostID == r.id {
peersAtThisLevel[hostID] = true
} else {
peersAtThisLevel[hostID] = r.matchIndex[hostID] >= n
}
}
if haveMajority(peersAtThisLevel, "COMMIT IDX", r.verbose) {
r.commitIndex = n
}
}
}
// Based on our commit index, apply any log entries that are ready for commit
// This function should be idempotent and safe to apply often.
func (r *RaftNode) executeLog() {
for r.commitIndex > r.lastApplied {
r.lastApplied++
r.StateMachine.apply(r.Log[r.lastApplied])
}
}
// AppendEntries is called by RPC from the leader to modify the log of a follower.
// TODO - some amount of duplicated logic in AppendEntries() and Vote()
// Sets response.Success to false if the entries were rejected, or true if they were accepted
func (r *RaftNode) AppendEntries(ae AppendEntriesStruct, response *RPCResponse) error {
r.Lock()
defer r.Unlock()
if r.verbose {
log.Printf("AppendEntries(). ae: %s", ae.String())
log.Printf("My log: %s", r.Log.String())
}
response.Term = r.CurrentTerm
if ae.LeaderID == r.currentLeader {
if r.verbose {
log.Println("AppendEntries from leader - reset tickers")
}
r.resetTickers()
}
// Reply false if term < currentTerm
if ae.Term < r.CurrentTerm {
if r.verbose {
log.Println("AE from stale term")
}
response.Term = r.CurrentTerm
response.Success = false
return nil
}
// NOTE - shifting to follower each time might sound misleading, but keeps things uniform
r.shiftToFollower(ae.Term, ae.LeaderID)
// Reply false if log doesn't contain an entry at prevLogIndex whose term matches prevLogTerm
if int(ae.PrevLogIndex) >= len(r.Log) || // index out-of-bounds
r.Log[ae.PrevLogIndex].Term != ae.PrevLogTerm {
if r.verbose {
log.Println("my PrevLogTerm does not match theirs")
}
response.Term = r.CurrentTerm
response.Success = false
return nil
}
// If an existing entry conflicts with a new one (same index, but different terms),
// delete the existing entry and all that follow it
if r.verbose {
log.Println("Applying entries...")
}
offset := int(ae.PrevLogIndex) + 1
for i, entry := range ae.Entries {
if i+offset >= len(r.Log) { // We certainly have no conflict
if r.verbose {
log.Printf("Apply without conflict: index=%d", i+offset)
}
r.append(entry)
} else {
if r.Log[i+offset].Term != ae.Entries[i].Term { // We have conflicting entry
if r.verbose {
log.Printf("Conflict - delete suffix! (we have term=%d, they have term=%d). Delete our log from index=%d onwards.", r.Log[i+offset].Term, ae.Entries[i].Term, i+offset)
}
r.Log = r.Log[:i+offset] // delete the existing entry and all that follow it
r.append(entry) // append the current entry
log.Printf("\n\nLog: %s\n\n", stringOneLog(r.Log))
} else if r.Log[i+offset] != entry {
log.Printf("\nOURS: %s\n\nTHEIRS: %s", r.Log[i+offset].String(), entry.String())
panic("log safety violation occurred somewhere")
}
}
}
response.Success = true
lastIndex := r.getLastLogIndex()
// Now we need to decide how to set our local commit index
if ae.LeaderCommit > r.commitIndex {
r.commitIndex = min(lastIndex, ae.LeaderCommit)
}
r.executeLog()
r.persistState()
return nil
}
// CandidateLooksEligible allows a raft node to decide whether another host's log is sufficiently up-to-date to become leader
// Returns true if the incoming RequestVote shows that the peer is at least as up-to-date as we are
// See paper section 5.4
func (r *RaftNode) CandidateLooksEligible(candLastLogIdx LogIndex, candLastLogTerm Term) bool {
ourLastLogTerm := r.getLastLogTerm()
ourLastLogIdx := r.getLastLogIndex()
if r.verbose {
log.Printf("We have: lastLogTerm=%d, lastLogIdx=%d. They have: lastLogTerm=%d, lastLogIdx=%d", ourLastLogTerm, ourLastLogIdx, candLastLogTerm, candLastLogIdx)
}
if ourLastLogTerm == candLastLogTerm {
return candLastLogIdx >= ourLastLogIdx
}
return candLastLogTerm >= ourLastLogTerm
}
// Vote is called by RPC from a candidate. We can observe the following from the raft.github.io simulation:
// 1) If we get a requestVoteRPC from a future term, we immediately jump to that term and send our vote
// 2) If we are already collecting votes for the next election, and simultaneously get a request from another node to vote for them, we do NOT give them our vote
// (we've already voted for ourselves!)
// 3) if we've been offline, and wakeup and try to get votes: we get rejections, that also tell us the new term, and we immediately jump to that term as a follower
func (r *RaftNode) Vote(rv RequestVoteStruct, response *RPCResponse) error {
r.Lock()
defer r.Unlock()
if r.verbose {
log.Println("Vote()")
}
defer r.persistState()
response.Term = r.CurrentTerm
myLastLogTerm := r.getLastLogTerm()
myLastLogIdx := r.getLastLogIndex()
if r.verbose {
log.Printf("RequestVoteStruct: %s. \nMy node: term: %d, votedFor %d, lastLog
|
// Then:
// set commitIndex = N
for n := r.commitIndex + 1; n <= r.getLastLogIndex(); n++ {
if r.Log[n].Term != r.CurrentTerm {
if r.verbose {
|
random_line_split
|
raft.go
|
because if it is not us, the client will
// just get confused and contact the wrong node next time
// this situation only arises if the client's previous attempt was partially successful, but leader crashed before replying
return nil
}
// We are the leader and this is a new entry. Attempt to replicate this to all peer logs
response.Success = true
entry := LogEntry{
Term: r.CurrentTerm,
ClientData: cd.Data,
ClientID: cd.ClientID,
ClientSerialNum: cd.ClientSerialNum,
ClientResponse: ClientResponse{
Success: response.Success,
Leader: r.id}}
r.append(entry)
go r.heartbeatAppendEntriesRPC()
return nil
}
// After sending updates to other nodes, we try to advance our commitIndex
// At the end, we try to execute log
func (r *RaftNode) updateCommitIndex() {
// If there exists an N such that:
// 1) N > commitIndex,
// 2) a majority of matchIndex[i] >= N, and
// 3) log[N].term == currentTerm
// Then:
// set commitIndex = N
for n := r.commitIndex + 1; n <= r.getLastLogIndex(); n++ {
if r.Log[n].Term != r.CurrentTerm {
if r.verbose {
log.Printf("commitIndex %d ineligible because of log entry %s", n, r.Log[n].String())
}
continue
}
peersAtThisLevel := make(map[HostID]bool)
for hostID := range r.hosts {
if hostID == r.id {
peersAtThisLevel[hostID] = true
} else {
peersAtThisLevel[hostID] = r.matchIndex[hostID] >= n
}
}
if haveMajority(peersAtThisLevel, "COMMIT IDX", r.verbose) {
r.commitIndex = n
}
}
}
// Based on our commit index, apply any log entries that are ready for commit
// This function should be idempotent and safe to apply often.
func (r *RaftNode) executeLog() {
for r.commitIndex > r.lastApplied {
r.lastApplied++
r.StateMachine.apply(r.Log[r.lastApplied])
}
}
// AppendEntries is called by RPC from the leader to modify the log of a follower.
// TODO - some amount of duplicated logic in AppendEntries() and Vote()
// Sets response.Success to false if the entries were rejected, or true if they were accepted
func (r *RaftNode) AppendEntries(ae AppendEntriesStruct, response *RPCResponse) error {
r.Lock()
defer r.Unlock()
if r.verbose {
log.Printf("AppendEntries(). ae: %s", ae.String())
log.Printf("My log: %s", r.Log.String())
}
response.Term = r.CurrentTerm
if ae.LeaderID == r.currentLeader {
if r.verbose {
log.Println("AppendEntries from leader - reset tickers")
}
r.resetTickers()
}
// Reply false if term < currentTerm
if ae.Term < r.CurrentTerm {
if r.verbose {
log.Println("AE from stale term")
}
response.Term = r.CurrentTerm
response.Success = false
return nil
}
// NOTE - shifting to follower each time might sound misleading, but keeps things uniform
r.shiftToFollower(ae.Term, ae.LeaderID)
// Reply false if log doesn't contain an entry at prevLogIndex whose term matches prevLogTerm
if int(ae.PrevLogIndex) >= len(r.Log) || // index out-of-bounds
r.Log[ae.PrevLogIndex].Term != ae.PrevLogTerm {
if r.verbose {
log.Println("my PrevLogTerm does not match theirs")
}
response.Term = r.CurrentTerm
response.Success = false
return nil
}
// If an existing entry conflicts with a new one (same index, but different terms),
// delete the existing entry and all that follow it
if r.verbose {
log.Println("Applying entries...")
}
offset := int(ae.PrevLogIndex) + 1
for i, entry := range ae.Entries {
if i+offset >= len(r.Log) { // We certainly have no conflict
if r.verbose {
log.Printf("Apply without conflict: index=%d", i+offset)
}
r.append(entry)
} else {
if r.Log[i+offset].Term != ae.Entries[i].Term { // We have conflicting entry
if r.verbose {
log.Printf("Conflict - delete suffix! (we have term=%d, they have term=%d). Delete our log from index=%d onwards.", r.Log[i+offset].Term, ae.Entries[i].Term, i+offset)
}
r.Log = r.Log[:i+offset] // delete the existing entry and all that follow it
r.append(entry) // append the current entry
log.Printf("\n\nLog: %s\n\n", stringOneLog(r.Log))
} else if r.Log[i+offset] != entry {
log.Printf("\nOURS: %s\n\nTHEIRS: %s", r.Log[i+offset].String(), entry.String())
panic("log safety violation occurred somewhere")
}
}
}
response.Success = true
lastIndex := r.getLastLogIndex()
// Now we need to decide how to set our local commit index
if ae.LeaderCommit > r.commitIndex {
r.commitIndex = min(lastIndex, ae.LeaderCommit)
}
r.executeLog()
r.persistState()
return nil
}
// CandidateLooksEligible allows a raft node to decide whether another host's log is sufficiently up-to-date to become leader
// Returns true if the incoming RequestVote shows that the peer is at least as up-to-date as we are
// See paper section 5.4
func (r *RaftNode) CandidateLooksEligible(candLastLogIdx LogIndex, candLastLogTerm Term) bool {
ourLastLogTerm := r.getLastLogTerm()
ourLastLogIdx := r.getLastLogIndex()
if r.verbose {
log.Printf("We have: lastLogTerm=%d, lastLogIdx=%d. They have: lastLogTerm=%d, lastLogIdx=%d", ourLastLogTerm, ourLastLogIdx, candLastLogTerm, candLastLogIdx)
}
if ourLastLogTerm == candLastLogTerm {
return candLastLogIdx >= ourLastLogIdx
}
return candLastLogTerm >= ourLastLogTerm
}
// Vote is called by RPC from a candidate. We can observe the following from the raft.github.io simulation:
// 1) If we get a requestVoteRPC from a future term, we immediately jump to that term and send our vote
// 2) If we are already collecting votes for the next election, and simultaneously get a request from another node to vote for them, we do NOT give them our vote
// (we've already voted for ourselves!)
// 3) if we've been offline, and wakeup and try to get votes: we get rejections, that also tell us the new term, and we immediately jump to that term as a follower
func (r *RaftNode) Vote(rv RequestVoteStruct, response *RPCResponse) error {
r.Lock()
defer r.Unlock()
if r.verbose {
log.Println("Vote()")
}
defer r.persistState()
response.Term = r.CurrentTerm
myLastLogTerm := r.getLastLogTerm()
myLastLogIdx := r.getLastLogIndex()
if r.verbose {
log.Printf("RequestVoteStruct: %s. \nMy node: term: %d, votedFor %d, lastLogTerm: %d, lastLogIdx: %d",
rv.String(), r.CurrentTerm, r.VotedFor, myLastLogTerm, myLastLogIdx)
}
looksEligible := r.CandidateLooksEligible(rv.LastLogIdx, rv.LastLogTerm)
if rv.Term > r.CurrentTerm {
r.shiftToFollower(rv.Term, HostID(-1)) // We do not yet know who is leader for this term
}
if rv.Term < r.CurrentTerm {
if r.verbose {
log.Println("RV from prior term - do not grant vote")
}
response.Success = false
} else if (r.VotedFor == -1 || r.VotedFor == rv.CandidateID) && looksEligible {
if r.verbose {
log.Println("Grant vote")
}
r.resetTickers()
response.Success = true
r.VotedFor = rv.CandidateID
} else {
if r.verbose {
log.Println("Do not grant vote")
}
response.Success = false
}
return nil
}
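// Example (added; hypothetical values): with CurrentTerm=4 and VotedFor=-1, a
// RequestVote with Term=4 from an eligible candidate is granted and VotedFor is
// recorded; a second Term=4 request from a different candidate is then rejected,
// and any request with Term<4 is rejected outright.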
func (r *RaftNode) getLastLogIndex() LogIndex {
if len(r.Log) > 0 {
return LogIndex(len(r.Log) - 1)
}
return LogIndex(0)
}
func (r *RaftNode) getLastLogTerm() Term {
return Term(r.Log[int(r.getLastLogIndex())].Term)
}
func max(x LogIndex, y LogIndex) LogIndex {
if x > y {
return x
}
return y
}
func min(x LogIndex, y LogIndex) LogIndex
|
{
if x < y {
return x
}
return y
}
|
identifier_body
|
|
raft.go
|
prevReply.Success
// response.leader = prevReply.leader
// NOTE - we do not want to notify about the previous leader, because if it is not us, the client will
// just get confused and contact the wrong node next time
// this situation only arises if the client's previous attempt was partially successful, but leader crashed before replying
return nil
}
// We are the leader and this is a new entry. Attempt to replicate this to all peer logs
response.Success = true
entry := LogEntry{
Term: r.CurrentTerm,
ClientData: cd.Data,
ClientID: cd.ClientID,
ClientSerialNum: cd.ClientSerialNum,
ClientResponse: ClientResponse{
Success: response.Success,
Leader: r.id}}
r.append(entry)
go r.heartbeatAppendEntriesRPC()
return nil
}
// After sending updates to other nodes, we try to advance our commitIndex
// At the end, we try to execute log
func (r *RaftNode) updateCommitIndex() {
// If there exists an N such that:
// 1) N > commitIndex,
// 2) a majority of matchIndex[i] >= N, and
// 3) log[N].term == currentTerm
// Then:
// set commitIndex = N
for n := r.commitIndex + 1; n <= r.getLastLogIndex(); n++ {
if r.Log[n].Term != r.CurrentTerm {
if r.verbose {
log.Printf("commitIndex %d ineligible because of log entry %s", n, r.Log[n].String())
}
continue
}
peersAtThisLevel := make(map[HostID]bool)
for hostID := range r.hosts {
if hostID == r.id {
peersAtThisLevel[hostID] = true
} else {
peersAtThisLevel[hostID] = r.matchIndex[hostID] >= n
}
}
if haveMajority(peersAtThisLevel, "COMMIT IDX", r.verbose) {
r.commitIndex = n
}
}
}
// Based on our commit index, apply any log entries that are ready for commit
// This function should be idempotent and safe to apply often.
func (r *RaftNode) executeLog() {
for r.commitIndex > r.lastApplied {
r.lastApplied++
r.StateMachine.apply(r.Log[r.lastApplied])
}
}
// AppendEntries is called by RPC from the leader to modify the log of a follower.
// TODO - some amount of duplicated logic in AppendEntries() and Vote()
// Sets response.Success to false if the entries were rejected, or true if they were accepted
func (r *RaftNode) AppendEntries(ae AppendEntriesStruct, response *RPCResponse) error {
r.Lock()
defer r.Unlock()
if r.verbose {
log.Printf("AppendEntries(). ae: %s", ae.String())
log.Printf("My log: %s", r.Log.String())
}
response.Term = r.CurrentTerm
if ae.LeaderID == r.currentLeader {
if r.verbose {
log.Println("AppendEntries from leader - reset tickers")
}
r.resetTickers()
}
// Reply false if term < currentTerm
if ae.Term < r.CurrentTerm {
if r.verbose {
log.Println("AE from stale term")
}
response.Term = r.CurrentTerm
response.Success = false
return nil
}
// NOTE - shifting to follower each time might sound misleading, but keeps things uniform
r.shiftToFollower(ae.Term, ae.LeaderID)
// Reply false if log doesn't contain an entry at prevLogIndex whose term matches prevLogTerm
if int(ae.PrevLogIndex) >= len(r.Log) || // index out-of-bounds
r.Log[ae.PrevLogIndex].Term != ae.PrevLogTerm {
if r.verbose {
log.Println("my PrevLogTerm does not match theirs")
}
response.Term = r.CurrentTerm
response.Success = false
return nil
}
// If an existing entry conflicts with a new one (same index, but different terms),
// delete the existing entry and all that follow it
if r.verbose {
log.Println("Applying entries...")
}
offset := int(ae.PrevLogIndex) + 1
for i, entry := range ae.Entries {
if i+offset >= len(r.Log) { // We certainly have no conflict
if r.verbose {
log.Printf("Apply without conflict: index=%d", i+offset)
}
r.append(entry)
} else {
if r.Log[i+offset].Term != ae.Entries[i].Term { // We have conflicting entry
if r.verbose {
log.Printf("Conflict - delete suffix! (we have term=%d, they have term=%d). Delete our log from index=%d onwards.", r.Log[i+offset].Term, ae.Entries[i].Term, i+offset)
}
r.Log = r.Log[:i+offset] // delete the existing entry and all that follow it
r.append(entry) // append the current entry
log.Printf("\n\nLog: %s\n\n", stringOneLog(r.Log))
} else if r.Log[i+offset] != entry {
log.Printf("\nOURS: %s\n\nTHEIRS: %s", r.Log[i+offset].String(), entry.String())
panic("log safety violation occurred somewhere")
}
}
}
response.Success = true
lastIndex := r.getLastLogIndex()
// Now we need to decide how to set our local commit index
if ae.LeaderCommit > r.commitIndex {
r.commitIndex = min(lastIndex, ae.LeaderCommit)
}
r.executeLog()
r.persistState()
return nil
}
// CandidateLooksEligible allows a raft node to decide whether another host's log is sufficiently up-to-date to become leader
// Returns true if the incoming RequestVote shows that the peer is at least as up-to-date as we are
// See paper section 5.4
func (r *RaftNode) CandidateLooksEligible(candLastLogIdx LogIndex, candLastLogTerm Term) bool {
ourLastLogTerm := r.getLastLogTerm()
ourLastLogIdx := r.getLastLogIndex()
if r.verbose {
log.Printf("We have: lastLogTerm=%d, lastLogIdx=%d. They have: lastLogTerm=%d, lastLogIdx=%d", ourLastLogTerm, ourLastLogIdx, candLastLogTerm, candLastLogIdx)
}
if ourLastLogTerm == candLastLogTerm {
return candLastLogIdx >= ourLastLogIdx
}
return candLastLogTerm >= ourLastLogTerm
}
// Vote is called by RPC from a candidate. We can observe the following from the raft.github.io simulation:
// 1) If we get a requestVoteRPC from a future term, we immediately jump to that term and send our vote
// 2) If we are already collecting votes for the next election, and simultaneously get a request from another node to vote for them, we do NOT give them our vote
// (we've already voted for ourselves!)
// 3) if we've been offline, and wakeup and try to get votes: we get rejections, that also tell us the new term, and we immediately jump to that term as a follower
func (r *RaftNode) Vote(rv RequestVoteStruct, response *RPCResponse) error {
r.Lock()
defer r.Unlock()
if r.verbose {
log.Println("Vote()")
}
defer r.persistState()
response.Term = r.CurrentTerm
myLastLogTerm := r.getLastLogTerm()
myLastLogIdx := r.getLastLogIndex()
if r.verbose {
log.Printf("RequestVoteStruct: %s. \nMy node: term: %d, votedFor %d, lastLogTerm: %d, lastLogIdx: %d",
rv.String(), r.CurrentTerm, r.VotedFor, myLastLogTerm, myLastLogIdx)
}
looksEligible := r.CandidateLooksEligible(rv.LastLogIdx, rv.LastLogTerm)
if rv.Term > r.CurrentTerm {
r.shiftToFollower(rv.Term, HostID(-1)) // We do not yet know who is leader for this term
}
if rv.Term < r.CurrentTerm {
if r.verbose {
log.Println("RV from prior term - do not grant vote")
}
response.Success = false
} else if (r.VotedFor == -1 || r.VotedFor == rv.CandidateID) && looksEligible {
if r.verbose {
log.Println("Grant vote")
}
r.resetTickers()
response.Success = true
r.VotedFor = rv.CandidateID
} else {
if r.verbose {
log.Println("Do not grant vote")
}
response.Success = false
}
return nil
}
func (r *RaftNode) getLastLogIndex() LogIndex {
if len(r.Log) > 0 {
return LogIndex(len(r.Log) - 1)
}
return LogIndex(0)
}
func (r *RaftNode) getLastLogTerm() Term {
return Term(r.Log[int(r.getLastLogIndex())].Term)
}
func max(x LogIndex, y LogIndex) LogIndex {
if x > y
|
{
return x
}
|
conditional_block
|
|
svgfrags.py
|
+= options.margin[1]
ymin -= options.margin[2]
ymax += options.margin[3]
# and calculate bbox's dimensions
dx = xmax - xmin
dy = ymax - ymin
if eq_id is not None:
# more than one reference, create new node <use>
equation = XML.createElement('use')
equation.setAttributeNS('xlink', 'xlink:href', '#'+eq_id)
def put_equation(x, y, sx, sy):
# calculate desired point in equation BBox
xo = xmin + (xmax - xmin)*px
yo = ymin + (ymax - ymin)*py
# move (xo,yo) to (x,y)
if sx == sy:
equation.setAttribute(
'transform',
('translate(%s,%s)' % (SVG.c2s(x), SVG.c2s(y))) + \
('scale(%s)' % SVG.s2s(sx)) + \
('translate(%s,%s)' % (SVG.c2s(-xo), SVG.c2s(-yo)))
)
else:
equation.setAttribute(
'transform',
('translate(%s,%s)' % (SVG.c2s(x), SVG.c2s(y))) + \
('scale(%s,%s)' % (SVG.s2s(sx), SVG.s2s(sy))) + \
('translate(%s,%s)' % (SVG.c2s(-xo), SVG.c2s(-yo)))
)
return equation
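# Illustrative note (added; values are hypothetical): the composed transform
# maps the chosen reference point of the equation bbox onto the target point.
# For example, with (xo, yo) = (10, 20), (x, y) = (100, 50) and sx = sy = 2 it
# becomes translate(100,50) scale(2) translate(-10,-20), which sends (10, 20)
# to (100, 50) and scales the equation by 2 around that point.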
# string or text object
if kind == 'string':
object = value
if options.scale == 'fit':
log.warning("%s is a text object, can't fit to rectangle", value)
sx = sy = 1.0
else:
sx, sy = options.scale
if type(sx) is tuple:
kind, val = sx
sx = 1.0
if kind == 'width':
if val == 'this': pass # no scale
else: # XML id
sx = get_width(val[1][1:], dx)/dx
elif kind == "height":
if val == 'this': pass # no scale
else: # XML id
sx = get_height(val[1][1:], dx)/dx
elif kind == "length":
sx = val/dx
if type(sy) is tuple:
kind, val = sy
sy = 1.0
if kind == 'width':
if val == 'this': pass # no scale
else: # XML id
sy = get_width(val[1][1:], dy)/dy
elif kind == "height":
if val == 'this': pass # no scale
else: # XML id
sy = get_height(val[1][1:], dy)/dy
elif kind == "length":
sy = val/dy
if sx == "uniform":
sx = sy
if sy == "uniform":
sy = sx
# get <text> object coords
x = frags.safe_float(object.getAttribute('x'))
y = frags.safe_float(object.getAttribute('y'))
# (DEBUG)
if DEBUG:
c = XML.createElement("circle")
c.setAttribute("cx", str(x))
c.setAttribute("cy", str(y))
c.setAttribute("r", "3")
c.setAttribute("fill", 'red')
object.parentNode.insertBefore(c, object)
put_equation(x, y, sx, sy)
# copy fill color from text node
fill = object.getAttribute('fill') or \
frags.CSS_value(object, 'fill')
if fill:
equation.setAttribute('fill', fill)
# insert equation into XML tree
object.parentNode.insertBefore(equation, object)
# explicitly given point
elif kind == 'point':
if options.scale == 'fit':
log.warning("%s is a text object, can't fit to rectangle", value)
sx = sy = 1.0
else:
sx, sy = options.scale
if type(sx) is tuple:
kind, val = sx
sx = 1.0
if kind == 'width':
if val == 'this': pass # no scale
else: # XML id
sx = get_width(val[1][1:], dx)/dx
elif kind == "height":
if val == 'this': pass # no scale
else: # XML id
sx = get_height(val[1][1:], dx)/dx
elif kind == "length":
sx = val/dx
if type(sy) is tuple:
kind, val = sy
sy = 1.0
if kind == 'width':
if val == 'this': pass # no scale
else: # XML id
sy = get_width(val[1][1:], dy)/dy
elif kind == "height":
if val == 'this': pass # no scale
else: # XML id
sy = get_height(val[1][1:], dy)/dy
elif kind == "length":
sy = val/dy
if sx == "uniform":
sx = sy
if sy == "uniform":
sy = sx
# insert equation into XML tree
x, y = value
XML.documentElement.appendChild(
put_equation(x, y, sx, sy)
)
# rectangle or object with known bbox
elif kind == 'id' or kind == 'rect':
# get bounding box
if kind == 'rect':
Xmin, Ymin, Xmax, Ymax = value # rect
else:
Xmin, Ymin, Xmax, Ymax = frags.get_bbox(value) # object
DX = Xmax - Xmin
DY = Ymax - Ymin
# reference point
x = Xmin + (Xmax - Xmin)*px
y = Ymin + (Ymax - Ymin)*py
# and set default scale
sx = 1.0
sy = 1.0
# Fit in rectangle
if options.scale == 'fit':
tmp_x = DX/(xmax - xmin)
tmp_y = DY/(ymax - ymin)
if tmp_x < tmp_y:
sx = sy = tmp_x
else:
sx = sy = tmp_y
else:
sx, sy = options.scale
if type(sx) is tuple:
kind, val = sx
sx = 1.0
if kind == 'width':
if val == 'this':
sx = DX/dx
else: # XML id
sx = get_width(val[1][1:], dx)/dx
elif kind == "height":
if val == 'this':
sx = DX/dx
else: # XML id
sx = get_height(val[1][1:], dx)/dx
elif kind == "length":
sx = val/dx
if type(sy) is tuple:
kind, val = sy
sy = 1.0
if kind == 'width':
if val == 'this':
sy = DY/dy
else: # XML id
sy = get_width(val[1][1:], dy)/dy
elif kind == "height":
if val == 'this':
sy = DY/dy
else: # XML id
sy = get_height(val[1][1:], dy)/dy
elif kind == "length":
sy = val/dy
if sx == "uniform":
sx = sy
if sy == "uniform":
sy = sx
#endif
# move&scale equation
put_equation(x, y, sx, sy)
# and append to XML tree
if kind == 'rect':
XML.documentElement.appendChild(equation)
else: # kind == 'id'
# in case of an existing object, place the equation
# just "above" it
pn = value.parentNode
if value == pn.lastChild:
pn.appendChild(equation)
else:
pn.insertBefore(equation, value.nextSibling)
#for
# 9. modify replaced <text> nodes according to options
if setup.options.frags_removetext: # remove nodes
for node in text_nodes:
node.parentNode.removeChild(node)
elif setup.options.frags_hidetext: # hide nodes
for node in text_nodes:
node.setAttribute('display', 'none')
SVG.save(output_svg)
def cleanup(tmp_filename):
"
|
remove temporary files"
extensions = ['.aux', '.log']
if not setup.options.frags_keeptex:
extensions.append('.tex')
if not setup.options.frags_keepdvi:
extensions.append('.dvi')
for ext in extensions:
frags.remove_file(tmp_filename + ext)
|
identifier_body
|
|
svgfrags.py
|
sys.exit(1)
elif not os.path.exists(input_txt):
log.error("Rules file '%s' don't exist", input_txt)
sys.exit(1)
if not input_svg:
log.error("Input SVG file not provided, use switch -i or --input")
sys.exit(1)
elif not os.path.exists(input_svg):
log.error("Input SVG file '%s' don't exist", input_svg)
sys.exit(1)
if not output_svg:
log.error("Output SVG file not provided, use switch -i or --output")
sys.exit(1)
elif os.path.exists(output_svg) and not setup.options.frags_overwrite_file:
log.error("File %s already exists, and cannot be overwritten. Use switch -f or --force-overwrite to change this behaviour.", output_svg)
sys.exit(1)
# 1. Load SVG file
XML = xml.dom.minidom.parse(input_svg)
# 1.1. Create 'defs' tag (if doesn't exists), and add xlink namespace
if not XML.getElementsByTagName('defs'):
XML.documentElement.insertBefore(
XML.createElement('defs'),
XML.documentElement.firstChild
)
if not XML.documentElement.getAttribute('xmlns:xlink'):
XML.documentElement.setAttribute('xmlns:xlink', "http://www.w3.org/1999/xlink")
if True:
# XXX: hack; for unknown reason expat do not read id attribute
# and getElementById always fails
ID = {}
frags.collect_Id(XML, ID)
def my_getElementById(id):
try:
return ID[id]
except KeyError:
return None
XML.getElementById = my_getElementById
# 1.2. find all text objects
text_objects = {} # text -> node
for node in XML.getElementsByTagName('text'):
try:
text = frags.get_text(node, setup.options.frags_strip)
# add to list
if text in text_objects:
text_objects[text].append(node)
else:
text_objects[text] = [node]
except ValueError:
pass
#for
# 2. Load & parse replace pairs
input = open(input_txt, 'r').read()
from frags.parse_subst import parse
repl_defs = frags.Dict() # valid defs
text_nodes = set() # text nodes to remove/hide
try:
for item in parse(input):
((kind, value), tex, options) = item
if tex is None: # i.e. "this"
if kind == 'string':
if setup.options.frags_strip:
tex = value.strip()
else:
tex = value
elif kind == 'id':
node = XML.getElementById(value[1:])
if frags.istextnode(node):
tex = frags.get_text(node)
if tex is None:
log.error("Keyword 'this' is not allowed for rect/points object")
continue
if kind == 'string':
if setup.options.frags_strip:
value = value.strip()
try:
for node in text_objects[value]:
text_nodes.add(node)
repl_defs[tex] = ((kind, node), tex, options)
except KeyError:
log.warning("String '%s' doesn't found in SVG, skipping repl", value)
elif kind == 'id':
object = XML.getElementById(value[1:])
if object:
# "forget" id, save object
if object.nodeName in ['rect', 'ellipse', 'circle']:
repl_defs[tex] = ((kind, object), tex, options)
elif object.nodeName == 'text':
repl_defs[tex] = (('string', object), tex, options)
else:
log.warning("Object with id=%s is not text, rect, ellipse nor circle - skipping repl", value)
else:
log.warning("Object with id=%s doesn't found in SVG, skipping repl", value)
else: # point, rect -- no additional tests needed
repl_defs[tex] = ((kind, value), tex, options)
except frags.parse_subst.SyntaxError, e:
log.error("Syntax error: %s", str(e))
sys.exit(1)
if not repl_defs:
log.info("No rules - bye.")
sys.exit()
# make tmp name based on hash input & timestamp of input_txt file
tmp_filename = "svgfrags-%08x-%08x" % (
hash(input) & 0xffffffff,
os.path.getmtime(input_txt)
)
atexit.register(cleanup, tmp_filename)
if not os.path.exists(tmp_filename + ".dvi"):
# 3. prepare LaTeX source
tmp_lines = [
'\\batchmode',
'\\documentclass{article}',
'\\pagestyle{empty}',
'\\begin{document}',
]
for tex in repl_defs:
tmp_lines.append(tex) # each TeX expression at new page
tmp_lines.append("\\newpage")
# 4. write & compile TeX source
tmp_lines.append("\end{document}")
tmp = open(tmp_filename + '.tex', 'w')
for line in tmp_lines:
|
exitstatus = os.system("latex %s.tex > /dev/null" % tmp_filename)
if exitstatus:
log.error("LaTeX failed - error code %d; check log file '%s.log'", exitstatus, tmp_filename)
sys.exit(2)
else:
log.error("Program 'latex' isn't avaialable.")
sys.exit(3)
else:
log.info("File %s not changed, used existing DVI file (%s)", input_txt, tmp_filename)
# 5. Load DVI
dvi = binfile(tmp_filename + ".dvi", 'rb')
comment, (num, den, mag, u, l), page_offset, fonts = dviparser.dviinfo(dvi)
unit_mm = num/(den*10000.0)
scale = unit_mm * 72.27/25.4
mag = mag/1000.0
# 6. Preload fonts used in DVI & other stuff
fontsel.preload()
missing = []
for k in fonts:
_, s, d, fontname = fonts[k]
log.debug("Font %s=%s" % (k, fontname))
#print "Font %s=%s" % (k, fontname)
try:
fontsel.create_DVI_font(fontname, k, s, d, setup.options.enc_methods)
except fontsel.FontError, e:
log.error("Can't find font '%s': %s" % (fontname, str(e)))
missing.append((k, fontname))
if missing:
log.error("There were some unavailable fonts; list of missing fonts: %s" % (dvi.name, ", ".join("%d=%s" % kf for kf in missing)))
sys.exit(1)
# 7. Substitute
eq_id_n = 0
# helper functions
def get_width(obj_id, default=0.0):
ref = XML.getElementById(obj_id)
if ref:
return frags.get_width(ref)
else:
log.error("Object id=%s doesn't exists", obj_id)
return default
def get_height(obj_id, default=0.0):
ref = XML.getElementById(obj_id)
if ref:
return frags.get_height(ref)
else:
log.error("Object id=%s doesn't exists", obj_id)
return default
SVG = EquationsManager(XML, 1.25 * mag, scale, unit_mm)
for pageno, items in enumerate(repl_defs.values()):
dvi.seek(page_offset[pageno])
SVG.new_page()
dvi2svg.convert_page(dvi, SVG)
assert SVG.lastpage is not None, "Fatal error!"
assert SVG.lastbbox is not None, "Fatal error!"
if len(items) > 1:
# there is more than one reference to this TeX object, so
# we have to **define** it, and then reference it with <use>
eq_id = 'svgfrags-%x' % eq_id_n
eq_id_n += 1
SVG.lastpage.setAttribute('id', eq_id)
XML.getElementsByTagName('defs')[0].appendChild(SVG.lastpage)
else:
# just one reference, use the node created by SVGDocument
equation = SVG.lastpage
eq_id = None
# process
for ((kind, value), tex, options) in items:
px, py = options.position
if px == 'inherit':
if frags.istextnode(value):
px = frags.get_anchor(value)
else:
px = 0.0
# bounding box of equation
(xmin, ymin, xmax, ymax) = SVG.lastbbox
# enlarge with margin values
xmin -= options.margin[0]
xmax += options.margin[1]
ymin -= options.margin[2]
ymax += options.margin[3]
# and calculate bbox's dimensions
dx = xmax - xmin
dy = ymax - ymin
|
tmp.write(line + "\n")
tmp.close()
if which('latex'):
|
random_line_split
|
svgfrags.py
|
self, filename):
defs = self.document.getElementsByTagName('defs')[0]
for element in self.flush_glyphs():
defs.appendChild(element)
# save file
f = open(filename, 'wb')
if setup.options.prettyXML:
f.write(self.document.toprettyxml())
else:
f.write(self.document.toxml())
f.close()
def main(args):
from frags.cmdopts import parse_args
(setup.options, args) = parse_args(args)
# fixed options
setup.options.use_bbox = True
setup.options.prettyXML = False
input_txt = setup.options.input_txt
input_svg = setup.options.input_svg
output_svg = setup.options.output_svg
if not input_txt:
log.error("Rules file not provided, use switch -r or --rules")
sys.exit(1)
elif not os.path.exists(input_txt):
log.error("Rules file '%s' don't exist", input_txt)
sys.exit(1)
if not input_svg:
log.error("Input SVG file not provided, use switch -i or --input")
sys.exit(1)
elif not os.path.exists(input_svg):
log.error("Input SVG file '%s' don't exist", input_svg)
sys.exit(1)
if not output_svg:
log.error("Output SVG file not provided, use switch -i or --output")
sys.exit(1)
elif os.path.exists(output_svg) and not setup.options.frags_overwrite_file:
log.error("File %s already exists, and cannot be overwritten. Use switch -f or --force-overwrite to change this behaviour.", output_svg)
sys.exit(1)
# 1. Load SVG file
XML = xml.dom.minidom.parse(input_svg)
# 1.1. Create 'defs' tag (if doesn't exists), and add xlink namespace
if not XML.getElementsByTagName('defs'):
XML.documentElement.insertBefore(
XML.createElement('defs'),
XML.documentElement.firstChild
)
if not XML.documentElement.getAttribute('xmlns:xlink'):
XML.documentElement.setAttribute('xmlns:xlink', "http://www.w3.org/1999/xlink")
if True:
# XXX: hack; for unknown reason expat do not read id attribute
# and getElementById always fails
ID = {}
frags.collect_Id(XML, ID)
def my_getElementById(id):
try:
return ID[id]
except KeyError:
return None
XML.getElementById = my_getElementById
# 1.2. find all text objects
text_objects = {} # text -> node
for node in XML.getElementsByTagName('text'):
try:
text = frags.get_text(node, setup.options.frags_strip)
# add to list
if text in text_objects:
text_objects[text].append(node)
else:
text_objects[text] = [node]
except ValueError:
pass
#for
# 2. Load & parse replace pairs
input = open(input_txt, 'r').read()
from frags.parse_subst import parse
repl_defs = frags.Dict() # valid defs
text_nodes = set() # text nodes to remove/hide
try:
for item in parse(input):
((kind, value), tex, options) = item
if tex is None: # i.e. "this"
if kind == 'string':
if setup.options.frags_strip:
tex = value.strip()
else:
tex = value
elif kind == 'id':
node = XML.getElementById(value[1:])
if frags.istextnode(node):
tex = frags.get_text(node)
if tex is None:
log.error("Keyword 'this' is not allowed for rect/points object")
continue
if kind == 'string':
if setup.options.frags_strip:
value = value.strip()
try:
for node in text_objects[value]:
text_nodes.add(node)
repl_defs[tex] = ((kind, node), tex, options)
except KeyError:
log.warning("String '%s' doesn't found in SVG, skipping repl", value)
elif kind == 'id':
object = XML.getElementById(value[1:])
if object:
# "forget" id, save object
if object.nodeName in ['rect', 'ellipse', 'circle']:
repl_defs[tex] = ((kind, object), tex, options)
elif object.nodeName == 'text':
repl_defs[tex] = (('string', object), tex, options)
else:
log.warning("Object with id=%s is not text, rect, ellipse nor circle - skipping repl", value)
else:
log.warning("Object with id=%s doesn't found in SVG, skipping repl", value)
else: # point, rect -- no additional tests needed
repl_defs[tex] = ((kind, value), tex, options)
except frags.parse_subst.SyntaxError, e:
log.error("Syntax error: %s", str(e))
sys.exit(1)
if not repl_defs:
log.info("No rules - bye.")
sys.exit()
# make tmp name based on hash input & timestamp of input_txt file
tmp_filename = "svgfrags-%08x-%08x" % (
hash(input) & 0xffffffff,
os.path.getmtime(input_txt)
)
atexit.register(cleanup, tmp_filename)
if not os.path.exists(tmp_filename + ".dvi"):
# 3. prepare LaTeX source
tmp_lines = [
'\\batchmode',
'\\documentclass{article}',
'\\pagestyle{empty}',
'\\begin{document}',
]
for tex in repl_defs:
tmp_lines.append(tex) # each TeX expression at new page
tmp_lines.append("\\newpage")
# 4. write & compile TeX source
tmp_lines.append("\end{document}")
tmp = open(tmp_filename + '.tex', 'w')
for line in tmp_lines:
tmp.write(line + "\n")
tmp.close()
if which('latex'):
exitstatus = os.system("latex %s.tex > /dev/null" % tmp_filename)
if exitstatus:
log.error("LaTeX failed - error code %d; check log file '%s.log'", exitstatus, tmp_filename)
sys.exit(2)
else:
log.error("Program 'latex' isn't avaialable.")
sys.exit(3)
else:
log.info("File %s not changed, used existing DVI file (%s)", input_txt, tmp_filename)
# 5. Load DVI
dvi = binfile(tmp_filename + ".dvi", 'rb')
comment, (num, den, mag, u, l), page_offset, fonts = dviparser.dviinfo(dvi)
unit_mm = num/(den*10000.0)
scale = unit_mm * 72.27/25.4
mag = mag/1000.0
# 6. Preload fonts used in DVI & other stuff
fontsel.preload()
missing = []
for k in fonts:
_, s, d, fontname = fonts[k]
log.debug("Font %s=%s" % (k, fontname))
#print "Font %s=%s" % (k, fontname)
try:
fontsel.create_DVI_font(fontname, k, s, d, setup.options.enc_methods)
except fontsel.FontError, e:
log.error("Can't find font '%s': %s" % (fontname, str(e)))
missing.append((k, fontname))
if missing:
log.error("There were some unavailable fonts; list of missing fonts: %s" % (dvi.name, ", ".join("%d=%s" % kf for kf in missing)))
sys.exit(1)
# 7. Substitute
eq_id_n = 0
# helper functions
def get_width(obj_id, default=0.0):
ref = XML.getElementById(obj_id)
if ref:
return frags.get_width(ref)
else:
log.error("Object id=%s doesn't exists", obj_id)
return default
def get_height(obj_id, default=0.0):
ref = XML.getElementById(obj_id)
if ref:
return frags.get_height(ref)
else:
log.error("Object id=%s doesn't exists", obj_id)
return default
SVG = EquationsManager(XML, 1.25 * mag, scale, unit_mm)
for pageno, items in enumerate(repl_defs.values()):
dvi.seek(page_offset[pageno])
SVG.new_page()
dvi2svg.convert_page(dvi, SVG)
assert SVG.lastpage is not None, "Fatal error!"
assert SVG.lastbbox is not None, "Fatal error!"
if len(items) > 1:
# there is more than one reference to this TeX object, so
# we have to **define** it, and then reference it with <use>
eq_id = 'svgfrags-%x' % eq_id_n
eq_id_n += 1
SVG.lastpage.setAttribute('id', eq_id)
XML.getElementsByTagName('defs')[0].appendChild(SVG.lastpage)
else:
# just one reference, use the node created by SVGDocument
equ
|
ave(
|
identifier_name
|
|
svgfrags.py
|
.path.getmtime(input_txt)
)
atexit.register(cleanup, tmp_filename)
if not os.path.exists(tmp_filename + ".dvi"):
# 3. prepare LaTeX source
tmp_lines = [
'\\batchmode',
'\\documentclass{article}',
'\\pagestyle{empty}',
'\\begin{document}',
]
for tex in repl_defs:
tmp_lines.append(tex) # each TeX expression at new page
tmp_lines.append("\\newpage")
# 4. write & compile TeX source
tmp_lines.append("\end{document}")
tmp = open(tmp_filename + '.tex', 'w')
for line in tmp_lines:
tmp.write(line + "\n")
tmp.close()
if which('latex'):
exitstatus = os.system("latex %s.tex > /dev/null" % tmp_filename)
if exitstatus:
log.error("LaTeX failed - error code %d; check log file '%s.log'", exitstatus, tmp_filename)
sys.exit(2)
else:
log.error("Program 'latex' isn't avaialable.")
sys.exit(3)
else:
log.info("File %s not changed, used existing DVI file (%s)", input_txt, tmp_filename)
# 5. Load DVI
dvi = binfile(tmp_filename + ".dvi", 'rb')
comment, (num, den, mag, u, l), page_offset, fonts = dviparser.dviinfo(dvi)
unit_mm = num/(den*10000.0)
scale = unit_mm * 72.27/25.4
mag = mag/1000.0
# 6. Preload fonts used in DVI & other stuff
fontsel.preload()
missing = []
for k in fonts:
_, s, d, fontname = fonts[k]
log.debug("Font %s=%s" % (k, fontname))
#print "Font %s=%s" % (k, fontname)
try:
fontsel.create_DVI_font(fontname, k, s, d, setup.options.enc_methods)
except fontsel.FontError, e:
log.error("Can't find font '%s': %s" % (fontname, str(e)))
missing.append((k, fontname))
if missing:
log.error("There were some unavailable fonts; list of missing fonts: %s" % (dvi.name, ", ".join("%d=%s" % kf for kf in missing)))
sys.exit(1)
# 7. Substitute
eq_id_n = 0
# helper functions
def get_width(obj_id, default=0.0):
ref = XML.getElementById(obj_id)
if ref:
return frags.get_width(ref)
else:
log.error("Object id=%s doesn't exists", obj_id)
return default
def get_height(obj_id, default=0.0):
ref = XML.getElementById(obj_id)
if ref:
return frags.get_height(ref)
else:
log.error("Object id=%s doesn't exists", obj_id)
return default
SVG = EquationsManager(XML, 1.25 * mag, scale, unit_mm)
for pageno, items in enumerate(repl_defs.values()):
dvi.seek(page_offset[pageno])
SVG.new_page()
dvi2svg.convert_page(dvi, SVG)
assert SVG.lastpage is not None, "Fatal error!"
assert SVG.lastbbox is not None, "Fatal error!"
if len(items) > 1:
# there is more than one reference to this TeX object, so
# we have to **define** it, and then reference it with <use>
eq_id = 'svgfrags-%x' % eq_id_n
eq_id_n += 1
SVG.lastpage.setAttribute('id', eq_id)
XML.getElementsByTagName('defs')[0].appendChild(SVG.lastpage)
else:
# just one reference, use the node created by SVGDocument
equation = SVG.lastpage
eq_id = None
# process
for ((kind, value), tex, options) in items:
px, py = options.position
if px == 'inherit':
if frags.istextnode(value):
px = frags.get_anchor(value)
else:
px = 0.0
# bounding box of equation
(xmin, ymin, xmax, ymax) = SVG.lastbbox
# enlarge with margin values
xmin -= options.margin[0]
xmax += options.margin[1]
ymin -= options.margin[2]
ymax += options.margin[3]
# and calculate bbox's dimensions
dx = xmax - xmin
dy = ymax - ymin
if eq_id is not None:
# more than one reference, create new node <use>
equation = XML.createElement('use')
equation.setAttributeNS('xlink', 'xlink:href', '#'+eq_id)
def put_equation(x, y, sx, sy):
# calculate desired point in equation BBox
xo = xmin + (xmax - xmin)*px
yo = ymin + (ymax - ymin)*py
# move (xo,yo) to (x,y)
if sx == sy:
equation.setAttribute(
'transform',
('translate(%s,%s)' % (SVG.c2s(x), SVG.c2s(y))) + \
('scale(%s)' % SVG.s2s(sx)) + \
('translate(%s,%s)' % (SVG.c2s(-xo), SVG.c2s(-yo)))
)
else:
equation.setAttribute(
'transform',
('translate(%s,%s)' % (SVG.c2s(x), SVG.c2s(y))) + \
('scale(%s,%s)' % (SVG.s2s(sx), SVG.s2s(sy))) + \
('translate(%s,%s)' % (SVG.c2s(-xo), SVG.c2s(-yo)))
)
return equation
# string or text object
if kind == 'string':
object = value
if options.scale == 'fit':
log.warning("%s is a text object, can't fit to rectangle", value)
sx = sy = 1.0
else:
sx, sy = options.scale
if type(sx) is tuple:
kind, val = sx
sx = 1.0
if kind == 'width':
if val == 'this': pass # no scale
else: # XML id
sx = get_width(val[1][1:], dx)/dx
elif kind == "height":
if val == 'this': pass # no scale
else: # XML id
sx = get_height(val[1][1:], dx)/dx
elif kind == "length":
sx = val/dx
if type(sy) is tuple:
kind, val = sy
sy = 1.0
if kind == 'width':
if val == 'this': pass # no scale
else: # XML id
sy = get_width(val[1][1:], dy)/dy
elif kind == "height":
if val == 'this': pass # no scale
else: # XML id
sy = get_height(val[1][1:], dy)/dy
elif kind == "length":
sy = val/dy
if sx == "uniform":
sx = sy
if sy == "uniform":
sy = sx
# get <text> object coords
x = frags.safe_float(object.getAttribute('x'))
y = frags.safe_float(object.getAttribute('y'))
# (DEBUG)
if DEBUG:
c = XML.createElement("circle")
c.setAttribute("cx", str(x))
c.setAttribute("cy", str(y))
c.setAttribute("r", "3")
c.setAttribute("fill", 'red')
object.parentNode.insertBefore(c, object)
put_equation(x, y, sx, sy)
# copy fill color from text node
fill = object.getAttribute('fill') or \
frags.CSS_value(object, 'fill')
if fill:
equation.setAttribute('fill', fill)
# insert equation into XML tree
object.parentNode.insertBefore(equation, object)
# explicitly given point
elif kind == 'point':
if options.scale == 'fit':
log.warning("%s is a text object, can't fit to rectangle", value)
sx = sy = 1.0
else:
sx, sy = options.scale
if type(sx) is tuple:
kind, val = sx
sx = 1.0
if kind == 'width':
if val == 'this': pass # no scale
else: # XML id
sx = get_width(val[1][1:], dx)/dx
elif kind == "height":
if val == 'this': p
|
ass # no scale
|
conditional_block
|
|
update_configuration_files.py
|
in this script.
"""
ROOT = get_shared_data_root() / "pipette" / "definitions" / "2"
NOZZLE_LOCATION_CONFIGS = ["nozzle_offset", "nozzle_map"]
def
|
(c: str) -> str:
# Tiny helper function to convert to camelCase.
config_name = c.split("_")
if len(config_name) == 1:
return config_name[0]
return f"{config_name[0]}" + "".join(s.capitalize() for s in config_name[1::])
def list_configuration_keys() -> Tuple[List[str], Dict[int, str]]:
"""List out the model keys available to modify at the top level."""
lookup = {i: v for (i, v) in enumerate(PipetteConfigurations.__fields__)}
return [
f"{i}: {v}" for (i, v) in enumerate(PipetteConfigurations.__fields__)
], lookup
def list_available_enum(enum_type: Type[Enum]) -> List[str]:
"""List available pipette models"""
return [f"{i}: {v}" for (i, v) in enumerate(enum_type)] # type: ignore[var-annotated]
def handle_subclass_model(
top_level_configuration: List[str], base_model: BaseModel, is_basemodel: bool
) -> List[str]:
"""Handle sub-classed basemodels and update the top level model as necessary."""
if is_basemodel:
if base_model.__fields__ == SupportedTipsDefinition.__fields__:
# pydantic does something weird with the types in ModelFields so
# we cannot use isinstance checks to confirm if the base model
# is a supported tips definition
print(f"choose {PipetteTipType.__name__}:")
for row in list_available_enum(PipetteTipType):
print(f"\t{row}")
tip_type = list(PipetteTipType)[
int(input("select the tip volume size to modify"))
]
top_level_configuration.append(tip_type.name)
lookup = {i: v for (i, v) in enumerate(base_model.__fields__)}
config_list = [f"{i}: {v}" for (i, v) in enumerate(base_model.__fields__)]
print(f"you selected the basemodel {base_model.__name__}:") # type: ignore[attr-defined]
for row in config_list:
print(f"\t{row}")
configuration_to_update = lookup[
int(input("select a specific configuration from above\n"))
]
field_type = base_model.__fields__[configuration_to_update].type_
is_basemodel = isinstance(field_type, ModelMetaclass)
top_level_configuration.append(configuration_to_update)
return handle_subclass_model(top_level_configuration, field_type, is_basemodel)
else:
return top_level_configuration
def check_from_version(version: str) -> str:
"""Check that the version requested is supported in the system."""
version_int = [int(v) for v in version.split(".")]
if version_int[0] not in PipetteModelMajorVersion:
raise ValueError(f"Major version {version_int[0]} is not supported.")
if version_int[1] not in PipetteModelMinorVersion:
raise ValueError(f"Minor version {version_int[1]} is not supported.")
return version
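# Usage sketch (added; assumes "2.0" is a supported version in this repo):
# check_from_version("2.0") returns "2.0" unchanged, while a version whose
# major or minor part is missing from PipetteModelMajorVersion /
# PipetteModelMinorVersion raises ValueError.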
def save_data_to_file(
directorypath: Path,
file_name: str,
data: Dict[str, Any],
) -> None:
"""
Function used to save data to a file
"""
directorypath.mkdir(parents=True, exist_ok=True)
filepath = directorypath / f"{file_name}.json"
with open(filepath, "w") as f:
json.dump(data, f, indent=2)
def update(
dict_to_update: Dict[str, Any], iter_of_configs: Iterator[str], value_to_update: Any
) -> Dict[str, Any]:
"""
Recursively update the given dictionary to ensure no data is lost when updating.
"""
next_key = next(iter_of_configs, None)
if next_key and isinstance(dict_to_update[next_key], dict):
dict_to_update[next_key] = update(
dict_to_update.get(next_key, {}), iter_of_configs, value_to_update
)
elif next_key:
dict_to_update[next_key] = value_to_update
return dict_to_update
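# Usage sketch (added; values are hypothetical): given
# geometry = {"nozzleOffset": [0.0, 0.0, 10.0]},
# update(geometry, iter(["nozzleOffset"]), [1.0, 2.0, 3.0]) replaces only that
# leaf value and leaves any sibling keys in the dictionary untouched.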
def build_nozzle_map(
nozzle_offset: List[float], channels: PipetteChannelType
) -> Dict[str, List[float]]:
Y_OFFSET = 9
X_OFFSET = -9
if channels == PipetteChannelType.SINGLE_CHANNEL:
return {"A1": nozzle_offset}
elif channels == PipetteChannelType.EIGHT_CHANNEL:
return {
f"{chr(ord('A') + 1*row)}1": [
nozzle_offset[0],
nozzle_offset[1] + Y_OFFSET * row,
nozzle_offset[2],
]
for row in range(8)
}
elif channels == PipetteChannelType.NINETY_SIX_CHANNEL:
return {
f"{chr(ord('A') + 1*row)}{1 + 1*col}": [
nozzle_offset[0] + X_OFFSET * col,
nozzle_offset[1] + Y_OFFSET * row,
nozzle_offset[2],
]
for row in range(8)
for col in range(12)
}
raise ValueError(f"Unsupported channel type {channels}")
def load_and_update_file_from_config(
config_to_update: List[str],
value_to_update: Any,
model_to_update: PipetteModelVersionType,
) -> None:
"""Update the requested config and save to disk.
Load the requested config sub type (physical, geometry or liquid). Then
update the current file and save to disk.
"""
camel_list_to_update = iter([_change_to_camel_case(i) for i in config_to_update])
if config_to_update[0] in PipetteGeometryDefinition.__fields__:
geometry = _geometry(
model_to_update.pipette_channels,
model_to_update.pipette_type,
model_to_update.pipette_version,
)
if config_to_update[0] == "nozzle_map":
nozzle_to_use = (
value_to_update if value_to_update else geometry["nozzleOffset"]
)
geometry["nozzleMap"] = build_nozzle_map(
nozzle_to_use, model_to_update.pipette_channels
)
elif config_to_update[0] == "nozzle_offset":
geometry["nozzleMap"] = build_nozzle_map(
value_to_update, model_to_update.pipette_channels
)
geometry["nozzleOffset"] = value_to_update
else:
geometry = update(geometry, camel_list_to_update, value_to_update)
PipetteGeometryDefinition.parse_obj(geometry)
filepath = (
ROOT
/ "geometry"
/ model_to_update.pipette_channels.name.lower()
/ model_to_update.pipette_type.value
)
save_data_to_file(
filepath,
f"{model_to_update.pipette_version.major}_{model_to_update.pipette_version.minor}",
geometry,
)
elif config_to_update[0] in PipettePhysicalPropertiesDefinition.__fields__:
physical = _physical(
model_to_update.pipette_channels,
model_to_update.pipette_type,
model_to_update.pipette_version,
)
physical = update(physical, camel_list_to_update, value_to_update)
PipettePhysicalPropertiesDefinition.parse_obj(physical)
filepath = (
ROOT
/ "general"
/ model_to_update.pipette_channels.name.lower()
/ model_to_update.pipette_type.value
)
save_data_to_file(
filepath,
f"{model_to_update.pipette_version.major}_{model_to_update.pipette_version.minor}",
physical,
)
elif config_to_update[0] in PipetteLiquidPropertiesDefinition.__fields__:
liquid = _liquid(
model_to_update.pipette_channels,
model_to_update.pipette_type,
model_to_update.pipette_version,
)
liquid = update(liquid, camel_list_to_update, value_to_update)
PipetteLiquidPropertiesDefinition.parse_obj(liquid)
filepath = (
ROOT
/ "liquid"
/ model_to_update.pipette_channels.name.lower()
/ model_to_update.pipette_type.value
)
save_data_to_file(
filepath,
f"{model_to_update.pipette_version.major}_{model_to_update.pipette_version.minor}",
liquid,
)
else:
raise KeyError(
f"{config_to_update} is not saved to a file. Check `pipette_definition.py` for more information."
)
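# Illustrative note: updated definitions are written under
# get_shared_data_root()/pipette/definitions/2/{geometry|general|liquid}/<channels>/<pipette type>/<major>_<minor>.json,
# mirroring the ROOT / "geometry" (or "general", "liquid") paths built above.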
def _update_single_model(configuration_to_update: List[str]) -> None:
"""Helper function to update single model."""
print(f"choose {PipetteModelType.__name__}:")
for row in list_available_enum(PipetteModelType):
print(f"\t{row}")
model = list(PipetteModelType)[int(input("Please select from above\n"))]
print(f"choose {PipetteChannelType.__name__}:")
for row in list_available_enum(PipetteChannelType):
print(f"\t{row}")
channels = list(PipetteChannelType)[int(input("Please select from above\n"))]
version = PipetteVersionType.convert_from_float(
float(check_from_version(input("Please input
|
_change_to_camel_case
|
identifier_name
|
update_configuration_files.py
|
in this script.
"""
ROOT = get_shared_data_root() / "pipette" / "definitions" / "2"
NOZZLE_LOCATION_CONFIGS = ["nozzle_offset", "nozzle_map"]
def _change_to_camel_case(c: str) -> str:
# Tiny helper function to convert to camelCase.
config_name = c.split("_")
if len(config_name) == 1:
return config_name[0]
return f"{config_name[0]}" + "".join(s.capitalize() for s in config_name[1::])
def list_configuration_keys() -> Tuple[List[str], Dict[int, str]]:
"""List out the model keys available to modify at the top level."""
lookup = {i: v for (i, v) in enumerate(PipetteConfigurations.__fields__)}
return [
f"{i}: {v}" for (i, v) in enumerate(PipetteConfigurations.__fields__)
], lookup
def list_available_enum(enum_type: Type[Enum]) -> List[str]:
"""List available pipette models"""
return [f"{i}: {v}" for (i, v) in enumerate(enum_type)] # type: ignore[var-annotated]
def handle_subclass_model(
top_level_configuration: List[str], base_model: BaseModel, is_basemodel: bool
) -> List[str]:
"""Handle sub-classed basemodels and update the top level model as necessary."""
if is_basemodel:
if base_model.__fields__ == SupportedTipsDefinition.__fields__:
# pydantic does something weird with the types in ModelFields so
# we cannot use isinstance checks to confirm if the base model
# is a supported tips definition
print(f"choose {PipetteTipType.__name__}:")
for row in list_available_enum(PipetteTipType):
print(f"\t{row}")
tip_type = list(PipetteTipType)[
int(input("select the tip volume size to modify"))
]
top_level_configuration.append(tip_type.name)
lookup = {i: v for (i, v) in enumerate(base_model.__fields__)}
config_list = [f"{i}: {v}" for (i, v) in enumerate(base_model.__fields__)]
print(f"you selected the basemodel {base_model.__name__}:") # type: ignore[attr-defined]
for row in config_list:
print(f"\t{row}")
configuration_to_update = lookup[
int(input("select a specific configuration from above\n"))
]
field_type = base_model.__fields__[configuration_to_update].type_
is_basemodel = isinstance(field_type, ModelMetaclass)
top_level_configuration.append(configuration_to_update)
return handle_subclass_model(top_level_configuration, field_type, is_basemodel)
else:
return top_level_configuration
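# Illustrative note: walking the prompts yields a key path such as
# ["supported_tips", "t50", "default_aspirate_flowrate"] (hypothetical field names), which
# load_and_update_file_from_config (defined below) converts to camelCase and applies on disk.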
def check_from_version(version: str) -> str:
"""Check that the version requested is supported in the system."""
version_int = [int(v) for v in version.split(".")]
if version_int[0] not in PipetteModelMajorVersion:
raise ValueError(f"Major version {version_int[0]} is not supported.")
if version_int[1] not in PipetteModelMinorVersion:
raise ValueError(f"Minor version {version_int[1]} is not supported.")
return version
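# For example, check_from_version("1.3") returns "1.3" unchanged when major 1 and minor 3 appear
# in PipetteModelMajorVersion and PipetteModelMinorVersion; unsupported versions raise ValueError.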
def save_data_to_file(
directorypath: Path,
file_name: str,
data: Dict[str, Any],
) -> None:
"""
    Save the given data as a JSON file in the given directory.
"""
directorypath.mkdir(parents=True, exist_ok=True)
filepath = directorypath / f"{file_name}.json"
with open(filepath, "w") as f:
json.dump(data, f, indent=2)
def update(
dict_to_update: Dict[str, Any], iter_of_configs: Iterator[str], value_to_update: Any
) -> Dict[str, Any]:
|
def build_nozzle_map(
nozzle_offset: List[float], channels: PipetteChannelType
) -> Dict[str, List[float]]:
Y_OFFSET = 9
X_OFFSET = -9
if channels == PipetteChannelType.SINGLE_CHANNEL:
return {"A1": nozzle_offset}
elif channels == PipetteChannelType.EIGHT_CHANNEL:
return {
f"{chr(ord('A') + 1*row)}1": [
nozzle_offset[0],
nozzle_offset[1] + Y_OFFSET * row,
nozzle_offset[2],
]
for row in range(8)
}
elif channels == PipetteChannelType.NINETY_SIX_CHANNEL:
return {
f"{chr(ord('A') + 1*row)}{1 + 1*col}": [
nozzle_offset[0] + X_OFFSET * col,
nozzle_offset[1] + Y_OFFSET * row,
nozzle_offset[2],
]
for row in range(8)
for col in range(12)
}
raise ValueError(f"Unsupported channel type {channels}")
def load_and_update_file_from_config(
config_to_update: List[str],
value_to_update: Any,
model_to_update: PipetteModelVersionType,
) -> None:
"""Update the requested config and save to disk.
Load the requested config sub type (physical, geometry or liquid). Then
update the current file and save to disk.
"""
camel_list_to_update = iter([_change_to_camel_case(i) for i in config_to_update])
if config_to_update[0] in PipetteGeometryDefinition.__fields__:
geometry = _geometry(
model_to_update.pipette_channels,
model_to_update.pipette_type,
model_to_update.pipette_version,
)
if config_to_update[0] == "nozzle_map":
nozzle_to_use = (
value_to_update if value_to_update else geometry["nozzleOffset"]
)
geometry["nozzleMap"] = build_nozzle_map(
nozzle_to_use, model_to_update.pipette_channels
)
elif config_to_update[0] == "nozzle_offset":
geometry["nozzleMap"] = build_nozzle_map(
value_to_update, model_to_update.pipette_channels
)
geometry["nozzleOffset"] = value_to_update
else:
geometry = update(geometry, camel_list_to_update, value_to_update)
PipetteGeometryDefinition.parse_obj(geometry)
filepath = (
ROOT
/ "geometry"
/ model_to_update.pipette_channels.name.lower()
/ model_to_update.pipette_type.value
)
save_data_to_file(
filepath,
f"{model_to_update.pipette_version.major}_{model_to_update.pipette_version.minor}",
geometry,
)
elif config_to_update[0] in PipettePhysicalPropertiesDefinition.__fields__:
physical = _physical(
model_to_update.pipette_channels,
model_to_update.pipette_type,
model_to_update.pipette_version,
)
physical = update(physical, camel_list_to_update, value_to_update)
PipettePhysicalPropertiesDefinition.parse_obj(physical)
filepath = (
ROOT
/ "general"
/ model_to_update.pipette_channels.name.lower()
/ model_to_update.pipette_type.value
)
save_data_to_file(
filepath,
f"{model_to_update.pipette_version.major}_{model_to_update.pipette_version.minor}",
physical,
)
elif config_to_update[0] in PipetteLiquidPropertiesDefinition.__fields__:
liquid = _liquid(
model_to_update.pipette_channels,
model_to_update.pipette_type,
model_to_update.pipette_version,
)
        liquid = update(liquid, camel_list_to_update, value_to_update)
PipetteLiquidPropertiesDefinition.parse_obj(liquid)
filepath = (
ROOT
/ "liquid"
/ model_to_update.pipette_channels.name.lower()
/ model_to_update.pipette_type.value
)
save_data_to_file(
filepath,
f"{model_to_update.pipette_version.major}_{model_to_update.pipette_version.minor}",
liquid,
)
else:
raise KeyError(
f"{config_to_update} is not saved to a file. Check `pipette_definition.py` for more information."
)
def _update_single_model(configuration_to_update: List[str]) -> None:
"""Helper function to update single model."""
print(f"choose {PipetteModelType.__name__}:")
for row in list_available_enum(PipetteModelType):
print(f"\t{row}")
model = list(PipetteModelType)[int(input("Please select from above\n"))]
print(f"choose {PipetteChannelType.__name__}:")
for row in list_available_enum(PipetteChannelType):
print(f"\t{row}")
channels = list(PipetteChannelType)[int(input("Please select from above\n"))]
version = PipetteVersionType.convert_from_float(
float(check_from_version(input("Please input
|
"""
Recursively update the given dictionary to ensure no data is lost when updating.
"""
next_key = next(iter_of_configs, None)
if next_key and isinstance(dict_to_update[next_key], dict):
dict_to_update[next_key] = update(
dict_to_update.get(next_key, {}), iter_of_configs, value_to_update
)
elif next_key:
dict_to_update[next_key] = value_to_update
return dict_to_update
|
identifier_body
|
update_configuration_files.py
|
in this script.
"""
ROOT = get_shared_data_root() / "pipette" / "definitions" / "2"
NOZZLE_LOCATION_CONFIGS = ["nozzle_offset", "nozzle_map"]
def _change_to_camel_case(c: str) -> str:
# Tiny helper function to convert to camelCase.
config_name = c.split("_")
if len(config_name) == 1:
return config_name[0]
return f"{config_name[0]}" + "".join(s.capitalize() for s in config_name[1::])
def list_configuration_keys() -> Tuple[List[str], Dict[int, str]]:
"""List out the model keys available to modify at the top level."""
lookup = {i: v for (i, v) in enumerate(PipetteConfigurations.__fields__)}
return [
f"{i}: {v}" for (i, v) in enumerate(PipetteConfigurations.__fields__)
], lookup
def list_available_enum(enum_type: Type[Enum]) -> List[str]:
"""List available pipette models"""
return [f"{i}: {v}" for (i, v) in enumerate(enum_type)] # type: ignore[var-annotated]
def handle_subclass_model(
top_level_configuration: List[str], base_model: BaseModel, is_basemodel: bool
) -> List[str]:
"""Handle sub-classed basemodels and update the top level model as necessary."""
if is_basemodel:
if base_model.__fields__ == SupportedTipsDefinition.__fields__:
# pydantic does something weird with the types in ModelFields so
# we cannot use isinstance checks to confirm if the base model
# is a supported tips definition
print(f"choose {PipetteTipType.__name__}:")
for row in list_available_enum(PipetteTipType):
print(f"\t{row}")
tip_type = list(PipetteTipType)[
int(input("select the tip volume size to modify"))
]
top_level_configuration.append(tip_type.name)
lookup = {i: v for (i, v) in enumerate(base_model.__fields__)}
config_list = [f"{i}: {v}" for (i, v) in enumerate(base_model.__fields__)]
print(f"you selected the basemodel {base_model.__name__}:") # type: ignore[attr-defined]
for row in config_list:
print(f"\t{row}")
configuration_to_update = lookup[
int(input("select a specific configuration from above\n"))
]
field_type = base_model.__fields__[configuration_to_update].type_
is_basemodel = isinstance(field_type, ModelMetaclass)
top_level_configuration.append(configuration_to_update)
return handle_subclass_model(top_level_configuration, field_type, is_basemodel)
else:
return top_level_configuration
def check_from_version(version: str) -> str:
"""Check that the version requested is supported in the system."""
version_int = [int(v) for v in version.split(".")]
if version_int[0] not in PipetteModelMajorVersion:
raise ValueError(f"Major version {version_int[0]} is not supported.")
if version_int[1] not in PipetteModelMinorVersion:
raise ValueError(f"Minor version {version_int[1]} is not supported.")
return version
def save_data_to_file(
directorypath: Path,
file_name: str,
data: Dict[str, Any],
) -> None:
"""
    Save the given data as a JSON file in the given directory.
"""
directorypath.mkdir(parents=True, exist_ok=True)
filepath = directorypath / f"{file_name}.json"
with open(filepath, "w") as f:
json.dump(data, f, indent=2)
def update(
dict_to_update: Dict[str, Any], iter_of_configs: Iterator[str], value_to_update: Any
) -> Dict[str, Any]:
"""
Recursively update the given dictionary to ensure no data is lost when updating.
"""
next_key = next(iter_of_configs, None)
if next_key and isinstance(dict_to_update[next_key], dict):
dict_to_update[next_key] = update(
dict_to_update.get(next_key, {}), iter_of_configs, value_to_update
)
elif next_key:
dict_to_update[next_key] = value_to_update
return dict_to_update
def build_nozzle_map(
nozzle_offset: List[float], channels: PipetteChannelType
) -> Dict[str, List[float]]:
Y_OFFSET = 9
X_OFFSET = -9
if channels == PipetteChannelType.SINGLE_CHANNEL:
return {"A1": nozzle_offset}
elif channels == PipetteChannelType.EIGHT_CHANNEL:
return {
f"{chr(ord('A') + 1*row)}1": [
nozzle_offset[0],
nozzle_offset[1] + Y_OFFSET * row,
nozzle_offset[2],
]
for row in range(8)
}
elif channels == PipetteChannelType.NINETY_SIX_CHANNEL:
return {
f"{chr(ord('A') + 1*row)}{1 + 1*col}": [
nozzle_offset[0] + X_OFFSET * col,
nozzle_offset[1] + Y_OFFSET * row,
nozzle_offset[2],
]
for row in range(8)
for col in range(12)
}
raise ValueError(f"Unsupported channel type {channels}")
def load_and_update_file_from_config(
config_to_update: List[str],
value_to_update: Any,
model_to_update: PipetteModelVersionType,
) -> None:
"""Update the requested config and save to disk.
Load the requested config sub type (physical, geometry or liquid). Then
update the current file and save to disk.
"""
camel_list_to_update = iter([_change_to_camel_case(i) for i in config_to_update])
if config_to_update[0] in PipetteGeometryDefinition.__fields__:
geometry = _geometry(
model_to_update.pipette_channels,
model_to_update.pipette_type,
model_to_update.pipette_version,
)
if config_to_update[0] == "nozzle_map":
nozzle_to_use = (
value_to_update if value_to_update else geometry["nozzleOffset"]
)
geometry["nozzleMap"] = build_nozzle_map(
nozzle_to_use, model_to_update.pipette_channels
)
elif config_to_update[0] == "nozzle_offset":
geometry["nozzleMap"] = build_nozzle_map(
value_to_update, model_to_update.pipette_channels
)
geometry["nozzleOffset"] = value_to_update
else:
geometry = update(geometry, camel_list_to_update, value_to_update)
PipetteGeometryDefinition.parse_obj(geometry)
filepath = (
ROOT
/ "geometry"
/ model_to_update.pipette_channels.name.lower()
/ model_to_update.pipette_type.value
)
save_data_to_file(
filepath,
f"{model_to_update.pipette_version.major}_{model_to_update.pipette_version.minor}",
geometry,
)
elif config_to_update[0] in PipettePhysicalPropertiesDefinition.__fields__:
physical = _physical(
model_to_update.pipette_channels,
model_to_update.pipette_type,
model_to_update.pipette_version,
)
physical = update(physical, camel_list_to_update, value_to_update)
PipettePhysicalPropertiesDefinition.parse_obj(physical)
filepath = (
ROOT
/ "general"
/ model_to_update.pipette_channels.name.lower()
/ model_to_update.pipette_type.value
)
save_data_to_file(
filepath,
f"{model_to_update.pipette_version.major}_{model_to_update.pipette_version.minor}",
physical,
)
elif config_to_update[0] in PipetteLiquidPropertiesDefinition.__fields__:
liquid = _liquid(
model_to_update.pipette_channels,
model_to_update.pipette_type,
model_to_update.pipette_version,
)
        liquid = update(liquid, camel_list_to_update, value_to_update)
PipetteLiquidPropertiesDefinition.parse_obj(liquid)
filepath = (
ROOT
/ "liquid"
/ model_to_update.pipette_channels.name.lower()
/ model_to_update.pipette_type.value
)
save_data_to_file(
filepath,
f"{model_to_update.pipette_version.major}_{model_to_update.pipette_version.minor}",
liquid,
)
else:
raise KeyError(
f"{config_to_update} is not saved to a file. Check `pipette_definition.py` for more information."
)
def _update_single_model(configuration_to_update: List[str]) -> None:
"""Helper function to update single model."""
print(f"choose {PipetteModelType.__name__}:")
for row in list_available_enum(PipetteModelType):
print(f"\t{row}")
model = list(PipetteModelType)[int(input("Please select from above\n"))]
print(f"choose {PipetteChannelType.__name__}:")
for row in list_available_enum(PipetteChannelType):
|
channels = list(PipetteChannelType)[int(input("Please select from above\n"))]
version = PipetteVersionType.convert_from_float(
float(check_from_version(input("Please
|
print(f"\t{row}")
|
conditional_block
|
update_configuration_files.py
|
print(f"\t{row}")
tip_type = list(PipetteTipType)[
int(input("select the tip volume size to modify"))
]
top_level_configuration.append(tip_type.name)
lookup = {i: v for (i, v) in enumerate(base_model.__fields__)}
config_list = [f"{i}: {v}" for (i, v) in enumerate(base_model.__fields__)]
print(f"you selected the basemodel {base_model.__name__}:") # type: ignore[attr-defined]
for row in config_list:
print(f"\t{row}")
configuration_to_update = lookup[
int(input("select a specific configuration from above\n"))
]
field_type = base_model.__fields__[configuration_to_update].type_
is_basemodel = isinstance(field_type, ModelMetaclass)
top_level_configuration.append(configuration_to_update)
return handle_subclass_model(top_level_configuration, field_type, is_basemodel)
else:
return top_level_configuration
def check_from_version(version: str) -> str:
"""Check that the version requested is supported in the system."""
version_int = [int(v) for v in version.split(".")]
if version_int[0] not in PipetteModelMajorVersion:
raise ValueError(f"Major version {version_int[0]} is not supported.")
if version_int[1] not in PipetteModelMinorVersion:
raise ValueError(f"Minor version {version_int[1]} is not supported.")
return version
def save_data_to_file(
directorypath: Path,
file_name: str,
data: Dict[str, Any],
) -> None:
"""
    Save the given data as a JSON file in the given directory.
"""
directorypath.mkdir(parents=True, exist_ok=True)
filepath = directorypath / f"{file_name}.json"
with open(filepath, "w") as f:
json.dump(data, f, indent=2)
def update(
dict_to_update: Dict[str, Any], iter_of_configs: Iterator[str], value_to_update: Any
) -> Dict[str, Any]:
"""
Recursively update the given dictionary to ensure no data is lost when updating.
"""
next_key = next(iter_of_configs, None)
if next_key and isinstance(dict_to_update[next_key], dict):
dict_to_update[next_key] = update(
dict_to_update.get(next_key, {}), iter_of_configs, value_to_update
)
elif next_key:
dict_to_update[next_key] = value_to_update
return dict_to_update
def build_nozzle_map(
nozzle_offset: List[float], channels: PipetteChannelType
) -> Dict[str, List[float]]:
Y_OFFSET = 9
X_OFFSET = -9
if channels == PipetteChannelType.SINGLE_CHANNEL:
return {"A1": nozzle_offset}
elif channels == PipetteChannelType.EIGHT_CHANNEL:
return {
f"{chr(ord('A') + 1*row)}1": [
nozzle_offset[0],
nozzle_offset[1] + Y_OFFSET * row,
nozzle_offset[2],
]
for row in range(8)
}
elif channels == PipetteChannelType.NINETY_SIX_CHANNEL:
return {
f"{chr(ord('A') + 1*row)}{1 + 1*col}": [
nozzle_offset[0] + X_OFFSET * col,
nozzle_offset[1] + Y_OFFSET * row,
nozzle_offset[2],
]
for row in range(8)
for col in range(12)
}
raise ValueError(f"Unsupported channel type {channels}")
def load_and_update_file_from_config(
config_to_update: List[str],
value_to_update: Any,
model_to_update: PipetteModelVersionType,
) -> None:
"""Update the requested config and save to disk.
Load the requested config sub type (physical, geometry or liquid). Then
update the current file and save to disk.
"""
camel_list_to_update = iter([_change_to_camel_case(i) for i in config_to_update])
if config_to_update[0] in PipetteGeometryDefinition.__fields__:
geometry = _geometry(
model_to_update.pipette_channels,
model_to_update.pipette_type,
model_to_update.pipette_version,
)
if config_to_update[0] == "nozzle_map":
nozzle_to_use = (
value_to_update if value_to_update else geometry["nozzleOffset"]
)
geometry["nozzleMap"] = build_nozzle_map(
nozzle_to_use, model_to_update.pipette_channels
)
elif config_to_update[0] == "nozzle_offset":
geometry["nozzleMap"] = build_nozzle_map(
value_to_update, model_to_update.pipette_channels
)
geometry["nozzleOffset"] = value_to_update
else:
geometry = update(geometry, camel_list_to_update, value_to_update)
PipetteGeometryDefinition.parse_obj(geometry)
filepath = (
ROOT
/ "geometry"
/ model_to_update.pipette_channels.name.lower()
/ model_to_update.pipette_type.value
)
save_data_to_file(
filepath,
f"{model_to_update.pipette_version.major}_{model_to_update.pipette_version.minor}",
geometry,
)
elif config_to_update[0] in PipettePhysicalPropertiesDefinition.__fields__:
physical = _physical(
model_to_update.pipette_channels,
model_to_update.pipette_type,
model_to_update.pipette_version,
)
physical = update(physical, camel_list_to_update, value_to_update)
PipettePhysicalPropertiesDefinition.parse_obj(physical)
filepath = (
ROOT
/ "general"
/ model_to_update.pipette_channels.name.lower()
/ model_to_update.pipette_type.value
)
save_data_to_file(
filepath,
f"{model_to_update.pipette_version.major}_{model_to_update.pipette_version.minor}",
physical,
)
elif config_to_update[0] in PipetteLiquidPropertiesDefinition.__fields__:
liquid = _liquid(
model_to_update.pipette_channels,
model_to_update.pipette_type,
model_to_update.pipette_version,
)
        liquid = update(liquid, camel_list_to_update, value_to_update)
PipetteLiquidPropertiesDefinition.parse_obj(liquid)
filepath = (
ROOT
/ "liquid"
/ model_to_update.pipette_channels.name.lower()
/ model_to_update.pipette_type.value
)
save_data_to_file(
filepath,
f"{model_to_update.pipette_version.major}_{model_to_update.pipette_version.minor}",
liquid,
)
else:
raise KeyError(
f"{config_to_update} is not saved to a file. Check `pipette_definition.py` for more information."
)
def _update_single_model(configuration_to_update: List[str]) -> None:
"""Helper function to update single model."""
print(f"choose {PipetteModelType.__name__}:")
for row in list_available_enum(PipetteModelType):
print(f"\t{row}")
model = list(PipetteModelType)[int(input("Please select from above\n"))]
print(f"choose {PipetteChannelType.__name__}:")
for row in list_available_enum(PipetteChannelType):
print(f"\t{row}")
channels = list(PipetteChannelType)[int(input("Please select from above\n"))]
version = PipetteVersionType.convert_from_float(
float(check_from_version(input("Please input the version of the model\n")))
)
built_model: PipetteModel = PipetteModel(
f"{model.name}_{str(channels)}_v{version.major}.{version.minor}"
)
if configuration_to_update[0] == NOZZLE_LOCATION_CONFIGS[1]:
print(
"You selected nozzle_map to edit. If you wish to update the nozzle offset, enter it on the next line.\n"
)
print("Otherwise, please type 'null' on the next line.\n")
value_to_update = json.loads(
input(
f"Please select what you would like to update {configuration_to_update} to for {built_model}\n"
)
)
model_version = convert_pipette_model(built_model)
load_and_update_file_from_config(
configuration_to_update, value_to_update, model_version
)
def _update_all_models(configuration_to_update: List[str]) -> None:
paths_to_validate = ROOT / "liquid"
_channel_model_str = {
"single_channel": "single",
"ninety_six_channel": "96",
"eight_channel": "multi",
}
for channel_dir in os.listdir(paths_to_validate):
for model_dir in os.listdir(paths_to_validate / channel_dir):
for version_file in os.listdir(paths_to_validate / channel_dir / model_dir):
version_list = version_file.split(".json")[0].split("_")
built_model: PipetteModel = PipetteModel(
f"{model_dir}_{_channel_model_str[channel_dir]}_v{version_list[0]}.{version_list[1]}"
)
if configuration_to_update[0] == NOZZLE_LOCATION_CONFIGS[1]:
print(
|
"You selected nozzle_map to edit. If you wish to update the nozzle offset, enter it on the next line.\n"
)
|
random_line_split
|
|
adversarial_semantic_dis_trainer.py
|
from pytorch3d.renderer import Textures
from pytorch3d.renderer import (
look_at_view_transform,
OpenGLPerspectiveCameras,
PointLights,
DirectionalLights,
Materials,
RasterizationSettings,
MeshRenderer,
MeshRasterizer,
SoftPhongShader
)
from pytorch3d.loss import mesh_laplacian_smoothing, mesh_normal_consistency
from tqdm.autonotebook import tqdm
import pandas as pd
from utils import utils, network_utils
from deformation.deformation_net import DeformationNetwork
import deformation.losses as def_losses
from deformation.semantic_discriminator_loss import SemanticDiscriminatorLoss, compute_sem_dis_loss
from adversarial.datasets import GenerationDataset, ShapenetRendersDataset
class AdversarialDiscriminatorTrainer():
def __init__(self, cfg_path, gpu_num, exp_name):
|
def train(self):
# setting up dataloaders
# https://stackoverflow.com/questions/51444059/how-to-iterate-over-two-dataloaders-simultaneously-using-pytorch
generation_dataset = GenerationDataset(cfg, self.device)
generation_loader = torch.utils.data.DataLoader(generation_dataset, batch_size=self.batch_size, num_workers=4, shuffle=True)
shapenet_renders_dataset = ShapenetRendersDataset(cfg)
shapenet_renders_loader = torch.utils.data.DataLoader(shapenet_renders_dataset, batch_size=self.batch_size, num_workers=4, shuffle=True)
# setting up networks and optimizers
deform_net = DeformationNetwork(self.cfg, self.mesh_num_vertices, self.device)
deform_net.to(self.device)
deform_optimizer = optim.Adam(deform_net.parameters(), lr=self.cfg["training"]["learning_rate"])
semantic_dis_net = SemanticDiscriminatorNetwork(cfg)
semantic_dis_net.to(self.device)
dis_optimizer = optim.Adam(semantic_dis_net.parameters(), lr=0.00001, weight_decay=1e-2)
# for adding noise to training labels
        # real images have label 1, fake images have label 0
real_labels_dist = torch.distributions.Uniform(torch.tensor([1.0-self.label_noise]), torch.tensor([1.0]))
fake_labels_dist = torch.distributions.Uniform(torch.tensor([0.0]), torch.tensor([0.0+self.label_noise]))
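        # Illustrative note: with label_noise = 0.2, real targets would be drawn from U(0.8, 1.0)
        # and fake targets from U(0.0, 0.2) (one-sided label smoothing); with label_noise = 0, as
        # set in __init__, both intervals collapse to a point, and torch.distributions.Uniform may
        # reject a degenerate (low == high) interval depending on argument validation settings.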
# training generative deformation network and discriminator in an alternating, GAN style
for iter_i in tqdm(range(self.total_training_iters), file=self.tqdm_out):
# training discriminator; generator weights are frozen
# =/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/
for param in semantic_dis_net.parameters(): param.requires_grad = True
for param in deform_net.parameters(): param.requires_grad = False
generation_iter = iter(generation_loader)
shapenet_renders_iter = iter(shapenet_renders_loader)
for batch_idx in tqdm(range(self.num_batches_dis_train), file=self.tqdm_out):
semantic_dis_net.train()
deform_net.eval() # not sure if supposed to set this
dis_optimizer.zero_grad()
real_render_batch = next(shapenet_renders_iter).to(self.device)
pred_logits_real = semantic_dis_net(real_render_batch)
gen_batch = next(generation_iter)
gen_batch_vertices = gen_batch["mesh_verts"].to(self.device)
gen_batch_images = gen_batch["image"].to(self.device)
gen_batch_poses = gen_batch["pose"].to(self.device)
deformed_meshes = self.refine_mesh_batched(deform_net, semantic_dis_net, gen_batch_vertices,
gen_batch_images, gen_batch_poses, compute_losses=False)
# TODO: fix this to turn into logits, not sigmoid
pred_logits_fake = compute_sem_dis_loss(deformed_meshes, self.semantic_dis_loss_num_render, semantic_dis_net, self.device)
batch_size = real_render_batch.shape[0]
real_labels = real_labels_dist.sample((batch_size,1)).squeeze(2).to(self.device)
fake_labels = fake_labels_dist.sample((batch_size,1)).squeeze(2).to(self.device)
dis_loss = F.binary_cross_entropy_with_logits(pred_logits_real, real_labels) + \
F.binary_cross_entropy_with_logits(pred_logits_fake, fake_labels)
dis_loss.backward()
dis_optimizer.step()
continue
# training generator; discriminator weights are frozen
# =/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/
for param in semantic_dis_net.parameters(): param.requires_grad = False
for param in deform_net.parameters(): param.requires_grad = True
            # DataLoader objects do not support slicing; pull a fixed number of batches from an iterator instead.
            generation_iter = iter(generation_loader)
            for _ in tqdm(range(self.num_batches_gen_train), file=self.tqdm_out):
                gen_batch = next(generation_iter)
deform_net.train()
semantic_dis_net.eval()
deform_optimizer.zero_grad()
                # refine_mesh_batched expects unpacked vertices, images, and poses (mirrors the discriminator phase above).
                gen_batch_vertices = gen_batch["mesh_verts"].to(self.device)
                gen_batch_images = gen_batch["image"].to(self.device)
                gen_batch_poses = gen_batch["pose"].to(self.device)
                deform_loss_dict, _ = self.refine_mesh_batched(
                    deform_net, semantic_dis_net, gen_batch_vertices, gen_batch_images, gen_batch_poses
                )
# TODO: make sure loss is correct (follows minimax loss)
total_loss = sum([deform_loss_dict[loss_name] * cfg['training'][loss_name.replace("loss", "lam")] for loss_name in deform_loss_dict])
total_loss.backward()
deform_optimizer.step()
    # Given a batch of meshes, masks, and poses, computes a forward pass through a given deformation network and semantic discriminator network.
    # Returns the deformed mesh and (optionally) a dict of (unweighted, raw) computed losses.
    # TODO: fix mesh (currently, needs to already be in device)
def refine_mesh_batched(self, deform_net, semantic_dis_net, mesh_verts_batch, img_batch, pose_batch, compute_losses=True):
# computing mesh deformation
delta_v = deform_net(pose_batch, img_batch, mesh_verts_batch)
delta_v = delta_v.reshape((-1,3))
deformed_mesh = mesh.offset_verts(delta_v)
if not compute_losses:
return deformed_mesh
else:
# prep inputs used to compute losses
pred_dist = pose_batch[:,0]
pred_elev = pose_batch[:,1]
pred_azim = pose_batch[:,2]
R, T = look_at_view_transform(pred_dist, pred_elev, pred_azim)
mask = rgba_image[:,:,3] > 0
mask_gt = torch.tensor(mask, dtype=torch.float).to(self.device)
num_vertices = mesh.verts_packed().shape[0]
zero_deformation_tensor = torch.zeros((num_vertices, 3)).to(self.device)
sym_plane_normal = [0,0,1] # TODO: make this generalizable to other classes
loss_dict = {}
# computing losses
rendered_deformed_mesh = utils.render_mesh(deformed_mesh, R, T, self.device, img_size=224, silhouette=True)
loss_dict["sil_loss"] = F.binary_cross_entropy(rendered_deformed_mesh[0, :,:, 3], mask_gt)
loss_dict["l2_loss"] = F.mse_loss(delta_v, zero_deformation_tensor)
loss_dict["lap_smoothness_loss"] = mesh_laplacian_smoothing(deformed_mesh)
loss_dict["normal_consistency_loss"] = mesh_normal_consistency(deformed_mesh)
# TODO: remove weights?
if self.img_sym_lam > 0:
loss_dict["img_sym_loss"], _ = def_losses.image_symmetry_loss(deformed_mesh, sym_plane_normal, self.cfg["training"]["img_sym_num_azim"], self.device)
else:
loss_dict["img_sym_loss"] = torch.tensor(0).to(self.device)
if self.vertex_sym_lam > 0:
loss_dict["vertex_sym_loss"] = def_losses.vertex_symmetry_loss_fast(deformed_mesh, sym_plane_normal, self.device)
else:
loss_dict["vertex_sym_loss"] = torch.tensor(0).to(self.device)
if self.semantic_dis_lam > 0:
loss_dict["semantic_dis_loss"], _ = compute_sem_dis_loss(deformed_mesh, self.semantic_dis_loss_num_render, semantic_dis_net, self.device)
else:
loss_dict["semantic_dis_loss"] = torch.tensor(0).to(self.device)
return loss_dict, deformed_mesh
if __name__
|
self.cfg = utils.load_config(cfg_path, "configs/default.yaml")
self.device = torch.device("cuda:"+str(gpu_num))
self.batch_size = self.cfg["semantic_dis_training"]["batch_size"]
self.total_training_iters = 2
self.num_batches_dis_train = 5
self.num_batches_gen_train = 5
self.mesh_num_vertices = 1498
self.label_noise = 0
self.semantic_dis_loss_num_render = 8
        self.training_output_dir = os.path.join(self.cfg['semantic_dis_training']['output_dir'], "{}_{}".format(time.strftime("%Y_%m_%d--%H_%M_%S"), exp_name))
if not os.path.exists(self.training_output_dir):
os.makedirs(self.training_output_dir)
self.tqdm_out = utils.TqdmPrintEvery()
|
identifier_body
|
adversarial_semantic_dis_trainer.py
|
from pytorch3d.renderer import Textures
from pytorch3d.renderer import (
look_at_view_transform,
OpenGLPerspectiveCameras,
PointLights,
DirectionalLights,
Materials,
RasterizationSettings,
MeshRenderer,
MeshRasterizer,
SoftPhongShader
)
from pytorch3d.loss import mesh_laplacian_smoothing, mesh_normal_consistency
from tqdm.autonotebook import tqdm
import pandas as pd
from utils import utils, network_utils
from deformation.deformation_net import DeformationNetwork
import deformation.losses as def_losses
from deformation.semantic_discriminator_loss import SemanticDiscriminatorLoss, compute_sem_dis_loss
from adversarial.datasets import GenerationDataset, ShapenetRendersDataset
class AdversarialDiscriminatorTrainer():
def __init__(self, cfg_path, gpu_num, exp_name):
self.cfg = utils.load_config(cfg_path, "configs/default.yaml")
self.device = torch.device("cuda:"+str(gpu_num))
self.batch_size = self.cfg["semantic_dis_training"]["batch_size"]
self.total_training_iters = 2
self.num_batches_dis_train = 5
self.num_batches_gen_train = 5
self.mesh_num_vertices = 1498
self.label_noise = 0
self.semantic_dis_loss_num_render = 8
        self.training_output_dir = os.path.join(self.cfg['semantic_dis_training']['output_dir'], "{}_{}".format(time.strftime("%Y_%m_%d--%H_%M_%S"), exp_name))
if not os.path.exists(self.training_output_dir):
os.makedirs(self.training_output_dir)
self.tqdm_out = utils.TqdmPrintEvery()
def train(self):
# setting up dataloaders
# https://stackoverflow.com/questions/51444059/how-to-iterate-over-two-dataloaders-simultaneously-using-pytorch
generation_dataset = GenerationDataset(cfg, self.device)
generation_loader = torch.utils.data.DataLoader(generation_dataset, batch_size=self.batch_size, num_workers=4, shuffle=True)
shapenet_renders_dataset = ShapenetRendersDataset(cfg)
shapenet_renders_loader = torch.utils.data.DataLoader(shapenet_renders_dataset, batch_size=self.batch_size, num_workers=4, shuffle=True)
# setting up networks and optimizers
deform_net = DeformationNetwork(self.cfg, self.mesh_num_vertices, self.device)
deform_net.to(self.device)
deform_optimizer = optim.Adam(deform_net.parameters(), lr=self.cfg["training"]["learning_rate"])
semantic_dis_net = SemanticDiscriminatorNetwork(cfg)
semantic_dis_net.to(self.device)
dis_optimizer = optim.Adam(semantic_dis_net.parameters(), lr=0.00001, weight_decay=1e-2)
# for adding noise to training labels
        # real images have label 1, fake images have label 0
real_labels_dist = torch.distributions.Uniform(torch.tensor([1.0-self.label_noise]), torch.tensor([1.0]))
fake_labels_dist = torch.distributions.Uniform(torch.tensor([0.0]), torch.tensor([0.0+self.label_noise]))
# training generative deformation network and discriminator in an alternating, GAN style
for iter_i in tqdm(range(self.total_training_iters), file=self.tqdm_out):
# training discriminator; generator weights are frozen
# =/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/
for param in semantic_dis_net.parameters(): param.requires_grad = True
for param in deform_net.parameters(): param.requires_grad = False
generation_iter = iter(generation_loader)
shapenet_renders_iter = iter(shapenet_renders_loader)
for batch_idx in tqdm(range(self.num_batches_dis_train), file=self.tqdm_out):
semantic_dis_net.train()
deform_net.eval() # not sure if supposed to set this
dis_optimizer.zero_grad()
real_render_batch = next(shapenet_renders_iter).to(self.device)
pred_logits_real = semantic_dis_net(real_render_batch)
gen_batch = next(generation_iter)
gen_batch_vertices = gen_batch["mesh_verts"].to(self.device)
gen_batch_images = gen_batch["image"].to(self.device)
gen_batch_poses = gen_batch["pose"].to(self.device)
deformed_meshes = self.refine_mesh_batched(deform_net, semantic_dis_net, gen_batch_vertices,
gen_batch_images, gen_batch_poses, compute_losses=False)
# TODO: fix this to turn into logits, not sigmoid
pred_logits_fake = compute_sem_dis_loss(deformed_meshes, self.semantic_dis_loss_num_render, semantic_dis_net, self.device)
batch_size = real_render_batch.shape[0]
real_labels = real_labels_dist.sample((batch_size,1)).squeeze(2).to(self.device)
fake_labels = fake_labels_dist.sample((batch_size,1)).squeeze(2).to(self.device)
dis_loss = F.binary_cross_entropy_with_logits(pred_logits_real, real_labels) + \
F.binary_cross_entropy_with_logits(pred_logits_fake, fake_labels)
dis_loss.backward()
dis_optimizer.step()
continue
# training generator; discriminator weights are frozen
# =/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/
for param in semantic_dis_net.parameters(): param.requires_grad = False
for param in deform_net.parameters(): param.requires_grad = True
            # DataLoader objects do not support slicing; pull a fixed number of batches from an iterator instead.
            generation_iter = iter(generation_loader)
            for _ in tqdm(range(self.num_batches_gen_train), file=self.tqdm_out):
                gen_batch = next(generation_iter)
deform_net.train()
semantic_dis_net.eval()
deform_optimizer.zero_grad()
                # refine_mesh_batched expects unpacked vertices, images, and poses (mirrors the discriminator phase above).
                gen_batch_vertices = gen_batch["mesh_verts"].to(self.device)
                gen_batch_images = gen_batch["image"].to(self.device)
                gen_batch_poses = gen_batch["pose"].to(self.device)
                deform_loss_dict, _ = self.refine_mesh_batched(
                    deform_net, semantic_dis_net, gen_batch_vertices, gen_batch_images, gen_batch_poses
                )
# TODO: make sure loss is correct (follows minimax loss)
total_loss = sum([deform_loss_dict[loss_name] * cfg['training'][loss_name.replace("loss", "lam")] for loss_name in deform_loss_dict])
total_loss.backward()
deform_optimizer.step()
    # Given a batch of meshes, masks, and poses, computes a forward pass through a given deformation network and semantic discriminator network.
    # Returns the deformed mesh and (optionally) a dict of (unweighted, raw) computed losses.
    # TODO: fix mesh (currently, needs to already be in device)
def refine_mesh_batched(self, deform_net, semantic_dis_net, mesh_verts_batch, img_batch, pose_batch, compute_losses=True):
# computing mesh deformation
delta_v = deform_net(pose_batch, img_batch, mesh_verts_batch)
delta_v = delta_v.reshape((-1,3))
deformed_mesh = mesh.offset_verts(delta_v)
if not compute_losses:
return deformed_mesh
else:
# prep inputs used to compute losses
pred_dist = pose_batch[:,0]
pred_elev = pose_batch[:,1]
pred_azim = pose_batch[:,2]
R, T = look_at_view_transform(pred_dist, pred_elev, pred_azim)
mask = rgba_image[:,:,3] > 0
mask_gt = torch.tensor(mask, dtype=torch.float).to(self.device)
num_vertices = mesh.verts_packed().shape[0]
zero_deformation_tensor = torch.zeros((num_vertices, 3)).to(self.device)
sym_plane_normal = [0,0,1] # TODO: make this generalizable to other classes
loss_dict = {}
# computing losses
rendered_deformed_mesh = utils.render_mesh(deformed_mesh, R, T, self.device, img_size=224, silhouette=True)
loss_dict["sil_loss"] = F.binary_cross_entropy(rendered_deformed_mesh[0, :,:, 3], mask_gt)
loss_dict["l2_loss"] = F.mse_loss(delta_v, zero_deformation_tensor)
loss_dict["lap_smoothness_loss"] = mesh_laplacian_smoothing(deformed_mesh)
loss_dict["normal_consistency_loss"] = mesh_normal_consistency(deformed_mesh)
# TODO: remove weights?
if self.img_sym_lam > 0:
loss_dict["img_sym_loss"], _ = def_losses.image_symmetry_loss(deformed_mesh, sym_plane_normal, self.cfg["training"]["img_sym_num_azim"], self.device)
else:
loss_dict["img_sym_loss"] = torch.tensor(0).to(self.device)
if self.vertex_sym_lam > 0:
loss_dict["vertex_sym_loss"] = def_losses.vertex_symmetry_loss_fast(deformed_mesh, sym_plane_normal, self.device)
else:
loss_dict["vertex_sym_loss"] = torch.tensor(0).to(self.device)
if self.semantic_dis_lam > 0:
loss_dict["semantic_dis_loss"], _ = compute_sem_dis_loss(deformed_mesh, self.semantic_dis_loss_num_render, semantic_dis_net, self.device)
else:
|
return loss_dict, deformed_mesh
if __name__
|
loss_dict["semantic_dis_loss"] = torch.tensor(0).to(self.device)
|
conditional_block
|
adversarial_semantic_dis_trainer.py
|
from pytorch3d.renderer import Textures
from pytorch3d.renderer import (
look_at_view_transform,
OpenGLPerspectiveCameras,
PointLights,
DirectionalLights,
Materials,
RasterizationSettings,
MeshRenderer,
MeshRasterizer,
SoftPhongShader
)
from pytorch3d.loss import mesh_laplacian_smoothing, mesh_normal_consistency
from tqdm.autonotebook import tqdm
import pandas as pd
from utils import utils, network_utils
from deformation.deformation_net import DeformationNetwork
import deformation.losses as def_losses
from deformation.semantic_discriminator_loss import SemanticDiscriminatorLoss, compute_sem_dis_loss
from adversarial.datasets import GenerationDataset, ShapenetRendersDataset
class AdversarialDiscriminatorTrainer():
def __init__(self, cfg_path, gpu_num, exp_name):
self.cfg = utils.load_config(cfg_path, "configs/default.yaml")
self.device = torch.device("cuda:"+str(gpu_num))
self.batch_size = self.cfg["semantic_dis_training"]["batch_size"]
self.total_training_iters = 2
self.num_batches_dis_train = 5
self.num_batches_gen_train = 5
self.mesh_num_vertices = 1498
self.label_noise = 0
self.semantic_dis_loss_num_render = 8
        self.training_output_dir = os.path.join(self.cfg['semantic_dis_training']['output_dir'], "{}_{}".format(time.strftime("%Y_%m_%d--%H_%M_%S"), exp_name))
if not os.path.exists(self.training_output_dir):
os.makedirs(self.training_output_dir)
self.tqdm_out = utils.TqdmPrintEvery()
def train(self):
# setting up dataloaders
# https://stackoverflow.com/questions/51444059/how-to-iterate-over-two-dataloaders-simultaneously-using-pytorch
generation_dataset = GenerationDataset(cfg, self.device)
generation_loader = torch.utils.data.DataLoader(generation_dataset, batch_size=self.batch_size, num_workers=4, shuffle=True)
shapenet_renders_dataset = ShapenetRendersDataset(cfg)
shapenet_renders_loader = torch.utils.data.DataLoader(shapenet_renders_dataset, batch_size=self.batch_size, num_workers=4, shuffle=True)
# setting up networks and optimizers
deform_net = DeformationNetwork(self.cfg, self.mesh_num_vertices, self.device)
deform_net.to(self.device)
deform_optimizer = optim.Adam(deform_net.parameters(), lr=self.cfg["training"]["learning_rate"])
semantic_dis_net = SemanticDiscriminatorNetwork(cfg)
semantic_dis_net.to(self.device)
dis_optimizer = optim.Adam(semantic_dis_net.parameters(), lr=0.00001, weight_decay=1e-2)
# for adding noise to training labels
        # real images have label 1, fake images have label 0
real_labels_dist = torch.distributions.Uniform(torch.tensor([1.0-self.label_noise]), torch.tensor([1.0]))
fake_labels_dist = torch.distributions.Uniform(torch.tensor([0.0]), torch.tensor([0.0+self.label_noise]))
# training generative deformation network and discriminator in an alternating, GAN style
for iter_i in tqdm(range(self.total_training_iters), file=self.tqdm_out):
# training discriminator; generator weights are frozen
# =/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/
for param in semantic_dis_net.parameters(): param.requires_grad = True
for param in deform_net.parameters(): param.requires_grad = False
generation_iter = iter(generation_loader)
shapenet_renders_iter = iter(shapenet_renders_loader)
for batch_idx in tqdm(range(self.num_batches_dis_train), file=self.tqdm_out):
semantic_dis_net.train()
deform_net.eval() # not sure if supposed to set this
dis_optimizer.zero_grad()
real_render_batch = next(shapenet_renders_iter).to(self.device)
pred_logits_real = semantic_dis_net(real_render_batch)
gen_batch = next(generation_iter)
gen_batch_vertices = gen_batch["mesh_verts"].to(self.device)
gen_batch_images = gen_batch["image"].to(self.device)
gen_batch_poses = gen_batch["pose"].to(self.device)
deformed_meshes = self.refine_mesh_batched(deform_net, semantic_dis_net, gen_batch_vertices,
gen_batch_images, gen_batch_poses, compute_losses=False)
# TODO: fix this to turn into logits, not sigmoid
pred_logits_fake = compute_sem_dis_loss(deformed_meshes, self.semantic_dis_loss_num_render, semantic_dis_net, self.device)
batch_size = real_render_batch.shape[0]
real_labels = real_labels_dist.sample((batch_size,1)).squeeze(2).to(self.device)
|
dis_loss = F.binary_cross_entropy_with_logits(pred_logits_real, real_labels) + \
F.binary_cross_entropy_with_logits(pred_logits_fake, fake_labels)
dis_loss.backward()
dis_optimizer.step()
continue
# training generator; discriminator weights are frozen
# =/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/
for param in semantic_dis_net.parameters(): param.requires_grad = False
for param in deform_net.parameters(): param.requires_grad = True
            # DataLoader objects do not support slicing; pull a fixed number of batches from an iterator instead.
            generation_iter = iter(generation_loader)
            for _ in tqdm(range(self.num_batches_gen_train), file=self.tqdm_out):
                gen_batch = next(generation_iter)
deform_net.train()
semantic_dis_net.eval()
deform_optimizer.zero_grad()
                # refine_mesh_batched expects unpacked vertices, images, and poses (mirrors the discriminator phase above).
                gen_batch_vertices = gen_batch["mesh_verts"].to(self.device)
                gen_batch_images = gen_batch["image"].to(self.device)
                gen_batch_poses = gen_batch["pose"].to(self.device)
                deform_loss_dict, _ = self.refine_mesh_batched(
                    deform_net, semantic_dis_net, gen_batch_vertices, gen_batch_images, gen_batch_poses
                )
# TODO: make sure loss is correct (follows minimax loss)
total_loss = sum([deform_loss_dict[loss_name] * cfg['training'][loss_name.replace("loss", "lam")] for loss_name in deform_loss_dict])
total_loss.backward()
deform_optimizer.step()
    # Given a batch of meshes, masks, and poses, computes a forward pass through a given deformation network and semantic discriminator network.
    # Returns the deformed mesh and (optionally) a dict of (unweighted, raw) computed losses.
    # TODO: fix mesh (currently, needs to already be in device)
def refine_mesh_batched(self, deform_net, semantic_dis_net, mesh_verts_batch, img_batch, pose_batch, compute_losses=True):
# computing mesh deformation
delta_v = deform_net(pose_batch, img_batch, mesh_verts_batch)
delta_v = delta_v.reshape((-1,3))
deformed_mesh = mesh.offset_verts(delta_v)
if not compute_losses:
return deformed_mesh
else:
# prep inputs used to compute losses
pred_dist = pose_batch[:,0]
pred_elev = pose_batch[:,1]
pred_azim = pose_batch[:,2]
R, T = look_at_view_transform(pred_dist, pred_elev, pred_azim)
mask = rgba_image[:,:,3] > 0
mask_gt = torch.tensor(mask, dtype=torch.float).to(self.device)
num_vertices = mesh.verts_packed().shape[0]
zero_deformation_tensor = torch.zeros((num_vertices, 3)).to(self.device)
sym_plane_normal = [0,0,1] # TODO: make this generalizable to other classes
loss_dict = {}
# computing losses
rendered_deformed_mesh = utils.render_mesh(deformed_mesh, R, T, self.device, img_size=224, silhouette=True)
loss_dict["sil_loss"] = F.binary_cross_entropy(rendered_deformed_mesh[0, :,:, 3], mask_gt)
loss_dict["l2_loss"] = F.mse_loss(delta_v, zero_deformation_tensor)
loss_dict["lap_smoothness_loss"] = mesh_laplacian_smoothing(deformed_mesh)
loss_dict["normal_consistency_loss"] = mesh_normal_consistency(deformed_mesh)
# TODO: remove weights?
if self.img_sym_lam > 0:
loss_dict["img_sym_loss"], _ = def_losses.image_symmetry_loss(deformed_mesh, sym_plane_normal, self.cfg["training"]["img_sym_num_azim"], self.device)
else:
loss_dict["img_sym_loss"] = torch.tensor(0).to(self.device)
if self.vertex_sym_lam > 0:
loss_dict["vertex_sym_loss"] = def_losses.vertex_symmetry_loss_fast(deformed_mesh, sym_plane_normal, self.device)
else:
loss_dict["vertex_sym_loss"] = torch.tensor(0).to(self.device)
if self.semantic_dis_lam > 0:
loss_dict["semantic_dis_loss"], _ = compute_sem_dis_loss(deformed_mesh, self.semantic_dis_loss_num_render, semantic_dis_net, self.device)
else:
loss_dict["semantic_dis_loss"] = torch.tensor(0).to(self.device)
return loss_dict, deformed_mesh
if __name__
|
fake_labels = fake_labels_dist.sample((batch_size,1)).squeeze(2).to(self.device)
|
random_line_split
|
adversarial_semantic_dis_trainer.py
|
from pytorch3d.renderer import Textures
from pytorch3d.renderer import (
look_at_view_transform,
OpenGLPerspectiveCameras,
PointLights,
DirectionalLights,
Materials,
RasterizationSettings,
MeshRenderer,
MeshRasterizer,
SoftPhongShader
)
from pytorch3d.loss import mesh_laplacian_smoothing, mesh_normal_consistency
from tqdm.autonotebook import tqdm
import pandas as pd
from utils import utils, network_utils
from deformation.deformation_net import DeformationNetwork
import deformation.losses as def_losses
from deformation.semantic_discriminator_loss import SemanticDiscriminatorLoss, compute_sem_dis_loss
from adversarial.datasets import GenerationDataset, ShapenetRendersDataset
class AdversarialDiscriminatorTrainer():
def __init__(self, cfg_path, gpu_num, exp_name):
self.cfg = utils.load_config(cfg_path, "configs/default.yaml")
self.device = torch.device("cuda:"+str(gpu_num))
self.batch_size = self.cfg["semantic_dis_training"]["batch_size"]
self.total_training_iters = 2
self.num_batches_dis_train = 5
self.num_batches_gen_train = 5
self.mesh_num_vertices = 1498
self.label_noise = 0
self.semantic_dis_loss_num_render = 8
        self.training_output_dir = os.path.join(self.cfg['semantic_dis_training']['output_dir'], "{}_{}".format(time.strftime("%Y_%m_%d--%H_%M_%S"), exp_name))
if not os.path.exists(self.training_output_dir):
os.makedirs(self.training_output_dir)
self.tqdm_out = utils.TqdmPrintEvery()
def
|
(self):
# setting up dataloaders
# https://stackoverflow.com/questions/51444059/how-to-iterate-over-two-dataloaders-simultaneously-using-pytorch
generation_dataset = GenerationDataset(cfg, self.device)
generation_loader = torch.utils.data.DataLoader(generation_dataset, batch_size=self.batch_size, num_workers=4, shuffle=True)
shapenet_renders_dataset = ShapenetRendersDataset(cfg)
shapenet_renders_loader = torch.utils.data.DataLoader(shapenet_renders_dataset, batch_size=self.batch_size, num_workers=4, shuffle=True)
# setting up networks and optimizers
deform_net = DeformationNetwork(self.cfg, self.mesh_num_vertices, self.device)
deform_net.to(self.device)
deform_optimizer = optim.Adam(deform_net.parameters(), lr=self.cfg["training"]["learning_rate"])
semantic_dis_net = SemanticDiscriminatorNetwork(cfg)
semantic_dis_net.to(self.device)
dis_optimizer = optim.Adam(semantic_dis_net.parameters(), lr=0.00001, weight_decay=1e-2)
# for adding noise to training labels
        # real images have label 1, fake images have label 0
real_labels_dist = torch.distributions.Uniform(torch.tensor([1.0-self.label_noise]), torch.tensor([1.0]))
fake_labels_dist = torch.distributions.Uniform(torch.tensor([0.0]), torch.tensor([0.0+self.label_noise]))
# training generative deformation network and discriminator in an alternating, GAN style
for iter_i in tqdm(range(self.total_training_iters), file=self.tqdm_out):
# training discriminator; generator weights are frozen
# =/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/
for param in semantic_dis_net.parameters(): param.requires_grad = True
for param in deform_net.parameters(): param.requires_grad = False
generation_iter = iter(generation_loader)
shapenet_renders_iter = iter(shapenet_renders_loader)
for batch_idx in tqdm(range(self.num_batches_dis_train), file=self.tqdm_out):
semantic_dis_net.train()
deform_net.eval() # not sure if supposed to set this
dis_optimizer.zero_grad()
real_render_batch = next(shapenet_renders_iter).to(self.device)
pred_logits_real = semantic_dis_net(real_render_batch)
gen_batch = next(generation_iter)
gen_batch_vertices = gen_batch["mesh_verts"].to(self.device)
gen_batch_images = gen_batch["image"].to(self.device)
gen_batch_poses = gen_batch["pose"].to(self.device)
deformed_meshes = self.refine_mesh_batched(deform_net, semantic_dis_net, gen_batch_vertices,
gen_batch_images, gen_batch_poses, compute_losses=False)
# TODO: fix this to turn into logits, not sigmoid
pred_logits_fake = compute_sem_dis_loss(deformed_meshes, self.semantic_dis_loss_num_render, semantic_dis_net, self.device)
batch_size = real_render_batch.shape[0]
real_labels = real_labels_dist.sample((batch_size,1)).squeeze(2).to(self.device)
fake_labels = fake_labels_dist.sample((batch_size,1)).squeeze(2).to(self.device)
dis_loss = F.binary_cross_entropy_with_logits(pred_logits_real, real_labels) + \
F.binary_cross_entropy_with_logits(pred_logits_fake, fake_labels)
dis_loss.backward()
dis_optimizer.step()
continue
# training generator; discriminator weights are frozen
# =/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/
for param in semantic_dis_net.parameters(): param.requires_grad = False
for param in deform_net.parameters(): param.requires_grad = True
            # DataLoader objects do not support slicing; pull a fixed number of batches from an iterator instead.
            generation_iter = iter(generation_loader)
            for _ in tqdm(range(self.num_batches_gen_train), file=self.tqdm_out):
                gen_batch = next(generation_iter)
deform_net.train()
semantic_dis_net.eval()
deform_optimizer.zero_grad()
                # refine_mesh_batched expects unpacked vertices, images, and poses (mirrors the discriminator phase above).
                gen_batch_vertices = gen_batch["mesh_verts"].to(self.device)
                gen_batch_images = gen_batch["image"].to(self.device)
                gen_batch_poses = gen_batch["pose"].to(self.device)
                deform_loss_dict, _ = self.refine_mesh_batched(
                    deform_net, semantic_dis_net, gen_batch_vertices, gen_batch_images, gen_batch_poses
                )
# TODO: make sure loss is correct (follows minimax loss)
total_loss = sum([deform_loss_dict[loss_name] * cfg['training'][loss_name.replace("loss", "lam")] for loss_name in deform_loss_dict])
total_loss.backward()
deform_optimizer.step()
    # Given a batch of meshes, masks, and poses, computes a forward pass through a given deformation network and semantic discriminator network.
    # Returns the deformed mesh and (optionally) a dict of (unweighted, raw) computed losses.
    # TODO: fix mesh (currently, needs to already be in device)
def refine_mesh_batched(self, deform_net, semantic_dis_net, mesh_verts_batch, img_batch, pose_batch, compute_losses=True):
# computing mesh deformation
delta_v = deform_net(pose_batch, img_batch, mesh_verts_batch)
delta_v = delta_v.reshape((-1,3))
deformed_mesh = mesh.offset_verts(delta_v)
if not compute_losses:
return deformed_mesh
else:
# prep inputs used to compute losses
pred_dist = pose_batch[:,0]
pred_elev = pose_batch[:,1]
pred_azim = pose_batch[:,2]
R, T = look_at_view_transform(pred_dist, pred_elev, pred_azim)
mask = rgba_image[:,:,3] > 0
mask_gt = torch.tensor(mask, dtype=torch.float).to(self.device)
num_vertices = mesh.verts_packed().shape[0]
zero_deformation_tensor = torch.zeros((num_vertices, 3)).to(self.device)
sym_plane_normal = [0,0,1] # TODO: make this generalizable to other classes
loss_dict = {}
# computing losses
rendered_deformed_mesh = utils.render_mesh(deformed_mesh, R, T, self.device, img_size=224, silhouette=True)
loss_dict["sil_loss"] = F.binary_cross_entropy(rendered_deformed_mesh[0, :,:, 3], mask_gt)
loss_dict["l2_loss"] = F.mse_loss(delta_v, zero_deformation_tensor)
loss_dict["lap_smoothness_loss"] = mesh_laplacian_smoothing(deformed_mesh)
loss_dict["normal_consistency_loss"] = mesh_normal_consistency(deformed_mesh)
# TODO: remove weights?
if self.img_sym_lam > 0:
loss_dict["img_sym_loss"], _ = def_losses.image_symmetry_loss(deformed_mesh, sym_plane_normal, self.cfg["training"]["img_sym_num_azim"], self.device)
else:
loss_dict["img_sym_loss"] = torch.tensor(0).to(self.device)
if self.vertex_sym_lam > 0:
loss_dict["vertex_sym_loss"] = def_losses.vertex_symmetry_loss_fast(deformed_mesh, sym_plane_normal, self.device)
else:
loss_dict["vertex_sym_loss"] = torch.tensor(0).to(self.device)
if self.semantic_dis_lam > 0:
loss_dict["semantic_dis_loss"], _ = compute_sem_dis_loss(deformed_mesh, self.semantic_dis_loss_num_render, semantic_dis_net, self.device)
else:
loss_dict["semantic_dis_loss"] = torch.tensor(0).to(self.device)
return loss_dict, deformed_mesh
if __name__
|
train
|
identifier_name
|
endpoint.rs
|
place for PCI device.function: {:?}", s);
};
let dev = with_context!(("invalid PCI device: {}", dev_s),
u8::from_str_radix(dev_s, 16).map_err(|e| e.into())
)?;
let fun = with_context!(("invalid PCI function: {}", fun_s),
Ok(u8::from_str_radix(fun_s, 8)?)
)?;
ensure!(dev < 0x20, "invalid PCI device: {} (too big)", dev);
    ensure!(fun < 0x08, "invalid PCI function: {} (too big)", fun);
Ok(SlotFunction(dev << 3 | fun))
}
}
fn read_trimmed_info_file(ep: PciEndpoint, name: &str) -> crate::AResult<String> {
with_context!(("couldn't read info file {} for PCI device {}", name, ep), {
let mut f = fs::File::open(ep.device_file(name))?;
let mut result = String::new();
f.read_to_string(&mut result)?;
Ok(result.trim().into())
})
}
fn read_hex_info_file<T>(ep: PciEndpoint, name: &str, from_str_radix: fn(&str, u32) -> Result<T, ParseIntError>) -> crate::AResult<T> {
let value = read_trimmed_info_file(ep, name)?;
ensure!(value.starts_with("0x"), "info {} for PCI device {} doesn't start with '0x': {:?}", name, ep, value);
with_context!(("couldn't parse info {} for PCI device {}", name, ep), {
Ok(from_str_radix(&value[2..], 16)?)
})
}
fn read_decimal_info_file<T>(ep: PciEndpoint, name: &str, from_str_radix: fn(&str, u32) -> Result<T, ParseIntError>) -> crate::AResult<T> {
let value = read_trimmed_info_file(ep, name)?;
with_context!(("couldn't parse info {} for PCI device {}", name, ep), {
Ok(from_str_radix(&value, 10)?)
})
}
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub struct PciBus {
pub domain: u16,
pub bus: u8,
}
impl fmt::Display for PciBus {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:04x}:{:02x}", self.domain, self.bus)
}
}
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub struct PciEndpoint {
pub bus: PciBus,
pub slot_function: SlotFunction,
}
impl PciEndpoint {
fn device_file(&self, name: &str) -> String {
format!("/sys/bus/pci/devices/{}/{}", *self, name)
}
pub fn is_enabled(&self) -> crate::AResult<bool> {
match read_trimmed_info_file(*self, "enable")?.as_str() {
"0" => Ok(false),
"1" => Ok(true),
e => bail!("Invalid 'enable' value {:?} for PCI device {}", e, self),
}
}
pub fn scoped_enable(&self) -> crate::AResult<ScopedEnable> {
if !self.is_enabled()? {
let scoped_enable = ScopedEnable { ep: Some(*self) };
self.enable()?;
Ok(scoped_enable)
} else {
Ok(ScopedEnable { ep: None })
}
}
pub fn enable(&self) -> crate::AResult<()> {
with_context!(("PCI {}: enable device", self), {
fs::OpenOptions::new().write(true).open(self.device_file("enable"))?.write_all(b"1")?;
Ok(())
})
}
pub fn disable(&self) -> crate::AResult<()> {
with_context!(("PCI {}: disable device", self), {
fs::OpenOptions::new().write(true).open(self.device_file("enable"))?.write_all(b"0")?;
Ok(())
})
}
pub fn vendor(&self) -> crate::AResult<VendorId> {
read_hex_info_file::<u16>(*self, "vendor", u16::from_str_radix).map(VendorId)
}
pub fn device(&self) -> crate::AResult<DeviceID> {
read_hex_info_file::<u16>(*self, "device", u16::from_str_radix).map(DeviceID)
}
pub fn subsystem_vendor(&self) -> crate::AResult<VendorId> {
read_hex_info_file::<u16>(*self, "subsystem_vendor", u16::from_str_radix).map(VendorId)
}
pub fn subsystem_device(&self) -> crate::AResult<DeviceID> {
read_hex_info_file::<u16>(*self, "subsystem_device", u16::from_str_radix).map(DeviceID)
}
pub fn class(&self) -> crate::AResult<Class> {
let v = read_hex_info_file::<u32>(*self, "class", u32::from_str_radix)?;
let class_code = ClassCode((v >> 16) as u8);
let subclass_code = SubClassCode((v >> 8) as u8);
let programming_interface = ProgrammingInterface(v as u8);
Ok(Class{class_code, subclass_code, programming_interface})
}
/// Bridges have a secondary bus (the bus that the devices on the other side of the bridge are connected to)
pub fn secondary_bus(&self) -> crate::AResult<PciBus> {
let bus = read_decimal_info_file::<u8>(*self, "secondary_bus_number", u8::from_str_radix)?;
Ok(PciBus {
domain: self.bus.domain,
bus,
})
}
pub fn driver(&self) -> crate::AResult<Option<Driver>> {
let link = self.device_file("driver");
match fs::symlink_metadata(&link) {
Err(ref e) if e.kind() == io::ErrorKind::NotFound => return Ok(None),
Err(e) => bail!("Couldn't locate driver for PCI device {}: {}", self, e),
Ok(attr) => if !attr.file_type().is_symlink() {
bail!("driver for PCI device {} not a symlink", self);
},
}
let path = with_context!(("Couldn't follow driver symlink for PCI device {}", self),
Ok(fs::canonicalize(link)?)
)?;
Ok(Some(Driver{path}))
}
}
impl fmt::Display for PciEndpoint {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}:{}", self.bus, self.slot_function)
}
}
impl str::FromStr for PciEndpoint {
type Err = ::failure::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
// max len: 0000:00:00.0
// short: 0:0.0
ensure!(s.len() <= 12, "PCI endpoint too long: {:?}", s);
let (domain, bus_s, devfun_s) = {
let mut parts = s.split(':');
let p1 = parts.next().ok_or_else(|| format_err!("Need at least one ':' in PCI endpoint: {:?}", s))?;
let p2 = parts.next().ok_or_else(|| format_err!("Need at least one ':' in PCI endpoint: {:?}", s))?;
match parts.next() {
None => (0, p1, p2),
Some(p3) => {
ensure!(parts.next().is_none(), "At most two ':' in PCI endpoint: {:?}", s);
let domain = with_context!(("invalid PCI domain: {}", p1),
Ok(u16::from_str_radix(p1, 16)?)
)?;
(domain, p2, p3)
}
}
};
let bus = with_context!(("invalid PCI bus: {}", bus_s),
Ok(u8::from_str_radix(bus_s, 16)?)
)?;
let slot_function = devfun_s.parse::<SlotFunction>()?;
let bus = PciBus {
domain,
bus,
};
Ok(PciEndpoint {
bus,
slot_function,
})
}
}
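`from_str` above accepts both `bus:dev.fun` and `domain:bus:dev.fun`, defaulting the domain to 0 and parsing domain/bus/device in hex and the single function digit in octal. A simplified standalone sketch of the same decomposition, without the crate's error-context macros (`parse_endpoint` is an illustrative name):

```rust
// Illustrative stand-alone parser for "dddd:bb:dd.f" / "bb:dd.f" PCI endpoint
// strings, mirroring the splitting logic above.
fn parse_endpoint(s: &str) -> Option<(u16, u8, u8, u8)> {
    let parts: Vec<&str> = s.split(':').collect();
    let (domain, bus_s, devfun_s) = match parts.as_slice() {
        [bus, devfun] => (0u16, *bus, *devfun),
        [dom, bus, devfun] => (u16::from_str_radix(*dom, 16).ok()?, *bus, *devfun),
        _ => return None,
    };
    let bus = u8::from_str_radix(bus_s, 16).ok()?;
    let (dev_s, fun_s) = devfun_s.split_once('.')?;
    let dev = u8::from_str_radix(dev_s, 16).ok()?;
    let fun = u8::from_str_radix(fun_s, 8).ok()?;
    if dev >= 0x20 || fun >= 0x08 {
        return None;
    }
    Some((domain, bus, dev, fun))
}

fn main() {
    assert_eq!(parse_endpoint("0000:03:1c.2"), Some((0, 0x03, 0x1c, 2)));
    assert_eq!(parse_endpoint("3:0.0"), Some((0, 3, 0, 0)));
    assert_eq!(parse_endpoint("bad"), None);
}
```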
#[derive(Debug)]
pub struct ScopedEnable {
ep: Option<PciEndpoint>, // is none if already "closed" or was already enabled before
}
impl ScopedEnable {
pub fn
|
(mut self) -> crate::AResult<()> {
if let Some(ep) = self.ep.take() {
ep.disable()?;
}
Ok(())
}
}
impl Drop for ScopedEnable {
fn drop(&mut self) {
if let Some(ep) = self.ep.take() {
if let Err(e) = ep.disable() {
error!("PCI {}: Failed to disable temporarily enabled device: {}", ep, e);
}
}
}
}
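`ScopedEnable` is an RAII guard: it is armed only when the device actually had to be enabled, its `close` method runs the cleanup eagerly, and `Drop` re-disables the device otherwise. A minimal sketch of that disarm-able guard pattern in isolation (names here are illustrative, not the crate's API):

```rust
// Minimal sketch of the disarm-able guard pattern used by ScopedEnable:
// run a cleanup action on drop unless the guard was never armed or was
// already closed explicitly.
struct Guard<F: FnMut()> {
    cleanup: Option<F>,
}

impl<F: FnMut()> Guard<F> {
    fn armed(cleanup: F) -> Self {
        Guard { cleanup: Some(cleanup) }
    }
    fn disarmed() -> Self {
        Guard { cleanup: None }
    }
    // Consume the guard and run the cleanup eagerly (like `close` above).
    fn close(mut self) {
        if let Some(mut f) = self.cleanup.take() {
            f();
        }
    }
}

impl<F: FnMut()> Drop for Guard<F> {
    fn drop(&mut self) {
        if let Some(f) = self.cleanup.as_mut() {
            f();
        }
    }
}

fn main() {
    {
        let _g = Guard::armed(|| println!("disabling device again"));
        println!("device temporarily enabled");
    } // cleanup runs here when _g is dropped
    let _already_enabled = Guard::<fn()>::disarmed(); // nothing to undo on drop
}
```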
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub struct VendorId(pub u16);
impl fmt::Display for VendorId {
|
close
|
identifier_name
|
endpoint.rs
|
// short: 0.0, long: 1f.7
let (dev_s, fun_s) = if r.len() == 3 && r[1] == b'.' {
(&s[0..1], &s[2..3])
} else if r.len() == 4 && r[2] == b'.' {
(&s[0..2], &s[3..4])
} else {
bail!("Couldn't find '.' in valid place for PCI device.function: {:?}", s);
};
let dev = with_context!(("invalid PCI device: {}", dev_s),
u8::from_str_radix(dev_s, 16).map_err(|e| e.into())
)?;
let fun = with_context!(("invalid PCI function: {}", fun_s),
Ok(u8::from_str_radix(fun_s, 8)?)
)?;
ensure!(dev < 0x20, "invalid PCI device: {} (too big)", dev);
ensure!(fun <= 0x08, "invalid PCI function: {} (too big)", fun);
Ok(SlotFunction(dev << 3 | fun))
}
}
fn read_trimmed_info_file(ep: PciEndpoint, name: &str) -> crate::AResult<String> {
with_context!(("couldn't read info file {} for PCI device {}", name, ep), {
let mut f = fs::File::open(ep.device_file(name))?;
let mut result = String::new();
f.read_to_string(&mut result)?;
Ok(result.trim().into())
})
}
fn read_hex_info_file<T>(ep: PciEndpoint, name: &str, from_str_radix: fn(&str, u32) -> Result<T, ParseIntError>) -> crate::AResult<T> {
let value = read_trimmed_info_file(ep, name)?;
ensure!(value.starts_with("0x"), "info {} for PCI device {} doesn't start with '0x': {:?}", name, ep, value);
with_context!(("couldn't parse info {} for PCI device {}", name, ep), {
Ok(from_str_radix(&value[2..], 16)?)
})
}
fn read_decimal_info_file<T>(ep: PciEndpoint, name: &str, from_str_radix: fn(&str, u32) -> Result<T, ParseIntError>) -> crate::AResult<T> {
let value = read_trimmed_info_file(ep, name)?;
with_context!(("couldn't parse info {} for PCI device {}", name, ep), {
Ok(from_str_radix(&value, 10)?)
})
}
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub struct PciBus {
pub domain: u16,
pub bus: u8,
}
impl fmt::Display for PciBus {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:04x}:{:02x}", self.domain, self.bus)
}
}
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub struct PciEndpoint {
pub bus: PciBus,
pub slot_function: SlotFunction,
}
impl PciEndpoint {
fn device_file(&self, name: &str) -> String {
format!("/sys/bus/pci/devices/{}/{}", *self, name)
}
pub fn is_enabled(&self) -> crate::AResult<bool> {
match read_trimmed_info_file(*self, "enable")?.as_str() {
"0" => Ok(false),
"1" => Ok(true),
e => bail!("Invalid 'enable' value {:?} for PCI device {}", e, self),
}
}
pub fn scoped_enable(&self) -> crate::AResult<ScopedEnable> {
if !self.is_enabled()? {
let scoped_enable = ScopedEnable { ep: Some(*self) };
self.enable()?;
Ok(scoped_enable)
} else {
Ok(ScopedEnable { ep: None })
}
}
pub fn enable(&self) -> crate::AResult<()> {
with_context!(("PCI {}: enable device", self), {
fs::OpenOptions::new().write(true).open(self.device_file("enable"))?.write_all(b"1")?;
Ok(())
})
}
pub fn disable(&self) -> crate::AResult<()> {
with_context!(("PCI {}: disable device", self), {
fs::OpenOptions::new().write(true).open(self.device_file("enable"))?.write_all(b"0")?;
Ok(())
})
}
pub fn vendor(&self) -> crate::AResult<VendorId> {
read_hex_info_file::<u16>(*self, "vendor", u16::from_str_radix).map(VendorId)
}
pub fn device(&self) -> crate::AResult<DeviceID> {
read_hex_info_file::<u16>(*self, "device", u16::from_str_radix).map(DeviceID)
}
pub fn subsystem_vendor(&self) -> crate::AResult<VendorId> {
read_hex_info_file::<u16>(*self, "subsystem_vendor", u16::from_str_radix).map(VendorId)
}
pub fn subsystem_device(&self) -> crate::AResult<DeviceID> {
read_hex_info_file::<u16>(*self, "subsystem_device", u16::from_str_radix).map(DeviceID)
}
pub fn class(&self) -> crate::AResult<Class> {
let v = read_hex_info_file::<u32>(*self, "class", u32::from_str_radix)?;
let class_code = ClassCode((v >> 16) as u8);
let subclass_code = SubClassCode((v >> 8) as u8);
let programming_interface = ProgrammingInterface(v as u8);
Ok(Class{class_code, subclass_code, programming_interface})
}
/// Bridges have a secondary bus (the bus that the devices on the other side of the bridge are connected to)
pub fn secondary_bus(&self) -> crate::AResult<PciBus> {
let bus = read_decimal_info_file::<u8>(*self, "secondary_bus_number", u8::from_str_radix)?;
Ok(PciBus {
domain: self.bus.domain,
bus,
})
}
pub fn driver(&self) -> crate::AResult<Option<Driver>> {
let link = self.device_file("driver");
match fs::symlink_metadata(&link) {
Err(ref e) if e.kind() == io::ErrorKind::NotFound => return Ok(None),
Err(e) => bail!("Couldn't locate driver for PCI device {}: {}", self, e),
Ok(attr) => if !attr.file_type().is_symlink() {
bail!("driver for PCI device {} not a symlink", self);
},
}
let path = with_context!(("Couldn't follow driver symlink for PCI device {}", self),
Ok(fs::canonicalize(link)?)
)?;
Ok(Some(Driver{path}))
}
}
impl fmt::Display for PciEndpoint {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}:{}", self.bus, self.slot_function)
}
}
impl str::FromStr for PciEndpoint {
type Err = ::failure::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
// max len: 0000:00:00.0
// short: 0:0.0
ensure!(s.len() <= 12, "PCI endpoint too long: {:?}", s);
let (domain, bus_s, devfun_s) = {
let mut parts = s.split(':');
let p1 = parts.next().ok_or_else(|| format_err!("Need at least one ':' in PCI endpoint: {:?}", s))?;
let p2 = parts.next().ok_or_else(|| format_err!("Need at least one ':' in PCI endpoint: {:?}", s))?;
match parts.next() {
None => (0, p1, p2),
Some(p3) => {
ensure!(parts.next().is_none(), "At most two ':' in PCI endpoint: {:?}", s);
let domain = with_context!(("invalid PCI domain: {}", p1),
Ok(u16::from_str_radix(p1, 16)?)
)?;
(domain, p2, p3)
}
}
};
let bus = with_context!(("invalid PCI bus: {}", bus_s),
Ok(u8::from_str_radix(bus_s, 16)?)
)?;
let slot_function = devfun_s.parse::<SlotFunction>()?;
let bus = PciBus {
domain,
bus,
};
Ok(PciEndpoint {
bus,
slot_function,
})
}
}
#[derive(Debug)]
pub struct ScopedEnable {
ep: Option
|
fn from_str(s: &str) -> Result<Self, Self::Err> {
let r = s.as_bytes();
ensure!(r.len() <= 4, "String too long for PCI device.function: {:?}", s);
|
random_line_split
|
|
common.rs
|
(size: usize) -> Option<Vec<usize>> {
if size == 0 {
return Some(vec![]);
}
let mut peak_size = ALL_ONES >> size.leading_zeros();
let mut num_left = size;
let mut sum_prev_peaks = 0;
let mut peaks = vec![];
while peak_size != 0 {
if num_left >= peak_size {
peaks.push(sum_prev_peaks + peak_size - 1);
sum_prev_peaks += peak_size;
num_left -= peak_size;
}
peak_size >>= 1;
}
if num_left > 0
|
Some(peaks)
}
/// Calculates the positions of the (parent, sibling) of the node at the provided position.
/// Returns an error if the pos provided would result in an underflow or overflow.
pub fn family(pos: usize) -> Result<(usize, usize), MerkleMountainRangeError> {
let (peak_map, height) = peak_map_height(pos);
let peak = 1 << height;
// Convert to i128 so that we don't over/underflow, and then we will cast back to usize after
let pos = pos as i128;
let peak = i128::from(peak);
let peak_map = peak_map as i128;
let res = if (peak_map & peak) == 0 {
(pos + 2 * peak, pos + 2 * peak - 1)
} else {
(pos + 1, pos + 1 - 2 * peak)
};
Ok((
res.0.try_into().map_err(|_| MerkleMountainRangeError::OutOfRange)?,
res.1.try_into().map_err(|_| MerkleMountainRangeError::OutOfRange)?,
))
}
/// For a given starting position calculate the parent and sibling positions
/// for the branch/path from that position to the peak of the tree.
/// We will use the sibling positions to generate the "path" of a Merkle proof.
pub fn family_branch(pos: usize, last_pos: usize) -> Vec<(usize, usize)> {
// loop going up the tree, from node to parent, as long as we stay inside
// the tree (as defined by last_pos).
let (peak_map, height) = peak_map_height(pos);
let mut peak = 1 << height;
let mut branch = vec![];
let mut current = pos;
let mut sibling;
while current < last_pos {
if (peak_map & peak) == 0 {
current += 2 * peak;
sibling = current - 1;
} else {
current += 1;
sibling = current - 2 * peak;
};
if current > last_pos {
break;
}
branch.push((current, sibling));
peak <<= 1;
}
branch
}
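The arithmetic in `family` and `family_branch` follows directly from the peak map: a node is a left child exactly when the peak bit at its height is clear. A self-contained sketch that re-derives two of the `(parent, sibling)` pairs exercised by the tests below; it reimplements `peak_map_height` locally and omits the i128 overflow guard used above:

```rust
// Self-contained sketch of the parent/sibling arithmetic for MMR node positions.
fn peak_map_height(mut pos: usize) -> (usize, usize) {
    if pos == 0 {
        return (0, 0);
    }
    let mut peak_size = usize::MAX >> pos.leading_zeros();
    let mut bitmap = 0;
    while peak_size != 0 {
        bitmap <<= 1;
        if pos >= peak_size {
            pos -= peak_size;
            bitmap |= 1;
        }
        peak_size >>= 1;
    }
    (bitmap, pos)
}

fn family(pos: usize) -> (usize, usize) {
    let (peak_map, height) = peak_map_height(pos);
    let peak = 1 << height;
    if (peak_map & peak) == 0 {
        (pos + 2 * peak, pos + 2 * peak - 1) // pos is a left child
    } else {
        (pos + 1, pos + 1 - 2 * peak) // pos is a right child
    }
}

fn main() {
    // In the 7-node tree
    //        6
    //      /   \
    //     2     5
    //    / \   / \
    //   0   1 3   4
    // node 3 is a left child: its parent is 5 and its sibling is 4.
    assert_eq!(family(3), (5, 4));
    // node 1 is a right child: its parent is 2 and its sibling is 0.
    assert_eq!(family(1), (2, 0));
}
```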
/// The height of a node in a full binary tree from its index.
pub fn bintree_height(num: usize) -> usize {
if num == 0 {
return 0;
}
peak_map_height(num).1
}
/// return (peak_map, pos_height) of given 0-based node pos prior to its addition
/// Example: on input 4 returns (0b11, 0) as mmr state before adding 4 was
/// 2
/// / \
/// 0 1 3
/// with 0b11 indicating presence of peaks of height 0 and 1.
/// NOTE:
/// the peak map also encodes the path taken from the root to the added node since the path turns left (resp. right)
/// if-and-only-if a peak at that height is absent (resp. present)
pub fn peak_map_height(mut pos: usize) -> (usize, usize) {
if pos == 0 {
return (0, 0);
}
let mut peak_size = ALL_ONES >> pos.leading_zeros();
let mut bitmap = 0;
while peak_size != 0 {
bitmap <<= 1;
if pos >= peak_size {
pos -= peak_size;
bitmap |= 1;
}
peak_size >>= 1;
}
(bitmap, pos)
}
/// Is the node at this pos the "left" sibling of its parent?
pub fn is_left_sibling(pos: usize) -> bool {
let (peak_map, height) = peak_map_height(pos);
let peak = 1 << height;
(peak_map & peak) == 0
}
pub fn hash_together<D: Digest + DomainDigest>(left: &[u8], right: &[u8]) -> Hash {
D::new().chain_update(left).chain_update(right).finalize().to_vec()
}
/// The number of leaves in a MMR of the provided size.
/// Example: on input 5 returns (2 + 1 + 1) as mmr state before adding 5 was
/// 2
/// / \
/// 0 1 3 4
/// None is returned if the number of leaves exceeds the maximum value of a usize
pub fn checked_n_leaves(size: usize) -> Option<usize> {
if size == 0 {
return Some(0);
}
if size == usize::MAX {
return None;
}
let mut peak_size = ALL_ONES >> size.leading_zeros();
let mut nleaves = 0usize;
let mut size_left = size;
while peak_size != 0 {
if size_left >= peak_size {
nleaves += (peak_size + 1) >> 1;
size_left -= peak_size;
}
peak_size >>= 1;
}
if size_left == 0 {
Some(nleaves)
} else {
Some(nleaves + 1)
}
}
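A useful cross-check on `checked_n_leaves`: every peak of an MMR is a perfect binary tree with `2k - 1` nodes over its `k` leaves, so a valid MMR with `l` leaves has `popcount(l)` peaks and `2l - popcount(l)` nodes. A small sketch verifying that identity against the sizes used in the tests below (the identity is standard MMR arithmetic, not an API of this crate):

```rust
// Sketch: an MMR over `leaves` leaves has leaves.count_ones() peaks and
// 2 * leaves - leaves.count_ones() nodes in total.
fn mmr_size(leaves: usize) -> usize {
    2 * leaves - leaves.count_ones() as usize
}

fn main() {
    // Valid (size, n_leaves) pairs from the tests: 4 <-> 3, 8 <-> 5, 11 <-> 7, 15 <-> 8.
    assert_eq!(mmr_size(3), 4);
    assert_eq!(mmr_size(5), 8);
    assert_eq!(mmr_size(7), 11);
    assert_eq!(mmr_size(8), 15);
    // The number of peaks equals the popcount of the leaf count,
    // e.g. 7 leaves -> 3 peaks, matching find_peaks(11) == Some(vec![6, 9, 10]).
    assert_eq!(7usize.count_ones(), 3);
}
```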
#[cfg(test)]
mod test {
use super::*;
#[test]
fn leaf_to_node_indices() {
assert_eq!(node_index(LeafIndex(0)), 0);
assert_eq!(node_index(LeafIndex(1)), 1);
assert_eq!(node_index(LeafIndex(2)), 3);
assert_eq!(node_index(LeafIndex(3)), 4);
assert_eq!(node_index(LeafIndex(5)), 8);
assert_eq!(node_index(LeafIndex(6)), 10);
assert_eq!(node_index(LeafIndex(7)), 11);
assert_eq!(node_index(LeafIndex(8)), 15);
}
#[test]
fn n_leaf_nodes() {
assert_eq!(checked_n_leaves(0), Some(0));
assert_eq!(checked_n_leaves(1), Some(1));
assert_eq!(checked_n_leaves(3), Some(2));
assert_eq!(checked_n_leaves(4), Some(3));
assert_eq!(checked_n_leaves(5), Some(4));
assert_eq!(checked_n_leaves(8), Some(5));
assert_eq!(checked_n_leaves(10), Some(6));
assert_eq!(checked_n_leaves(11), Some(7));
assert_eq!(checked_n_leaves(15), Some(8));
assert_eq!(checked_n_leaves(usize::MAX - 1), Some(9223372036854775808));
// Overflowed
assert_eq!(checked_n_leaves(usize::MAX), None);
}
#[test]
fn peak_vectors() {
assert_eq!(find_peaks(0), Some(Vec::<usize>::new()));
assert_eq!(find_peaks(1), Some(vec![0]));
assert_eq!(find_peaks(2), None);
assert_eq!(find_peaks(3), Some(vec![2]));
assert_eq!(find_peaks(4), Some(vec![2, 3]));
assert_eq!(find_peaks(5), None);
assert_eq!(find_peaks(6), None);
assert_eq!(find_peaks(7), Some(vec![6]));
assert_eq!(find_peaks(8), Some(vec![6, 7]));
assert_eq!(find_peaks(9), None);
assert_eq!(find_peaks(10), Some(vec![6, 9]));
assert_eq!(find_peaks(11), Some(vec![6, 9, 10]));
assert_eq!(find_peaks(12), None);
assert_eq!(find_peaks(13), None);
assert_eq!(find_peaks(14), None);
assert_eq!(find_peaks(15), Some(vec![14]));
assert_eq!(find_peaks(16), Some(vec![14, 15]));
assert_eq!(find_peaks(17), None);
assert_eq!(find_peaks(
|
{
// This happens whenever the MMR size is not valid, that is, the nodes do not
// form complete peaks. For example, in this case
// 2
// / \
// 0 1 3 4
// is invalid, as it can be completed to form
// 6
// / \
// 2 5
// / \ / \
// 0 1 3 4
// which is of size 7 (with single peak [6])
return None;
}
|
conditional_block
|
common.rs
|
/// Returns an error if the pos provided would result in an underflow or overflow.
pub fn family(pos: usize) -> Result<(usize, usize), MerkleMountainRangeError> {
let (peak_map, height) = peak_map_height(pos);
let peak = 1 << height;
// Convert to i128 so that we don't over/underflow, and then we will cast back to usize after
let pos = pos as i128;
let peak = i128::from(peak);
let peak_map = peak_map as i128;
let res = if (peak_map & peak) == 0 {
(pos + 2 * peak, pos + 2 * peak - 1)
} else {
(pos + 1, pos + 1 - 2 * peak)
};
Ok((
res.0.try_into().map_err(|_| MerkleMountainRangeError::OutOfRange)?,
res.1.try_into().map_err(|_| MerkleMountainRangeError::OutOfRange)?,
))
}
/// For a given starting position calculate the parent and sibling positions
/// for the branch/path from that position to the peak of the tree.
/// We will use the sibling positions to generate the "path" of a Merkle proof.
pub fn family_branch(pos: usize, last_pos: usize) -> Vec<(usize, usize)> {
// loop going up the tree, from node to parent, as long as we stay inside
// the tree (as defined by last_pos).
let (peak_map, height) = peak_map_height(pos);
let mut peak = 1 << height;
let mut branch = vec![];
let mut current = pos;
let mut sibling;
while current < last_pos {
if (peak_map & peak) == 0 {
current += 2 * peak;
sibling = current - 1;
} else {
current += 1;
sibling = current - 2 * peak;
};
if current > last_pos {
break;
}
branch.push((current, sibling));
peak <<= 1;
}
branch
}
/// The height of a node in a full binary tree from its index.
pub fn bintree_height(num: usize) -> usize {
if num == 0 {
return 0;
}
peak_map_height(num).1
}
/// return (peak_map, pos_height) of given 0-based node pos prior to its addition
/// Example: on input 4 returns (0b11, 0) as mmr state before adding 4 was
/// 2
/// / \
/// 0 1 3
/// with 0b11 indicating presence of peaks of height 0 and 1.
/// NOTE:
/// the peak map also encodes the path taken from the root to the added node since the path turns left (resp. right)
/// if-and-only-if a peak at that height is absent (resp. present)
pub fn peak_map_height(mut pos: usize) -> (usize, usize) {
if pos == 0 {
return (0, 0);
}
let mut peak_size = ALL_ONES >> pos.leading_zeros();
let mut bitmap = 0;
while peak_size != 0 {
bitmap <<= 1;
if pos >= peak_size {
pos -= peak_size;
bitmap |= 1;
}
peak_size >>= 1;
}
(bitmap, pos)
}
/// Is the node at this pos the "left" sibling of its parent?
pub fn is_left_sibling(pos: usize) -> bool {
let (peak_map, height) = peak_map_height(pos);
let peak = 1 << height;
(peak_map & peak) == 0
}
pub fn hash_together<D: Digest + DomainDigest>(left: &[u8], right: &[u8]) -> Hash {
D::new().chain_update(left).chain_update(right).finalize().to_vec()
}
/// The number of leaves in a MMR of the provided size.
/// Example: on input 5 returns (2 + 1 + 1) as mmr state before adding 5 was
/// 2
/// / \
/// 0 1 3 4
/// None is returned if the number of leaves exceeds the maximum value of a usize
pub fn checked_n_leaves(size: usize) -> Option<usize> {
if size == 0 {
return Some(0);
}
if size == usize::MAX {
return None;
}
let mut peak_size = ALL_ONES >> size.leading_zeros();
let mut nleaves = 0usize;
let mut size_left = size;
while peak_size != 0 {
if size_left >= peak_size {
nleaves += (peak_size + 1) >> 1;
size_left -= peak_size;
}
peak_size >>= 1;
}
if size_left == 0 {
Some(nleaves)
} else {
Some(nleaves + 1)
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn leaf_to_node_indices() {
assert_eq!(node_index(LeafIndex(0)), 0);
assert_eq!(node_index(LeafIndex(1)), 1);
assert_eq!(node_index(LeafIndex(2)), 3);
assert_eq!(node_index(LeafIndex(3)), 4);
assert_eq!(node_index(LeafIndex(5)), 8);
assert_eq!(node_index(LeafIndex(6)), 10);
assert_eq!(node_index(LeafIndex(7)), 11);
assert_eq!(node_index(LeafIndex(8)), 15);
}
#[test]
fn n_leaf_nodes() {
assert_eq!(checked_n_leaves(0), Some(0));
assert_eq!(checked_n_leaves(1), Some(1));
assert_eq!(checked_n_leaves(3), Some(2));
assert_eq!(checked_n_leaves(4), Some(3));
assert_eq!(checked_n_leaves(5), Some(4));
assert_eq!(checked_n_leaves(8), Some(5));
assert_eq!(checked_n_leaves(10), Some(6));
assert_eq!(checked_n_leaves(11), Some(7));
assert_eq!(checked_n_leaves(15), Some(8));
assert_eq!(checked_n_leaves(usize::MAX - 1), Some(9223372036854775808));
// Overflowed
assert_eq!(checked_n_leaves(usize::MAX), None);
}
#[test]
fn peak_vectors() {
assert_eq!(find_peaks(0), Some(Vec::<usize>::new()));
assert_eq!(find_peaks(1), Some(vec![0]));
assert_eq!(find_peaks(2), None);
assert_eq!(find_peaks(3), Some(vec![2]));
assert_eq!(find_peaks(4), Some(vec![2, 3]));
assert_eq!(find_peaks(5), None);
assert_eq!(find_peaks(6), None);
assert_eq!(find_peaks(7), Some(vec![6]));
assert_eq!(find_peaks(8), Some(vec![6, 7]));
assert_eq!(find_peaks(9), None);
assert_eq!(find_peaks(10), Some(vec![6, 9]));
assert_eq!(find_peaks(11), Some(vec![6, 9, 10]));
assert_eq!(find_peaks(12), None);
assert_eq!(find_peaks(13), None);
assert_eq!(find_peaks(14), None);
assert_eq!(find_peaks(15), Some(vec![14]));
assert_eq!(find_peaks(16), Some(vec![14, 15]));
assert_eq!(find_peaks(17), None);
assert_eq!(find_peaks(18), Some(vec![14, 17]));
assert_eq!(find_peaks(19), Some(vec![14, 17, 18]));
assert_eq!(find_peaks(20), None);
assert_eq!(find_peaks(21), None);
assert_eq!(find_peaks(22), Some(vec![14, 21]));
assert_eq!(find_peaks(23), Some(vec![14, 21, 22]));
assert_eq!(find_peaks(24), None);
assert_eq!(find_peaks(25), Some(vec![14, 21, 24]));
assert_eq!(find_peaks(26), Some(vec![14, 21, 24, 25]));
assert_eq!(find_peaks(27), None);
assert_eq!(find_peaks(28), None);
assert_eq!(find_peaks(56), Some(vec![30, 45, 52, 55]));
assert_eq!(find_peaks(60), None);
assert_eq!(find_peaks(123), None);
assert_eq!(find_peaks(130), Some(vec![126, 129]));
}
#[test]
fn
|
peak_map_heights
|
identifier_name
|
|
common.rs
|
0
}
pub fn hash_together<D: Digest + DomainDigest>(left: &[u8], right: &[u8]) -> Hash {
D::new().chain_update(left).chain_update(right).finalize().to_vec()
}
/// The number of leaves in a MMR of the provided size.
/// Example: on input 5 returns (2 + 1 + 1) as mmr state before adding 5 was
/// 2
/// / \
/// 0 1 3 4
/// None is returned if the number of leaves exceeds the maximum value of a usize
pub fn checked_n_leaves(size: usize) -> Option<usize> {
if size == 0 {
return Some(0);
}
if size == usize::MAX {
return None;
}
let mut peak_size = ALL_ONES >> size.leading_zeros();
let mut nleaves = 0usize;
let mut size_left = size;
while peak_size != 0 {
if size_left >= peak_size {
nleaves += (peak_size + 1) >> 1;
size_left -= peak_size;
}
peak_size >>= 1;
}
if size_left == 0 {
Some(nleaves)
} else {
Some(nleaves + 1)
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn leaf_to_node_indices() {
assert_eq!(node_index(LeafIndex(0)), 0);
assert_eq!(node_index(LeafIndex(1)), 1);
assert_eq!(node_index(LeafIndex(2)), 3);
assert_eq!(node_index(LeafIndex(3)), 4);
assert_eq!(node_index(LeafIndex(5)), 8);
assert_eq!(node_index(LeafIndex(6)), 10);
assert_eq!(node_index(LeafIndex(7)), 11);
assert_eq!(node_index(LeafIndex(8)), 15);
}
#[test]
fn n_leaf_nodes() {
assert_eq!(checked_n_leaves(0), Some(0));
assert_eq!(checked_n_leaves(1), Some(1));
assert_eq!(checked_n_leaves(3), Some(2));
assert_eq!(checked_n_leaves(4), Some(3));
assert_eq!(checked_n_leaves(5), Some(4));
assert_eq!(checked_n_leaves(8), Some(5));
assert_eq!(checked_n_leaves(10), Some(6));
assert_eq!(checked_n_leaves(11), Some(7));
assert_eq!(checked_n_leaves(15), Some(8));
assert_eq!(checked_n_leaves(usize::MAX - 1), Some(9223372036854775808));
// Overflowed
assert_eq!(checked_n_leaves(usize::MAX), None);
}
#[test]
fn peak_vectors() {
assert_eq!(find_peaks(0), Some(Vec::<usize>::new()));
assert_eq!(find_peaks(1), Some(vec![0]));
assert_eq!(find_peaks(2), None);
assert_eq!(find_peaks(3), Some(vec![2]));
assert_eq!(find_peaks(4), Some(vec![2, 3]));
assert_eq!(find_peaks(5), None);
assert_eq!(find_peaks(6), None);
assert_eq!(find_peaks(7), Some(vec![6]));
assert_eq!(find_peaks(8), Some(vec![6, 7]));
assert_eq!(find_peaks(9), None);
assert_eq!(find_peaks(10), Some(vec![6, 9]));
assert_eq!(find_peaks(11), Some(vec![6, 9, 10]));
assert_eq!(find_peaks(12), None);
assert_eq!(find_peaks(13), None);
assert_eq!(find_peaks(14), None);
assert_eq!(find_peaks(15), Some(vec![14]));
assert_eq!(find_peaks(16), Some(vec![14, 15]));
assert_eq!(find_peaks(17), None);
assert_eq!(find_peaks(18), Some(vec![14, 17]));
assert_eq!(find_peaks(19), Some(vec![14, 17, 18]));
assert_eq!(find_peaks(20), None);
assert_eq!(find_peaks(21), None);
assert_eq!(find_peaks(22), Some(vec![14, 21]));
assert_eq!(find_peaks(23), Some(vec![14, 21, 22]));
assert_eq!(find_peaks(24), None);
assert_eq!(find_peaks(25), Some(vec![14, 21, 24]));
assert_eq!(find_peaks(26), Some(vec![14, 21, 24, 25]));
assert_eq!(find_peaks(27), None);
assert_eq!(find_peaks(28), None);
assert_eq!(find_peaks(56), Some(vec![30, 45, 52, 55]));
assert_eq!(find_peaks(60), None);
assert_eq!(find_peaks(123), None);
assert_eq!(find_peaks(130), Some(vec![126, 129]));
}
#[test]
fn peak_map_heights() {
assert_eq!(peak_map_height(0), (0, 0));
assert_eq!(peak_map_height(4), (0b11, 0));
// 6
// 2 5
// 0 1 3 4 7 8
assert_eq!(peak_map_height(9), (0b101, 1));
// 6
// 2 5 9
// 0 1 3 4 7 8 *
assert_eq!(peak_map_height(10), (0b110, 0));
assert_eq!(peak_map_height(12), (0b111, 1));
assert_eq!(peak_map_height(33), (0b10001, 1));
assert_eq!(peak_map_height(34), (0b10010, 0));
}
#[test]
fn is_sibling_left() {
assert!(is_left_sibling(0));
assert!(!is_left_sibling(1));
assert!(is_left_sibling(2));
assert!(is_left_sibling(3));
assert!(!is_left_sibling(4));
assert!(!is_left_sibling(5));
assert!(is_left_sibling(6));
assert!(is_left_sibling(7));
assert!(!is_left_sibling(8));
assert!(is_left_sibling(9));
assert!(is_left_sibling(10));
assert!(!is_left_sibling(11));
assert!(!is_left_sibling(12));
assert!(!is_left_sibling(13));
assert!(is_left_sibling(14));
assert!(is_left_sibling(15));
}
#[test]
fn families() {
assert_eq!(family(1).unwrap(), (2, 0));
assert_eq!(family(0).unwrap(), (2, 1));
assert_eq!(family(3).unwrap(), (5, 4));
assert_eq!(family(9).unwrap(), (13, 12));
assert_eq!(family(15).unwrap(), (17, 16));
assert_eq!(family(6).unwrap(), (14, 13));
assert_eq!(family(13).unwrap(), (14, 6));
}
#[test]
fn family_branches() {
// A 3 node tree (height 1)
assert_eq!(family_branch(0, 2), [(2, 1)]);
assert_eq!(family_branch(1, 2), [(2, 0)]);
assert_eq!(family_branch(2, 2), []);
// leaf node in a larger tree of 7 nodes (height 2)
assert_eq!(family_branch(0, 6), [(2, 1), (6, 5)]);
// note these only go as far up as the local peak, not necessarily the single root
assert_eq!(family_branch(0, 3), [(2, 1)]);
// pos 4 in a tree of size 4 is a local peak
assert_eq!(family_branch(3, 3), []);
// pos 4 in a tree of size 5 is also still a local peak
assert_eq!(family_branch(3, 4), []);
// pos 4 in a tree of size 6 has a parent and a sibling
assert_eq!(family_branch(3, 5), [(5, 4)]);
// a tree of size 7 is all under a single root
|
assert_eq!(family_branch(3, 6), [(5, 4), (6, 2)]);
|
random_line_split
|
|
common.rs
|
(size: usize) -> Option<Vec<usize>> {
if size == 0 {
return Some(vec![]);
}
let mut peak_size = ALL_ONES >> size.leading_zeros();
let mut num_left = size;
let mut sum_prev_peaks = 0;
let mut peaks = vec![];
while peak_size != 0 {
if num_left >= peak_size {
peaks.push(sum_prev_peaks + peak_size - 1);
sum_prev_peaks += peak_size;
num_left -= peak_size;
}
peak_size >>= 1;
}
if num_left > 0 {
// This happens whenever the MMR size is not valid, that is, the nodes do not
// form complete peaks. For example, in this case
// 2
// / \
// 0 1 3 4
// is invalid, as it can be completed to form
// 6
// / \
// 2 5
// / \ / \
// 0 1 3 4
// which is of size 7 (with single peak [6])
return None;
}
Some(peaks)
}
/// Calculates the positions of the (parent, sibling) of the node at the provided position.
/// Returns an error if the pos provided would result in an underflow or overflow.
pub fn family(pos: usize) -> Result<(usize, usize), MerkleMountainRangeError> {
let (peak_map, height) = peak_map_height(pos);
let peak = 1 << height;
// Convert to i128 so that we don't over/underflow, and then we will cast back to usize after
let pos = pos as i128;
let peak = i128::from(peak);
let peak_map = peak_map as i128;
let res = if (peak_map & peak) == 0 {
(pos + 2 * peak, pos + 2 * peak - 1)
} else {
(pos + 1, pos + 1 - 2 * peak)
};
Ok((
res.0.try_into().map_err(|_| MerkleMountainRangeError::OutOfRange)?,
res.1.try_into().map_err(|_| MerkleMountainRangeError::OutOfRange)?,
))
}
/// For a given starting position calculate the parent and sibling positions
/// for the branch/path from that position to the peak of the tree.
/// We will use the sibling positions to generate the "path" of a Merkle proof.
pub fn family_branch(pos: usize, last_pos: usize) -> Vec<(usize, usize)> {
// loop going up the tree, from node to parent, as long as we stay inside
// the tree (as defined by last_pos).
let (peak_map, height) = peak_map_height(pos);
let mut peak = 1 << height;
let mut branch = vec![];
let mut current = pos;
let mut sibling;
while current < last_pos {
if (peak_map & peak) == 0 {
current += 2 * peak;
sibling = current - 1;
} else {
current += 1;
sibling = current - 2 * peak;
};
if current > last_pos {
break;
}
branch.push((current, sibling));
peak <<= 1;
}
branch
}
/// The height of a node in a full binary tree from its index.
pub fn bintree_height(num: usize) -> usize {
if num == 0 {
return 0;
}
peak_map_height(num).1
}
/// return (peak_map, pos_height) of given 0-based node pos prior to its addition
/// Example: on input 4 returns (0b11, 0) as mmr state before adding 4 was
/// 2
/// / \
/// 0 1 3
/// with 0b11 indicating presence of peaks of height 0 and 1.
/// NOTE:
/// the peak map also encodes the path taken from the root to the added node since the path turns left (resp. right)
/// if-and-only-if a peak at that height is absent (resp. present)
pub fn peak_map_height(mut pos: usize) -> (usize, usize)
|
/// Is the node at this pos the "left" sibling of its parent?
pub fn is_left_sibling(pos: usize) -> bool {
let (peak_map, height) = peak_map_height(pos);
let peak = 1 << height;
(peak_map & peak) == 0
}
pub fn hash_together<D: Digest + DomainDigest>(left: &[u8], right: &[u8]) -> Hash {
D::new().chain_update(left).chain_update(right).finalize().to_vec()
}
/// The number of leaves in a MMR of the provided size.
/// Example: on input 5 returns (2 + 1 + 1) as mmr state before adding 5 was
/// 2
/// / \
/// 0 1 3 4
/// None is returned if the number of leaves exceeds the maximum value of a usize
pub fn checked_n_leaves(size: usize) -> Option<usize> {
if size == 0 {
return Some(0);
}
if size == usize::MAX {
return None;
}
let mut peak_size = ALL_ONES >> size.leading_zeros();
let mut nleaves = 0usize;
let mut size_left = size;
while peak_size != 0 {
if size_left >= peak_size {
nleaves += (peak_size + 1) >> 1;
size_left -= peak_size;
}
peak_size >>= 1;
}
if size_left == 0 {
Some(nleaves)
} else {
Some(nleaves + 1)
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn leaf_to_node_indices() {
assert_eq!(node_index(LeafIndex(0)), 0);
assert_eq!(node_index(LeafIndex(1)), 1);
assert_eq!(node_index(LeafIndex(2)), 3);
assert_eq!(node_index(LeafIndex(3)), 4);
assert_eq!(node_index(LeafIndex(5)), 8);
assert_eq!(node_index(LeafIndex(6)), 10);
assert_eq!(node_index(LeafIndex(7)), 11);
assert_eq!(node_index(LeafIndex(8)), 15);
}
#[test]
fn n_leaf_nodes() {
assert_eq!(checked_n_leaves(0), Some(0));
assert_eq!(checked_n_leaves(1), Some(1));
assert_eq!(checked_n_leaves(3), Some(2));
assert_eq!(checked_n_leaves(4), Some(3));
assert_eq!(checked_n_leaves(5), Some(4));
assert_eq!(checked_n_leaves(8), Some(5));
assert_eq!(checked_n_leaves(10), Some(6));
assert_eq!(checked_n_leaves(11), Some(7));
assert_eq!(checked_n_leaves(15), Some(8));
assert_eq!(checked_n_leaves(usize::MAX - 1), Some(9223372036854775808));
// Overflowed
assert_eq!(checked_n_leaves(usize::MAX), None);
}
#[test]
fn peak_vectors() {
assert_eq!(find_peaks(0), Some(Vec::<usize>::new()));
assert_eq!(find_peaks(1), Some(vec![0]));
assert_eq!(find_peaks(2), None);
assert_eq!(find_peaks(3), Some(vec![2]));
assert_eq!(find_peaks(4), Some(vec![2, 3]));
assert_eq!(find_peaks(5), None);
assert_eq!(find_peaks(6), None);
assert_eq!(find_peaks(7), Some(vec![6]));
assert_eq!(find_peaks(8), Some(vec![6, 7]));
assert_eq!(find_peaks(9), None);
assert_eq!(find_peaks(10), Some(vec![6, 9]));
assert_eq!(find_peaks(11), Some(vec![6, 9, 10]));
assert_eq!(find_peaks(12), None);
assert_eq!(find_peaks(13), None);
assert_eq!(find_peaks(14), None);
assert_eq!(find_peaks(15), Some(vec![14]));
assert_eq!(find_peaks(16), Some(vec![14, 15]));
assert_eq!(find_peaks(17), None);
assert_eq!(find_peaks(
|
{
if pos == 0 {
return (0, 0);
}
let mut peak_size = ALL_ONES >> pos.leading_zeros();
let mut bitmap = 0;
while peak_size != 0 {
bitmap <<= 1;
if pos >= peak_size {
pos -= peak_size;
bitmap |= 1;
}
peak_size >>= 1;
}
(bitmap, pos)
}
|
identifier_body
|
search_log.go
|
FilePath)
filePrefix := logFilePath[:len(logFilePath)-len(ext)]
files, err := ioutil.ReadDir(logDir)
if err != nil {
return nil, err
}
walkFn := func(path string, info os.FileInfo) error {
if info.IsDir() {
return nil
}
// All rotated log files have the same prefix and extension as the original file
if !strings.HasPrefix(path, filePrefix) {
return nil
}
if !strings.HasSuffix(path, ext) {
return nil
}
if isCtxDone(ctx) {
return ctx.Err()
}
// If we cannot open the file, we skip searching it instead of returning an
// error and aborting the entire search task.
// TODO: do we need to return some warning to client?
file, err := os.OpenFile(path, os.O_RDONLY, os.ModePerm)
if err != nil {
return nil
}
reader := bufio.NewReader(file)
firstItem, err := readFirstValidLog(ctx, reader, 10)
if err != nil {
skipFiles = append(skipFiles, file)
return nil
}
lastItem, err := readLastValidLog(ctx, file, 10)
if err != nil {
skipFiles = append(skipFiles, file)
return nil
}
// Reset the position to the start and skip this file if we cannot seek to the start
if _, err := file.Seek(0, io.SeekStart); err != nil {
skipFiles = append(skipFiles, file)
return nil
}
if beginTime > lastItem.Time || endTime < firstItem.Time {
skipFiles = append(skipFiles, file)
} else {
logFiles = append(logFiles, logFile{
file: file,
begin: firstItem.Time,
end: lastItem.Time,
})
}
return nil
}
for _, file := range files {
err := walkFn(filepath.Join(logDir, file.Name()), file)
if err != nil {
return nil, err
}
}
defer func() {
for _, f := range skipFiles {
_ = f.Close()
}
}()
// Sort by start time
sort.Slice(logFiles, func(i, j int) bool {
return logFiles[i].begin < logFiles[j].begin
})
return logFiles, err
}
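The `walkFn` above keeps a rotated file only when its `[firstItem.Time, lastItem.Time]` range overlaps the requested `[beginTime, endTime]` window and skips it otherwise. A tiny sketch of that overlap predicate, re-expressed in Rust to match the other sketches in this section (names are illustrative):

```rust
// Sketch of the interval test used above: a file is kept unless its
// [first, last] range lies entirely before or entirely after [begin, end].
fn overlaps(file_first: i64, file_last: i64, begin: i64, end: i64) -> bool {
    !(begin > file_last || end < file_first)
}

fn main() {
    // file covers timestamps 100..=200
    assert!(overlaps(100, 200, 150, 300));  // window starts inside the file
    assert!(overlaps(100, 200, 0, 100));    // window ends exactly at the first entry
    assert!(!overlaps(100, 200, 201, 300)); // window starts after the file ends
    assert!(!overlaps(100, 200, 0, 99));    // window ends before the file starts
}
```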
func isCtxDone(ctx context.Context) bool {
select {
case <-ctx.Done():
return true
default:
return false
}
}
func readFirstValidLog(ctx context.Context, reader *bufio.Reader, tryLines int64) (*pb.LogMessage, error) {
var tried int64
for {
line, err := readLine(reader)
if err != nil {
return nil, err
}
item, err := parseLogItem(line)
if err == nil {
return item, nil
}
tried++
if tried >= tryLines {
break
}
if isCtxDone(ctx) {
return nil, ctx.Err()
}
}
return nil, errors.New("not a valid log file")
}
func readLastValidLog(ctx context.Context, file *os.File, tryLines int) (*pb.LogMessage, error) {
var tried int
stat, _ := file.Stat()
endCursor := stat.Size()
for {
lines, readBytes, err := readLastLines(ctx, file, endCursor)
if err != nil {
return nil, err
}
// the whole file has been read
if readBytes == 0 {
break
}
endCursor -= int64(readBytes)
for i := len(lines) - 1; i >= 0; i-- {
item, err := parseLogItem(lines[i])
if err == nil {
return item, nil
}
}
tried += len(lines)
if tried >= tryLines {
break
}
}
return nil, errors.New("not a valid log file")
}
// Read a line from a reader.
func readLine(reader *bufio.Reader) (string, error) {
var line, b []byte
var err error
isPrefix := true
for isPrefix {
b, isPrefix, err = reader.ReadLine()
line = append(line, b...)
if err != nil {
return "", err
}
}
return string(line), nil
}
const maxReadCacheSize = 1024 * 1024 * 16
// Read lines from the end of a file
// endCursor initial value should be the file size
func readLastLines(ctx context.Context, file *os.File, endCursor int64) ([]string, int, error) {
var lines []byte
var firstNonNewlinePos int
var cursor = endCursor
var size int64 = 256
for {
// stop if we are at the beginning
// check this first to avoid reading beyond the file size
if cursor <= 0 {
break
}
// enlarge the read cache to avoid too many memory moves.
size = size * 2
if size > maxReadCacheSize {
size = maxReadCacheSize
}
if cursor < size {
size = cursor
|
cursor -= size
_, err := file.Seek(cursor, io.SeekStart)
if err != nil {
// return the underlying I/O error; ctx.Err() may be nil here
return nil, 0, err
}
chars := make([]byte, size)
_, err = file.Read(chars)
if err != nil {
return nil, 0, err
}
}
lines = append(chars, lines...)
// find first '\n' or '\r'
for i := 0; i < len(chars)-1; i++ {
// reach the line end
// the first newline may be in the line end at the first round
if i >= len(lines)-1 {
break
}
if (chars[i] == 10 || chars[i] == 13) && chars[i+1] != 10 && chars[i+1] != 13 {
firstNonNewlinePos = i + 1
break
}
}
if firstNonNewlinePos > 0 {
break
}
if isCtxDone(ctx) {
return nil, 0, ctx.Err()
}
}
finalStr := string(lines[firstNonNewlinePos:])
return strings.Split(strings.ReplaceAll(finalStr, "\r\n", "\n"), "\n"), len(finalStr), nil
}
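`readLastLines` walks the file backwards in chunks that double in size (capped at `maxReadCacheSize`), prepending each chunk until the buffer starts just after a line break, so callers only see whole lines. A simplified sketch of the same backwards-chunking idea, re-expressed in Rust over an in-memory `Cursor`; it omits the byte-count return value and the context cancellation that the real function handles:

```rust
use std::io::{Cursor, Read, Seek, SeekFrom};

// Simplified sketch: read whole lines from the tail of a seekable source by
// reading backwards in growing chunks and stopping once the accumulated
// buffer begins just after a line break.
fn read_last_lines<R: Read + Seek>(src: &mut R, mut cursor: u64) -> std::io::Result<Vec<String>> {
    let mut buf: Vec<u8> = Vec::new();
    let mut size: u64 = 4; // the real code starts at 256 and doubles up to a cap
    let boundary = |w: &[u8]| (w[0] == b'\n' || w[0] == b'\r') && w[1] != b'\n' && w[1] != b'\r';
    while cursor > 0 {
        size = (size * 2).min(cursor);
        cursor -= size;
        src.seek(SeekFrom::Start(cursor))?;
        let mut chunk = vec![0u8; size as usize];
        src.read_exact(&mut chunk)?;
        chunk.extend_from_slice(&buf);
        buf = chunk;
        // Stop as soon as the buffer contains a line break followed by a
        // non-break byte, i.e. a complete-line boundary.
        if buf.windows(2).any(|w| boundary(w)) {
            break;
        }
    }
    // Drop the leading partial line (everything up to the first boundary).
    let start = buf.windows(2).position(|w| boundary(w)).map(|i| i + 1).unwrap_or(0);
    let text = String::from_utf8_lossy(&buf[start..]).replace("\r\n", "\n");
    Ok(text.split('\n').map(String::from).collect())
}

fn main() -> std::io::Result<()> {
    let data = b"first line\nsecond line\nthird line";
    let mut src = Cursor::new(&data[..]);
    let lines = read_last_lines(&mut src, data.len() as u64)?;
    assert!(lines.contains(&"third line".to_string()));
    Ok(())
}
```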
// ParseLogLevel returns the LogLevel for a level string and returns
// LogLevel_UNKNOWN if the string is not a valid level string
func ParseLogLevel(s string) pb.LogLevel {
switch s {
case "debug", "DEBUG":
return pb.LogLevel_Debug
case "info", "INFO":
return pb.LogLevel_Info
case "warn", "WARN":
return pb.LogLevel_Warn
case "trace", "TRACE":
return pb.LogLevel_Trace
case "critical", "CRITICAL":
return pb.LogLevel_Critical
case "error", "ERROR":
return pb.LogLevel_Error
default:
return pb.LogLevel_UNKNOWN
}
}
// parses a single log line and returns:
// 1. the timestamp in unix milliseconds
// 2. the log level
// 3. the log item content
//
// [2019/08/26 06:19:13.011 -04:00] [INFO] [printer.go:41] ["Welcome to TiDB."] ["Release Version"=v2.1.14]...
// [2019/08/26 07:19:49.529 -04:00] [INFO] [printer.go:41] ["Welcome to TiDB."] ["Release Version"=v3.0.2]...
// [2019/08/21 01:43:01.460 -04:00] [INFO] [util.go:60] [PD] [release-version=v3.0.2]
// [2019/08/26 07:20:23.815 -04:00] [INFO] [mod.rs:28] ["Release Version: 3.0.2"]
func parseLogItem(s string) (*pb.LogMessage, error) {
timeLeftBound := strings.Index(s, "[")
timeRightBound := strings.Index(s, "]")
if timeLeftBound == -1 || timeRightBound == -1 || timeLeftBound > timeRightBound {
return nil, fmt.Errorf("invalid log string: %s", s)
}
time, err := parseTimeStamp(s[timeLeftBound+1 : timeRightBound])
if err != nil {
return nil, err
}
levelLeftBound := strings.Index(s[timeRightBound+1:], "[")
levelRightBound := strings.Index(s[timeRightBound+1:], "]")
if levelLeftBound == -1 || levelRightBound == -1 || levelLeftBound > levelRightBound {
return nil, fmt.Errorf("invalid log string:
|
}
|
random_line_split
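Each log header parsed by `parseLogItem` is a run of bracketed fields, `[timestamp] [LEVEL] [source] ...`, and the function peels off the first two to obtain the timestamp and level. A small sketch of that bracket extraction, re-expressed in Rust (`parse_header` and `next_bracketed` are illustrative names):

```rust
// Sketch of the bracket-field extraction performed on headers such as:
// [2019/08/26 06:19:13.011 -04:00] [INFO] [printer.go:41] ["Welcome ..."]
fn next_bracketed(s: &str) -> Option<(&str, &str)> {
    let l = s.find('[')?;
    let r = s.find(']')?;
    if l > r {
        return None; // malformed: ']' appears before '['
    }
    Some((&s[l + 1..r], &s[r + 1..]))
}

fn parse_header(line: &str) -> Option<(String, String)> {
    let (timestamp, rest) = next_bracketed(line)?;
    let (level, _rest) = next_bracketed(rest)?;
    Some((timestamp.to_string(), level.to_string()))
}

fn main() {
    let line = r#"[2019/08/26 06:19:13.011 -04:00] [INFO] [printer.go:41] ["Welcome to TiDB."]"#;
    let (ts, level) = parse_header(line).unwrap();
    assert_eq!(ts, "2019/08/26 06:19:13.011 -04:00");
    assert_eq!(level, "INFO");
}
```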
|
search_log.go
|
return nil
}
if !strings.HasSuffix(path, ext) {
return nil
}
if isCtxDone(ctx) {
return ctx.Err()
}
// If we cannot open the file, we skip searching it instead of returning an
// error and aborting the entire search task.
// TODO: do we need to return some warning to client?
file, err := os.OpenFile(path, os.O_RDONLY, os.ModePerm)
if err != nil {
return nil
}
reader := bufio.NewReader(file)
firstItem, err := readFirstValidLog(ctx, reader, 10)
if err != nil {
skipFiles = append(skipFiles, file)
return nil
}
lastItem, err := readLastValidLog(ctx, file, 10)
if err != nil {
skipFiles = append(skipFiles, file)
return nil
}
// Reset the position to the start and skip this file if we cannot seek to the start
if _, err := file.Seek(0, io.SeekStart); err != nil {
skipFiles = append(skipFiles, file)
return nil
}
if beginTime > lastItem.Time || endTime < firstItem.Time {
skipFiles = append(skipFiles, file)
} else {
logFiles = append(logFiles, logFile{
file: file,
begin: firstItem.Time,
end: lastItem.Time,
})
}
return nil
}
for _, file := range files {
err := walkFn(filepath.Join(logDir, file.Name()), file)
if err != nil {
return nil, err
}
}
defer func() {
for _, f := range skipFiles {
_ = f.Close()
}
}()
// Sort by start time
sort.Slice(logFiles, func(i, j int) bool {
return logFiles[i].begin < logFiles[j].begin
})
return logFiles, err
}
func isCtxDone(ctx context.Context) bool {
select {
case <-ctx.Done():
return true
default:
return false
}
}
func readFirstValidLog(ctx context.Context, reader *bufio.Reader, tryLines int64) (*pb.LogMessage, error) {
var tried int64
for {
line, err := readLine(reader)
if err != nil {
return nil, err
}
item, err := parseLogItem(line)
if err == nil {
return item, nil
}
tried++
if tried >= tryLines {
break
}
if isCtxDone(ctx) {
return nil, ctx.Err()
}
}
return nil, errors.New("not a valid log file")
}
func readLastValidLog(ctx context.Context, file *os.File, tryLines int) (*pb.LogMessage, error) {
var tried int
stat, _ := file.Stat()
endCursor := stat.Size()
for {
lines, readBytes, err := readLastLines(ctx, file, endCursor)
if err != nil {
return nil, err
}
// the whole file has been read
if readBytes == 0 {
break
}
endCursor -= int64(readBytes)
for i := len(lines) - 1; i >= 0; i-- {
item, err := parseLogItem(lines[i])
if err == nil {
return item, nil
}
}
tried += len(lines)
if tried >= tryLines {
break
}
}
return nil, errors.New("not a valid log file")
}
// Read a line from a reader.
func readLine(reader *bufio.Reader) (string, error) {
var line, b []byte
var err error
isPrefix := true
for isPrefix {
b, isPrefix, err = reader.ReadLine()
line = append(line, b...)
if err != nil {
return "", err
}
}
return string(line), nil
}
const maxReadCacheSize = 1024 * 1024 * 16
// Read lines from the end of a file
// endCursor initial value should be the file size
func readLastLines(ctx context.Context, file *os.File, endCursor int64) ([]string, int, error) {
var lines []byte
var firstNonNewlinePos int
var cursor = endCursor
var size int64 = 256
for {
// stop if we are at the beginning
// check this first to avoid reading beyond the file size
if cursor <= 0 {
break
}
// enlarge the read cache to avoid too many memory moves.
size = size * 2
if size > maxReadCacheSize {
size = maxReadCacheSize
}
if cursor < size {
size = cursor
}
cursor -= size
_, err := file.Seek(cursor, io.SeekStart)
if err != nil {
// return the underlying I/O error; ctx.Err() may be nil here
return nil, 0, err
}
chars := make([]byte, size)
_, err = file.Read(chars)
if err != nil {
return nil, 0, err
}
}
lines = append(chars, lines...)
// find first '\n' or '\r'
for i := 0; i < len(chars)-1; i++ {
// reach the line end
// the first newline may be in the line end at the first round
if i >= len(lines)-1 {
break
}
if (chars[i] == 10 || chars[i] == 13) && chars[i+1] != 10 && chars[i+1] != 13 {
firstNonNewlinePos = i + 1
break
}
}
if firstNonNewlinePos > 0 {
break
}
if isCtxDone(ctx) {
return nil, 0, ctx.Err()
}
}
finalStr := string(lines[firstNonNewlinePos:])
return strings.Split(strings.ReplaceAll(finalStr, "\r\n", "\n"), "\n"), len(finalStr), nil
}
// ParseLogLevel returns the LogLevel for a level string and returns
// LogLevel_UNKNOWN if the string is not a valid level string
func ParseLogLevel(s string) pb.LogLevel {
switch s {
case "debug", "DEBUG":
return pb.LogLevel_Debug
case "info", "INFO":
return pb.LogLevel_Info
case "warn", "WARN":
return pb.LogLevel_Warn
case "trace", "TRACE":
return pb.LogLevel_Trace
case "critical", "CRITICAL":
return pb.LogLevel_Critical
case "error", "ERROR":
return pb.LogLevel_Error
default:
return pb.LogLevel_UNKNOWN
}
}
// parses a single log line and returns:
// 1. the timestamp in unix milliseconds
// 2. the log level
// 3. the log item content
//
// [2019/08/26 06:19:13.011 -04:00] [INFO] [printer.go:41] ["Welcome to TiDB."] ["Release Version"=v2.1.14]...
// [2019/08/26 07:19:49.529 -04:00] [INFO] [printer.go:41] ["Welcome to TiDB."] ["Release Version"=v3.0.2]...
// [2019/08/21 01:43:01.460 -04:00] [INFO] [util.go:60] [PD] [release-version=v3.0.2]
// [2019/08/26 07:20:23.815 -04:00] [INFO] [mod.rs:28] ["Release Version: 3.0.2"]
func parseLogItem(s string) (*pb.LogMessage, error) {
timeLeftBound := strings.Index(s, "[")
timeRightBound := strings.Index(s, "]")
if timeLeftBound == -1 || timeRightBound == -1 || timeLeftBound > timeRightBound {
return nil, fmt.Errorf("invalid log string: %s", s)
}
time, err := parseTimeStamp(s[timeLeftBound+1 : timeRightBound])
if err != nil {
return nil, err
}
levelLeftBound := strings.Index(s[timeRightBound+1:], "[")
|
{
if logFilePath == "" {
return nil, errors.New("empty log file location configuration")
}
var logFiles []logFile
var skipFiles []*os.File
logDir := filepath.Dir(logFilePath)
ext := filepath.Ext(logFilePath)
filePrefix := logFilePath[:len(logFilePath)-len(ext)]
files, err := ioutil.ReadDir(logDir)
if err != nil {
return nil, err
}
walkFn := func(path string, info os.FileInfo) error {
if info.IsDir() {
return nil
}
// All rotated log files have the same prefix and extension as the original file
if !strings.HasPrefix(path, filePrefix) {
|
identifier_body
|
|
search_log.go
|
)
filePrefix := logFilePath[:len(logFilePath)-len(ext)]
files, err := ioutil.ReadDir(logDir)
if err != nil {
return nil, err
}
walkFn := func(path string, info os.FileInfo) error {
if info.IsDir() {
return nil
}
// All rotated log files have the same prefix and extension as the original file
if !strings.HasPrefix(path, filePrefix) {
return nil
}
if !strings.HasSuffix(path, ext) {
return nil
}
if isCtxDone(ctx) {
return ctx.Err()
}
// If we cannot open the file, we skip searching it instead of returning an
// error and aborting the entire search task.
// TODO: do we need to return some warning to client?
file, err := os.OpenFile(path, os.O_RDONLY, os.ModePerm)
if err != nil {
return nil
}
reader := bufio.NewReader(file)
firstItem, err := readFirstValidLog(ctx, reader, 10)
if err != nil {
skipFiles = append(skipFiles, file)
return nil
}
lastItem, err := readLastValidLog(ctx, file, 10)
if err != nil {
skipFiles = append(skipFiles, file)
return nil
}
// Reset the position to the start and skip this file if we cannot seek to the start
if _, err := file.Seek(0, io.SeekStart); err != nil {
skipFiles = append(skipFiles, file)
return nil
}
if beginTime > lastItem.Time || endTime < firstItem.Time {
skipFiles = append(skipFiles, file)
} else {
logFiles = append(logFiles, logFile{
file: file,
begin: firstItem.Time,
end: lastItem.Time,
})
}
return nil
}
for _, file := range files {
err := walkFn(filepath.Join(logDir, file.Name()), file)
if err != nil {
return nil, err
}
}
defer func() {
for _, f := range skipFiles {
_ = f.Close()
}
}()
// Sort by start time
sort.Slice(logFiles, func(i, j int) bool {
return logFiles[i].begin < logFiles[j].begin
})
return logFiles, err
}
func isCtxDone(ctx context.Context) bool {
select {
case <-ctx.Done():
return true
default:
return false
}
}
func readFirstValidLog(ctx context.Context, reader *bufio.Reader, tryLines int64) (*pb.LogMessage, error) {
var tried int64
for {
line, err := readLine(reader)
if err != nil {
return nil, err
}
item, err := parseLogItem(line)
if err == nil {
return item, nil
}
tried++
if tried >= tryLines {
break
}
if isCtxDone(ctx) {
return nil, ctx.Err()
}
}
return nil, errors.New("not a valid log file")
}
func readLastValidLog(ctx context.Context, file *os.File, tryLines int) (*pb.LogMessage, error) {
var tried int
stat, _ := file.Stat()
endCursor := stat.Size()
for {
lines, readBytes, err := readLastLines(ctx, file, endCursor)
if err != nil {
return nil, err
}
// the whole file has been read
if readBytes == 0 {
break
}
endCursor -= int64(readBytes)
for i := len(lines) - 1; i >= 0; i-- {
item, err := parseLogItem(lines[i])
if err == nil {
return item, nil
}
}
tried += len(lines)
if tried >= tryLines {
break
}
}
return nil, errors.New("not a valid log file")
}
// Read a line from a reader.
func readLine(reader *bufio.Reader) (string, error) {
var line, b []byte
var err error
isPrefix := true
for isPrefix {
b, isPrefix, err = reader.ReadLine()
line = append(line, b...)
if err != nil
|
}
return string(line), nil
}
const maxReadCacheSize = 1024 * 1024 * 16
// Read lines from the end of a file
// endCursor initial value should be the file size
func readLastLines(ctx context.Context, file *os.File, endCursor int64) ([]string, int, error) {
var lines []byte
var firstNonNewlinePos int
var cursor = endCursor
var size int64 = 256
for {
// stop if we are at the beginning
// check this first to avoid reading beyond the file size
if cursor <= 0 {
break
}
// enlarge the read cache to avoid too many memory moves.
size = size * 2
if size > maxReadCacheSize {
size = maxReadCacheSize
}
if cursor < size {
size = cursor
}
cursor -= size
_, err := file.Seek(cursor, io.SeekStart)
if err != nil {
// return the underlying I/O error; ctx.Err() may be nil here
return nil, 0, err
}
chars := make([]byte, size)
_, err = file.Read(chars)
if err != nil {
return nil, 0, err
}
}
lines = append(chars, lines...)
// find first '\n' or '\r'
for i := 0; i < len(chars)-1; i++ {
// reach the line end
// the first newline may be in the line end at the first round
if i >= len(lines)-1 {
break
}
if (chars[i] == 10 || chars[i] == 13) && chars[i+1] != 10 && chars[i+1] != 13 {
firstNonNewlinePos = i + 1
break
}
}
if firstNonNewlinePos > 0 {
break
}
if isCtxDone(ctx) {
return nil, 0, ctx.Err()
}
}
finalStr := string(lines[firstNonNewlinePos:])
return strings.Split(strings.ReplaceAll(finalStr, "\r\n", "\n"), "\n"), len(finalStr), nil
}
// ParseLogLevel returns the LogLevel for a level string and returns
// LogLevel_UNKNOWN if the string is not a valid level string
func ParseLogLevel(s string) pb.LogLevel {
switch s {
case "debug", "DEBUG":
return pb.LogLevel_Debug
case "info", "INFO":
return pb.LogLevel_Info
case "warn", "WARN":
return pb.LogLevel_Warn
case "trace", "TRACE":
return pb.LogLevel_Trace
case "critical", "CRITICAL":
return pb.LogLevel_Critical
case "error", "ERROR":
return pb.LogLevel_Error
default:
return pb.LogLevel_UNKNOWN
}
}
// parses a single log line and returns:
// 1. the timestamp in unix milliseconds
// 2. the log level
// 3. the log item content
//
// [2019/08/26 06:19:13.011 -04:00] [INFO] [printer.go:41] ["Welcome to TiDB."] ["Release Version"=v2.1.14]...
// [2019/08/26 07:19:49.529 -04:00] [INFO] [printer.go:41] ["Welcome to TiDB."] ["Release Version"=v3.0.2]...
// [2019/08/21 01:43:01.460 -04:00] [INFO] [util.go:60] [PD] [release-version=v3.0.2]
// [2019/08/26 07:20:23.815 -04:00] [INFO] [mod.rs:28] ["Release Version: 3.0.2"]
func parseLogItem(s string) (*pb.LogMessage, error) {
timeLeftBound := strings.Index(s, "[")
timeRightBound := strings.Index(s, "]")
if timeLeftBound == -1 || timeRightBound == -1 || timeLeftBound > timeRightBound {
return nil, fmt.Errorf("invalid log string: %s", s)
}
time, err := parseTimeStamp(s[timeLeftBound+1 : timeRightBound])
if err != nil {
return nil, err
}
levelLeftBound := strings.Index(s[timeRightBound+1:], "[")
levelRightBound := strings.Index(s[timeRightBound+1:], "]")
if levelLeftBound == -1 || levelRightBound == -1 || levelLeftBound > levelRightBound {
return nil, fmt.Errorf("invalid log
|
{
return "", err
}
|
conditional_block
|
search_log.go
|
)
filePrefix := logFilePath[:len(logFilePath)-len(ext)]
files, err := ioutil.ReadDir(logDir)
if err != nil {
return nil, err
}
walkFn := func(path string, info os.FileInfo) error {
if info.IsDir() {
return nil
}
// All rotated log files have the same prefix and extension as the original file
if !strings.HasPrefix(path, filePrefix) {
return nil
}
if !strings.HasSuffix(path, ext) {
return nil
}
if isCtxDone(ctx) {
return ctx.Err()
}
// If we cannot open the file, we skip searching it instead of returning an
// error and aborting the entire search task.
// TODO: do we need to return some warning to client?
file, err := os.OpenFile(path, os.O_RDONLY, os.ModePerm)
if err != nil {
return nil
}
reader := bufio.NewReader(file)
firstItem, err := readFirstValidLog(ctx, reader, 10)
if err != nil {
skipFiles = append(skipFiles, file)
return nil
}
lastItem, err := readLastValidLog(ctx, file, 10)
if err != nil {
skipFiles = append(skipFiles, file)
return nil
}
// Reset the position to the start and skip this file if we cannot seek to the start
if _, err := file.Seek(0, io.SeekStart); err != nil {
skipFiles = append(skipFiles, file)
return nil
}
if beginTime > lastItem.Time || endTime < firstItem.Time {
skipFiles = append(skipFiles, file)
} else {
logFiles = append(logFiles, logFile{
file: file,
begin: firstItem.Time,
end: lastItem.Time,
})
}
return nil
}
for _, file := range files {
err := walkFn(filepath.Join(logDir, file.Name()), file)
if err != nil {
return nil, err
}
}
defer func() {
for _, f := range skipFiles {
_ = f.Close()
}
}()
// Sort by start time
sort.Slice(logFiles, func(i, j int) bool {
return logFiles[i].begin < logFiles[j].begin
})
return logFiles, err
}
func isCtxDone(ctx context.Context) bool {
select {
case <-ctx.Done():
return true
default:
return false
}
}
func readFirstValidLog(ctx context.Context, reader *bufio.Reader, tryLines int64) (*pb.LogMessage, error) {
var tried int64
for {
line, err := readLine(reader)
if err != nil {
return nil, err
}
item, err := parseLogItem(line)
if err == nil {
return item, nil
}
tried++
if tried >= tryLines {
break
}
if isCtxDone(ctx) {
return nil, ctx.Err()
}
}
return nil, errors.New("not a valid log file")
}
func readLastValidLog(ctx context.Context, file *os.File, tryLines int) (*pb.LogMessage, error) {
var tried int
stat, _ := file.Stat()
endCursor := stat.Size()
for {
lines, readBytes, err := readLastLines(ctx, file, endCursor)
if err != nil {
return nil, err
}
// the whole file has been read
if readBytes == 0 {
break
}
endCursor -= int64(readBytes)
for i := len(lines) - 1; i >= 0; i-- {
item, err := parseLogItem(lines[i])
if err == nil {
return item, nil
}
}
tried += len(lines)
if tried >= tryLines {
break
}
}
return nil, errors.New("not a valid log file")
}
// Read a line from a reader.
func readLine(reader *bufio.Reader) (string, error) {
var line, b []byte
var err error
isPrefix := true
for isPrefix {
b, isPrefix, err = reader.ReadLine()
line = append(line, b...)
if err != nil {
return "", err
}
}
return string(line), nil
}
const maxReadCacheSize = 1024 * 1024 * 16
// Read lines from the end of a file
// endCursor initial value should be the file size
func
|
(ctx context.Context, file *os.File, endCursor int64) ([]string, int, error) {
var lines []byte
var firstNonNewlinePos int
var cursor = endCursor
var size int64 = 256
for {
// stop if we are at the beginning
// check this first to avoid reading beyond the file size
if cursor <= 0 {
break
}
// enlarge the read cache to avoid too many memory moves.
size = size * 2
if size > maxReadCacheSize {
size = maxReadCacheSize
}
if cursor < size {
size = cursor
}
cursor -= size
_, err := file.Seek(cursor, io.SeekStart)
if err != nil {
// return the underlying I/O error; ctx.Err() may be nil here
return nil, 0, err
}
chars := make([]byte, size)
_, err = file.Read(chars)
if err != nil {
return nil, 0, err
}
}
lines = append(chars, lines...)
// find first '\n' or '\r'
for i := 0; i < len(chars)-1; i++ {
// reach the line end
// the first newline may be in the line end at the first round
if i >= len(lines)-1 {
break
}
if (chars[i] == 10 || chars[i] == 13) && chars[i+1] != 10 && chars[i+1] != 13 {
firstNonNewlinePos = i + 1
break
}
}
if firstNonNewlinePos > 0 {
break
}
if isCtxDone(ctx) {
return nil, 0, ctx.Err()
}
}
finalStr := string(lines[firstNonNewlinePos:])
return strings.Split(strings.ReplaceAll(finalStr, "\r\n", "\n"), "\n"), len(finalStr), nil
}
// ParseLogLevel returns the LogLevel for a level string and returns
// LogLevel_UNKNOWN if the string is not a valid level string
func ParseLogLevel(s string) pb.LogLevel {
switch s {
case "debug", "DEBUG":
return pb.LogLevel_Debug
case "info", "INFO":
return pb.LogLevel_Info
case "warn", "WARN":
return pb.LogLevel_Warn
case "trace", "TRACE":
return pb.LogLevel_Trace
case "critical", "CRITICAL":
return pb.LogLevel_Critical
case "error", "ERROR":
return pb.LogLevel_Error
default:
return pb.LogLevel_UNKNOWN
}
}
// parses a single log line and returns:
// 1. the timestamp in unix milliseconds
// 2. the log level
// 3. the log item content
//
// [2019/08/26 06:19:13.011 -04:00] [INFO] [printer.go:41] ["Welcome to TiDB."] ["Release Version"=v2.1.14]...
// [2019/08/26 07:19:49.529 -04:00] [INFO] [printer.go:41] ["Welcome to TiDB."] ["Release Version"=v3.0.2]...
// [2019/08/21 01:43:01.460 -04:00] [INFO] [util.go:60] [PD] [release-version=v3.0.2]
// [2019/08/26 07:20:23.815 -04:00] [INFO] [mod.rs:28] ["Release Version: 3.0.2"]
func parseLogItem(s string) (*pb.LogMessage, error) {
timeLeftBound := strings.Index(s, "[")
timeRightBound := strings.Index(s, "]")
if timeLeftBound == -1 || timeRightBound == -1 || timeLeftBound > timeRightBound {
return nil, fmt.Errorf("invalid log string: %s", s)
}
time, err := parseTimeStamp(s[timeLeftBound+1 : timeRightBound])
if err != nil {
return nil, err
}
levelLeftBound := strings.Index(s[timeRightBound+1:], "[")
levelRightBound := strings.Index(s[timeRightBound+1:], "]")
if levelLeftBound == -1 || levelRightBound == -1 || levelLeftBound > levelRightBound {
return nil, fmt.Errorf("invalid log string
|
readLastLines
|
identifier_name
|
lisp.go
|
_iid) {
iid := a.instance_id
if (iid == 0xffffff) { iid = -1 }
return(fmt.Sprintf("[%d]%s", iid, a.address_string))
}
return(a.address_string)
}
//
// lisp_store_address
//
// Store an instance-ID and the string representation of an IPv4 or IPv6
// address in Lisp_address format.
//
func (a *Lisp_address) lisp_store_address(iid int, addr string) bool {
var address string
//
// Is this address string an address or a prefix?
//
if (strings.Contains(addr, "/")) {
split := strings.Split(addr, "/")
address = split[0]
a.mask_len, _ = strconv.Atoi(split[1])
} else {
address = addr
a.mask_len = -1
}
a.instance_id = iid
//
// Parse address string. ParseIP() will put IPv4 addresses in a 16-byte
// array. We don't want that because address []byte length will determine
// address family.
//
a.address = net.ParseIP(address)
if (strings.Contains(addr, ".")) {
a.address = a.address[12:16]
}
//
// Set mask-length and mask address.
//
if (a.mask_len == -1) {
a.mask_len = len(a.address) * 8
}
a.mask_address = net.CIDRMask(a.mask_len, len(a.address) * 8)
//
// Store string for printing.
//
a.address_string = addr
return(true)
}
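// Example (illustrative, not part of the original source): calling
// lisp_store_address(0, "10.1.2.0/24") stores the 4-byte address [10 1 2 0]
// with mask_len 24 and a /24 CIDR mask, while a bare address such as
// "10.1.2.3" defaults mask_len to 32 (the full address length in bits).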
//
// lisp_is_ipv4
//
// Return true if Lisp_address is IPv4.
//
func (a *Lisp_address) lisp_is_ipv4() bool {
return((len(a.address) == 4))
}
//
// lisp_is_ipv6
//
// Return true if Lisp_address is IPv6.
//
func (a *Lisp_address) lisp_is_ipv6() bool {
return((len(a.address) == 16))
}
//
// lisp_is_multicast
//
// Return true if Lisp_address is an IPv4 or IPv6 multicast group address.
//
func (a *Lisp_address) lisp_is_multicast() bool {
if (a.lisp_is_ipv4()) {
return(int(a.address[0]) >= 224 && int(a.address[0]) < 240)
}
if (a.lisp_is_ipv6()) {
return(a.address[0] == 0xff)
}
return(false)
}
//
// lisp_make_address
//
// Store an instance-ID and the byte representation of an IPv4 or IPv6
// address in Lisp_address format. Note that Lisp_address.address_string
// is created when it is needed (in Lisp_address.lisp_print_address()).
//
func (a *Lisp_address) lisp_make_address(iid int, addr []byte) {
a.instance_id = iid
a.address = addr
a.mask_len = len(a.address) * 8
a.mask_address = net.CIDRMask(a.mask_len, len(a.address) * 8)
}
//
// lisp_exact_match
//
// Compare two addresses and return true if they match.
//
func (a *Lisp_address) lisp_exact_match(addr Lisp_address) (bool) {
if (len(a.address) != len(addr.address)) {
return(false)
}
if (a.mask_len != addr.mask_len) {
return(false)
}
if (a.instance_id != addr.instance_id) {
return(false)
}
if (a.address.Equal(addr.address) == false) {
return(false)
}
return(true)
}
//
// lisp_more_specific
//
// Return true if the supplied address is more specific than the receiver's
// address. If the mask-lengths are the same, true is returned.
//
func (a *Lisp_address) lisp_more_specific(addr Lisp_address) (bool) {
if (len(a.address) != len(addr.address)) {
return(false)
}
if (a.instance_id != addr.instance_id) {
return(false)
}
if (a.mask_len > addr.mask_len) {
return(false)
}
for i := 0; i < len(a.address); i++ {
if (a.mask_address[i] == 0) {
break
}
if ((a.address[i] & a.mask_address[i]) !=
(addr.address[i] & a.mask_address[i])) {
return(false)
}
}
return(true)
}
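// Worked example (illustrative, not part of the original source): with the
// receiver holding 10.1.0.0/16 (mask ff.ff.00.00) and a matching instance-ID,
// a supplied address of 10.1.2.3/32 agrees in every masked byte and is more
// specific, so true is returned; 10.2.0.0/16 differs in a masked byte and
// returns false.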
//
// lisp_hash_address
//
// Hash address to aid in selecting a source UDP port.
//
func (a *Lisp_address) lisp_hash_address() uint16 {
var hash uint = 0
for i := 0; i < len(a.address); i++ {
hash = hash ^ uint(a.address[i])
}
//
// Fold result into a short.
//
return(uint16(hash >> 16) ^ uint16(hash & 0xffff))
}
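// Worked example (illustrative, not part of the original source): for the
// IPv4 address 10.0.0.1 the byte-wise XOR is 10 ^ 0 ^ 0 ^ 1 = 0x0b, and the
// fold into 16 bits gives (0x0b >> 16) ^ (0x0b & 0xffff) = 0x000b, the value
// used to help select a source UDP port.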
type Lisp_database struct {
eid_prefix Lisp_address
}
type Lisp_interface struct {
instance_id int
}
type Lisp_map_cache struct {
next_mc *Lisp_map_cache
eid_prefix Lisp_address
rloc_set []Lisp_rloc
rle_set []Lisp_rloc
}
type Lisp_rloc struct {
rloc Lisp_address
encap_port int
stats Lisp_stats
keys [4]*Lisp_keys
use_key_id int
}
type Lisp_keys struct {
crypto_key string
icv_key string
iv []byte
crypto_alg cipher.AEAD
hash_alg hash.Hash
}
type Lisp_stats struct {
packets uint64
bytes uint64
last_packet time.Time
}
|
//
// lisp_count
//
// Increment stats counters. Either do it for an RLOC/RLE entry or for the
// lisp_decap_stats map. Argument 'key-name' needs to be set if stats is nil.
//
func lisp_count(stats *Lisp_stats, key_name string, packet []byte) {
if (stats == nil) {
s, ok := lisp_decap_stats[key_name]
if (!ok) {
s = new(Lisp_stats)
lisp_decap_stats[key_name] = s
}
s.packets += 1
s.bytes += uint64(len(packet))
s.last_packet = time.Now()
} else {
stats.packets += 1
stats.bytes += uint64(len(packet))
stats.last_packet = time.Now()
}
}
//
// lisp_find_rloc
//
// Find RLOC entry in map-cache entry based on supplied RLOC address.
//
func (mc *Lisp_map_cache) lisp_find_rloc(rloc_addr Lisp_address) (*Lisp_rloc) {
for _, rloc := range mc.rloc_set {
if (rloc_addr.lisp_exact_match(rloc.rloc)) { return(&rloc) }
}
return(nil)
}
//
// lprint
//
// Print control-plane debug logging output when configured.
//
func lprint(format string, args ...interface{}) {
if (!lisp_debug_logging) {
return
}
ts := time.Now()
ms := ts.Nanosecond() / 1000000
ds := fmt.Sprintf("%02d/%02d/%02d %02d:%02d:%02d.%03d", ts.Month(),
ts.Day(), ts.Year(), ts.Hour(), ts.Minute(), ts.Second(), ms)
f := ds + ": xtr: " + format + "\n"
fmt.Printf(f, args...)
}
//
// dprint
//
// Print data-plane debug logging output when configured.
//
func dprint(format string, args ...interface{}) {
if (!lisp_data_plane_logging) {
return
}
ts := time.Now()
ms := ts.Nanosecond() / 1000000
ds := fmt.Sprintf("%02d/%02d/%02d %02d:%02d:%02d.%03d", ts.Month(),
ts.Day(), ts.Year(), ts.Hour(), ts.Minute(), ts.Second(), ms)
f := ds + ": xtr: " + format + "\n"
fmt.Printf(f, args...)
}
//
// debug
//
// For temporary debug output that highlights the line in boldface red.
//
func debug(format string, args ...interface{}) {
f := red(">>>") + format + red("<<<") + "\n"
fmt.Printf(f, args...)
}
//
// debugv
//
// For temporary debug output that shows the contents of a data structure.
// Very useful for debugging.
//
func debugv(args interface{}) {
debug("%#v", args)
}
//
// lisp_command_output
//
// Execute a system command and return a string with output.
//
func lisp_command_output(command string) string {
cmd := exec.Command(command)
out, err := cmd.CombinedOutput()
if (err != nil) {
return("")
}
output := string(out)
return(output[0:len(output)-1])
}
//
// lisp_read_file
//
// Read entire file into a string.
//
func lisp_read_file(filename string) string {
fd, err := os.Open(filename)
if (err != nil) {
return("")
}
scanner := bufio.NewScanner(fd)
scanner.Scan()
fd.Close()
return(scanner.Text())
}
//
// lisp_write_file
//
// Write
|
random_line_split
|
|
lisp.go
|
_iid) {
iid := a.instance_id
if (iid == 0xffffff) { iid = -1 }
return(fmt.Sprintf("[%d]%s", iid, a.address_string))
}
return(a.address_string)
}
//
// lisp_store_address
//
// Store an instance-ID and the string representation of an IPv4 or IPv6
// address in Lisp_address format.
//
func (a *Lisp_address) lisp_store_address(iid int, addr string) bool {
var address string
//
// Is this address string an address or a prefix?
//
if (strings.Contains(addr, "/")) {
split := strings.Split(addr, "/")
address = split[0]
a.mask_len, _ = strconv.Atoi(split[1])
} else {
address = addr
a.mask_len = -1
}
a.instance_id = iid
//
// Parse address string. ParseIP() will put IPv4 addresses in a 16-byte
// array. We don't want that because address []byte length will determine
// address family.
//
a.address = net.ParseIP(address)
if (strings.Contains(addr, ".")) {
a.address = a.address[12:16]
}
//
// Set mask-length and mask address.
//
if (a.mask_len == -1) {
a.mask_len = len(a.address) * 8
}
a.mask_address = net.CIDRMask(a.mask_len, len(a.address) * 8)
//
// Store string for printing.
//
a.address_string = addr
return(true)
}
//
// lisp_is_ipv4
//
// Return true if Lisp_address is IPv4.
//
func (a *Lisp_address) lisp_is_ipv4() bool {
return((len(a.address) == 4))
}
//
// lisp_is_ipv6
//
// Return true if Lisp_address is IPv6.
//
func (a *Lisp_address) lisp_is_ipv6() bool {
return((len(a.address) == 16))
}
//
// lisp_is_multicast
//
// Return true if Lisp_address is an IPv4 or IPv6 multicast group address.
//
func (a *Lisp_address) lisp_is_multicast() bool {
if (a.lisp_is_ipv4()) {
return(int(a.address[0]) >= 224 && int(a.address[0]) < 240)
}
if (a.lisp_is_ipv6()) {
return(a.address[0] == 0xff)
}
return(false)
}
//
// lisp_make_address
//
// Store an instance-ID and the byte representation of an IPv4 or IPv6
// address in Lisp_address format. Note that Lisp_address.address_string
// is created when it is needed (in Lisp_address.lisp_print_address()).
//
func (a *Lisp_address) lisp_make_address(iid int, addr []byte) {
a.instance_id = iid
a.address = addr
a.mask_len = len(a.address) * 8
a.mask_address = net.CIDRMask(a.mask_len, len(a.address) * 8)
}
//
// lisp_exact_match
//
// Compare two addresses and return true if they match.
//
func (a *Lisp_address) lisp_exact_match(addr Lisp_address) (bool) {
if (len(a.address) != len(addr.address)) {
return(false)
}
if (a.mask_len != addr.mask_len)
|
if (a.instance_id != addr.instance_id) {
return(false)
}
if (a.address.Equal(addr.address) == false) {
return(false)
}
return(true)
}
//
// lisp_more_specific
//
// Return true if the supplied address is more specific than the receiver's
// address. If the mask-lengths are the same, true is returned.
//
func (a *Lisp_address) lisp_more_specific(addr Lisp_address) (bool) {
if (len(a.address) != len(addr.address)) {
return(false)
}
if (a.instance_id != addr.instance_id) {
return(false)
}
if (a.mask_len > addr.mask_len) {
return(false)
}
for i := 0; i < len(a.address); i++ {
if (a.mask_address[i] == 0) {
break
}
if ((a.address[i] & a.mask_address[i]) !=
(addr.address[i] & a.mask_address[i])) {
return(false)
}
}
return(true)
}
//
// lisp_hash_address
//
// Hash address to aid in selecting a source UDP port.
//
func (a *Lisp_address) lisp_hash_address() uint16 {
var hash uint = 0
for i := 0; i < len(a.address); i++ {
hash = hash ^ uint(a.address[i])
}
//
// Fold result into a short.
//
return(uint16(hash >> 16) ^ uint16(hash & 0xffff))
}
type Lisp_database struct {
eid_prefix Lisp_address
}
type Lisp_interface struct {
instance_id int
}
type Lisp_map_cache struct {
next_mc *Lisp_map_cache
eid_prefix Lisp_address
rloc_set []Lisp_rloc
rle_set []Lisp_rloc
}
type Lisp_rloc struct {
rloc Lisp_address
encap_port int
stats Lisp_stats
keys [4]*Lisp_keys
use_key_id int
}
type Lisp_keys struct {
crypto_key string
icv_key string
iv []byte
crypto_alg cipher.AEAD
hash_alg hash.Hash
}
type Lisp_stats struct {
packets uint64
bytes uint64
last_packet time.Time
}
//
// lisp_count
//
// Increment stats counters. Either do it for an RLOC/RLE entry or for the
// lisp_decap_stats map. Argument 'key-name' needs to be set if stats is nil.
//
func lisp_count(stats *Lisp_stats, key_name string, packet []byte) {
if (stats == nil) {
s, ok := lisp_decap_stats[key_name]
if (!ok) {
s = new(Lisp_stats)
lisp_decap_stats[key_name] = s
}
s.packets += 1
s.bytes += uint64(len(packet))
s.last_packet = time.Now()
} else {
stats.packets += 1
stats.bytes += uint64(len(packet))
stats.last_packet = time.Now()
}
}
//
// lisp_find_rloc
//
// Find RLOC entry in map-cache entry based on supplied RLOC address.
//
func (mc *Lisp_map_cache) lisp_find_rloc(rloc_addr Lisp_address) (*Lisp_rloc) {
for _, rloc := range mc.rloc_set {
if (rloc_addr.lisp_exact_match(rloc.rloc)) { return(&rloc) }
}
return(nil)
}
//
// lprint
//
// Print control-plane debug logging output when configured.
//
func lprint(format string, args ...interface{}) {
if (!lisp_debug_logging) {
return
}
ts := time.Now()
ms := ts.Nanosecond() / 1000000
ds := fmt.Sprintf("%02d/%02d/%02d %02d:%02d:%02d.%03d", ts.Month(),
ts.Day(), ts.Year(), ts.Hour(), ts.Minute(), ts.Second(), ms)
f := ds + ": xtr: " + format + "\n"
fmt.Printf(f, args...)
}
//
// dprint
//
// Print data-plane debug logging output when configured.
//
func dprint(format string, args ...interface{}) {
if (!lisp_data_plane_logging) {
return
}
ts := time.Now()
ms := ts.Nanosecond() / 1000000
ds := fmt.Sprintf("%02d/%02d/%02d %02d:%02d:%02d.%03d", ts.Month(),
ts.Day(), ts.Year(), ts.Hour(), ts.Minute(), ts.Second(), ms)
f := ds + ": xtr: " + format + "\n"
fmt.Printf(f, args...)
}
//
// debug
//
// For temporary debug output that highlights the line in boldface red.
//
func debug(format string, args ...interface{}) {
f := red(">>>") + format + red("<<<") + "\n"
fmt.Printf(f, args...)
}
//
// debugv
//
// For temporary debug output that shows the contents of a data structure.
// Very useful for debugging.
//
func debugv(args interface{}) {
debug("%#v", args)
}
//
// lisp_command_output
//
// Execute a system command and return a string with output.
//
func lisp_command_output(command string) string {
cmd := exec.Command(command)
out, err := cmd.CombinedOutput()
if (err != nil) {
return("")
}
output := string(out)
return(output[0:len(output)-1])
}
//
// lisp_read_file
//
// Read entire file into a string.
//
func lisp_read_file(filename string) string {
fd, err := os.Open(filename)
if (err != nil) {
return("")
}
scanner := bufio.NewScanner(fd)
scanner.Scan()
fd.Close()
return(scanner.Text())
}
//
// lisp_write_file
//
//
|
{
return(false)
}
|
conditional_block
|
lisp.go
|
with_iid) {
iid := a.instance_id
if (iid == 0xffffff) { iid = -1 }
return(fmt.Sprintf("[%d]%s", iid, a.address_string))
}
return(a.address_string)
}
//
// lisp_store_address
//
// Store an instance-ID and the string representation of an IPv4 or IPv6
// address in Lisp_address format.
//
func (a *Lisp_address) lisp_store_address(iid int, addr string) bool {
var address string
//
// Is this address string an address or a prefix?
//
if (strings.Contains(addr, "/")) {
split := strings.Split(addr, "/")
address = split[0]
a.mask_len, _ = strconv.Atoi(split[1])
} else {
address = addr
a.mask_len = -1
}
a.instance_id = iid
//
// Parse address string. ParseIP() will put IPv4 addresses in a 16-byte
// array. We don't want that because address []byte length will determine
// address family.
//
a.address = net.ParseIP(address)
if (strings.Contains(addr, ".")) {
a.address = a.address[12:16]
}
//
// Set mask-length and mask address.
//
if (a.mask_len == -1) {
a.mask_len = len(a.address) * 8
}
a.mask_address = net.CIDRMask(a.mask_len, len(a.address) * 8)
//
// Store string for printing.
//
a.address_string = addr
return(true)
}
//
// lisp_is_ipv4
//
// Return true if Lisp_address is IPv4.
//
func (a *Lisp_address) lisp_is_ipv4() bool {
return((len(a.address) == 4))
}
//
// lisp_is_ipv6
//
// Return true if Lisp_address is IPv6.
//
func (a *Lisp_address) lisp_is_ipv6() bool {
return((len(a.address) == 16))
}
//
// lisp_is_multicast
//
// Return true if Lisp_address is an IPv4 or IPv6 multicast group address.
//
func (a *Lisp_address) lisp_is_multicast() bool {
if (a.lisp_is_ipv4()) {
return(int(a.address[0]) >= 224 && int(a.address[0]) < 240)
}
if (a.lisp_is_ipv6()) {
return(a.address[0] == 0xff)
}
return(false)
}
//
// lisp_make_address
//
// Store an instance-ID and the byte representation of an IPv4 or IPv6
// address in Lisp_address format. Note that Lisp_address.address_string
// is created when it is needed (in Lisp_address.lisp_print_address()).
//
func (a *Lisp_address) lisp_make_address(iid int, addr []byte) {
a.instance_id = iid
a.address = addr
a.mask_len = len(a.address) * 8
a.mask_address = net.CIDRMask(a.mask_len, len(a.address) * 8)
}
//
// lisp_exact_match
//
// Compare two addresses and return true if they match.
//
func (a *Lisp_address) lisp_exact_match(addr Lisp_address) (bool) {
if (len(a.address) != len(addr.address)) {
return(false)
}
if (a.mask_len != addr.mask_len) {
return(false)
}
if (a.instance_id != addr.instance_id) {
return(false)
}
if (a.address.Equal(addr.address) == false) {
return(false)
}
return(true)
}
//
// lisp_more_specific
//
// Return true if the supplied address is more specific than the receiver's
// address. If the mask-lengths are the same, true is returned.
//
func (a *Lisp_address) lisp_more_specific(addr Lisp_address) (bool) {
if (len(a.address) != len(addr.address)) {
return(false)
}
if (a.instance_id != addr.instance_id) {
return(false)
}
if (a.mask_len > addr.mask_len) {
return(false)
}
for i := 0; i < len(a.address); i++ {
if (a.mask_address[i] == 0) {
break
}
if ((a.address[i] & a.mask_address[i]) !=
(addr.address[i] & a.mask_address[i])) {
return(false)
}
}
return(true)
}
//
// lisp_hash_address
//
// Hash address to aid in selecting a source UDP port.
//
func (a *Lisp_address) lisp_hash_address() uint16 {
var hash uint = 0
for i := 0; i < len(a.address); i++ {
hash = hash ^ uint(a.address[i])
}
//
// Fold result into a short.
//
return(uint16(hash >> 16) ^ uint16(hash & 0xffff))
}
type Lisp_database struct {
eid_prefix Lisp_address
}
type Lisp_interface struct {
instance_id int
}
type Lisp_map_cache struct {
next_mc *Lisp_map_cache
eid_prefix Lisp_address
rloc_set []Lisp_rloc
rle_set []Lisp_rloc
}
type Lisp_rloc struct {
rloc Lisp_address
encap_port int
stats Lisp_stats
keys [4]*Lisp_keys
use_key_id int
}
type Lisp_keys struct {
crypto_key string
icv_key string
iv []byte
crypto_alg cipher.AEAD
hash_alg hash.Hash
}
type Lisp_stats struct {
packets uint64
bytes uint64
last_packet time.Time
}
//
// lisp_count
//
// Increment stats counters. Either do it for an RLOC/RLE entry or for the
// lisp_decap_stats map. Argument 'key-name' needs to be set if stats is nil.
//
func lisp_count(stats *Lisp_stats, key_name string, packet []byte) {
if (stats == nil) {
s, ok := lisp_decap_stats[key_name]
if (!ok) {
s = new(Lisp_stats)
lisp_decap_stats[key_name] = s
}
s.packets += 1
s.bytes += uint64(len(packet))
s.last_packet = time.Now()
} else {
stats.packets += 1
stats.bytes += uint64(len(packet))
stats.last_packet = time.Now()
}
}
//
// lisp_find_rloc
//
// Find RLOC entry in map-cache entry based on supplied RLOC address.
//
func (mc *Lisp_map_cache) lisp_find_rloc(rloc_addr Lisp_address) (*Lisp_rloc) {
for _, rloc := range mc.rloc_set {
if (rloc_addr.lisp_exact_match(rloc.rloc)) { return(&rloc) }
}
return(nil)
}
//
// lprint
//
// Print control-plane debug logging output when configured.
//
func lprint(format string, args ...interface{}) {
if (!lisp_debug_logging) {
return
}
ts := time.Now()
ms := ts.Nanosecond() / 1000000
ds := fmt.Sprintf("%02d/%02d/%02d %02d:%02d:%02d.%03d", ts.Month(),
ts.Day(), ts.Year(), ts.Hour(), ts.Minute(), ts.Second(), ms)
f := ds + ": xtr: " + format + "\n"
fmt.Printf(f, args...)
}
//
// dprint
//
// Print data-plane debug logging output when configured.
//
func dprint(format string, args ...interface{}) {
if (!lisp_data_plane_logging) {
return
}
ts := time.Now()
ms := ts.Nanosecond() / 1000000
ds := fmt.Sprintf("%02d/%02d/%02d %02d:%02d:%02d.%03d", ts.Month(),
ts.Day(), ts.Year(), ts.Hour(), ts.Minute(), ts.Second(), ms)
f := ds + ": xtr: " + format + "\n"
fmt.Printf(f, args...)
}
//
// debug
//
// For temporary debug output that highlights the line in boldface red.
//
func debug(format string, args ...interface{}) {
f := red(">>>") + format + red("<<<") + "\n"
fmt.Printf(f, args...)
}
//
// debugv
//
// For temporary debug output that shows the contents of a data structure.
// Very useful for debugging.
//
func debugv(args interface{}) {
debug("%#v", args)
}
//
// lisp_command_output
//
// Execute a system command and return a string with output.
//
func
|
(command string) string {
cmd := exec.Command(command)
out, err := cmd.CombinedOutput()
if (err != nil) {
return("")
}
output := string(out)
return(output[0:len(output)-1])
}
//
// lisp_read_file
//
// Read entire file into a string.
//
func lisp_read_file(filename string) string {
fd, err := os.Open(filename)
if (err != nil) {
return("")
}
scanner := bufio.NewScanner(fd)
scanner.Scan()
fd.Close()
return(scanner.Text())
}
//
// lisp_write_file
//
//
|
lisp_command_output
|
identifier_name
|
lisp.go
|
_iid) {
iid := a.instance_id
if (iid == 0xffffff) { iid = -1 }
return(fmt.Sprintf("[%d]%s", iid, a.address_string))
}
return(a.address_string)
}
//
// lisp_store_address
//
// Store an instance-ID and the string representation of an IPv4 or IPv6
// address in Lisp_address format.
//
func (a *Lisp_address) lisp_store_address(iid int, addr string) bool {
var address string
//
// Is this address string an address or a prefix?
//
if (strings.Contains(addr, "/")) {
split := strings.Split(addr, "/")
address = split[0]
a.mask_len, _ = strconv.Atoi(split[1])
} else {
address = addr
a.mask_len = -1
}
a.instance_id = iid
//
// Parse address string. ParseIP() will put IPv4 addresses in a 16-byte
// array. We don't want that because address []byte length will determine
// address family.
//
a.address = net.ParseIP(address)
if (strings.Contains(addr, ".")) {
a.address = a.address[12:16]
}
//
// Set mask-length and mask address.
//
if (a.mask_len == -1) {
a.mask_len = len(a.address) * 8
}
a.mask_address = net.CIDRMask(a.mask_len, len(a.address) * 8)
//
// Store string for printing.
//
a.address_string = addr
return(true)
}
//
// lisp_is_ipv4
//
// Return true if Lisp_address is IPv4.
//
func (a *Lisp_address) lisp_is_ipv4() bool
|
//
// lisp_is_ipv6
//
// Return true if Lisp_address is IPv6.
//
func (a *Lisp_address) lisp_is_ipv6() bool {
return((len(a.address) == 16))
}
//
// lisp_is_multicast
//
// Return true if Lisp_address is an IPv4 or IPv6 multicast group address.
//
func (a *Lisp_address) lisp_is_multicast() bool {
if (a.lisp_is_ipv4()) {
return(int(a.address[0]) >= 224 && int(a.address[0]) < 240)
}
if (a.lisp_is_ipv6()) {
return(a.address[0] == 0xff)
}
return(false)
}
//
// lisp_make_address
//
// Store an instance-ID and the byte representation of an IPv4 or IPv6
// address in Lisp_address format. Note that Lisp_address.address_string
// is created when it is needed (in Lisp_address.lisp_print_address()).
//
func (a *Lisp_address) lisp_make_address(iid int, addr []byte) {
a.instance_id = iid
a.address = addr
a.mask_len = len(a.address) * 8
a.mask_address = net.CIDRMask(a.mask_len, len(a.address) * 8)
}
//
// lisp_exact_match
//
// Compare two addresses and return true if they match.
//
func (a *Lisp_address) lisp_exact_match(addr Lisp_address) (bool) {
if (len(a.address) != len(addr.address)) {
return(false)
}
if (a.mask_len != addr.mask_len) {
return(false)
}
if (a.instance_id != addr.instance_id) {
return(false)
}
if (a.address.Equal(addr.address) == false) {
return(false)
}
return(true)
}
//
// lisp_more_specific
//
// Return true if the supplied address is more specific than the receiver's
// address. If the mask-lengths are the same, true is returned.
//
func (a *Lisp_address) lisp_more_specific(addr Lisp_address) (bool) {
if (len(a.address) != len(addr.address)) {
return(false)
}
if (a.instance_id != addr.instance_id) {
return(false)
}
if (a.mask_len > addr.mask_len) {
return(false)
}
for i := 0; i < len(a.address); i++ {
if (a.mask_address[i] == 0) {
break
}
if ((a.address[i] & a.mask_address[i]) !=
(addr.address[i] & a.mask_address[i])) {
return(false)
}
}
return(true)
}
//
// lisp_hash_address
//
// Hash address to aid in selecting a source UDP port.
//
func (a *Lisp_address) lisp_hash_address() uint16 {
var hash uint = 0
for i := 0; i < len(a.address); i++ {
hash = hash ^ uint(a.address[i])
}
//
// Fold result into a short.
//
return(uint16(hash >> 16) ^ uint16(hash & 0xffff))
}
type Lisp_database struct {
eid_prefix Lisp_address
}
type Lisp_interface struct {
instance_id int
}
type Lisp_map_cache struct {
next_mc *Lisp_map_cache
eid_prefix Lisp_address
rloc_set []Lisp_rloc
rle_set []Lisp_rloc
}
type Lisp_rloc struct {
rloc Lisp_address
encap_port int
stats Lisp_stats
keys [4]*Lisp_keys
use_key_id int
}
type Lisp_keys struct {
crypto_key string
icv_key string
iv []byte
crypto_alg cipher.AEAD
hash_alg hash.Hash
}
type Lisp_stats struct {
packets uint64
bytes uint64
last_packet time.Time
}
//
// lisp_count
//
// Increment stats counters. Either do it for an RLOC/RLE entry or for the
// lisp_decap_stats map. Argument 'key-name' needs to be set if stats is nil.
//
func lisp_count(stats *Lisp_stats, key_name string, packet []byte) {
if (stats == nil) {
s, ok := lisp_decap_stats[key_name]
if (!ok) {
s = new(Lisp_stats)
lisp_decap_stats[key_name] = s
}
s.packets += 1
s.bytes += uint64(len(packet))
s.last_packet = time.Now()
} else {
stats.packets += 1
stats.bytes += uint64(len(packet))
stats.last_packet = time.Now()
}
}
//
// lisp_find_rloc
//
// Find RLOC entry in map-cache entry based on supplied RLOC address.
//
func (mc *Lisp_map_cache) lisp_find_rloc(rloc_addr Lisp_address) (*Lisp_rloc) {
for _, rloc := range mc.rloc_set {
if (rloc_addr.lisp_exact_match(rloc.rloc)) { return(&rloc) }
}
return(nil)
}
//
// lprint
//
// Print control-plane debug logging output when configured.
//
func lprint(format string, args ...interface{}) {
if (!lisp_debug_logging) {
return
}
ts := time.Now()
ms := ts.Nanosecond() / 1000000
ds := fmt.Sprintf("%02d/%02d/%02d %02d:%02d:%02d.%03d", ts.Month(),
ts.Day(), ts.Year(), ts.Hour(), ts.Minute(), ts.Second(), ms)
f := ds + ": xtr: " + format + "\n"
fmt.Printf(f, args...)
}
//
// dprint
//
// Print data-plane debug logging output when configured.
//
func dprint(format string, args ...interface{}) {
if (!lisp_data_plane_logging) {
return
}
ts := time.Now()
ms := ts.Nanosecond() / 1000000
ds := fmt.Sprintf("%02d/%02d/%02d %02d:%02d:%02d.%03d", ts.Month(),
ts.Day(), ts.Year(), ts.Hour(), ts.Minute(), ts.Second(), ms)
f := ds + ": xtr: " + format + "\n"
fmt.Printf(f, args...)
}
//
// debug
//
// For temporary debug output that highlights the line in boldface red.
//
func debug(format string, args ...interface{}) {
f := red(">>>") + format + red("<<<") + "\n"
fmt.Printf(f, args...)
}
//
// debugv
//
// For temporary debug output that shows the contents of a data structure.
// Very useful for debugging.
//
func debugv(args interface{}) {
debug("%#v", args)
}
//
// lisp_command_output
//
// Execute a system command and return a string with output.
//
func lisp_command_output(command string) string {
cmd := exec.Command(command)
out, err := cmd.CombinedOutput()
if (err != nil) {
return("")
}
output := string(out)
return(output[0:len(output)-1])
}
//
// lisp_read_file
//
// Read entire file into a string.
//
func lisp_read_file(filename string) string {
fd, err := os.Open(filename)
if (err != nil) {
return("")
}
scanner := bufio.NewScanner(fd)
scanner.Scan()
fd.Close()
return(scanner.Text())
}
//
// lisp_write_file
//
//
|
{
return((len(a.address) == 4))
}
|
identifier_body
|
FastSlamV2.py
|
o)
def get_weight(self):
return self.weight
def normalize_weight(self, total_weight):
self.weight = self.weight / total_weight
def set_weight(self, new_weight):
self.weight = new_weight
def get_position(self):
return self.X_robot
def get_landmarkers(self):
return self.Landmarkers
def get_path(self):
return self.x_path, self.y_path
def copy(self, updated_marker):
new_particle = Particle()
del new_particle.x_path
del new_particle.y_path
del new_particle.Landmarkers
del new_particle.X_robot
new_particle.x_path = np.copy(self.x_path)
new_particle.y_path = np.copy(self.y_path)
for i in range(len(self.Landmarkers)):
if self.Landmarkers[i] != None and updated_marker[i] == True:
self.Landmarkers[i] = self.Landmarkers[i].copy()
new_particle.Landmarkers = self.Landmarkers
new_particle.X_robot = np.copy(self.X_robot)
return new_particle
class KalmanFilter():
def __init__(self):
# X_ expected value of the landmark's position (x,y)
# X_robot robot pose (x, y, yaw)
# H gradient of markers' relative position to robot (h:(x_m,y_m) -> (distance, orientation); H = dh/dX_ and X_ = X(k+1|k))
# S covariance matrix of the marker's position
# Q covariance matrix of the marker measurement
# V difference between the measurement and the estimated marker position
self.first = True
self.R_t = np.identity(number_of_dimensions, dtype='float64')*Sensor_noise #sensor noise
self.P_t = np.identity(number_of_dimensions+1, dtype='float64')*Odom_noise #odometry noise
def compute_G(self, X_robot):
x = 0 # x position
y = 1 # y position
y = self.X_[y] - X_robot[y]
x = self.X_[x] - X_robot[x]
# compute H
denominator = x**2 + y**2
g_o11 = x / np.sqrt(denominator)
g_o12 = y / np.sqrt(denominator)
g_o21 = -y / denominator
g_o22 = x / denominator
self.G_o = np.array([[g_o11, g_o12], [g_o21, g_o22]])
g_s11 = -g_o11
g_s12 = -g_o12
g_s21 = -g_o21
g_s22 = -g_o22
self.G_s = np.array([[g_s21, g_s22, 0], [g_s21, g_s22, -1]])
def Apply_EKF(self, X_robot, Z):
x = 0 # x position
y = 1 # y position
o = 2 # o orientation
d = 0 # distance measured
fi = 1 # orientation of the measurement
if self.first == True:
# the angle is in the direction y to x, reverse of the usual x to y
angle = (X_robot[o] + Z[fi])
self.X_ = np.array([X_robot[x] + Z[d]*np.cos(angle), X_robot[y] + Z[d]*np.sin(angle)], dtype='float64').transpose() # first landmark position
self.compute_G(X_robot)
self.S = np.linalg.inv(self.G_o.dot(np.linalg.inv(self.R_t).dot(self.G_o.T)))
self.V = np.array([0, 0], dtype='float64').transpose()
self.L_t = np.identity(number_of_dimensions, dtype='float64')
else:
# Prediction
y = self.X_[y] - X_robot[y]
x = self.X_[x] - X_robot[x]
d = np.sqrt(x**2 + y**2) # distance
fi = np.arctan2(y, x) - X_robot[o] # direction
while fi > np.pi:
fi = fi - 2*np.pi
while fi < -np.pi:
fi = fi + 2*np.pi
Z_ = np.array([d, fi], dtype='float64').transpose()
self.compute_G(X_robot)
self.Q = self.G_o.dot(self.S.dot(self.G_o.T)) + self.R_t
# Observation
self.V = np.subtract(Z, Z_) # Z = [d, teta]
def Update(self):
# Update
if self.first == False:
# K kalman gain
K = self.S.dot(self.G_o.T.dot(np.linalg.inv(self.Q)))
self.X_ = self.X_ + K.dot(self.V)
self.L_t = self.G_s.dot(self.P_t.dot(self.G_s.T)) + self.G_o.dot(self.S.dot(self.G_o.T)) + self.R_t
self.S = (np.identity(number_of_dimensions)- K.dot(self.G_o)).dot(self.S)
else:
self.first = False
def get_marker_position(self):
return self.X_
def get_marker_covariance(self):
return self.L_t
def get_marker_validity(self):
return self.V
def measurement_validition(self):
return np.dot(self.V.T, np.linalg.inv(self.L_t)).dot(self.V)
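# Illustrative note (not part of the original source): measurement_validition
# returns the squared Mahalanobis distance V^T * inv(L_t) * V of the innovation
# V under the innovation covariance L_t; comparing it against a threshold such
# as validity_threshold is a common way to reject outlier marker observations.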
def copy(self):
new_KF = KalmanFilter()
new_KF.X_ = np.copy(self.X_)
new_KF.S = np.copy(self.S)
new_KF.L_t = np.copy(self.L_t)
new_KF.V = np.copy(self.V)
new_KF.first = False
return new_KF
class markers():
def __init__(self):
self.could_it_read = False
self.z_distance_left_eye_to_robot_wheel = camara_distance_z
self.x_distance_left_eye_to_robot_wheel = camara_distance_x
self.markers_info = [None]*NUMBER_MARKERS
self.list_ids = np.ones(NUMBER_MARKERS, dtype='int32')*KEY_NUMBER
def callback_Markers(self, data):
# static tf could be applied here: z = z + z_distance_left_eye_to_robot_wheel, x = x + x_distance_left_eye_to_robot_wheel
for i in range(NUMBER_MARKERS):
try:
marker_info = data.markers.pop()
except:
break
self.list_ids[i] = marker_info.id
self.markers_info[marker_info.id] = marker_info
def get_measerment(self, index):
x = self.markers_info[index].pose.pose.position.x # right-left
z = self.markers_info[index].pose.pose.position.z # front-back
# position of the marker relative to base_link
z = z + self.z_distance_left_eye_to_robot_wheel
x = x + self.x_distance_left_eye_to_robot_wheel
marker_distance = np.sqrt(z**2+x**2)
marker_direction = np.arctan(x/z)
return np.array([marker_distance, -marker_direction], dtype='float64').transpose()
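# Worked example (illustrative, not part of the original source): ignoring the
# small camera offsets, a marker 2.0 m ahead (z) and 0.5 m to the right (x)
# gives distance sqrt(2.0**2 + 0.5**2) ~= 2.06 m and direction arctan(0.5/2.0)
# ~= 0.245 rad, so the method returns approximately [2.06, -0.245].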
def get_list_ids(self):
return self.list_ids
def reset_list_ids(self):
i = 0
while self.list_ids[i] != KEY_NUMBER:
self.list_ids[i] = KEY_NUMBER
i += 1
def marker_info(self, index):
return self.markers_info[index]
class odom():
def __init__(self):
self.read_move = np.array([0, 0, 0], dtype='float64').transpose()
self.first_read = True
def callback_odom(self, data):
# robot frame
frame_id = data.header.frame_id # odom
child_frame_id = data.child_frame_id # base_link
# pose
x = data.pose.pose.position.x # front-back
y = data.pose.pose.position.y # right-left
orientation_x = data.pose.pose.orientation.x
orientation_y = data.pose.pose.orientation.y
orientation_z = data.pose.pose.orientation.z
orientation_w = data.pose.pose.orientation.w
roll, pitch, yaw = tf.transformations.euler_from_quaternion((orientation_x, orientation_y, orientation_z, orientation_w))
if self.first_read == True:
self.last_position = np.array([x, y, yaw], dtype='float64').transpose()
self.total_movement = np.array([0, 0, 0], dtype='float64').transpose()
self.first_read = False
self.odom_position = np.array([x, y, yaw], dtype='float64').transpose()
self.movement = np.subtract(self.odom_position, self.last_position)
self.total_movement = np.add(self.total_movement, np.absolute(self.movement))
if self.movement[2] > np.pi:
self.movement[2] = 2*np.pi - self.movement[2]
if self.movement[2] < -np.pi:
self.movement[2] = - 2*np.pi - self.movement[2]
self.last_position = self.odom_position
self.read_move = np.add(self.read_move, self.movement)
def
|
actual_movement
|
identifier_name
|
|
FastSlamV2.py
|
(self, marker_id, Z):
if self.Landmarkers[marker_id] == None:
self.Landmarkers[marker_id] = KalmanFilter()
return self.Landmarkers[marker_id]
def particle_prediction(self, motion_model):
#if the robot moves we just add the motion model to the previous pose to predict the particle position
x = 0
y = 1
o = 2
noise = np.array([np.random.normal(0,translation_noise), np.random.normal(0,translation_noise), np.random.normal(0,rotation_noise)], dtype='float64').transpose()
noise = noise*motion_model*noise_factor
self.X_robot = self.X_robot + motion_model + noise
while self.X_robot[o] > np.pi:
self.X_robot[o] = self.X_robot[o] - 2*np.pi
while self.X_robot[o] < -np.pi:
self.X_robot[o] = self.X_robot[o] + 2*np.pi
self.x_path = np.insert(self.x_path, 0, self.X_robot[x])
self.y_path = np.insert(self.y_path, 0, self.X_robot[y])
return self.X_robot
def update_weight(self, marker_id):
std = self.Landmarkers[marker_id].get_marker_covariance()
dev = self.Landmarkers[marker_id].get_marker_validity()
fact = np.sqrt(np.linalg.det(2* np.pi * std))
expo = - np.dot(dev.T, np.linalg.inv(std).dot(dev))/2
self.weight = self.weight / fact * np.exp(expo)
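# Illustrative note (not part of the original source): update_weight applies the
# usual FastSLAM importance weighting, w <- w * exp(-0.5 * dev^T * inv(std) * dev)
# / sqrt(det(2*pi*std)), i.e. the Gaussian likelihood of the innovation `dev`
# under the innovation covariance `std` reported by the landmark's EKF.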
def get_weight(self):
return self.weight
def normalize_weight(self, total_weight):
self.weight = self.weight / total_weight
def set_weight(self, new_weight):
self.weight = new_weight
def get_position(self):
return self.X_robot
def get_landmarkers(self):
return self.Landmarkers
def get_path(self):
return self.x_path, self.y_path
def copy(self, updated_marker):
new_particle = Particle()
del new_particle.x_path
del new_particle.y_path
del new_particle.Landmarkers
del new_particle.X_robot
new_particle.x_path = np.copy(self.x_path)
new_particle.y_path = np.copy(self.y_path)
for i in range(len(self.Landmarkers)):
if self.Landmarkers[i] != None and updated_marker[i] == True:
self.Landmarkers[i] = self.Landmarkers[i].copy()
new_particle.Landmarkers = self.Landmarkers
new_particle.X_robot = np.copy(self.X_robot)
return new_particle
class KalmanFilter():
def __init__(self):
# X_ expected value of the landmark's position (x,y)
# X_robot robot pose (x, y, yaw)
# H gradient of markers' relative position to robot (h:(x_m,y_m) -> (distance, orientation); H = dh/dX_ and X_ = X(k+1|k))
# S covariance matrix of the marker's position
# Q covariance matrix of the marker measurement
# V difference between the measurement and the estimated marker position
self.first = True
self.R_t = np.identity(number_of_dimensions, dtype='float64')*Sensor_noise #sensor noise
self.P_t = np.identity(number_of_dimensions+1, dtype='float64')*Odom_noise #odometry noise
def compute_G(self, X_robot):
x = 0 # x position
y = 1 # y position
y = self.X_[y] - X_robot[y]
x = self.X_[x] - X_robot[x]
# compute H
denominator = x**2 + y**2
g_o11 = x / np.sqrt(denominator)
g_o12 = y / np.sqrt(denominator)
g_o21 = -y / denominator
g_o22 = x / denominator
self.G_o = np.array([[g_o11, g_o12], [g_o21, g_o22]])
g_s11 = -g_o11
g_s12 = -g_o12
g_s21 = -g_o21
g_s22 = -g_o22
self.G_s = np.array([[g_s21, g_s22, 0], [g_s21, g_s22, -1]])
def Apply_EKF(self, X_robot, Z):
x = 0 # x position
y = 1 # y position
o = 2 # o orientation
d = 0 # distance measured
fi = 1 # orientation of the measurement
if self.first == True:
# the angle is in the direction y to x, reverse of the usual x to y
angle = (X_robot[o] + Z[fi])
self.X_ = np.array([X_robot[x] + Z[d]*np.cos(angle), X_robot[y] + Z[d]*np.sin(angle)], dtype='float64').transpose() # first landmark position
self.compute_G(X_robot)
self.S = np.linalg.inv(self.G_o.dot(np.linalg.inv(self.R_t).dot(self.G_o.T)))
self.V = np.array([0, 0], dtype='float64').transpose()
self.L_t = np.identity(number_of_dimensions, dtype='float64')
else:
# Prediction
y = self.X_[y] - X_robot[y]
x = self.X_[x] - X_robot[x]
d = np.sqrt(x**2 + y**2) # distance
fi = np.arctan2(y, x) - X_robot[o] # direction
while fi > np.pi:
fi = fi - 2*np.pi
while fi < -np.pi:
fi = fi + 2*np.pi
Z_ = np.array([d, fi], dtype='float64').transpose()
self.compute_G(X_robot)
self.Q = self.G_o.dot(self.S.dot(self.G_o.T)) + self.R_t
# Observation
self.V = np.subtract(Z, Z_) # Z = [d, teta]
def Update(self):
# Update
if self.first == False:
# K kalman gain
K = self.S.dot(self.G_o.T.dot(np.linalg.inv(self.Q)))
self.X_ = self.X_ + K.dot(self.V)
self.L_t = self.G_s.dot(self.P_t.dot(self.G_s.T)) + self.G_o.dot(self.S.dot(self.G_o.T)) + self.R_t
self.S = (np.identity(number_of_dimensions)- K.dot(self.G_o)).dot(self.S)
else:
self.first = False
def get_marker_position(self):
return self.X_
def get_marker_covariance(self):
return self.L_t
def get_marker_validity(self):
return self.V
def measurement_validition(self):
return np.dot(self.V.T, np.linalg.inv(self.L_t)).dot(self.V)
def copy(self):
new_KF = KalmanFilter()
new_KF.X_ = np.copy(self.X_)
new_KF.S = np.copy(self.S)
new_KF.L_t = np.copy(self.L_t)
new_KF.V = np.copy(self.V)
new_KF.first = False
return new_KF
class markers():
def __init__(self):
self.could_it_read = False
self.z_distance_left_eye_to_robot_wheel = camara_distance_z
self.x_distance_left_eye_to_robot_wheel = camara_distance_x
self.markers_info = [None]*NUMBER_MARKERS
self.list_ids = np.ones(NUMBER_MARKERS, dtype='int32')*KEY_NUMBER
def callback_Markers(self, data):
# static tf could be applied here: z = z + z_distance_left_eye_to_robot_wheel, x = x + x_distance_left_eye_to_robot_wheel
for i in range(NUMBER_MARKERS):
try:
marker_info = data.markers.pop()
except:
break
self.list_ids[i] = marker_info.id
self.markers_info[marker_info.id] = marker_info
def get_measerment(self, index):
x = self.markers_info[index].pose.pose.position.x # right-left
z = self.markers_info[index].pose.pose.position.z # front-back
# position of the marker relative to base_link
z = z + self.z_distance_left_eye_to_robot_wheel
x = x + self.x_distance_left_eye_to_robot_wheel
marker_distance = np.sqrt(z**2+x**2)
marker_direction = np.arctan(x/z)
return np.array([marker_distance, -marker_direction], dtype='float64').transpose()
def get_list_ids(self):
return self.list_ids
def reset_list_ids(self):
i = 0
while self.list_ids[i] != KEY_NUMBER:
self.list_ids[i] = KEY_NUMBER
i += 1
def marker_info(self, index):
return self.markers_info[index]
class odom():
def __init__(self):
|
self.read_move = np.array([0, 0, 0], dtype='float64').transpose()
self.first_read = True
|
identifier_body
|
|
FastSlamV2.py
|
]=x_map[37]+1.00;
y_map[40]=y_map[37]-1.10;
x_map[39]=x_map[37]+2.65;
y_map[39]=y_map[37]+0.35;
x_map[38]=x_map[39];
y_map[38]=y_map[39]-2.66;
y_map[36]=y_map[34]=y_map[35]=y_map[38];
x_map[36]=x_map[38]-1.46;
x_map[34]=x_map[36]-1.96;
x_map[35]=x_map[34]-1.46;
y_map[32]=y_map[35]+0.28;
x_map[32]=x_map[35]-0.68-0.28;
camara_distance_z = 0.12 # 15.5 cm <-> 13 cm #dia 13/12/2018 <-> 12 cm => 12.5 cm inicio a 81 cm
camara_distance_x = 0.011 # 1.1 cm
# Constants
NUMBER_MARKERS = 41
KEY_NUMBER = 2**(5*5) # number of total combinations possible in aruco code
number_of_dimensions = 2
Frequency = 9.5
NUMBER_PARTICLES = 100
translation_noise = 0.1
rotation_noise = 0.1
noise_factor = 1
minimum_move = 0
Sensor_noise = 0.1
Odom_noise = 0.1
validity_threshold = 50
circle = np.arange(0, 2*np.pi, 0.1)
o_size = 0.3
line = np.arange(0, o_size, o_size)
fig, ax = plt.subplots()
robot_line, = ax.plot([0], [0], color='black', marker='o', markersize=12)
robot_orientation, = ax.plot(line, line, color='lime', marker='.', markersize=2, linewidth=2)
marker_line, = ax.plot(circle, circle, color='red', marker='.', markersize=8, linestyle="")
robot_path, = ax.plot([0], [0], color='black', marker='.', markersize=2, linewidth=0.2)
path_map, = plt.plot(x_map, y_map, color='grey', marker='*', markersize=8, linestyle="")
x_f = [circle]*NUMBER_MARKERS
y_f = [circle]*NUMBER_MARKERS
plt.ion()
plt.xlim(-10, 20)
plt.ylim(-20, 10)
plt.xlabel('X', fontsize=10) # X axis label
plt.ylabel('Y', fontsize=10) # Y axis label
plt.title('FastSlam 2.0')
#plt.legend()
plt.grid(True) # Enabling gridding
def drawing_plot(particles):
Max = 0
Max_id = 0
for i in range(NUMBER_PARTICLES):
if particles[i].get_weight() > Max:
Max = particles[i].get_weight()
Max_id = i
pose = particles[Max_id].get_position()
x = pose[0]
y = pose[1]
o = pose[2]
x_o = x + o_size*np.cos(o)
y_o = y + o_size*np.sin(o)
x_path, y_path = particles[Max_id].get_path()
plt.show(block=False)
robot_path.set_xdata(x_path)
robot_path.set_ydata(y_path)
ax.draw_artist(ax.patch)
ax.draw_artist(robot_path)
robot_line.set_xdata(x)
robot_line.set_ydata(y)
ax.draw_artist(ax.patch)
ax.draw_artist(robot_line)
robot_orientation.set_xdata([x, x_o])
robot_orientation.set_ydata([y, y_o])
ax.draw_artist(ax.patch)
ax.draw_artist(robot_orientation)
Landmarkers = particles[Max_id].get_landmarkers()
i = 0
for marker in Landmarkers:
|
marker_line.set_xdata(x_f)
marker_line.set_ydata(y_f)
ax.draw_artist(ax.patch)
ax.draw_artist(marker_line)
fig.canvas.flush_events()
def resample_particles(particles, updated_marker):
# Returns a new set of particles obtained by performing stochastic universal sampling, according to the particle weights.
# distance between pointers
step = 1.0/NUMBER_PARTICLES
# random start of first pointer
r = np.random.uniform(0,step)
# where we are along the weights
c = particles[0].get_weight()
# index of weight container and corresponding particle
i = 0
index = 0
new_particles = []
#loop over all particle weights
for particle in particles:
#go through the weights until you find the particle
u = r + index*step
while u > c:
i = i + 1
c = c + particles[i].get_weight()
#add that particle
if i == index:
new_particle = particles[i]
new_particle.set_weight(step)
else:
new_particle = particles[i].copy(updated_marker)
#new_particle = copy.deepcopy(particles[i])
#new_particle.set_weight(step)
new_particles.append(new_particle)
#increase the threshold
index += 1
del particles
return new_particles
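# Standalone sketch (not part of the original source): the same stochastic
# universal sampling idea applied to a plain weight array using only numpy.
# The helper name sus_indices is hypothetical; it returns, for each of the n
# evenly spaced pointers, the index of the particle whose cumulative weight
# covers that pointer.
def sus_indices(weights):
    weights = np.asarray(weights, dtype='float64')
    n = len(weights)
    cumulative = np.cumsum(weights / weights.sum())
    step = 1.0 / n
    start = np.random.uniform(0, step)
    pointers = start + step * np.arange(n)
    # for each pointer, find the first index whose cumulative weight reaches it
    return np.searchsorted(cumulative, pointers)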
class Particle():
#each particle has a pose (x,y,o), a weight (w) and a Kalman filter for every landmark
#in the beginning all particles are at the origin frame of the world (0,0,0)
def __init__(self):
self.X_robot = np.array([0, 0, 0], dtype='float64').transpose()
self.weight = 1.0/NUMBER_PARTICLES
self.Landmarkers = [None]*NUMBER_MARKERS
self.x_path = np.array([0], dtype='float64')
self.y_path = np.array([0], dtype='float64')
def get_kalman_filters(self, marker_id, Z):
if self.Landmarkers[marker_id] == None:
self.Landmarkers[marker_id] = KalmanFilter()
return self.Landmarkers[marker_id]
def particle_prediction(self, motion_model):
#if the robot moves we just add the motion model to the previous pose to predict the particle position
x = 0
y = 1
o = 2
noise = np.array([np.random.normal(0,translation_noise), np.random.normal(0,translation_noise), np.random.normal(0,rotation_noise)], dtype='float64').transpose()
noise = noise*motion_model*noise_factor
self.X_robot = self.X_robot + motion_model + noise
while self.X_robot[o] > np.pi:
self.X_robot[o] = self.X_robot[o] - 2*np.pi
while self.X_robot[o] < -np.pi:
self.X_robot[o] = self.X_robot[o] + 2*np.pi
self.x_path = np.insert(self.x_path, 0, self.X_robot[x])
self.y_path = np.insert(self.y_path, 0, self.X_robot[y])
return self.X_robot
def update_weight(self, marker_id):
std = self.Landmarkers[marker_id].get_marker_covariance()
dev = self.Landmarkers[marker_id].get_marker_validity()
fact = np.sqrt(np.linalg.det(2* np.pi * std))
expo = - np.dot(dev.T, np.linalg.inv(std).dot(dev))/2
self.weight = self.weight / fact * np.exp(expo)
def get_weight(self):
return self.weight
def normalize_weight(self, total_weight):
self.weight = self.weight / total_weight
def set_weight(self, new_weight):
self.weight = new_weight
def get_position(self):
return self.X_robot
def get_landmarkers(self):
return self.Landmarkers
def get_path(self):
return self.x_path, self.y_path
def copy(self, updated_marker):
new_particle = Particle()
del new_particle.x_path
del new_particle.y_path
del new_particle.Landmarkers
del new_particle.X_robot
new_particle.x_path = np.copy(self.x_path)
new_particle.y_path = np.copy(self.y_path)
for i in range(len(self.Landmarkers)):
if self.Landmarkers[i] != None and updated_marker[i] == True:
self.Landmarkers[i] = self.Landmarkers[i].copy()
new_particle.Landmarkers = self.Land
|
if marker == None:
x_f[i] = KEY_NUMBER + circle
y_f[i] = KEY_NUMBER + circle
i += 1
continue
pose_m = marker.get_marker_position()
x_m = pose_m[0]
y_m = pose_m[1]
std_m = marker.get_marker_covariance()
x_std_m = std_m[0][0]
y_std_m = std_m[1][1]
x_f[i] = x_m + x_std_m * np.cos(circle)
y_f[i] = y_m + y_std_m * np.sin(circle)
i += 1
|
conditional_block
|
FastSlamV2.py
|
NUMBER_MARKERS
plt.ion()
plt.xlim(-10, 20)
plt.ylim(-20, 10)
plt.xlabel('X', fontsize=10) # X axis label
plt.ylabel('Y', fontsize=10) # Y axis label
plt.title('FastSlam 2.0')
#plt.legend()
plt.grid(True) # Enabling gridding
def drawing_plot(particles):
Max = 0
Max_id = 0
for i in range(NUMBER_PARTICLES):
if particles[i].get_weight() > Max:
Max = particles[i].get_weight()
Max_id = i
pose = particles[Max_id].get_position()
x = pose[0]
y = pose[1]
o = pose[2]
x_o = x + o_size*np.cos(o)
y_o = y + o_size*np.sin(o)
x_path, y_path = particles[Max_id].get_path()
plt.show(block=False)
robot_path.set_xdata(x_path)
robot_path.set_ydata(y_path)
ax.draw_artist(ax.patch)
ax.draw_artist(robot_path)
robot_line.set_xdata(x)
robot_line.set_ydata(y)
ax.draw_artist(ax.patch)
ax.draw_artist(robot_line)
robot_orientation.set_xdata([x, x_o])
robot_orientation.set_ydata([y, y_o])
ax.draw_artist(ax.patch)
ax.draw_artist(robot_orientation)
Landmarkers = particles[Max_id].get_landmarkers()
i = 0
for marker in Landmarkers:
if marker == None:
x_f[i] = KEY_NUMBER + circle
y_f[i] = KEY_NUMBER + circle
i += 1
continue
pose_m = marker.get_marker_position()
x_m = pose_m[0]
y_m = pose_m[1]
std_m = marker.get_marker_covariance()
x_std_m = std_m[0][0]
y_std_m = std_m[1][1]
x_f[i] = x_m + x_std_m * np.cos(circle)
y_f[i] = y_m + y_std_m * np.sin(circle)
i += 1
marker_line.set_xdata(x_f)
marker_line.set_ydata(y_f)
ax.draw_artist(ax.patch)
ax.draw_artist(marker_line)
fig.canvas.flush_events()
def resample_particles(particles, updated_marker):
# Returns a new set of particles obtained by performing stochastic universal sampling, according to the particle weights.
# distance between pointers
step = 1.0/NUMBER_PARTICLES
# random start of first pointer
r = np.random.uniform(0,step)
# where we are along the weights
c = particles[0].get_weight()
# index of weight container and corresponding particle
i = 0
index = 0
new_particles = []
#loop over all particle weights
for particle in particles:
#go through the weights until you find the particle
u = r + index*step
while u > c:
i = i + 1
c = c + particles[i].get_weight()
#add that particle
if i == index:
new_particle = particles[i]
new_particle.set_weight(step)
else:
new_particle = particles[i].copy(updated_marker)
#new_particle = copy.deepcopy(particles[i])
#new_particle.set_weight(step)
new_particles.append(new_particle)
#increase the threshold
index += 1
del particles
return new_particles
class Particle():
#each particle has a pose (x,y,o), a weight (w) and a Kalman filter for every landmark
#in the beginning all particles are at the origin frame of the world (0,0,0)
def __init__(self):
self.X_robot = np.array([0, 0, 0], dtype='float64').transpose()
self.weight = 1.0/NUMBER_PARTICLES
self.Landmarkers = [None]*NUMBER_MARKERS
self.x_path = np.array([0], dtype='float64')
self.y_path = np.array([0], dtype='float64')
def get_kalman_filters(self, marker_id, Z):
if self.Landmarkers[marker_id] == None:
self.Landmarkers[marker_id] = KalmanFilter()
return self.Landmarkers[marker_id]
def particle_prediction(self, motion_model):
#if the robot moves we just add the motion model to the previous pose to predict the particle position
x = 0
y = 1
o = 2
noise = np.array([np.random.normal(0,translation_noise), np.random.normal(0,translation_noise), np.random.normal(0,rotation_noise)], dtype='float64').transpose()
noise = noise*motion_model*noise_factor
self.X_robot = self.X_robot + motion_model + noise
while self.X_robot[o] > np.pi:
self.X_robot[o] = self.X_robot[o] - 2*np.pi
while self.X_robot[o] < -np.pi:
self.X_robot[o] = self.X_robot[o] + 2*np.pi
self.x_path = np.insert(self.x_path, 0, self.X_robot[x])
self.y_path = np.insert(self.y_path, 0, self.X_robot[y])
return self.X_robot
def update_weight(self, marker_id):
std = self.Landmarkers[marker_id].get_marker_covariance()
dev = self.Landmarkers[marker_id].get_marker_validity()
fact = np.sqrt(np.linalg.det(2* np.pi * std))
expo = - np.dot(dev.T, np.linalg.inv(std).dot(dev))/2
self.weight = self.weight / fact * np.exp(expo)
def get_weight(self):
return self.weight
def normalize_weight(self, total_weight):
self.weight = self.weight / total_weight
def set_weight(self, new_weight):
self.weight = new_weight
def get_position(self):
return self.X_robot
def get_landmarkers(self):
return self.Landmarkers
def get_path(self):
return self.x_path, self.y_path
def copy(self, updated_marker):
new_particle = Particle()
del new_particle.x_path
del new_particle.y_path
del new_particle.Landmarkers
del new_particle.X_robot
new_particle.x_path = np.copy(self.x_path)
new_particle.y_path = np.copy(self.y_path)
for i in range(len(self.Landmarkers)):
if self.Landmarkers[i] != None and updated_marker[i] == True:
self.Landmarkers[i] = self.Landmarkers[i].copy()
new_particle.Landmarkers = self.Landmarkers
new_particle.X_robot = np.copy(self.X_robot)
return new_particle
class KalmanFilter():
def __init__(self):
# X_ expected value of the landmark's position (x,y)
# X_robot robot pose (x, y, yaw)
# H gradient of markers' relative position to robot (h:(x_m,y_m) -> (distance, orientation); H = dh/dX_ and X_ = X(k+1|k))
# S covariance matrix of the marker's position
# Q covariance matrix of the marker measurement
# V difference between the measurement and the estimated marker position
self.first = True
self.R_t = np.identity(number_of_dimensions, dtype='float64')*Sensor_noise #sensor noise
self.P_t = np.identity(number_of_dimensions+1, dtype='float64')*Odom_noise #odometry noise
def compute_G(self, X_robot):
x = 0 # x position
y = 1 # y position
y = self.X_[y] - X_robot[y]
x = self.X_[x] - X_robot[x]
# compute H
denominator = x**2 + y**2
g_o11 = x / np.sqrt(denominator)
g_o12 = y / np.sqrt(denominator)
g_o21 = -y / denominator
g_o22 = x / denominator
self.G_o = np.array([[g_o11, g_o12], [g_o21, g_o22]])
g_s11 = -g_o11
g_s12 = -g_o12
g_s21 = -g_o21
g_s22 = -g_o22
self.G_s = np.array([[g_s21, g_s22, 0], [g_s21, g_s22, -1]])
def Apply_EKF(self, X_robot, Z):
x = 0 # x position
y = 1 # y position
o = 2 # o orientation
d = 0 # distance measured
fi = 1 # orientation of the measurement
if self.first == True:
# the angle is in the direction y to x, reverse of the usual x to y
angle = (X_robot[o] + Z[fi])
|
self.X_ = np.array([X_robot[x] + Z[d]*np.cos(angle), X_robot[y] + Z[d]*np.sin(angle)], dtype='float64').transpose() # first landmark position
self.compute_G(X_robot)
|
random_line_split
|
|
hellweg.py
|
),
'title': _report_title(report.reportType, 'BeamReportType', beam_info),
'z_label': 'Number of Particles',
'summaryData': _summary_text(run_dir),
})
def extract_parameter_report(report, run_dir):
s = solver.BeamSolver(
os.path.join(str(run_dir), HELLWEG_INI_FILE),
os.path.join(str(run_dir), HELLWEG_INPUT_FILE))
s.load_bin(os.path.join(str(run_dir), HELLWEG_DUMP_FILE))
y1_var, y2_var = report.reportType.split('-')
x_field = 'z'
x = s.get_structure_parameters(_parameter_index(x_field))
y1 = s.get_structure_parameters(_parameter_index(y1_var))
y1_extent = [np.min(y1), np.max(y1)]
y2 = s.get_structure_parameters(_parameter_index(y2_var))
y2_extent = [np.min(y2), np.max(y2)]
return {
'title': _enum_text('ParameterReportType', report.reportType),
'x_range': [x[0], x[-1]],
'y_label': hellweg_dump_reader.get_parameter_label(y1_var),
'x_label': hellweg_dump_reader.get_parameter_label(x_field),
'x_points': x,
'points': [
y1,
y2,
],
'y_range': [min(y1_extent[0], y2_extent[0]), max(y1_extent[1], y2_extent[1])],
'y1_title': hellweg_dump_reader.get_parameter_title(y1_var),
'y2_title': hellweg_dump_reader.get_parameter_title(y2_var),
}
def extract_particle_report(report, run_dir):
x_field = 'z0'
particle_info = hellweg_dump_reader.particle_info(_dump_file(run_dir), report.reportType, int(report.renderCount))
x = particle_info['z_values']
return {
'title': _enum_text('ParticleReportType', report.reportType),
'x_range': [np.min(x), np.max(x)],
'y_label': hellweg_dump_reader.get_label(report.reportType),
'x_label': hellweg_dump_reader.get_label(x_field),
'x_points': x,
'points': particle_info['y_values'],
'y_range': particle_info['y_range'],
}
def fixup_old_data(data):
for m in ('beamAnimation', 'beamHistogramAnimation', 'parameterAnimation', 'particleAnimation'):
if m not in data.models:
data.models[m] = pkcollections.Dict({})
template_common.update_model_defaults(data.models[m], m, _SCHEMA)
if 'solenoidFile' not in data['models']['solenoid']:
data['models']['solenoid']['solenoidFile'] = ''
if 'beamDefinition' not in data['models']['beam']:
beam = data['models']['beam']
beam['beamDefinition'] = 'transverse_longitude'
beam['cstCompress'] = '0'
beam['transversalFile2d'] = ''
beam['transversalFile4d'] = ''
beam['longitudinalFile1d'] = ''
beam['longitudinalFile2d'] = ''
beam['cstFile'] = ''
template_common.organize_example(data)
def get_animation_name(data):
return 'animation'
def get_application_data(data):
if data['method'] == 'compute_particle_ranges':
return template_common.compute_field_range(data, _compute_range_across_files)
assert False, 'unknown application data method: {}'.format(data['method'])
def lib_files(data, source_lib):
return template_common.filename_to_path(_simulation_files(data), source_lib)
def get_simulation_frame(run_dir, data, model_data):
frame_index = int(data['frameIndex'])
if data['modelName'] == 'beamAnimation':
args = template_common.parse_animation_args(
data,
{
'1': ['reportType', 'histogramBins', 'startTime'],
'': ['reportType', 'histogramBins', 'plotRangeType', 'horizontalSize', 'horizontalOffset', 'verticalSize', 'verticalOffset', 'isRunning', 'startTime'],
},
)
return extract_beam_report(args, run_dir, frame_index)
elif data['modelName'] == 'beamHistogramAnimation':
args = template_common.parse_animation_args(
data,
{'': ['reportType', 'histogramBins', 'startTime']},
)
return extract_beam_histrogram(args, run_dir, frame_index)
elif data['modelName'] == 'particleAnimation':
args = template_common.parse_animation_args(
data,
{'': ['reportType', 'renderCount', 'startTime']},
)
return extract_particle_report(args, run_dir)
elif data['modelName'] == 'parameterAnimation':
args = template_common.parse_animation_args(
data,
{'': ['reportType', 'startTime']},
)
return extract_parameter_report(args, run_dir)
raise RuntimeError('unknown animation model: {}'.format(data['modelName']))
def models_related_to_report(data):
"""What models are required for this data['report']
Args:
data (dict): simulation
Returns:
list: Named models, model fields or values (dict, list) that affect report
"""
r = data['report']
if r == 'animation':
return []
res = template_common.report_fields(data, r, _REPORT_STYLE_FIELDS) + [
'beam',
'ellipticalDistribution',
'energyPhaseDistribution',
'solenoid',
'sphericalDistribution',
'twissDistribution',
]
for f in template_common.lib_files(data):
res.append(f.mtime())
return res
def python_source_for_model(data, model):
return '''
from rslinac import solver
{}
with open('input.txt', 'w') as f:
f.write(input_file)
with open('defaults.ini', 'w') as f:
f.write(ini_file)
s = solver.BeamSolver('defaults.ini', 'input.txt')
s.solve()
s.save_output('output.txt')
'''.format(_generate_parameters_file(data, is_parallel=len(data.models.beamline)))
def remove_last_frame(run_dir):
pass
def validate_delete_file(data, filename, file_type):
"""Returns True if the filename is in use by the simulation data."""
return filename in _simulation_files(data)
def write_parameters(data, run_dir, is_parallel):
"""Write the parameters file
Args:
data (dict): input
run_dir (py.path): where to write
is_parallel (bool): run in background?
"""
pkio.write_text(
run_dir.join(template_common.PARAMETERS_PYTHON_FILE),
_generate_parameters_file(
data,
run_dir,
is_parallel,
),
)
def _compute_range_across_files(run_dir, data):
res = {}
for v in _SCHEMA.enum.BeamReportType:
x, y = v[0].split('-')
res[x] = []
res[y] = []
dump_file = _dump_file(run_dir)
if not os.path.exists(dump_file):
return res
beam_header = hellweg_dump_reader.beam_header(dump_file)
for frame in xrange(beam_header.NPoints):
beam_info = hellweg_dump_reader.beam_info(dump_file, frame)
for field in res:
values = hellweg_dump_reader.get_points(beam_info, field)
if not len(values):
pass
elif len(res[field]):
res[field][0] = min(min(values), res[field][0])
res[field][1] = max(max(values), res[field][1])
else:
res[field] = [min(values), max(values)]
return res
def _dump_file(run_dir):
return os.path.join(str(run_dir), HELLWEG_DUMP_FILE)
def _enum_text(enum_name, v):
enum_values = _SCHEMA['enum'][enum_name]
for e in enum_values:
if e[0] == v:
return e[1]
raise RuntimeError('invalid enum value: {}, {}'.format(enum_values, v))
def
|
(models):
# BEAM SPH2D 0.564 -15 5 NORM2D 0.30 0.0000001 90 180
beam_def = models.beam.beamDefinition
if beam_def == 'transverse_longitude':
return 'BEAM {} {}'.format(_generate_transverse_dist(models), _generate_longitude_dist(models))
if beam_def == 'cst_pit':
return 'BEAM CST_PIT {} {}'.format(
template_common.lib_file_name('beam', 'cstFile', models.beam.cstFile),
'COMPRESS' if models.beam.cstCompress else '',
)
if beam_def == 'cst_pid':
return 'BEAM CST_PID {} {}'.format(
template_common.lib_file_name('beam', 'cstFile', models.beam.cstFile),
_generate_energy_phase_distribution(models.energyPhaseDistribution),
)
raise RuntimeError('invalid beam def: {}'.format(beam_def))
def _generate_cell_params(el):
#TODO(pjm): add an option field to select auto-calculate
if el.attenuation == 0 and el.aperture == 0:
return '{} {} {}'.format(el.phaseAdvance, el.phaseVelocity, el.acceleratingInvariant)
return '{} {} {} {} {}'.format(el
|
_generate_beam
|
identifier_name
|
hellweg.py
|
),
'title': _report_title(report.reportType, 'BeamReportType', beam_info),
'z_label': 'Number of Particles',
'summaryData': _summary_text(run_dir),
})
def extract_parameter_report(report, run_dir):
s = solver.BeamSolver(
os.path.join(str(run_dir), HELLWEG_INI_FILE),
os.path.join(str(run_dir), HELLWEG_INPUT_FILE))
s.load_bin(os.path.join(str(run_dir), HELLWEG_DUMP_FILE))
y1_var, y2_var = report.reportType.split('-')
x_field = 'z'
x = s.get_structure_parameters(_parameter_index(x_field))
y1 = s.get_structure_parameters(_parameter_index(y1_var))
y1_extent = [np.min(y1), np.max(y1)]
y2 = s.get_structure_parameters(_parameter_index(y2_var))
y2_extent = [np.min(y2), np.max(y2)]
return {
'title': _enum_text('ParameterReportType', report.reportType),
'x_range': [x[0], x[-1]],
'y_label': hellweg_dump_reader.get_parameter_label(y1_var),
'x_label': hellweg_dump_reader.get_parameter_label(x_field),
'x_points': x,
'points': [
y1,
y2,
],
'y_range': [min(y1_extent[0], y2_extent[0]), max(y1_extent[1], y2_extent[1])],
'y1_title': hellweg_dump_reader.get_parameter_title(y1_var),
'y2_title': hellweg_dump_reader.get_parameter_title(y2_var),
}
def extract_particle_report(report, run_dir):
x_field = 'z0'
particle_info = hellweg_dump_reader.particle_info(_dump_file(run_dir), report.reportType, int(report.renderCount))
x = particle_info['z_values']
return {
'title': _enum_text('ParticleReportType', report.reportType),
'x_range': [np.min(x), np.max(x)],
'y_label': hellweg_dump_reader.get_label(report.reportType),
'x_label': hellweg_dump_reader.get_label(x_field),
'x_points': x,
'points': particle_info['y_values'],
'y_range': particle_info['y_range'],
}
def fixup_old_data(data):
for m in ('beamAnimation', 'beamHistogramAnimation', 'parameterAnimation', 'particleAnimation'):
if m not in data.models:
data.models[m] = pkcollections.Dict({})
template_common.update_model_defaults(data.models[m], m, _SCHEMA)
if 'solenoidFile' not in data['models']['solenoid']:
data['models']['solenoid']['solenoidFile'] = ''
if 'beamDefinition' not in data['models']['beam']:
beam = data['models']['beam']
beam['beamDefinition'] = 'transverse_longitude'
beam['cstCompress'] = '0'
beam['transversalFile2d'] = ''
beam['transversalFile4d'] = ''
beam['longitudinalFile1d'] = ''
beam['longitudinalFile2d'] = ''
beam['cstFile'] = ''
template_common.organize_example(data)
def get_animation_name(data):
return 'animation'
def get_application_data(data):
|
def lib_files(data, source_lib):
return template_common.filename_to_path(_simulation_files(data), source_lib)
def get_simulation_frame(run_dir, data, model_data):
frame_index = int(data['frameIndex'])
if data['modelName'] == 'beamAnimation':
args = template_common.parse_animation_args(
data,
{
'1': ['reportType', 'histogramBins', 'startTime'],
'': ['reportType', 'histogramBins', 'plotRangeType', 'horizontalSize', 'horizontalOffset', 'verticalSize', 'verticalOffset', 'isRunning', 'startTime'],
},
)
return extract_beam_report(args, run_dir, frame_index)
elif data['modelName'] == 'beamHistogramAnimation':
args = template_common.parse_animation_args(
data,
{'': ['reportType', 'histogramBins', 'startTime']},
)
return extract_beam_histrogram(args, run_dir, frame_index)
elif data['modelName'] == 'particleAnimation':
args = template_common.parse_animation_args(
data,
{'': ['reportType', 'renderCount', 'startTime']},
)
return extract_particle_report(args, run_dir)
elif data['modelName'] == 'parameterAnimation':
args = template_common.parse_animation_args(
data,
{'': ['reportType', 'startTime']},
)
return extract_parameter_report(args, run_dir)
raise RuntimeError('unknown animation model: {}'.format(data['modelName']))
def models_related_to_report(data):
"""What models are required for this data['report']
Args:
data (dict): simulation
Returns:
list: Named models, model fields or values (dict, list) that affect report
"""
r = data['report']
if r == 'animation':
return []
res = template_common.report_fields(data, r, _REPORT_STYLE_FIELDS) + [
'beam',
'ellipticalDistribution',
'energyPhaseDistribution',
'solenoid',
'sphericalDistribution',
'twissDistribution',
]
for f in template_common.lib_files(data):
res.append(f.mtime())
return res
def python_source_for_model(data, model):
return '''
from rslinac import solver
{}
with open('input.txt', 'w') as f:
f.write(input_file)
with open('defaults.ini', 'w') as f:
f.write(ini_file)
s = solver.BeamSolver('defaults.ini', 'input.txt')
s.solve()
s.save_output('output.txt')
'''.format(_generate_parameters_file(data, is_parallel=len(data.models.beamline)))
def remove_last_frame(run_dir):
pass
def validate_delete_file(data, filename, file_type):
"""Returns True if the filename is in use by the simulation data."""
return filename in _simulation_files(data)
def write_parameters(data, run_dir, is_parallel):
"""Write the parameters file
Args:
data (dict): input
run_dir (py.path): where to write
is_parallel (bool): run in background?
"""
pkio.write_text(
run_dir.join(template_common.PARAMETERS_PYTHON_FILE),
_generate_parameters_file(
data,
run_dir,
is_parallel,
),
)
def _compute_range_across_files(run_dir, data):
res = {}
for v in _SCHEMA.enum.BeamReportType:
x, y = v[0].split('-')
res[x] = []
res[y] = []
dump_file = _dump_file(run_dir)
if not os.path.exists(dump_file):
return res
beam_header = hellweg_dump_reader.beam_header(dump_file)
for frame in xrange(beam_header.NPoints):
beam_info = hellweg_dump_reader.beam_info(dump_file, frame)
for field in res:
values = hellweg_dump_reader.get_points(beam_info, field)
if not len(values):
pass
elif len(res[field]):
res[field][0] = min(min(values), res[field][0])
res[field][1] = max(max(values), res[field][1])
else:
res[field] = [min(values), max(values)]
return res
def _dump_file(run_dir):
return os.path.join(str(run_dir), HELLWEG_DUMP_FILE)
def _enum_text(enum_name, v):
enum_values = _SCHEMA['enum'][enum_name]
for e in enum_values:
if e[0] == v:
return e[1]
raise RuntimeError('invalid enum value: {}, {}'.format(enum_values, v))
def _generate_beam(models):
# BEAM SPH2D 0.564 -15 5 NORM2D 0.30 0.0000001 90 180
beam_def = models.beam.beamDefinition
if beam_def == 'transverse_longitude':
return 'BEAM {} {}'.format(_generate_transverse_dist(models), _generate_longitude_dist(models))
if beam_def == 'cst_pit':
return 'BEAM CST_PIT {} {}'.format(
template_common.lib_file_name('beam', 'cstFile', models.beam.cstFile),
'COMPRESS' if models.beam.cstCompress else '',
)
if beam_def == 'cst_pid':
return 'BEAM CST_PID {} {}'.format(
template_common.lib_file_name('beam', 'cstFile', models.beam.cstFile),
_generate_energy_phase_distribution(models.energyPhaseDistribution),
)
raise RuntimeError('invalid beam def: {}'.format(beam_def))
def _generate_cell_params(el):
#TODO(pjm): add an option field to select auto-calculate
if el.attenuation == 0 and el.aperture == 0:
return '{} {} {}'.format(el.phaseAdvance, el.phaseVelocity, el.acceleratingInvariant)
return '{} {} {} {} {}'.format
|
if data['method'] == 'compute_particle_ranges':
return template_common.compute_field_range(data, _compute_range_across_files)
assert False, 'unknown application data method: {}'.format(data['method'])
|
identifier_body
|
hellweg.py
|
def extract_beam_histrogram(report, run_dir, frame):
beam_info = hellweg_dump_reader.beam_info(_dump_file(run_dir), frame)
points = hellweg_dump_reader.get_points(beam_info, report.reportType)
hist, edges = np.histogram(points, template_common.histogram_bins(report.histogramBins))
return {
'title': _report_title(report.reportType, 'BeamHistogramReportType', beam_info),
'x_range': [edges[0], edges[-1]],
'y_label': 'Number of Particles',
'x_label': hellweg_dump_reader.get_label(report.reportType),
'points': hist.T.tolist(),
}
def extract_beam_report(report, run_dir, frame):
data = simulation_db.read_json(run_dir.join(template_common.INPUT_BASE_NAME))
model = data.models.beamAnimation
model.update(report)
beam_info = hellweg_dump_reader.beam_info(_dump_file(run_dir), frame)
x, y = report.reportType.split('-')
values = [
hellweg_dump_reader.get_points(beam_info, x),
hellweg_dump_reader.get_points(beam_info, y),
]
model['x'] = x
model['y'] = y
return template_common.heatmap(values, model, {
'x_label': hellweg_dump_reader.get_label(x),
'y_label': hellweg_dump_reader.get_label(y),
'title': _report_title(report.reportType, 'BeamReportType', beam_info),
'z_label': 'Number of Particles',
'summaryData': _summary_text(run_dir),
})
def extract_parameter_report(report, run_dir):
s = solver.BeamSolver(
os.path.join(str(run_dir), HELLWEG_INI_FILE),
os.path.join(str(run_dir), HELLWEG_INPUT_FILE))
s.load_bin(os.path.join(str(run_dir), HELLWEG_DUMP_FILE))
y1_var, y2_var = report.reportType.split('-')
x_field = 'z'
x = s.get_structure_parameters(_parameter_index(x_field))
y1 = s.get_structure_parameters(_parameter_index(y1_var))
y1_extent = [np.min(y1), np.max(y1)]
y2 = s.get_structure_parameters(_parameter_index(y2_var))
y2_extent = [np.min(y2), np.max(y2)]
return {
'title': _enum_text('ParameterReportType', report.reportType),
'x_range': [x[0], x[-1]],
'y_label': hellweg_dump_reader.get_parameter_label(y1_var),
'x_label': hellweg_dump_reader.get_parameter_label(x_field),
'x_points': x,
'points': [
y1,
y2,
],
'y_range': [min(y1_extent[0], y2_extent[0]), max(y1_extent[1], y2_extent[1])],
'y1_title': hellweg_dump_reader.get_parameter_title(y1_var),
'y2_title': hellweg_dump_reader.get_parameter_title(y2_var),
}
def extract_particle_report(report, run_dir):
x_field = 'z0'
particle_info = hellweg_dump_reader.particle_info(_dump_file(run_dir), report.reportType, int(report.renderCount))
x = particle_info['z_values']
return {
'title': _enum_text('ParticleReportType', report.reportType),
'x_range': [np.min(x), np.max(x)],
'y_label': hellweg_dump_reader.get_label(report.reportType),
'x_label': hellweg_dump_reader.get_label(x_field),
'x_points': x,
'points': particle_info['y_values'],
'y_range': particle_info['y_range'],
}
def fixup_old_data(data):
for m in ('beamAnimation', 'beamHistogramAnimation', 'parameterAnimation', 'particleAnimation'):
if m not in data.models:
data.models[m] = pkcollections.Dict({})
template_common.update_model_defaults(data.models[m], m, _SCHEMA)
if 'solenoidFile' not in data['models']['solenoid']:
data['models']['solenoid']['solenoidFile'] = ''
if 'beamDefinition' not in data['models']['beam']:
beam = data['models']['beam']
beam['beamDefinition'] = 'transverse_longitude'
beam['cstCompress'] = '0'
beam['transversalFile2d'] = ''
beam['transversalFile4d'] = ''
beam['longitudinalFile1d'] = ''
beam['longitudinalFile2d'] = ''
beam['cstFile'] = ''
template_common.organize_example(data)
def get_animation_name(data):
return 'animation'
def get_application_data(data):
if data['method'] == 'compute_particle_ranges':
return template_common.compute_field_range(data, _compute_range_across_files)
assert False, 'unknown application data method: {}'.format(data['method'])
def lib_files(data, source_lib):
return template_common.filename_to_path(_simulation_files(data), source_lib)
def get_simulation_frame(run_dir, data, model_data):
frame_index = int(data['frameIndex'])
if data['modelName'] == 'beamAnimation':
args = template_common.parse_animation_args(
data,
{
'1': ['reportType', 'histogramBins', 'startTime'],
'': ['reportType', 'histogramBins', 'plotRangeType', 'horizontalSize', 'horizontalOffset', 'verticalSize', 'verticalOffset', 'isRunning', 'startTime'],
},
)
return extract_beam_report(args, run_dir, frame_index)
elif data['modelName'] == 'beamHistogramAnimation':
args = template_common.parse_animation_args(
data,
{'': ['reportType', 'histogramBins', 'startTime']},
)
return extract_beam_histrogram(args, run_dir, frame_index)
elif data['modelName'] == 'particleAnimation':
args = template_common.parse_animation_args(
data,
{'': ['reportType', 'renderCount', 'startTime']},
)
return extract_particle_report(args, run_dir)
elif data['modelName'] == 'parameterAnimation':
args = template_common.parse_animation_args(
data,
{'': ['reportType', 'startTime']},
)
return extract_parameter_report(args, run_dir)
raise RuntimeError('unknown animation model: {}'.format(data['modelName']))
def models_related_to_report(data):
"""What models are required for this data['report']
Args:
data (dict): simulation
Returns:
list: Named models, model fields or values (dict, list) that affect report
"""
r = data['report']
if r == 'animation':
return []
res = template_common.report_fields(data, r, _REPORT_STYLE_FIELDS) + [
'beam',
'ellipticalDistribution',
'energyPhaseDistribution',
'solenoid',
'sphericalDistribution',
'twissDistribution',
]
for f in template_common.lib_files(data):
res.append(f.mtime())
return res
def python_source_for_model(data, model):
return '''
from rslinac import solver
{}
with open('input.txt', 'w') as f:
f.write(input_file)
with open('defaults.ini', 'w') as f:
f.write(ini_file)
s = solver.BeamSolver('defaults.ini', 'input.txt')
s.solve()
s.save_output('output.txt')
'''.format(_generate_parameters_file(data, is_parallel=len(data.models.beamline)))
def remove_last_frame(run_dir):
pass
def validate_delete_file(data, filename, file_type):
"""Returns True if the filename is in use by the simulation data."""
return filename in _simulation_files(data)
def write_parameters(data, run_dir, is_parallel):
"""Write the parameters file
Args:
data (dict): input
run_dir (py.path): where to write
is_parallel (bool): run in background?
"""
pkio.write_text(
run_dir.join(template_common.PARAMETERS_PYTHON_FILE),
_generate_parameters_file(
data,
run_dir,
is_parallel,
),
)
def _compute_range_across_files(run_dir, data):
res = {}
for v in _SCHEMA.enum.BeamReportType:
x, y = v[0].split('-')
res[x] = []
res[y] = []
dump_file = _dump_file(run_dir)
if not os.path.exists(dump_file):
return res
beam_header = hellweg_dump_reader.beam_header(dump_file)
for frame in xrange(beam_header.NPoints):
beam_info = hellweg_dump_reader.beam_info(dump_file, frame)
for field in res:
values = hellweg_dump_reader.get_points(beam_info, field)
if not len(values):
pass
elif len(res[field]):
res[field][0] = min(min(values), res[field][0])
res[field][1] = max(max(values), res[field][1])
else:
res[field] = [min(values), max(values)]
return res
def _dump_file(run_dir):
return os.path.join(str(run_dir), HELLWEG_DUMP_FILE)
def _enum_text(enum_name, v):
enum_values = _SCHEMA['enum'][enum_name]
for e in enum_values:
if e[0] == v:
return e[1]
raise
|
'error': _parse_error_message(run_dir)
}
|
random_line_split
|
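A minimal, self-contained sketch of the per-field range accumulation used in _compute_range_across_files above: each field keeps a [min, max] extent that is widened frame by frame. The frame dictionaries below are hypothetical stand-ins for the hellweg_dump_reader.beam_info() output.

def accumulate_ranges(frames, fields):
    # Widen a [min, max] extent per field across all frames.
    res = {f: [] for f in fields}
    for frame in frames:
        for field in fields:
            values = frame.get(field, [])
            if not values:
                continue
            lo, hi = min(values), max(values)
            if res[field]:
                res[field][0] = min(res[field][0], lo)
                res[field][1] = max(res[field][1], hi)
            else:
                res[field] = [lo, hi]
    return res

# Hypothetical frames standing in for dump-file beam info.
frames = [{'x': [0.1, 0.4], 'y': [-1.0, 2.0]}, {'x': [0.0, 0.3], 'y': [-0.5, 3.0]}]
print(accumulate_ranges(frames, ['x', 'y']))  # {'x': [0.0, 0.4], 'y': [-1.0, 3.0]}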
|
hellweg.py
|
format(data['method'])
def lib_files(data, source_lib):
return template_common.filename_to_path(_simulation_files(data), source_lib)
def get_simulation_frame(run_dir, data, model_data):
frame_index = int(data['frameIndex'])
if data['modelName'] == 'beamAnimation':
args = template_common.parse_animation_args(
data,
{
'1': ['reportType', 'histogramBins', 'startTime'],
'': ['reportType', 'histogramBins', 'plotRangeType', 'horizontalSize', 'horizontalOffset', 'verticalSize', 'verticalOffset', 'isRunning', 'startTime'],
},
)
return extract_beam_report(args, run_dir, frame_index)
elif data['modelName'] == 'beamHistogramAnimation':
args = template_common.parse_animation_args(
data,
{'': ['reportType', 'histogramBins', 'startTime']},
)
return extract_beam_histrogram(args, run_dir, frame_index)
elif data['modelName'] == 'particleAnimation':
args = template_common.parse_animation_args(
data,
{'': ['reportType', 'renderCount', 'startTime']},
)
return extract_particle_report(args, run_dir)
elif data['modelName'] == 'parameterAnimation':
args = template_common.parse_animation_args(
data,
{'': ['reportType', 'startTime']},
)
return extract_parameter_report(args, run_dir)
raise RuntimeError('unknown animation model: {}'.format(data['modelName']))
def models_related_to_report(data):
"""What models are required for this data['report']
Args:
data (dict): simulation
Returns:
list: Named models, model fields or values (dict, list) that affect report
"""
r = data['report']
if r == 'animation':
return []
res = template_common.report_fields(data, r, _REPORT_STYLE_FIELDS) + [
'beam',
'ellipticalDistribution',
'energyPhaseDistribution',
'solenoid',
'sphericalDistribution',
'twissDistribution',
]
for f in template_common.lib_files(data):
res.append(f.mtime())
return res
def python_source_for_model(data, model):
return '''
from rslinac import solver
{}
with open('input.txt', 'w') as f:
f.write(input_file)
with open('defaults.ini', 'w') as f:
f.write(ini_file)
s = solver.BeamSolver('defaults.ini', 'input.txt')
s.solve()
s.save_output('output.txt')
'''.format(_generate_parameters_file(data, is_parallel=len(data.models.beamline)))
def remove_last_frame(run_dir):
pass
def validate_delete_file(data, filename, file_type):
"""Returns True if the filename is in use by the simulation data."""
return filename in _simulation_files(data)
def write_parameters(data, run_dir, is_parallel):
"""Write the parameters file
Args:
data (dict): input
run_dir (py.path): where to write
is_parallel (bool): run in background?
"""
pkio.write_text(
run_dir.join(template_common.PARAMETERS_PYTHON_FILE),
_generate_parameters_file(
data,
run_dir,
is_parallel,
),
)
def _compute_range_across_files(run_dir, data):
res = {}
for v in _SCHEMA.enum.BeamReportType:
x, y = v[0].split('-')
res[x] = []
res[y] = []
dump_file = _dump_file(run_dir)
if not os.path.exists(dump_file):
return res
beam_header = hellweg_dump_reader.beam_header(dump_file)
for frame in xrange(beam_header.NPoints):
beam_info = hellweg_dump_reader.beam_info(dump_file, frame)
for field in res:
values = hellweg_dump_reader.get_points(beam_info, field)
if not len(values):
pass
elif len(res[field]):
res[field][0] = min(min(values), res[field][0])
res[field][1] = max(max(values), res[field][1])
else:
res[field] = [min(values), max(values)]
return res
def _dump_file(run_dir):
return os.path.join(str(run_dir), HELLWEG_DUMP_FILE)
def _enum_text(enum_name, v):
enum_values = _SCHEMA['enum'][enum_name]
for e in enum_values:
if e[0] == v:
return e[1]
raise RuntimeError('invalid enum value: {}, {}'.format(enum_values, v))
def _generate_beam(models):
# BEAM SPH2D 0.564 -15 5 NORM2D 0.30 0.0000001 90 180
beam_def = models.beam.beamDefinition
if beam_def == 'transverse_longitude':
return 'BEAM {} {}'.format(_generate_transverse_dist(models), _generate_longitude_dist(models))
if beam_def == 'cst_pit':
return 'BEAM CST_PIT {} {}'.format(
template_common.lib_file_name('beam', 'cstFile', models.beam.cstFile),
'COMPRESS' if models.beam.cstCompress else '',
)
if beam_def == 'cst_pid':
return 'BEAM CST_PID {} {}'.format(
template_common.lib_file_name('beam', 'cstFile', models.beam.cstFile),
_generate_energy_phase_distribution(models.energyPhaseDistribution),
)
raise RuntimeError('invalid beam def: {}'.format(beam_def))
def _generate_cell_params(el):
#TODO(pjm): add an option field to select auto-calculate
if el.attenuation == 0 and el.aperture == 0:
return '{} {} {}'.format(el.phaseAdvance, el.phaseVelocity, el.acceleratingInvariant)
return '{} {} {} {} {}'.format(el.phaseAdvance, el.phaseVelocity, el.acceleratingInvariant, el.attenuation, el.aperture)
def _generate_charge(models):
if models.beam.spaceCharge == 'none':
return ''
return 'SPCHARGE {} {}'.format(models.beam.spaceCharge.upper(), models.beam.spaceChargeCore)
def _generate_current(models):
return 'CURRENT {} {}'.format(models.beam.current, models.beam.numberOfParticles)
def _generate_energy_phase_distribution(dist):
return '{} {} {}'.format(
dist.meanPhase,
dist.phaseLength,
dist.phaseDeviation if dist.distributionType == 'gaussian' else '',
)
def _generate_lattice(models):
res = ''
for el in models.beamline:
if el.type == 'powerElement':
res += 'POWER {} {} {}'.format(el.inputPower, el.frequency, el.phaseShift)
elif el.type == 'cellElement':
res += 'CELL {}'.format(_generate_cell_params(el))
has_cell_or_drift = True
elif el.type == 'cellsElement':
res += 'CELLS {} {}'.format(el.repeat, _generate_cell_params(el))
has_cell_or_drift = True
elif el.type == 'driftElement':
res += 'DRIFT {} {} {}'.format(el.length, el.radius, el.meshPoints)
has_cell_or_drift = True
elif el.type == 'saveElement':
#TODO(pjm): implement this
pass
else:
raise RuntimeError('unknown element type: {}'.format(el.type))
res += "\n"
return res
def _generate_longitude_dist(models):
dist_type = models.beam.longitudinalDistribution
if dist_type == 'norm2d':
dist = models.energyPhaseDistribution
if dist.distributionType == 'uniform':
return 'NORM2D {} {} {} {}'.format(
dist.meanEnergy, dist.energySpread, dist.meanPhase, dist.phaseLength)
if dist.distributionType == 'gaussian':
return 'NORM2D {} {} {} {} {} {}'.format(
dist.meanEnergy, dist.energySpread, dist.energyDeviation, dist.meanPhase, dist.phaseLength, dist.phaseDeviation)
raise RuntimeError('unknown longitudinal distribution type: {}'.format(dist.distributionType))
if dist_type == 'file1d':
return 'FILE1D {} {}'.format(
template_common.lib_file_name('beam', 'longitudinalFile1d', models.beam.longitudinalFile1d),
_generate_energy_phase_distribution(models.energyPhaseDistribution),
)
if dist_type == 'file2d':
return 'FILE2D {}'.format(template_common.lib_file_name('beam', 'transversalFile2d', models.beam.transversalFile2d))
raise RuntimeError('unknown longitudinal distribution: {}'.format(models.beam.longitudinalDistribution))
def _generate_options(models):
if models.simulationSettings.allowBackwardWaves == '1':
return 'OPTIONS REVERSE'
return ''
def _generate_parameters_file(data, run_dir=None, is_parallel=False):
template_common.validate_models(data, _SCHEMA)
v = template_common.flatten_data(data['models'], {})
v['optionsCommand'] = _generate_options(data['models'])
v['solenoidCommand'] = _generate_solenoid(data['models'])
v['beamCommand'] = _generate_beam(data['models'])
v['currentCommand'] = _generate_current(data['models'])
v['chargeCommand'] = _generate_charge(data['models'])
if is_parallel:
|
v['latticeCommands'] = _generate_lattice(data['models'])
|
conditional_block
|
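A small sketch of the element-to-command dispatch performed by _generate_lattice above, assuming plain dicts for the beamline elements; the element values are made up and only a subset of the element types is shown.

def generate_lattice(beamline):
    # Emit one input-file command per beamline element, one per line.
    res = ''
    for el in beamline:
        if el['type'] == 'powerElement':
            res += 'POWER {} {} {}'.format(el['inputPower'], el['frequency'], el['phaseShift'])
        elif el['type'] == 'driftElement':
            res += 'DRIFT {} {} {}'.format(el['length'], el['radius'], el['meshPoints'])
        else:
            raise RuntimeError('unknown element type: {}'.format(el['type']))
        res += '\n'
    return res

print(generate_lattice([
    {'type': 'powerElement', 'inputPower': 1e6, 'frequency': 2856, 'phaseShift': 0},
    {'type': 'driftElement', 'length': 10, 'radius': 1, 'meshPoints': 20},
]))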
|
lib.rs
|
/// Other, one-off errors, with reasoning provided as a string
Other(&'static str),
}
impl From<io::Error> for Error {
fn from(err: io::Error) -> Self {
Error::IO(err)
}
}
impl From<Error> for io::Error {
fn from(err: Error) -> Self {
match err {
Error::IO(err) => err,
Error::Python3Only => io::Error::new(
io::ErrorKind::Other,
"this function is only available for Python 3",
),
Error::Other(why) => io::Error::new(io::ErrorKind::Other, why),
}
}
}
/// The result type denoting a return `T` or
/// an [`Error`](enum.Error.html).
pub type PyResult<T> = Result<T, Error>;
/// The result type denotes that this function
/// is only available when interfacing a Python 3
/// interpreter.
///
/// It's the same as the normal [`PyResult`](type.PyResult.html)
/// used throughout this module, but it's just a little
/// type hint.
pub type Py3Only<T> = Result<T, Error>;
#[inline]
fn other_err(what: &'static str) -> Error {
Error::Other(what)
}
/// Defines the script with a common prelude of imports
/// and helper functions. Returns a single string that
/// represents the script.
fn build_script(lines: &[&str]) -> String {
let mut script = String::new();
script.push_str("from __future__ import print_function\n");
script.push_str("import sysconfig\n");
script.push_str("pyver = sysconfig.get_config_var('VERSION')\n");
script.push_str("getvar = sysconfig.get_config_var\n");
script.push_str(&lines.join("\n"));
script
}
/// Exposes Python configuration information
pub struct PythonConfig {
/// The commander that provides responses to our commands
cmdr: SysCommand,
/// The version of the Python interpreter we're using
ver: Version,
}
impl Default for PythonConfig {
fn default() -> PythonConfig {
PythonConfig::new()
}
}
impl PythonConfig {
/// Create a new `PythonConfig` that uses the system installed Python 3
/// interpreter to query configuration information.
pub fn new() -> Self {
PythonConfig::version(Version::Three)
}
/// Create a new `PythonConfig` that uses the system installed Python
/// of version `version`.
///
/// # Example
///
/// ```
/// use python_config::{PythonConfig, Version};
///
/// // Use the system-wide Python3 interpreter
/// let cfg = PythonConfig::version(Version::Three);
/// ```
pub fn version(version: Version) -> Self {
match version {
Version::Three => Self::with_commander(version, SysCommand::new("python3")),
Version::Two => Self::with_commander(version, SysCommand::new("python2")),
}
}
fn with_commander(ver: Version, cmdr: SysCommand) -> Self
|
fn is_py3(&self) -> Result<(), Error> {
if self.ver != Version::Three {
Err(Error::Python3Only)
} else {
Ok(())
}
}
/// Create a `PythonConfig` that uses the interpreter at the path `interpreter`.
///
/// This fails if the path cannot be represented as a string, or if a query
/// for the Python version fails.
///
/// # Example
///
/// ```no_run
/// use python_config::PythonConfig;
///
/// let cfg = PythonConfig::interpreter("/usr/local/bin/python3");
/// assert!(cfg.is_ok());
/// ```
pub fn interpreter<P: AsRef<path::Path>>(interpreter: P) -> PyResult<Self> {
let cmdr = SysCommand::new(
interpreter
.as_ref()
.to_str()
.ok_or_else(|| other_err("unable to coerce interpreter path to string"))?,
);
// Assume Python 3 unless the semver tells us otherwise
let mut cfg = PythonConfig {
cmdr,
ver: Version::Three,
};
if cfg.semantic_version()?.major == 2 {
cfg.ver = Version::Two;
}
Ok(cfg)
}
/// Returns the Python version string
///
/// This is the raw return of `python --version`. Consider using
/// [`semantic_version`](struct.PythonConfig.html#method.semantic_version)
/// for something more useful.
///
/// # Example
///
/// ```no_run
/// use python_config::PythonConfig;
///
/// let cfg = PythonConfig::new();
/// // Prints something like 'Python 3.7.4'
/// println!("{}", cfg.version_raw().unwrap());
/// ```
pub fn version_raw(&self) -> PyResult<String> {
self.cmdr.commands(&["--version"]).map_err(From::from)
}
/// Returns the Python version as a semver
///
/// # Example
///
/// ```no_run
/// use python_config::PythonConfig;
///
/// let cfg = PythonConfig::new();
/// // Prints semver "3.7.4"
/// println!("{}", cfg.semantic_version().unwrap());
/// ```
pub fn semantic_version(&self) -> PyResult<semver::Version> {
self.version_raw()
.and_then(|resp| {
let mut witer = resp.split_whitespace();
witer.next(); // 'Python'
let ver = witer.next().ok_or_else(|| {
other_err("expected --version to return a string resembling 'Python X.Y.Z'")
})?;
semver::Version::parse(ver).map_err(|_| other_err("unable to parse semver"))
})
.map_err(From::from)
}
fn script(&self, lines: &[&str]) -> PyResult<String> {
self.cmdr
.commands(&["-c", &build_script(lines)])
.map_err(From::from)
}
/// Returns the installation prefix of the Python interpreter as a string.
///
/// The prefix is dependent on the host operating system.
/// On macOS, depending on how Python is installed, it will return
/// a string resembling
/// `/usr/local/opt/python/Frameworks/Python.framework/Versions/3.7`.
///
/// # Example
///
/// ```no_run
/// use python_config::PythonConfig;
///
/// let cfg = PythonConfig::new();
/// println!("{}", cfg.prefix().unwrap());
/// ```
pub fn prefix(&self) -> PyResult<String> {
self.script(&["print(getvar('prefix'))"])
}
/// Like [`prefix`](#method.prefix), but returns
/// the installation prefix as a `PathBuf`.
///
/// # Example
///
/// ```no_run
/// use python_config::PythonConfig;
///
/// let cfg = PythonConfig::new();
/// println!("{}", cfg.prefix_path().unwrap().display());
/// ```
pub fn prefix_path(&self) -> PyResult<PathBuf> {
self.prefix().map(PathBuf::from)
}
/// Returns the executable path prefix for the Python interpreter as a string
///
/// The path is dependent on the host OS and the installation path
/// of the Python interpreter. On macOS, the string may resemble something
/// like `/usr/local/opt/python/Frameworks/Python.framework/Versions/3.7`.
pub fn exec_prefix(&self) -> PyResult<String> {
self.script(&["print(getvar('exec_prefix'))"])
}
/// Like [`exec_prefix`](#method.exec_prefix), but
/// returns the executable prefix as a `PathBuf`.
pub fn exec_prefix_path(&self) -> PyResult<PathBuf> {
self.exec_prefix().map(PathBuf::from)
}
/// Returns a list of paths that represent the include paths
/// for the distribution's headers. This is a space-delimited
/// string of paths prefixed with `-I`.
///
/// The single string may resemble something like the following
/// (on macOS)
///
/// ```text
/// -I/usr/local/Cellar/python/3.7.4/Frameworks/Python.framework/Versions/3.7/include/python3.7m
/// ```
///
/// Note that the same path may appear more than once.
pub fn includes(&self) -> PyResult<String> {
self.script(&[
"flags = ['-I' + sysconfig.get_path('include'), '-I' + sysconfig.get_path('platinclude')]",
"print(' '.join(flags))",
])
}
/// Returns a list of paths that represent the include paths
/// for the distribution's headers. Unlike [`includes`](#method.includes),
/// this is simply a collection of paths. Note that the same
/// path may appear more than once.
pub fn include_paths(&self) -> PyResult<Vec<PathBuf>> {
self.script(&[
"print(sysconfig.get_path('include'))",
"print(sysconfig.get_path('platinclude'))",
])
.map(|resp| resp.lines().map(PathBuf::from).collect())
}
/// All the flags useful for C compilation. This includes the include
|
{
PythonConfig { cmdr, ver }
}
|
identifier_body
|
lib.rs
|
'")
})?;
semver::Version::parse(ver).map_err(|_| other_err("unable to parse semver"))
})
.map_err(From::from)
}
fn script(&self, lines: &[&str]) -> PyResult<String> {
self.cmdr
.commands(&["-c", &build_script(lines)])
.map_err(From::from)
}
/// Returns the installation prefix of the Python interpreter as a string.
///
/// The prefix is dependent on the host operating system.
/// On macOS, depending on how Python is installed, it will return
/// a string resembling
/// `/usr/local/opt/python/Frameworks/Python.framework/Versions/3.7`.
///
/// # Example
///
/// ```no_run
/// use python_config::PythonConfig;
///
/// let cfg = PythonConfig::new();
/// println!("{}", cfg.prefix().unwrap());
/// ```
pub fn prefix(&self) -> PyResult<String> {
self.script(&["print(getvar('prefix'))"])
}
/// Like [`prefix`](#method.prefix), but returns
/// the installation prefix as a `PathBuf`.
///
/// # Example
///
/// ```no_run
/// use python_config::PythonConfig;
///
/// let cfg = PythonConfig::new();
/// println!("{}", cfg.prefix_path().unwrap().display());
/// ```
pub fn prefix_path(&self) -> PyResult<PathBuf> {
self.prefix().map(PathBuf::from)
}
/// Returns the executable path prefix for the Python interpreter as a string
///
/// The path is dependent on the host OS and the installation path
/// of the Python interpreter. On macOS, the string may resemble something
/// like `/usr/local/opt/python/Frameworks/Python.framework/Versions/3.7`.
pub fn exec_prefix(&self) -> PyResult<String> {
self.script(&["print(getvar('exec_prefix'))"])
}
/// Like [`exec_prefix`](#method.exec_prefix), but
/// returns the executable prefix as a `PathBuf`.
pub fn exec_prefix_path(&self) -> PyResult<PathBuf> {
self.exec_prefix().map(PathBuf::from)
}
/// Returns a list of paths that represent the include paths
/// for the distribution's headers. This is a space-delimited
/// string of paths prefixed with `-I`.
///
/// The single string may resemble something like the following
/// (on macOS)
///
/// ```text
/// -I/usr/local/Cellar/python/3.7.4/Frameworks/Python.framework/Versions/3.7/include/python3.7m
/// ```
///
/// Note that the same path may appear more than once.
pub fn includes(&self) -> PyResult<String> {
self.script(&[
"flags = ['-I' + sysconfig.get_path('include'), '-I' + sysconfig.get_path('platinclude')]",
"print(' '.join(flags))",
])
}
/// Returns a list of paths that represent the include paths
/// for the distribution's headers. Unlike [`includes`](#method.includes),
/// this is simply a collection of paths. Note that the same
/// path may appear more than once.
pub fn include_paths(&self) -> PyResult<Vec<PathBuf>> {
self.script(&[
"print(sysconfig.get_path('include'))",
"print(sysconfig.get_path('platinclude'))",
])
.map(|resp| resp.lines().map(PathBuf::from).collect())
}
/// All the flags useful for C compilation. This includes the include
/// paths (see [`includes`](#method.includes)) as well as other compiler
/// flags for this target. The return is a string with spaces separating
/// the flags.
pub fn cflags(&self) -> PyResult<String> {
self.script(&[
"flags = ['-I' + sysconfig.get_path('include'), '-I' + sysconfig.get_path('platinclude')]",
linux_line!("flags.extend(getvar('BASECFLAGS').split())"),
linux_line!("flags.extend(getvar('CONFIGURE_CFLAGS').split())"),
macos_line!("flags.extend(getvar('CFLAGS').split())"),
"print(' '.join(flags))",
])
}
/// Returns linker flags required for linking this Python
/// distribution. All libraries / frameworks have the appropriate `-l`
/// or `-framework` prefixes.
///
/// On macOS, the single string may resemble something like
///
/// ```text
/// -lpython3.7m -ldl -framework CoreFoundation
/// ```
pub fn libs(&self) -> PyResult<String> {
self.script(&[
"import sys",
"libs = ['-lpython' + pyver + sys.abiflags]",
"libs += getvar('LIBS').split()",
"libs += getvar('SYSLIBS').split()",
"print(' '.join(libs))",
])
}
/// Returns linker flags required for creating
/// a shared library for this Python distribution. All libraries / frameworks
/// have the appropriate `-L`, `-l`, or `-framework` prefixes.
///
/// On macOS, the single string may resemble something like
///
/// ```text
/// -L/usr/local/opt/python/Frameworks/Python.framework/Versions/3.7/lib/python3.7/config-3.7m-darwin -lpython3.7m -ldl -framework CoreFoundation
/// ```
pub fn ldflags(&self) -> PyResult<String> {
self.script(&[
"import sys",
"libs = ['-lpython' + pyver + sys.abiflags]",
linux_line!["libs.insert(0, '-L' + getvar('exec_prefix') + '/lib')"],
"libs += getvar('LIBS').split()",
"libs += getvar('SYSLIBS').split()",
"if not getvar('Py_ENABLED_SHARED'):",
tab!("libs.insert(0, '-L' + getvar('LIBPL'))"),
"if not getvar('PYTHONFRAMEWORK'):",
tab!("libs.extend(getvar('LINKFORSHARED').split())"),
"print(' '.join(libs))",
])
}
/// Returns a string that represents the file extension for this distribution's library
///
/// This is only available when your interpreter is a Python 3 interpreter! This is for
/// feature parity with the `python3-config` script.
///
/// On macOS, the string may resemble something like `.cpython-37m-darwin.so`.
pub fn extension_suffix(&self) -> Py3Only<String> {
self.is_py3()?;
let resp = self.script(&["print(getvar('EXT_SUFFIX'))"])?;
Ok(resp)
}
/// The ABI flags specified when building this Python distribution
///
/// This is only available when your interpreter is a Python 3 interpreter! This is for
/// feature parity with the `python3-config` script.
pub fn abi_flags(&self) -> Py3Only<String> {
self.is_py3()?;
let resp = self.script(&["import sys", "print(sys.abiflags)"])?;
Ok(resp)
}
/// The location of the distribution's actual `python3-config` script
///
/// This is only available when your interpreter is a Python 3 interpreter! This is for
/// feature parity with the `python3-config` script.
pub fn config_dir(&self) -> Py3Only<String> {
self.is_py3()?;
let resp = self.script(&["print(getvar('LIBPL'))"])?;
Ok(resp)
}
/// Like [`config_dir`](#method.config_dir), but returns the path to
/// the distribution's `python-config` script as a `PathBuf`.
///
/// This is only available when your interpreter is a Python 3 interpreter! This is for
/// feature parity with the `python3-config` script.
pub fn config_dir_path(&self) -> Py3Only<PathBuf> {
self.config_dir().map(PathBuf::from)
}
}
#[cfg(test)]
mod tests {
//! The tests only show that, under normal circumstances, there
//! are no errors returned from the public API.
use super::PythonConfig;
use std::path::PathBuf;
macro_rules! pycfgtest {
($ident:ident) => {
#[test]
fn $ident() {
assert!(PythonConfig::new().$ident().is_ok());
}
};
}
pycfgtest!(version_raw);
pycfgtest!(semantic_version);
pycfgtest!(prefix);
pycfgtest!(prefix_path);
pycfgtest!(exec_prefix);
pycfgtest!(exec_prefix_path);
pycfgtest!(includes);
pycfgtest!(include_paths);
pycfgtest!(cflags);
pycfgtest!(libs);
pycfgtest!(ldflags);
pycfgtest!(extension_suffix);
pycfgtest!(abi_flags);
pycfgtest!(config_dir);
pycfgtest!(config_dir_path);
// Shows that includes and include_paths return the same things
// just in different types.
#[test]
fn
|
include_paths_same
|
identifier_name
|
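The scripts assembled by build_script above shell out to the interpreter and read sysconfig variables; the same values can be obtained directly from Python's standard library. A minimal sketch, assuming a POSIX CPython build (sys.abiflags is not defined on Windows), with installation-dependent output:

import sys
import sysconfig

# Same variables the generated scripts query via getvar / sysconfig.
pyver = sysconfig.get_config_var('VERSION')
includes = ['-I' + sysconfig.get_path('include'), '-I' + sysconfig.get_path('platinclude')]
libs = ['-lpython' + pyver + sys.abiflags]

print(' '.join(includes))
print(' '.join(libs))
print(sysconfig.get_config_var('EXT_SUFFIX'))  # e.g. '.cpython-37m-darwin.so'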
|
lib.rs
|
/// Other, one-off errors, with reasoning provided as a string
Other(&'static str),
}
impl From<io::Error> for Error {
fn from(err: io::Error) -> Self {
Error::IO(err)
}
}
impl From<Error> for io::Error {
fn from(err: Error) -> Self {
match err {
Error::IO(err) => err,
Error::Python3Only => io::Error::new(
io::ErrorKind::Other,
"this function is only available for Python 3",
),
Error::Other(why) => io::Error::new(io::ErrorKind::Other, why),
}
}
}
/// The result type denoting a return `T` or
|
/// The result type denotes that this function
/// is only available when interfacing a Python 3
/// interpreter.
///
/// It's the same as the normal [`PyResult`](type.PyResult.html)
/// used throughout this module, but it's just a little
/// type hint.
pub type Py3Only<T> = Result<T, Error>;
#[inline]
fn other_err(what: &'static str) -> Error {
Error::Other(what)
}
/// Defines the script with a common prelude of imports
/// and helper functions. Returns a single string that
/// represents the script.
fn build_script(lines: &[&str]) -> String {
let mut script = String::new();
script.push_str("from __future__ import print_function\n");
script.push_str("import sysconfig\n");
script.push_str("pyver = sysconfig.get_config_var('VERSION')\n");
script.push_str("getvar = sysconfig.get_config_var\n");
script.push_str(&lines.join("\n"));
script
}
/// Exposes Python configuration information
pub struct PythonConfig {
/// The commander that provides responses to our commands
cmdr: SysCommand,
/// The version of the Python interpreter we're using
ver: Version,
}
impl Default for PythonConfig {
fn default() -> PythonConfig {
PythonConfig::new()
}
}
impl PythonConfig {
/// Create a new `PythonConfig` that uses the system installed Python 3
/// interpreter to query configuration information.
pub fn new() -> Self {
PythonConfig::version(Version::Three)
}
/// Create a new `PythonConfig` that uses the system installed Python
/// of version `version`.
///
/// # Example
///
/// ```
/// use python_config::{PythonConfig, Version};
///
/// // Use the system-wide Python3 interpreter
/// let cfg = PythonConfig::version(Version::Three);
/// ```
pub fn version(version: Version) -> Self {
match version {
Version::Three => Self::with_commander(version, SysCommand::new("python3")),
Version::Two => Self::with_commander(version, SysCommand::new("python2")),
}
}
fn with_commander(ver: Version, cmdr: SysCommand) -> Self {
PythonConfig { cmdr, ver }
}
fn is_py3(&self) -> Result<(), Error> {
if self.ver != Version::Three {
Err(Error::Python3Only)
} else {
Ok(())
}
}
/// Create a `PythonConfig` that uses the interpreter at the path `interpreter`.
///
/// This fails if the path cannot be represented as a string, or if a query
/// for the Python version fails.
///
/// # Example
///
/// ```no_run
/// use python_config::PythonConfig;
///
/// let cfg = PythonConfig::interpreter("/usr/local/bin/python3");
/// assert!(cfg.is_ok());
/// ```
pub fn interpreter<P: AsRef<path::Path>>(interpreter: P) -> PyResult<Self> {
let cmdr = SysCommand::new(
interpreter
.as_ref()
.to_str()
.ok_or_else(|| other_err("unable to coerce interpreter path to string"))?,
);
// Assume Python 3 unless the semver tells us otherwise
let mut cfg = PythonConfig {
cmdr,
ver: Version::Three,
};
if cfg.semantic_version()?.major == 2 {
cfg.ver = Version::Two;
}
Ok(cfg)
}
/// Returns the Python version string
///
/// This is the raw return of `python --version`. Consider using
/// [`semantic_version`](struct.PythonConfig.html#method.semantic_version)
/// for something more useful.
///
/// # Example
///
/// ```no_run
/// use python_config::PythonConfig;
///
/// let cfg = PythonConfig::new();
/// // Prints something like 'Python 3.7.4'
/// println!("{}", cfg.version_raw().unwrap());
/// ```
pub fn version_raw(&self) -> PyResult<String> {
self.cmdr.commands(&["--version"]).map_err(From::from)
}
/// Returns the Python version as a semver
///
/// # Example
///
/// ```no_run
/// use python_config::PythonConfig;
///
/// let cfg = PythonConfig::new();
/// // Prints semver "3.7.4"
/// println!("{}", cfg.semantic_version().unwrap());
/// ```
pub fn semantic_version(&self) -> PyResult<semver::Version> {
self.version_raw()
.and_then(|resp| {
let mut witer = resp.split_whitespace();
witer.next(); // 'Python'
let ver = witer.next().ok_or_else(|| {
other_err("expected --version to return a string resembling 'Python X.Y.Z'")
})?;
semver::Version::parse(ver).map_err(|_| other_err("unable to parse semver"))
})
.map_err(From::from)
}
fn script(&self, lines: &[&str]) -> PyResult<String> {
self.cmdr
.commands(&["-c", &build_script(lines)])
.map_err(From::from)
}
/// Returns the installation prefix of the Python interpreter as a string.
///
/// The prefix is dependent on the host operating system.
/// On macOS, depending on how Python is installed, it will return
/// a string resembling
/// `/usr/local/opt/python/Frameworks/Python.framework/Versions/3.7`.
///
/// # Example
///
/// ```no_run
/// use python_config::PythonConfig;
///
/// let cfg = PythonConfig::new();
/// println!("{}", cfg.prefix().unwrap());
/// ```
pub fn prefix(&self) -> PyResult<String> {
self.script(&["print(getvar('prefix'))"])
}
/// Like [`prefix`](#method.prefix), but returns
/// the installation prefix as a `PathBuf`.
///
/// # Example
///
/// ```no_run
/// use python_config::PythonConfig;
///
/// let cfg = PythonConfig::new();
/// println!("{}", cfg.prefix_path().unwrap().display());
/// ```
pub fn prefix_path(&self) -> PyResult<PathBuf> {
self.prefix().map(PathBuf::from)
}
/// Returns the executable path prefix for the Python interpreter as a string
///
/// The path is dependent on the host OS and the installation path
/// of the Python interpreter. On macOS, the string may resemble something
/// like `/usr/local/opt/python/Frameworks/Python.framework/Versions/3.7`.
pub fn exec_prefix(&self) -> PyResult<String> {
self.script(&["print(getvar('exec_prefix'))"])
}
/// Like [`exec_prefix`](#method.exec_prefix), but
/// returns the executable prefix as a `PathBuf`.
pub fn exec_prefix_path(&self) -> PyResult<PathBuf> {
self.exec_prefix().map(PathBuf::from)
}
/// Returns a list of paths that represent the include paths
/// for the distribution's headers. This is a space-delimited
/// string of paths prefixed with `-I`.
///
/// The single string may resemble something like the following
/// (on macOS)
///
/// ```text
/// -I/usr/local/Cellar/python/3.7.4/Frameworks/Python.framework/Versions/3.7/include/python3.7m
/// ```
///
/// Note that the same path may appear more than once.
pub fn includes(&self) -> PyResult<String> {
self.script(&[
"flags = ['-I' + sysconfig.get_path('include'), '-I' + sysconfig.get_path('platinclude')]",
"print(' '.join(flags))",
])
}
/// Returns a list of paths that represent the include paths
/// for the distribution's headers. Unlike [`includes`](#method.includes),
/// this is simply a collection of paths. Note that the same
/// path may appear more than once.
pub fn include_paths(&self) -> PyResult<Vec<PathBuf>> {
self.script(&[
"print(sysconfig.get_path('include'))",
"print(sysconfig.get_path('platinclude'))",
])
.map(|resp| resp.lines().map(PathBuf::from).collect())
}
/// All the flags useful for C compilation. This includes the include
|
/// an [`Error`](enum.Error.html).
pub type PyResult<T> = Result<T, Error>;
|
random_line_split
|
forward.go
|
type PortMappings struct {
// Name of the container - May be left empty in YAML config file
Name string `yaml:"name,omitempty"`
// Protocol should be "tcp" or "udp"
Protocol string `yaml:"protocol"`
// Ports is a mapping of host ports as keys to container ports as values
Ports map[string]int `yaml:",inline"`
}
// NewPortMappings initializes and returns an empty PortMappings struct
func NewPortMappings() PortMappings {
p := PortMappings{}
p.Ports = map[string]int{}
return p
}
// Config represents the Config File format that can be stored in YAML format
type Config struct {
Forwards map[string][]PortMappings `yaml:",inline"`
}
// NewConfig creates and returns initialized config
func NewConfig() Config {
c := Config{}
c.Forwards = map[string][]PortMappings{}
return c
}
// LoadYAMLConfig loads a YAML Port Forwarding config file and builds the appropriate config
func LoadYAMLConfig(path string) (config Config, err error) {
yml, err := ioutil.ReadFile(path)
if err != nil {
return config, err
}
err = yaml.Unmarshal(yml, &config)
return config, err
}
// Validate checks a config for correctness. Currently provides the following checks:
// * For each container, makes sure an equal number of Host and Container Ports are provided
// * Makes sure no Host port is used more than once.
func (c Config) Validate() (bool, error) {
// First do some sanity checks
hostPorts := map[string]interface{}{}
for container, portForwards := range c.Forwards {
for _, portForward := range portForwards {
// Make sure that port lists were actually provided
if len(portForward.Ports) == 0 {
return false, fmt.Errorf("No ports provided for container %s", container)
}
for hPort := range portForward.Ports {
_, err := strconv.Atoi(hPort)
if err != nil {
return false, fmt.Errorf("Invalid port %s provided for container %s", hPort, container)
}
// Can only forward a port from the host to one container, check to ensure no duplicate host ports
fullPort := portForward.Protocol + ":" + hPort
_, ok := hostPorts[fullPort]
if ok {
return false, fmt.Errorf("Port %s has already been mapped", fullPort)
}
hostPorts[fullPort] = nil
portForward.Name = container
}
}
}
return true, nil
}
// Forwarder represents a port forwarding client that can set up and tear down port forwarding for LXD containers
type Forwarder struct {
Config
*lxd.Client
}
const (
// ContainerStarted matches the text used in monitoring for a Container Starting up
ContainerStarted = "ContainerStart"
// ContainerStopped matches the text used in monitoring for a Container shutting down or being stopped
ContainerStopped = "ContainerStop"
// IPTable is the table that all IPTable rules should be added to
IPTable = "nat"
)
// NewForwarder validates the provided config then creates and returns port forward client
func NewForwarder(config Config) (*Forwarder, error) {
_, err := config.Validate()
if err != nil {
return nil, err
}
c := Forwarder{}
c.Client, err = lxd.NewClient(&lxd.DefaultConfig, "local")
if err != nil {
return nil, err
}
c.Config = config
return &c, nil
}
// Forward enables forwarding for all containers and port mappings provided in the client config
func (f Forwarder) Forward() error {
errs := []string{}
for container := range f.Config.Forwards {
err := f.ForwardContainer(container)
if err != nil {
errs = append(errs, container)
}
}
var err error
if len(errs) > 0 {
err = fmt.Errorf("Unable to forward ports for containers %s", strings.Join(errs, ", "))
}
return err
}
// Reverse disables forwarding for all containers and port mappings provided in the client config
func (f Forwarder) Reverse() error {
errs := []string{}
for container := range f.Config.Forwards {
err := f.ReverseContainer(container)
if err != nil {
errs = append(errs, container)
}
}
var err error
if len(errs) > 0 {
err = fmt.Errorf("Unable to remove forwarding of ports for containers %s", strings.Join(errs, ", "))
}
return err
}
// ForwardContainer turns on port forwarding for the provided container
// Uses iptables to place ipv4 and ipv6 port forwarding rules
func (f Forwarder) ForwardContainer(container string) error {
_, ok := f.Config.Forwards[container]
if !ok {
return fmt.Errorf("No port rules provided for %s", container)
}
state, err := f.ContainerState(container)
if err != nil {
return fmt.Errorf("unable to get container state for container %s: %s", container, err)
}
if state.StatusCode != shared.Running {
return fmt.Errorf("Container %s is not currently running", container)
}
// Get list of IP addresses on the container to forward to
ip4Addresses := []string{}
ip6Addresses := []string{}
for name, network := range state.Network {
if strings.Contains(name, "eth") || strings.Contains(name, "enp") {
// TODO: Map the container interface to the bridge in use; find a standard way to determine which host interfaces the bridge is tied to
for _, address := range network.Addresses {
switch address.Family {
case "inet":
ip4Addresses = append(ip4Addresses, address.Address)
case "inet6":
ip6Addresses = append(ip6Addresses, address.Address)
}
}
}
}
iptable, err := iptables.New()
if err != nil {
return err
}
ip6table, err := iptables.NewWithProtocol(iptables.ProtocolIPv6)
if err != nil {
return err
}
// Create a new custom chain for the IPTable rules for just this container
customDstChain := getChain(container, Dst)
customSrcChain := getChain(container, Src)
err = iptable.NewChain(IPTable, customDstChain)
if err != nil {
return err
}
err = iptable.NewChain(IPTable, customSrcChain)
if err != nil {
return err
}
err = ip6table.NewChain(IPTable, customDstChain)
if err != nil {
return err
}
err = ip6table.NewChain(IPTable, customSrcChain)
if err != nil {
return err
}
// Tell IPTables when to use our custom chain
err = iptable.Insert(IPTable, "PREROUTING", 1, getChainForwardRule(container, IPv4, Dst)...)
if err != nil {
return err
}
err = ip6table.Insert(IPTable, "PREROUTING", 1, getChainForwardRule(container, IPv6, Dst)...)
if err != nil {
return err
}
err = iptable.Insert(IPTable, "OUTPUT", 1, getChainForwardRule(container, IPv4, Dst)...)
if err != nil {
return err
}
err = ip6table.Insert(IPTable, "OUTPUT", 1, getChainForwardRule(container, IPv6, Dst)...)
if err != nil {
return err
}
err = iptable.Insert(IPTable, "POSTROUTING", 1, getChainForwardRule(container, IPv4, Src)...)
if err != nil {
return err
}
err = ip6table.Insert(IPTable, "POSTROUTING", 1, getChainForwardRule(container, IPv6, Src)...)
if err != nil {
return err
}
// Set up rules within the custom chain of the actual port forwardings
for _, portForwards := range f.Config.Forwards[container] {
protocol := portForwards.Protocol
for hostPort, containerPort := range portForwards.Ports {
for _, address := range ip4Addresses {
iptable.Append(IPTable, customDstChain, getPortForwardRule(protocol, address, strconv.Itoa(containerPort), hostPort, IPv4, Dst)...)
iptable.Append(IPTable, customSrcChain, getPortForwardRule(protocol, address, strconv.Itoa(containerPort), hostPort, IPv4, Src)...)
}
for _, address := range ip6Addresses {
ip6table.Append(IPTable, customDstChain, getPortForwardRule(protocol, address, strconv.Itoa(containerPort), hostPort, IPv6, Dst)...)
ip6table.Append(IPTable, customSrcChain, getPortForwardRule(protocol, address, strconv.Itoa(containerPort), hostPort, IPv6, Src)...)
}
}
}
return nil
}
// ReverseContainer removes port forwarding for the provided container
func (f Forwarder) ReverseContainer(container string) error {
customDstChain := getChain(container, Dst)
customSrcChain := getChain(container, Src)
iptable, err := iptables.New()
if err != nil {
return err
}
ip6table, err := iptables.NewWithProtocol(iptables.ProtocolIPv6
|
"gopkg.in/yaml.v2"
)
// PortMappings contains information for mapping ports from a host to a container
|
random_line_split
|
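For reference, a hypothetical YAML document in the inline-ports shape consumed above, together with a small Python sketch (using PyYAML, an assumed dependency) of the duplicate host-port rule enforced by Validate; the container names and port numbers are made up.

import yaml  # PyYAML, assumed available

CONFIG = """
web:
  - protocol: tcp
    "80": 8080
    "443": 8443
db:
  - protocol: tcp
    "5432": 5432
"""

def check_duplicate_host_ports(config):
    # Mirror the Validate rule: a host protocol:port pair may be mapped only once.
    seen = set()
    for container, forwards in config.items():
        for forward in forwards:
            protocol = forward.pop('protocol')
            for host_port in forward:
                full_port = '{}:{}'.format(protocol, host_port)
                if full_port in seen:
                    raise ValueError('Port {} has already been mapped'.format(full_port))
                seen.add(full_port)

check_duplicate_host_ports(yaml.safe_load(CONFIG))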
|
forward.go
|
each container, makes sure an equal number of Host and Container Ports are provided
// * Makes sure no Host port is used more than once.
func (c Config) Validate() (bool, error) {
// First do some sanity checks
hostPorts := map[string]interface{}{}
for container, portForwards := range c.Forwards {
for _, portForward := range portForwards {
// Make sure that port lists were actually provided
if len(portForward.Ports) == 0 {
return false, fmt.Errorf("No ports provided for container %s", container)
}
for hPort := range portForward.Ports {
_, err := strconv.Atoi(hPort)
if err != nil {
return false, fmt.Errorf("Invalid port %s provided for container %s", hPort, container)
}
// Can only forward a port from the host to one container, check to ensure no duplicate host ports
fullPort := portForward.Protocol + ":" + hPort
_, ok := hostPorts[fullPort]
if ok {
return false, fmt.Errorf("Port %s has already been mapped", fullPort)
}
hostPorts[fullPort] = nil
portForward.Name = container
}
}
}
return true, nil
}
// Forwarder represents a port forwarding client that can set up and tear down port forwarding for LXD containers
type Forwarder struct {
Config
*lxd.Client
}
const (
// ContainerStarted matches the text used in monitoring for a Container Starting up
ContainerStarted = "ContainerStart"
// ContainerStopped matches the text used in monitoring for a Container shutting down or being stopped
ContainerStopped = "ContainerStop"
// IPTable is the table that all IPTable rules should be added to
IPTable = "nat"
)
// NewForwarder validates the provided config then creates and returns port forward client
func NewForwarder(config Config) (*Forwarder, error) {
_, err := config.Validate()
if err != nil {
return nil, err
}
c := Forwarder{}
c.Client, err = lxd.NewClient(&lxd.DefaultConfig, "local")
if err != nil {
return nil, err
}
c.Config = config
return &c, nil
}
// Forward enables forwarding for all containers and port mappings provided in the client config
func (f Forwarder) Forward() error {
errs := []string{}
for container := range f.Config.Forwards {
err := f.ForwardContainer(container)
if err != nil {
errs = append(errs, container)
}
}
var err error
if len(errs) > 0 {
err = fmt.Errorf("Unable to forward ports for containers %s", strings.Join(errs, ", "))
}
return err
}
// Reverse disables forwarding for all containers and port mappings provided in the client config
func (f Forwarder) Reverse() error {
errs := []string{}
for container := range f.Config.Forwards {
err := f.ReverseContainer(container)
if err != nil {
errs = append(errs, container)
}
}
var err error
if len(errs) > 0 {
err = fmt.Errorf("Unable to remove forwarding of ports for containers %s", strings.Join(errs, ", "))
}
return err
}
// ForwardContainer turns on port forwarding for the provided container
// Uses iptables to place ipv4 and ipv6 port forwarding rules
func (f Forwarder) ForwardContainer(container string) error {
_, ok := f.Config.Forwards[container]
if !ok {
return fmt.Errorf("No port rules provided for %s", container)
}
state, err := f.ContainerState(container)
if err != nil {
return fmt.Errorf("unable to get container state for container %s: %s", container, err)
}
if state.StatusCode != shared.Running {
return fmt.Errorf("Container %s is not currently running", container)
}
// Get list of IP addresses on the container to forward to
ip4Addresses := []string{}
ip6Addresses := []string{}
for name, network := range state.Network {
if strings.Contains(name, "eth") || strings.Contains(name, "enp") {
// TODO: Map the container interface to the bridge in use; find a standard way to determine which host interfaces the bridge is tied to
for _, address := range network.Addresses {
switch address.Family {
case "inet":
ip4Addresses = append(ip4Addresses, address.Address)
case "inet6":
ip6Addresses = append(ip6Addresses, address.Address)
}
}
}
}
iptable, err := iptables.New()
if err != nil {
return err
}
ip6table, err := iptables.NewWithProtocol(iptables.ProtocolIPv6)
if err != nil {
return err
}
// Create a new custom chain for the IPTable rules for just this container
customDstChain := getChain(container, Dst)
customSrcChain := getChain(container, Src)
err = iptable.NewChain(IPTable, customDstChain)
if err != nil {
return err
}
err = iptable.NewChain(IPTable, customSrcChain)
if err != nil {
return err
}
err = ip6table.NewChain(IPTable, customDstChain)
if err != nil {
return err
}
err = ip6table.NewChain(IPTable, customSrcChain)
if err != nil {
return err
}
// Tell IPTables when to use our custom chain
err = iptable.Insert(IPTable, "PREROUTING", 1, getChainForwardRule(container, IPv4, Dst)...)
if err != nil {
return err
}
err = ip6table.Insert(IPTable, "PREROUTING", 1, getChainForwardRule(container, IPv6, Dst)...)
if err != nil {
return err
}
err = iptable.Insert(IPTable, "OUTPUT", 1, getChainForwardRule(container, IPv4, Dst)...)
if err != nil {
return err
}
err = ip6table.Insert(IPTable, "OUTPUT", 1, getChainForwardRule(container, IPv6, Dst)...)
if err != nil {
return err
}
err = iptable.Insert(IPTable, "POSTROUTING", 1, getChainForwardRule(container, IPv4, Src)...)
if err != nil {
return err
}
err = ip6table.Insert(IPTable, "POSTROUTING", 1, getChainForwardRule(container, IPv6, Src)...)
if err != nil {
return err
}
// Set up rules within the custom chain of the actual port forwardings
for _, portForwards := range f.Config.Forwards[container] {
protocol := portForwards.Protocol
for hostPort, containerPort := range portForwards.Ports {
for _, address := range ip4Addresses {
iptable.Append(IPTable, customDstChain, getPortForwardRule(protocol, address, strconv.Itoa(containerPort), hostPort, IPv4, Dst)...)
iptable.Append(IPTable, customSrcChain, getPortForwardRule(protocol, address, strconv.Itoa(containerPort), hostPort, IPv4, Src)...)
}
for _, address := range ip6Addresses {
ip6table.Append(IPTable, customDstChain, getPortForwardRule(protocol, address, strconv.Itoa(containerPort), hostPort, IPv6, Dst)...)
ip6table.Append(IPTable, customSrcChain, getPortForwardRule(protocol, address, strconv.Itoa(containerPort), hostPort, IPv6, Src)...)
}
}
}
return nil
}
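// Illustrative sketch (not part of the original file): inspecting the per-container
// chains that ForwardContainer creates in the nat table. It uses only go-iptables calls
// plus the getChain helper already used in this file; the container name passed in is
// whatever name appears in the config.
func exampleListContainerChains(container string) error {
	ipt, err := iptables.New()
	if err != nil {
		return err
	}
	chains, err := ipt.ListChains(IPTable)
	if err != nil {
		return err
	}
	for _, chain := range chains {
		// getChain is the same helper ForwardContainer uses to name its DST/SRC chains.
		if chain == getChain(container, Dst) || chain == getChain(container, Src) {
			fmt.Println("forwarding chain present:", chain)
		}
	}
	return nil
}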
// ReverseContainer removes port forwarding for the provided container
func (f Forwarder) ReverseContainer(container string) error {
customDstChain := getChain(container, Dst)
customSrcChain := getChain(container, Src)
iptable, err := iptables.New()
if err != nil {
return err
}
ip6table, err := iptables.NewWithProtocol(iptables.ProtocolIPv6)
if err != nil {
return err
}
iptable.Delete(IPTable, "PREROUTING", getChainForwardRule(container, IPv4, Dst)...)
ip6table.Delete(IPTable, "PREROUTING", getChainForwardRule(container, IPv6, Dst)...)
iptable.Delete(IPTable, "OUTPUT", getChainForwardRule(container, IPv4, Dst)...)
ip6table.Delete(IPTable, "OUTPUT", getChainForwardRule(container, IPv6, Dst)...)
iptable.Delete(IPTable, "POSTROUTING", getChainForwardRule(container, IPv4, Src)...)
ip6table.Delete(IPTable, "POSTROUTING", getChainForwardRule(container, IPv6, Src)...)
iptable.ClearChain(IPTable, customDstChain)
iptable.DeleteChain(IPTable, customDstChain)
iptable.ClearChain(IPTable, customSrcChain)
iptable.DeleteChain(IPTable, customSrcChain)
ip6table.ClearChain(IPTable, customDstChain)
ip6table.DeleteChain(IPTable, customDstChain)
ip6table.ClearChain(IPTable, customSrcChain)
ip6table.DeleteChain(IPTable, customSrcChain)
return nil
}
// Watch monitors LXD events and identifies when containers named in the config are stopped or started,
// and disables or enables port forwarding respectively
func (f Forwarder)
|
Watch
|
identifier_name
|
|
forward.go
|
.Errorf("No ports provided for container %s", container)
}
for hPort := range portForward.Ports {
_, err := strconv.Atoi(hPort)
if err != nil {
return false, fmt.Errorf("Invalid port %s provided for container %s", hPort, container)
}
// A host port can only be forwarded to one container; check for duplicate host ports
fullPort := portForward.Protocol + ":" + hPort
_, ok := hostPorts[fullPort]
if ok {
return false, fmt.Errorf("Port %s has already been mapped", fullPort)
}
hostPorts[fullPort] = nil
portForward.Name = container
}
}
}
return true, nil
}
// Forwarder represents a port forwarding client that can setup and teardown port forwarding for LXD containers
type Forwarder struct {
Config
*lxd.Client
}
const (
// ContainerStarted matches the text used in monitoring for a Container Starting up
ContainerStarted = "ContainerStart"
// ContainerStopped matches the text used in monitoring for a Container shutting down or being stopped
ContainerStopped = "ContainerStop"
// IPTable is the table that all IPTable rules should be added to
IPTable = "nat"
)
// NewForwarder validates the provided config, then creates and returns a port forwarding client
func NewForwarder(config Config) (*Forwarder, error) {
_, err := config.Validate()
if err != nil {
return nil, err
}
c := Forwarder{}
c.Client, err = lxd.NewClient(&lxd.DefaultConfig, "local")
if err != nil {
return nil, err
}
c.Config = config
return &c, nil
}
// Forward enables forwarding for all containers and port mappings provided in the client config
func (f Forwarder) Forward() error {
errs := []string{}
for container := range f.Config.Forwards {
err := f.ForwardContainer(container)
if err != nil {
errs = append(errs, container)
}
}
var err error
if len(errs) > 0 {
err = fmt.Errorf("Unable to forward ports for containers %s", strings.Join(errs, ", "))
}
return err
}
// Reverse disables forwarding for all containers and port mappings provided in the client config
func (f Forwarder) Reverse() error {
errs := []string{}
for container := range f.Config.Forwards {
err := f.ReverseContainer(container)
if err != nil {
errs = append(errs, container)
}
}
var err error
if len(errs) > 0 {
err = fmt.Errorf("Unable to remove forwarding of ports for containers %s", strings.Join(errs, ", "))
}
return err
}
// ForwardContainer turns on port forwarding for the provided container
// Uses iptables to place ipv4 and ipv6 port forwarding rules
func (f Forwarder) ForwardContainer(container string) error {
_, ok := f.Config.Forwards[container]
if !ok {
return fmt.Errorf("No port rules provided for %s", container)
}
state, err := f.ContainerState(container)
if err != nil {
return fmt.Errorf("unable to get container state for container %s: %s", container, err)
}
if state.StatusCode != shared.Running {
return fmt.Errorf("Container %s is not currently running", container)
}
// Get list of IP addresses on the container to forward to
ip4Addresses := []string{}
ip6Addresses := []string{}
for name, network := range state.Network {
if strings.Contains(name, "eth") || strings.Contains(name, "enp") {
// TODO: Map the container interface to the bridge being used; find a standard way to determine which host bridge an interface is tied to
for _, address := range network.Addresses {
switch address.Family {
case "inet":
ip4Addresses = append(ip4Addresses, address.Address)
case "inet6":
ip6Addresses = append(ip6Addresses, address.Address)
}
}
}
}
iptable, err := iptables.New()
if err != nil {
return err
}
ip6table, err := iptables.NewWithProtocol(iptables.ProtocolIPv6)
if err != nil {
return err
}
// Create a new custom chain for the IPTable rules for just this container
customDstChain := getChain(container, Dst)
customSrcChain := getChain(container, Src)
err = iptable.NewChain(IPTable, customDstChain)
if err != nil {
return err
}
err = iptable.NewChain(IPTable, customSrcChain)
if err != nil {
return err
}
err = ip6table.NewChain(IPTable, customDstChain)
if err != nil {
return err
}
err = ip6table.NewChain(IPTable, customSrcChain)
if err != nil {
return err
}
// Tell IPTables when to use our custom chain
err = iptable.Insert(IPTable, "PREROUTING", 1, getChainForwardRule(container, IPv4, Dst)...)
if err != nil {
return err
}
err = ip6table.Insert(IPTable, "PREROUTING", 1, getChainForwardRule(container, IPv6, Dst)...)
if err != nil {
return err
}
err = iptable.Insert(IPTable, "OUTPUT", 1, getChainForwardRule(container, IPv4, Dst)...)
if err != nil {
return err
}
err = ip6table.Insert(IPTable, "OUTPUT", 1, getChainForwardRule(container, IPv6, Dst)...)
if err != nil {
return err
}
err = iptable.Insert(IPTable, "POSTROUTING", 1, getChainForwardRule(container, IPv4, Src)...)
if err != nil {
return err
}
err = ip6table.Insert(IPTable, "POSTROUTING", 1, getChainForwardRule(container, IPv6, Src)...)
if err != nil {
return err
}
// Set up rules within the custom chain of the actual port forwardings
for _, portForwards := range f.Config.Forwards[container] {
protocol := portForwards.Protocol
for hostPort, containerPort := range portForwards.Ports {
for _, address := range ip4Addresses {
iptable.Append(IPTable, customDstChain, getPortForwardRule(protocol, address, strconv.Itoa(containerPort), hostPort, IPv4, Dst)...)
iptable.Append(IPTable, customSrcChain, getPortForwardRule(protocol, address, strconv.Itoa(containerPort), hostPort, IPv4, Src)...)
}
for _, address := range ip6Addresses {
ip6table.Append(IPTable, customDstChain, getPortForwardRule(protocol, address, strconv.Itoa(containerPort), hostPort, IPv6, Dst)...)
ip6table.Append(IPTable, customSrcChain, getPortForwardRule(protocol, address, strconv.Itoa(containerPort), hostPort, IPv6, Src)...)
}
}
}
return nil
}
// ReverseContainer removes port forwarding for the provided container
func (f Forwarder) ReverseContainer(container string) error {
customDstChain := getChain(container, Dst)
customSrcChain := getChain(container, Src)
iptable, err := iptables.New()
if err != nil {
return err
}
ip6table, err := iptables.NewWithProtocol(iptables.ProtocolIPv6)
if err != nil {
return err
}
iptable.Delete(IPTable, "PREROUTING", getChainForwardRule(container, IPv4, Dst)...)
ip6table.Delete(IPTable, "PREROUTING", getChainForwardRule(container, IPv6, Dst)...)
iptable.Delete(IPTable, "OUTPUT", getChainForwardRule(container, IPv4, Dst)...)
ip6table.Delete(IPTable, "OUTPUT", getChainForwardRule(container, IPv6, Dst)...)
iptable.Delete(IPTable, "POSTROUTING", getChainForwardRule(container, IPv4, Src)...)
ip6table.Delete(IPTable, "POSTROUTING", getChainForwardRule(container, IPv6, Src)...)
iptable.ClearChain(IPTable, customDstChain)
iptable.DeleteChain(IPTable, customDstChain)
iptable.ClearChain(IPTable, customSrcChain)
iptable.DeleteChain(IPTable, customSrcChain)
ip6table.ClearChain(IPTable, customDstChain)
ip6table.DeleteChain(IPTable, customDstChain)
ip6table.ClearChain(IPTable, customSrcChain)
ip6table.DeleteChain(IPTable, customSrcChain)
return nil
}
// Watch monitors LXD events and identifies when containers named in the config are stopped or started,
// and disables or enables port forwarding respectively
func (f Forwarder) Watch()
|
{
handler := func(i interface{}) {
var container string
var message string
var context map[string]interface{}
data := i.(map[string]interface{})
metadata := data["metadata"].(map[string]interface{})
tmp, ok := metadata["context"]
if ok {
context = tmp.(map[string]interface{})
}
tmp, ok = context["container"]
if ok {
container = tmp.(string)
}
_, ok = f.Forwards[container]
if ok {
|
identifier_body
|
|
forward.go
|
Mappings() PortMappings {
p := PortMappings{}
p.Ports = map[string]int{}
return p
}
// Config represents the Config File format that can be stored in YAML format
type Config struct {
Forwards map[string][]PortMappings `yaml:",inline"`
}
// NewConfig creates and returns initialized config
func NewConfig() Config {
c := Config{}
c.Forwards = map[string][]PortMappings{}
return c
}
// LoadYAMLConfig loads a YAML Port Forwarding config file and builds the appropriate config
func LoadYAMLConfig(path string) (config Config, err error) {
yml, err := ioutil.ReadFile(path)
if err != nil
|
err = yaml.Unmarshal(yml, &config)
return config, err
}
// Validate checks a config for correctness. It currently performs the following checks:
// * For each container, makes sure at least one port mapping is provided and every host port is numeric
// * Makes sure no host port is mapped more than once per protocol
func (c Config) Validate() (bool, error) {
// First do some sanity checks
hostPorts := map[string]interface{}{}
for container, portForwards := range c.Forwards {
for _, portForward := range portForwards {
// Make sure that port lists were actually provided
if len(portForward.Ports) == 0 {
return false, fmt.Errorf("No ports provided for container %s", container)
}
for hPort := range portForward.Ports {
_, err := strconv.Atoi(hPort)
if err != nil {
return false, fmt.Errorf("Invalid port %s provided for container %s", hPort, container)
}
// A host port can only be forwarded to one container; check for duplicate host ports
fullPort := portForward.Protocol + ":" + hPort
_, ok := hostPorts[fullPort]
if ok {
return false, fmt.Errorf("Port %s has already been mapped", fullPort)
}
hostPorts[fullPort] = nil
portForward.Name = container
}
}
}
return true, nil
}
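// Illustrative sketch (not part of the original file): building a config programmatically
// that passes Validate. The container name "web" and the port numbers are hypothetical,
// and the PortMappings field names follow those used elsewhere in this file.
func exampleValidConfig() (Config, error) {
	pm := PortMappings{Protocol: "tcp", Ports: map[string]int{
		"8080": 80, // host port "8080" forwards to container port 80
		"8443": 443,
	}}
	cfg := NewConfig()
	cfg.Forwards["web"] = []PortMappings{pm}
	// Re-using host port tcp:8080 for a second container would make Validate fail
	// with the duplicate-port error above.
	_, err := cfg.Validate()
	return cfg, err
}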
// Forwarder represents a port forwarding client that can setup and teardown port forwarding for LXD containers
type Forwarder struct {
Config
*lxd.Client
}
const (
// ContainerStarted matches the text used in monitoring for a Container Starting up
ContainerStarted = "ContainerStart"
// ContainerStopped matches the text used in monitoring for a Container shutting down or being stopped
ContainerStopped = "ContainerStop"
// IPTable is the table that all IPTable rules should be added to
IPTable = "nat"
)
// NewForwarder validates the provided config, then creates and returns a port forwarding client
func NewForwarder(config Config) (*Forwarder, error) {
_, err := config.Validate()
if err != nil {
return nil, err
}
c := Forwarder{}
c.Client, err = lxd.NewClient(&lxd.DefaultConfig, "local")
if err != nil {
return nil, err
}
c.Config = config
return &c, nil
}
// Forward enables forwarding for all containers and port mappings provided in the client config
func (f Forwarder) Forward() error {
errs := []string{}
for container := range f.Config.Forwards {
err := f.ForwardContainer(container)
if err != nil {
errs = append(errs, container)
}
}
var err error
if len(errs) > 0 {
err = fmt.Errorf("Unable to forward ports for containers %s", strings.Join(errs, ", "))
}
return err
}
// Reverse disables forwarding for all containers and port mappings provided in the client config
func (f Forwarder) Reverse() error {
errs := []string{}
for container := range f.Config.Forwards {
err := f.ReverseContainer(container)
if err != nil {
errs = append(errs, container)
}
}
var err error
if len(errs) > 0 {
err = fmt.Errorf("Unable to remove forwarding of ports for containers %s", strings.Join(errs, ", "))
}
return err
}
// ForwardContainer turns on port forwarding for the provided container
// Uses iptables to place ipv4 and ipv6 port forwarding rules
func (f Forwarder) ForwardContainer(container string) error {
_, ok := f.Config.Forwards[container]
if !ok {
return fmt.Errorf("No port rules provided for %s", container)
}
state, err := f.ContainerState(container)
if err != nil {
return fmt.Errorf("unable to get container state for container %s: %s", container, err)
}
if state.StatusCode != shared.Running {
return fmt.Errorf("Container %s is not currently running", container)
}
// Get list of IP addresses on the container to forward to
ip4Addresses := []string{}
ip6Addresses := []string{}
for name, network := range state.Network {
if strings.Contains(name, "eth") || strings.Contains(name, "enp") {
// TODO: Map the container interface to the bridge being used; find a standard way to determine which host bridge an interface is tied to
for _, address := range network.Addresses {
switch address.Family {
case "inet":
ip4Addresses = append(ip4Addresses, address.Address)
case "inet6":
ip6Addresses = append(ip6Addresses, address.Address)
}
}
}
}
iptable, err := iptables.New()
if err != nil {
return err
}
ip6table, err := iptables.NewWithProtocol(iptables.ProtocolIPv6)
if err != nil {
return err
}
// Create a new custom chain for the IPTable rules for just this container
customDstChain := getChain(container, Dst)
customSrcChain := getChain(container, Src)
err = iptable.NewChain(IPTable, customDstChain)
if err != nil {
return err
}
err = iptable.NewChain(IPTable, customSrcChain)
if err != nil {
return err
}
err = ip6table.NewChain(IPTable, customDstChain)
if err != nil {
return err
}
err = ip6table.NewChain(IPTable, customSrcChain)
if err != nil {
return err
}
// Tell IPTables when to use our custom chain
err = iptable.Insert(IPTable, "PREROUTING", 1, getChainForwardRule(container, IPv4, Dst)...)
if err != nil {
return err
}
err = ip6table.Insert(IPTable, "PREROUTING", 1, getChainForwardRule(container, IPv6, Dst)...)
if err != nil {
return err
}
err = iptable.Insert(IPTable, "OUTPUT", 1, getChainForwardRule(container, IPv4, Dst)...)
if err != nil {
return err
}
err = ip6table.Insert(IPTable, "OUTPUT", 1, getChainForwardRule(container, IPv6, Dst)...)
if err != nil {
return err
}
err = iptable.Insert(IPTable, "POSTROUTING", 1, getChainForwardRule(container, IPv4, Src)...)
if err != nil {
return err
}
err = ip6table.Insert(IPTable, "POSTROUTING", 1, getChainForwardRule(container, IPv6, Src)...)
if err != nil {
return err
}
// Set up rules within the custom chain of the actual port forwardings
for _, portForwards := range f.Config.Forwards[container] {
protocol := portForwards.Protocol
for hostPort, containerPort := range portForwards.Ports {
for _, address := range ip4Addresses {
iptable.Append(IPTable, customDstChain, getPortForwardRule(protocol, address, strconv.Itoa(containerPort), hostPort, IPv4, Dst)...)
iptable.Append(IPTable, customSrcChain, getPortForwardRule(protocol, address, strconv.Itoa(containerPort), hostPort, IPv4, Src)...)
}
for _, address := range ip6Addresses {
ip6table.Append(IPTable, customDstChain, getPortForwardRule(protocol, address, strconv.Itoa(containerPort), hostPort, IPv6, Dst)...)
ip6table.Append(IPTable, customSrcChain, getPortForwardRule(protocol, address, strconv.Itoa(containerPort), hostPort, IPv6, Src)...)
}
}
}
return nil
}
// ReverseContainer removes port forwarding for the provided container
func (f Forwarder) ReverseContainer(container string) error {
customDstChain := getChain(container, Dst)
customSrcChain := getChain(container, Src)
iptable, err := iptables.New()
if err != nil {
return err
}
ip6table, err := iptables.NewWithProtocol(iptables.ProtocolIPv6)
if err != nil {
return err
}
iptable.Delete(IPTable, "PREROUTING", getChainForwardRule(container, IPv4, Dst)...)
ip6table.Delete(IPTable, "PREROUTING", getChainForwardRule(container, IPv6, Dst)...)
iptable.Delete(IPTable, "OUTPUT", getChainForwardRule(container, IPv4, Dst)...)
ip6table.Delete(IPTable, "OUTPUT", getChainForwardRule(container, IPv6, Dst)...)
iptable.Delete(IPTable
|
{
return config, err
}
|
conditional_block
|
login.py
|
return disable_cache(response)
return aiohttp.web.Response(
status=302,
headers={
"Location": HAKA_OIDC_ENDPOINT(
endpoint=str(setd["auth_endpoint_url"]),
oidc=str(setd["keystone_oidc_provider"]),
origin=str(setd["set_origin_address"]),
),
},
)
def test_token(
formdata: MultiDictProxy[typing.Union[str, bytes, aiohttp.web.FileField]],
request: aiohttp.web.Request,
) -> str:
"""Validate unscoped token."""
unscoped: typing.Union[str, None] = None
log = request.app["Log"]
if "token" in formdata:
unscoped = str(formdata["token"])
log.debug(
f"Got OS token in formdata from address {request.remote} :: {time.ctime()}"
)
# Try getting the token id from the query string
if "token" in request.query and unscoped is None:
unscoped = request.query["token"]
log.debug(
"Got OS token in query string "
f"from address {request.remote} :: {time.ctime()}"
)
# Try getting the token id from headers
if "X-Auth-Token" in request.headers and unscoped is None:
unscoped = request.headers["X-Auth-Token"]
log.debug(
"Got OS token in http header "
f"from address {request.remote} :: {time.ctime()}"
)
if unscoped is None:
raise aiohttp.web.HTTPBadRequest(reason="Token missing from query")
if not (re.match("[a-f0-9]{32}", unscoped) and len(unscoped) == 32):
try:
# Check the magic byte matches a fernet token
if not base64.urlsafe_b64decode(unscoped.encode("utf-8"))[:1] == b"\x80":
raise aiohttp.web.HTTPBadRequest(reason="Token is malformed")
# Handle failures in base64decode
except (binascii.Error, UnicodeDecodeError):
raise aiohttp.web.HTTPBadRequest(reason="Token is malformed")
log.info("Got OS token in login return")
return unscoped
async def credentials_login_end(
request: aiohttp.web.Request,
) -> typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]:
"""Handle the login procedure with classic POST."""
log = request.app["Log"]
client = request.app["api_client"]
log.info("Got login request with username, password")
form = await request.post()
try:
username = str(form["username"])
password = str(form["password"])
except KeyError:
raise aiohttp.web.HTTPBadRequest(reason="Username or password not provided")
# Get an unscoped token with credentials
async with client.post(
f"{setd['auth_endpoint_url']}/auth/tokens",
json={
"auth": {
"identity": {
"methods": [
"password",
],
"password": {
"user": {
"name": username,
"domain": {
"name": "Default",
},
"password": password,
},
},
},
"scope": "unscoped",
},
},
) as resp:
if resp.status == 400:
text = await resp.text()
request.app["Log"].debug(text)
raise aiohttp.web.HTTPBadRequest(reason="No username or password provided.")
if resp.status == 401:
text = await resp.text()
request.app["Log"].debug(text)
raise aiohttp.web.HTTPUnauthorized(
reason="Wrong username or password, or no access to the service."
)
if resp.status != 201:
text = await resp.text()
request.app["Log"].debug(text)
raise aiohttp.web.HTTPUnauthorized
unscoped = resp.headers["X-Subject-Token"]
log.debug("Got token in password auth")
return await login_with_token(request, unscoped)
async def sso_query_end(
request: aiohttp.web.Request,
) -> typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]:
"""Handle the login procedure return from SSO or user from POST."""
formdata = await request.post()
# Declare the unscoped token
unscoped = test_token(formdata, request)
return await login_with_token(request, unscoped)
async def login_with_token(
request: aiohttp.web.Request,
token: str,
) -> typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]:
"""Log in a session with token."""
# Establish connection and begin user session
response: typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]
response = aiohttp.web.Response(
status=303,
body=None,
)
client = request.app["api_client"]
session = (
await aiohttp_session.get_session(request)
if setd["oidc_enabled"]
else await aiohttp_session.new_session(request)
)
session["at"] = time.time()
session["referer"] = request.url.host
uname = ""
taint = True if setd["force_restricted_mode"] else False
# Check token availability
avail = await get_availability_from_token(token, client)
csc_projects = session.get("csc-projects", None)
session["projects"] = {}
# Scope a token for all accessible projects
for project in avail["projects"]:
# Filter out projects without declared access, if the OIDC provider supports it
project_without_prefix = project["name"].removeprefix("project_")
if isinstance(csc_projects, list) and project_without_prefix not in csc_projects:
request.app["Log"].debug(
"Project %r is not enabled for sd-connect, skipping",
project["name"],
)
continue
async with client.post(
f"{setd['auth_endpoint_url']}/auth/tokens",
json={
"auth": {
"identity": {
"methods": [
"token",
],
"token": {
"id": token,
},
},
"scope": {"project": {"id": project["id"]}},
}
},
) as resp:
if resp.status == 401:
raise aiohttp.web.HTTPUnauthorized(reason="Token is not valid")
if resp.status == 403:
raise aiohttp.web.HTTPForbidden(reason="No access to service with token.")
ret = await resp.json()
request.app["Log"].debug(f"token output: {ret}")
obj_role = False
request.app["Log"].debug(f'roles: {ret["token"]["roles"]}')
for role in ret["token"]["roles"]:
if role["name"] in str(setd["os_accepted_roles"]).split(";"):
obj_role = True
if not obj_role:
continue
scoped = resp.headers["X-Subject-Token"]
# Use the first available public endpoint
endpoint = [
list(filter(lambda i: i["interface"] == "public", i["endpoints"]))[0]
for i in filter(
lambda i: i["type"] == "object-store", ret["token"]["catalog"]
)
][0]
request.app["Log"].debug(endpoint)
if not uname:
uname = ret["token"]["user"]["name"]
session["projects"][project["id"]] = {
"id": project["id"],
"name": project["name"],
"endpoint": endpoint["url"],
"token": scoped,
"tainted": True if setd["force_restricted_mode"] else False,
}
session["token"] = token
session["uname"] = uname
# The intersection of sdConnectProjects and Allas projects is empty.
# In practice this can happen if there are SD Connect projects that
# don't have Allas enabled.
if not session["projects"]:
request.app["Log"].debug("possible sdConnectProjects and Allas projects mismatch")
raise aiohttp.web.HTTPForbidden(
reason="There are no projects available for this user."
)
session["taint"] = True if taint else False
session.changed()
if taint:
response.headers["Location"] = "/select"
return response
# Redirect to the browse page
if "NAV_TO" in request.cookies.keys():
response.headers["Location"] = request.cookies["NAV_TO"]
response.del_cookie("NAV_TO")
else:
response.headers["Location"] = "/browse"
return response
async def handle_project_lock(request: aiohttp.web.Request) -> aiohttp.web.Response:
"""Lock down to a specific project."""
log = request.app["Log"]
log.info("Call for locking down the project.")
session = await aiohttp_session.get_session(request)
project = request.match_info["project"]
# If a project is specified, ditch all projects that aren't the one specified
if project in session["projects"]:
session["projects"] = dict(
filter(
lambda val: val[0] == project,
session["projects"].items(),
)
)
# If the project doesn't exist, allow all untainted projects
else:
session["projects"] = dict(
filter(lambda val: not val[1]["tainted"], session["projects"].items())
)
if not session["projects"]:
session.invalidate()
raise aiohttp.web.HTTPForbidden(reason="No untainted projects available.")
|
random_line_split
|
||
login.py
|
["has_trust"]:
response = aiohttp.web.FileResponse(str(setd["static_directory"]) + "/login.html")
return disable_cache(response)
return aiohttp.web.Response(
status=302,
headers={
"Location": HAKA_OIDC_ENDPOINT(
endpoint=str(setd["auth_endpoint_url"]),
oidc=str(setd["keystone_oidc_provider"]),
origin=str(setd["set_origin_address"]),
),
},
)
def test_token(
formdata: MultiDictProxy[typing.Union[str, bytes, aiohttp.web.FileField]],
request: aiohttp.web.Request,
) -> str:
"""Validate unscoped token."""
unscoped: typing.Union[str, None] = None
log = request.app["Log"]
if "token" in formdata:
unscoped = str(formdata["token"])
log.debug(
f"Got OS token in formdata from address {request.remote} :: {time.ctime()}"
)
# Try getting the token id from the query string
if "token" in request.query and unscoped is None:
unscoped = request.query["token"]
log.debug(
"Got OS token in query string "
f"from address {request.remote} :: {time.ctime()}"
)
# Try getting the token id from headers
if "X-Auth-Token" in request.headers and unscoped is None:
unscoped = request.headers["X-Auth-Token"]
log.debug(
"Got OS token in http header "
f"from address {request.remote} :: {time.ctime()}"
)
if unscoped is None:
raise aiohttp.web.HTTPBadRequest(reason="Token missing from query")
if not (re.match("[a-f0-9]{32}", unscoped) and len(unscoped) == 32):
try:
# Check the magic byte matches a fernet token
if not base64.urlsafe_b64decode(unscoped.encode("utf-8"))[:1] == b"\x80":
raise aiohttp.web.HTTPBadRequest(reason="Token is malformed")
# Handle failures in base64decode
except (binascii.Error, UnicodeDecodeError):
raise aiohttp.web.HTTPBadRequest(reason="Token is malformed")
log.info("Got OS token in login return")
return unscoped
async def credentials_login_end(
request: aiohttp.web.Request,
) -> typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]:
"""Handle the login procedure with classic POST."""
log = request.app["Log"]
client = request.app["api_client"]
log.info("Got login request with username, password")
form = await request.post()
try:
username = str(form["username"])
password = str(form["password"])
except KeyError:
raise aiohttp.web.HTTPBadRequest(reason="Username or password not provided")
# Get an unscoped token with credentials
async with client.post(
f"{setd['auth_endpoint_url']}/auth/tokens",
json={
"auth": {
"identity": {
"methods": [
"password",
],
"password": {
"user": {
"name": username,
"domain": {
"name": "Default",
},
"password": password,
},
},
},
"scope": "unscoped",
},
},
) as resp:
if resp.status == 400:
text = await resp.text()
request.app["Log"].debug(text)
raise aiohttp.web.HTTPBadRequest(reason="No username or password provided.")
if resp.status == 401:
text = await resp.text()
request.app["Log"].debug(text)
raise aiohttp.web.HTTPUnauthorized(
reason="Wrong username or password, or no access to the service."
)
if resp.status != 201:
text = await resp.text()
request.app["Log"].debug(text)
raise aiohttp.web.HTTPUnauthorized
unscoped = resp.headers["X-Subject-Token"]
log.debug("Got token in password auth")
return await login_with_token(request, unscoped)
async def sso_query_end(
request: aiohttp.web.Request,
) -> typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]:
"""Handle the login procedure return from SSO or user from POST."""
formdata = await request.post()
# Declare the unscoped token
unscoped = test_token(formdata, request)
return await login_with_token(request, unscoped)
async def login_with_token(
request: aiohttp.web.Request,
token: str,
) -> typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]:
"""Log in a session with token."""
# Establish connection and begin user session
response: typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]
response = aiohttp.web.Response(
status=303,
body=None,
)
client = request.app["api_client"]
session = (
await aiohttp_session.get_session(request)
if setd["oidc_enabled"]
else await aiohttp_session.new_session(request)
)
session["at"] = time.time()
session["referer"] = request.url.host
uname = ""
taint = True if setd["force_restricted_mode"] else False
# Check token availability
avail = await get_availability_from_token(token, client)
csc_projects = session.get("csc-projects", None)
session["projects"] = {}
# Scope a token for all accessible projects
for project in avail["projects"]:
# Filter out projects without declared access, if the OIDC provider supports it
project_without_prefix = project["name"].removeprefix("project_")
if isinstance(csc_projects, list) and project_without_prefix not in csc_projects:
request.app["Log"].debug(
"Project %r is not enabled for sd-connect, skipping",
project["name"],
)
continue
async with client.post(
f"{setd['auth_endpoint_url']}/auth/tokens",
json={
"auth": {
"identity": {
"methods": [
"token",
],
"token": {
"id": token,
},
},
"scope": {"project": {"id": project["id"]}},
}
},
) as resp:
if resp.status == 401:
raise aiohttp.web.HTTPUnauthorized(reason="Token is not valid")
if resp.status == 403:
raise aiohttp.web.HTTPForbidden(reason="No access to service with token.")
ret = await resp.json()
request.app["Log"].debug(f"token output: {ret}")
obj_role = False
request.app["Log"].debug(f'roles: {ret["token"]["roles"]}')
for role in ret["token"]["roles"]:
if role["name"] in str(setd["os_accepted_roles"]).split(";"):
obj_role = True
if not obj_role:
continue
scoped = resp.headers["X-Subject-Token"]
# Use the first available public endpoint
endpoint = [
list(filter(lambda i: i["interface"] == "public", i["endpoints"]))[0]
for i in filter(
lambda i: i["type"] == "object-store", ret["token"]["catalog"]
)
][0]
request.app["Log"].debug(endpoint)
if not uname:
uname = ret["token"]["user"]["name"]
session["projects"][project["id"]] = {
"id": project["id"],
"name": project["name"],
"endpoint": endpoint["url"],
"token": scoped,
"tainted": True if setd["force_restricted_mode"] else False,
}
session["token"] = token
session["uname"] = uname
# The intersection of sdConnectProjects and Allas projects is empty.
# In practice this can happen if there are SD Connect projects that
# don't have Allas enabled.
if not session["projects"]:
request.app["Log"].debug("possible sdConnectProjects and Allas projects mismatch")
raise aiohttp.web.HTTPForbidden(
reason="There are no projects available for this user."
)
session["taint"] = True if taint else False
session.changed()
if taint:
response.headers["Location"] = "/select"
return response
# Redirect to the browse page
if "NAV_TO" in request.cookies.keys():
response.headers["Location"] = request.cookies["NAV_TO"]
response.del_cookie("NAV_TO")
else:
response.headers["Location"] = "/browse"
return response
async def handle_project_lock(request: aiohttp.web.Request) -> aiohttp.web.Response:
"""Lock down to a specific project."""
log = request.app["Log"]
log.info("Call for locking down the project.")
session = await aiohttp_session.get_session(request)
project = request.match_info["project"]
# If a project is specified, ditch all projects that aren't the one specified
if project in session["projects"]:
session["projects"] = dict(
filter(
lambda val: val[0] == project,
session["projects"].items(),
)
)
# If the project doesn't exist, allow all untainted projects
else:
|
session["projects"] = dict(
filter(lambda val: not val[1]["tainted"], session["projects"].items())
)
|
conditional_block
|
|
login.py
|
} :: {time.ctime()}"
)
# Try getting the token id from headers
if "X-Auth-Token" in request.headers and unscoped is None:
unscoped = request.headers["X-Auth-Token"]
log.debug(
"Got OS token in http header "
f"from address {request.remote} :: {time.ctime()}"
)
if unscoped is None:
raise aiohttp.web.HTTPBadRequest(reason="Token missing from query")
if not (re.match("[a-f0-9]{32}", unscoped) and len(unscoped) == 32):
try:
# Check the magic byte matches a fernet token
if not base64.urlsafe_b64decode(unscoped.encode("utf-8"))[:1] == b"\x80":
raise aiohttp.web.HTTPBadRequest(reason="Token is malformed")
# Handle failures in base64decode
except (binascii.Error, UnicodeDecodeError):
raise aiohttp.web.HTTPBadRequest(reason="Token is malformed")
log.info("Got OS token in login return")
return unscoped
async def credentials_login_end(
request: aiohttp.web.Request,
) -> typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]:
"""Handle the login procedure with classic POST."""
log = request.app["Log"]
client = request.app["api_client"]
log.info("Got login request with username, password")
form = await request.post()
try:
username = str(form["username"])
password = str(form["password"])
except KeyError:
raise aiohttp.web.HTTPBadRequest(reason="Username or password not provided")
# Get an unscoped token with credentials
async with client.post(
f"{setd['auth_endpoint_url']}/auth/tokens",
json={
"auth": {
"identity": {
"methods": [
"password",
],
"password": {
"user": {
"name": username,
"domain": {
"name": "Default",
},
"password": password,
},
},
},
"scope": "unscoped",
},
},
) as resp:
if resp.status == 400:
text = await resp.text()
request.app["Log"].debug(text)
raise aiohttp.web.HTTPBadRequest(reason="No username or password provided.")
if resp.status == 401:
text = await resp.text()
request.app["Log"].debug(text)
raise aiohttp.web.HTTPUnauthorized(
reason="Wrong username or password, or no access to the service."
)
if resp.status != 201:
text = await resp.text()
request.app["Log"].debug(text)
raise aiohttp.web.HTTPUnauthorized
unscoped = resp.headers["X-Subject-Token"]
log.debug("Got token in password auth")
return await login_with_token(request, unscoped)
async def sso_query_end(
request: aiohttp.web.Request,
) -> typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]:
"""Handle the login procedure return from SSO or user from POST."""
formdata = await request.post()
# Declare the unscoped token
unscoped = test_token(formdata, request)
return await login_with_token(request, unscoped)
async def login_with_token(
request: aiohttp.web.Request,
token: str,
) -> typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]:
"""Log in a session with token."""
# Establish connection and begin user session
response: typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]
response = aiohttp.web.Response(
status=303,
body=None,
)
client = request.app["api_client"]
session = (
await aiohttp_session.get_session(request)
if setd["oidc_enabled"]
else await aiohttp_session.new_session(request)
)
session["at"] = time.time()
session["referer"] = request.url.host
uname = ""
taint = True if setd["force_restricted_mode"] else False
# Check token availability
avail = await get_availability_from_token(token, client)
csc_projects = session.get("csc-projects", None)
session["projects"] = {}
# Scope a token for all accessible projects
for project in avail["projects"]:
# Filter out projects without declared access, if the OIDC provider supports it
project_without_prefix = project["name"].removeprefix("project_")
if isinstance(csc_projects, list) and project_without_prefix not in csc_projects:
request.app["Log"].debug(
"Project %r is not enabled for sd-connect, skipping",
project["name"],
)
continue
async with client.post(
f"{setd['auth_endpoint_url']}/auth/tokens",
json={
"auth": {
"identity": {
"methods": [
"token",
],
"token": {
"id": token,
},
},
"scope": {"project": {"id": project["id"]}},
}
},
) as resp:
if resp.status == 401:
raise aiohttp.web.HTTPUnauthorized(reason="Token is not valid")
if resp.status == 403:
raise aiohttp.web.HTTPForbidden(reason="No access to service with token.")
ret = await resp.json()
request.app["Log"].debug(f"token output: {ret}")
obj_role = False
request.app["Log"].debug(f'roles: {ret["token"]["roles"]}')
for role in ret["token"]["roles"]:
if role["name"] in str(setd["os_accepted_roles"]).split(";"):
obj_role = True
if not obj_role:
continue
scoped = resp.headers["X-Subject-Token"]
# Use the first available public endpoint
endpoint = [
list(filter(lambda i: i["interface"] == "public", i["endpoints"]))[0]
for i in filter(
lambda i: i["type"] == "object-store", ret["token"]["catalog"]
)
][0]
request.app["Log"].debug(endpoint)
if not uname:
uname = ret["token"]["user"]["name"]
session["projects"][project["id"]] = {
"id": project["id"],
"name": project["name"],
"endpoint": endpoint["url"],
"token": scoped,
"tainted": True if setd["force_restricted_mode"] else False,
}
session["token"] = token
session["uname"] = uname
# The intersection of sdConnectProjects and Allas projects is empty.
# In practice this can happen if there are SD Connect projects that
# don't have Allas enabled.
if not session["projects"]:
request.app["Log"].debug("possible sdConnectProjects and Allas projects mismatch")
raise aiohttp.web.HTTPForbidden(
reason="There are no projects available for this user."
)
session["taint"] = True if taint else False
session.changed()
if taint:
response.headers["Location"] = "/select"
return response
# Redirect to the browse page
if "NAV_TO" in request.cookies.keys():
response.headers["Location"] = request.cookies["NAV_TO"]
response.del_cookie("NAV_TO")
else:
response.headers["Location"] = "/browse"
return response
async def handle_project_lock(request: aiohttp.web.Request) -> aiohttp.web.Response:
"""Lock down to a specific project."""
log = request.app["Log"]
log.info("Call for locking down the project.")
session = await aiohttp_session.get_session(request)
project = request.match_info["project"]
# If a project is specified, ditch all projects that aren't the one specified
if project in session["projects"]:
session["projects"] = dict(
filter(
lambda val: val[0] == project,
session["projects"].items(),
)
)
# If the project doesn't exist, allow all untainted projects
else:
session["projects"] = dict(
filter(lambda val: not val[1]["tainted"], session["projects"].items())
)
if not session["projects"]:
session.invalidate()
raise aiohttp.web.HTTPForbidden(reason="No untainted projects available.")
# The session is no longer tainted if it's been locked
session["taint"] = False
session.changed()
return aiohttp.web.Response(
status=303,
body=None,
headers={
"Location": "/browse",
},
)
async def handle_logout(request: aiohttp.web.Request) -> aiohttp.web.Response:
|
"""Properly kill the session for the user."""
log = request.app["Log"]
client = request.app["api_client"]
if not setd["set_session_devmode"]:
try:
session = await aiohttp_session.get_session(request)
log.info(f"Killing session {session.identity}")
for project in session["projects"]:
async with client.delete(
f"{setd['auth_endpoint_url']}/auth/tokens",
headers={
"X-Auth-Token": session["token"],
"X-Subject-Token": session["projects"][project]["token"],
},
):
pass
session.invalidate()
except aiohttp.web.HTTPUnauthorized:
log.info("Trying to log our an invalidated session")
raise aiohttp.web.HTTPUnauthorized
|
identifier_body
|
|
login.py
|
)
except KeyError as e:
request.app["Log"].error(f"Issuer {oidc_session['iss']} not found: {e}.")
raise aiohttp.web.HTTPBadRequest(reason="Token issuer not found.")
except OidcServiceError as e:
# This exception is raised if RPHandler encounters an error due to:
# 1. "code" is wrong, so token request failed
# 2. token validation failed
# 3. userinfo request failed
request.app["Log"].error(f"OIDC Callback failed with: {e}")
raise aiohttp.web.HTTPBadRequest(reason="Invalid OIDC callback.")
session = await aiohttp_session.new_session(request)
session["at"] = time.time()
session["referer"] = request.url.host
session["oidc"] = {
"userinfo": oidc_result["userinfo"].to_dict(),
"state": oidc_result["state"],
"access_token": oidc_result["token"],
}
csc_projects: typing.List[typing.Any] | None = _get_projects_from_userinfo(
session["oidc"]["userinfo"]
)
# add entry to session only if the OIDC provider has csc-projects in userinfo
if csc_projects is not None:
session["csc-projects"] = csc_projects
request.app["Log"].debug(session["oidc"])
response = aiohttp.web.Response(
status=302, headers={"Location": "/login"}, reason="Redirection to login"
)
if session["oidc"]["userinfo"].get("homeFederation", "") == "Haka":
response.headers["Location"] = HAKA_OIDC_ENDPOINT(
endpoint=str(setd["auth_endpoint_url"]),
oidc=str(setd["keystone_oidc_provider"]),
origin=str(setd["set_origin_address"]),
)
return response
async def
|
(
request: aiohttp.web.Request,
) -> typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]:
"""Create new session cookie for the user."""
response: typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]
response = aiohttp.web.Response(status=302, reason="Redirection to login")
# Add a cookie for navigating
if "navto" in request.query.keys():
response.set_cookie("NAV_TO", request.query["navto"], expires=str(3600))
if setd["oidc_enabled"]:
session = await aiohttp_session.get_session(request)
if "oidc" in session:
response = aiohttp.web.FileResponse(
str(setd["static_directory"]) + "/login2step.html"
)
else:
response.headers["Location"] = "/"
else:
response.headers["Location"] = "/login/front"
return response
async def sso_query_begin(
request: typing.Union[aiohttp.web.Request, None]
) -> typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]:
"""Display login page and initiate federated keystone authentication."""
# Return the form-based login page if the service isn't trusted
response: typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]
if request and setd["oidc_enabled"]:
session = await aiohttp_session.get_session(request)
if "oidc" not in session:
return aiohttp.web.Response(status=302, headers={"Location": "/"})
if not setd["has_trust"]:
response = aiohttp.web.FileResponse(str(setd["static_directory"]) + "/login.html")
return disable_cache(response)
response = aiohttp.web.Response(
status=302,
)
response.headers["Location"] = HAKA_ENDPOINT(
endpoint=str(setd["auth_endpoint_url"]), origin=str(setd["set_origin_address"])
)
return response
async def sso_query_begin_oidc(
request: typing.Union[aiohttp.web.Request, None]
) -> typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]:
"""Initiate a federated Keystone authentication with OIDC."""
response: typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]
if request and setd["oidc_enabled"]:
session = await aiohttp_session.get_session(request)
if "oidc" not in session:
return aiohttp.web.Response(status=302, headers={"Location": "/"})
if not setd["has_trust"]:
response = aiohttp.web.FileResponse(str(setd["static_directory"]) + "/login.html")
return disable_cache(response)
return aiohttp.web.Response(
status=302,
headers={
"Location": HAKA_OIDC_ENDPOINT(
endpoint=str(setd["auth_endpoint_url"]),
oidc=str(setd["keystone_oidc_provider"]),
origin=str(setd["set_origin_address"]),
),
},
)
def test_token(
formdata: MultiDictProxy[typing.Union[str, bytes, aiohttp.web.FileField]],
request: aiohttp.web.Request,
) -> str:
"""Validate unscoped token."""
unscoped: typing.Union[str, None] = None
log = request.app["Log"]
if "token" in formdata:
unscoped = str(formdata["token"])
log.debug(
f"Got OS token in formdata from address {request.remote} :: {time.ctime()}"
)
# Try getting the token id from the query string
if "token" in request.query and unscoped is None:
unscoped = request.query["token"]
log.debug(
"Got OS token in query string "
f"from address {request.remote} :: {time.ctime()}"
)
# Try getting the token id from headers
if "X-Auth-Token" in request.headers and unscoped is None:
unscoped = request.headers["X-Auth-Token"]
log.debug(
"Got OS token in http header "
f"from address {request.remote} :: {time.ctime()}"
)
if unscoped is None:
raise aiohttp.web.HTTPBadRequest(reason="Token missing from query")
if not (re.match("[a-f0-9]{32}", unscoped) and len(unscoped) == 32):
try:
# Check the magic byte matches a fernet token
if not base64.urlsafe_b64decode(unscoped.encode("utf-8"))[:1] == b"\x80":
raise aiohttp.web.HTTPBadRequest(reason="Token is malformed")
# Handle failures in base64decode
except (binascii.Error, UnicodeDecodeError):
raise aiohttp.web.HTTPBadRequest(reason="Token is malformed")
log.info("Got OS token in login return")
return unscoped
async def credentials_login_end(
request: aiohttp.web.Request,
) -> typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]:
"""Handle the login procedure with classic POST."""
log = request.app["Log"]
client = request.app["api_client"]
log.info("Got login request with username, password")
form = await request.post()
try:
username = str(form["username"])
password = str(form["password"])
except KeyError:
raise aiohttp.web.HTTPBadRequest(reason="Username or password not provided")
# Get an unscoped token with credentials
async with client.post(
f"{setd['auth_endpoint_url']}/auth/tokens",
json={
"auth": {
"identity": {
"methods": [
"password",
],
"password": {
"user": {
"name": username,
"domain": {
"name": "Default",
},
"password": password,
},
},
},
"scope": "unscoped",
},
},
) as resp:
if resp.status == 400:
text = await resp.text()
request.app["Log"].debug(text)
raise aiohttp.web.HTTPBadRequest(reason="No username or password provided.")
if resp.status == 401:
text = await resp.text()
request.app["Log"].debug(text)
raise aiohttp.web.HTTPUnauthorized(
reason="Wrong username or password, or no access to the service."
)
if resp.status != 201:
text = await resp.text()
request.app["Log"].debug(text)
raise aiohttp.web.HTTPUnauthorized
unscoped = resp.headers["X-Subject-Token"]
log.debug("Got token in password auth")
return await login_with_token(request, unscoped)
async def sso_query_end(
request: aiohttp.web.Request,
) -> typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]:
"""Handle the login procedure return from SSO or user from POST."""
formdata = await request.post()
# Declare the unscoped token
unscoped = test_token(formdata, request)
return await login_with_token(request, unscoped)
async def login_with_token(
request: aiohttp.web.Request,
token: str,
) -> typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]:
"""Log in a session with token."""
# Establish connection and begin user session
response: typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]
response = aiohttp.web.Response(
status=303,
body=None,
)
client = request.app["api_client"]
session = (
await aiohttp_session.get_session(request)
|
handle_login
|
identifier_name
|
docker.go
|
(g *g) dockerConnect() (*docker, error) {
g.log.Info("connecting to docker engine")
cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
if err != nil {
return nil, errors.Join(ErrEnvClient, err)
}
g.log.Info("connected to docker engine")
return &docker{client: cli, log: g.log}, nil
}
func (d *docker) isExistingLocalImage(ctx context.Context, image string) (bool, error) {
images, err := d.client.ImageList(ctx, types.ImageListOptions{All: true})
if err != nil {
return false, fmt.Errorf("can't list image: %w", err)
}
for _, img := range images {
for _, repoTag := range img.RepoTags {
if image == repoTag {
return true, nil
}
if !strings.Contains(repoTag, "/") {
repoTag = "library/" + repoTag
}
if strings.HasSuffix(image, repoTag) {
return true, nil
}
}
}
return false, nil
}
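// Illustrative sketch (not part of the original file): the suffix matching above lets a
// bare local tag satisfy a fully qualified image reference. Image names here are hypothetical.
func exampleImageMatch() bool {
	requested := "docker.io/library/redis:7"
	local := "redis:7" // as reported in RepoTags by ImageList
	if !strings.Contains(local, "/") {
		local = "library/" + local
	}
	return strings.HasSuffix(requested, local) // true
}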
// The error return is named so that the deferred reader.Close error below is not lost.
func (d *docker) pullImage(ctx context.Context, image string, cfg *Options) (err error) {
d.log.Info("pulling image")
reader, err := d.client.ImagePull(ctx, image, types.ImagePullOptions{
RegistryAuth: cfg.Auth,
})
if err != nil {
return fmt.Errorf("can't pull image: %w", err)
}
defer func() {
closeErr := reader.Close()
if err == nil {
err = closeErr
}
}()
_, err = io.ReadAll(reader)
if err != nil {
return fmt.Errorf("can't read server output: %w", err)
}
d.log.Info("image pulled")
return nil
}
func (d *docker) startContainer(ctx context.Context, image string, ports NamedPorts, cfg *Options) (*Container, error) {
if cfg.Reuse {
container, ok, err := d.findReusableContainer(ctx, image, ports, cfg)
if err != nil {
return nil, err
}
if ok {
d.log.Info("re-using container")
return container, nil
}
}
d.log.Info("starting container")
resp, err := d.prepareContainer(ctx, image, ports, cfg)
if err != nil {
return nil, fmt.Errorf("can't prepare container: %w", err)
}
sidecarChan := d.setupContainerCleanup(resp.ID, cfg)
err = d.client.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{})
if err != nil {
return nil, fmt.Errorf("can't start container %s: %w", resp.ID, err)
}
container, err := d.waitForContainerNetwork(ctx, resp.ID, ports)
if err != nil {
return nil, fmt.Errorf("container network isn't ready: %w", err)
}
if sidecar, ok := <-sidecarChan; ok {
container.ID = generateID(container.ID, sidecar)
}
d.log.Infow("container started", "container", container)
return container, nil
}
func (d *docker) setupContainerCleanup(id string, cfg *Options) chan string {
sidecarChan := make(chan string)
go func() {
defer close(sidecarChan)
if cfg.DisableAutoCleanup || cfg.Reuse || cfg.Debug {
return
}
opts := []Option{
WithDisableAutoCleanup(),
WithHostMounts(dockerSockAddr, dockerSockAddr),
WithHealthCheck(func(ctx context.Context, c *Container) error {
return health.HTTPGet(ctx, c.DefaultAddress())
}),
WithInit(func(ctx context.Context, c *Container) error {
return cleaner.Notify(context.Background(), c.DefaultAddress(), id)
}),
}
if cfg.UseLocalImagesFirst {
opts = append(opts, WithUseLocalImagesFirst())
}
if sc, err := StartCustom(
cleaner.Image, DefaultTCP(cleaner.Port),
opts...,
); err == nil {
sidecarChan <- sc.ID
}
}()
return sidecarChan
}
func (d *docker) prepareContainer(
ctx context.Context,
image string,
ports NamedPorts,
cfg *Options,
) (*container.CreateResponse, error) {
pullImage := true
if cfg.UseLocalImagesFirst {
isExisting, err := d.isExistingLocalImage(ctx, image)
if err != nil {
return nil, fmt.Errorf("can't list image: %w", err)
}
if isExisting {
pullImage = false
}
}
if pullImage {
if err := d.pullImage(ctx, image, cfg); err != nil {
return nil, fmt.Errorf("can't pull image: %w", err)
}
}
|
}
return resp, err
}
func (d *docker) waitForContainerNetwork(ctx context.Context, id string, ports NamedPorts) (*Container, error) {
d.log.Infow("waiting for container network", "container", id)
tick := time.NewTicker(time.Millisecond * 250)
defer tick.Stop()
for {
select {
case <-ctx.Done():
return nil, fmt.Errorf("container network is unavailable after timeout")
case <-tick.C:
containerJSON, err := d.client.ContainerInspect(ctx, id)
if err != nil {
return nil, fmt.Errorf("can't inspect container %s: %w", id, err)
}
boundNamedPorts, err := d.boundNamedPorts(containerJSON, ports)
if err != nil {
return nil, fmt.Errorf("can't find bound ports: %w", err)
}
d.log.Infow("waiting for port allocation", "container", id)
if len(boundNamedPorts) == len(ports) {
return &Container{
ID: id,
Host: d.hostAddr(),
Ports: boundNamedPorts,
gateway: containerJSON.NetworkSettings.Gateway,
}, nil
}
}
}
}
func (d *docker) exposedPorts(namedPorts NamedPorts) nat.PortSet {
exposedPorts := make(nat.PortSet)
for _, port := range namedPorts {
containerPort := fmt.Sprintf("%d/%s", port.Port, port.Protocol)
exposedPorts[nat.Port(containerPort)] = struct{}{}
}
return exposedPorts
}
func (d *docker) portBindings(exposedPorts nat.PortSet, ports NamedPorts) nat.PortMap {
portBindings := make(nat.PortMap)
// For the container to be accessible from another container, it cannot
// listen on 127.0.0.1, as it will be reached via the gateway address (e.g.
// 172.17.0.1), so its port should be exposed on all interfaces.
hostAddr := d.hostAddr()
if isInDocker() {
hostAddr = "0.0.0.0"
}
for port := range exposedPorts {
binding := nat.PortBinding{
HostIP: hostAddr,
}
if pName, err := ports.Find(port.Proto(), port.Int()); err == nil {
namedPort := ports.Get(pName)
if namedPort.HostPort > 0 {
binding.HostPort = strconv.Itoa(namedPort.HostPort)
}
}
portBindings[port] = []nat.PortBinding{binding}
}
return portBindings
}
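// Illustrative sketch (not part of the original file): for a single named port pinned to a
// host port, exposedPorts and portBindings above produce roughly the structures shown in
// the comments below. The port name "web" and the numbers are hypothetical.
func examplePortBindings(d *docker) nat.PortMap {
	ports := NamedPorts{"web": Port{Protocol: "tcp", Port: 80, HostPort: 8080}}
	// exposedPorts: nat.PortSet{"80/tcp": {}}
	// portBindings: nat.PortMap{"80/tcp": {{HostIP: <host addr>, HostPort: "8080"}}}
	return d.portBindings(d.exposedPorts(ports), ports)
}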
func (d *docker) createContainer(
ctx context.Context,
image string,
ports NamedPorts,
cfg *Options,
) (*container.CreateResponse, error) {
exposedPorts := d.exposedPorts(ports)
containerConfig := &container.Config{
Image: image,
ExposedPorts: exposedPorts,
Env: cfg.Env,
}
if len(cfg.Cmd) > 0 {
containerConfig.Cmd = cfg.Cmd
}
if len(cfg.Entrypoint) > 0 {
containerConfig.Entrypoint = cfg.Entrypoint
}
mounts := []mount.Mount{}
for src, dst := range cfg.HostMounts {
mounts = append(mounts, mount.Mount{
Type: mount.TypeBind,
Source: src,
Target: dst,
})
}
portBindings := d.portBindings(exposedPorts, ports)
hostConfig := &container.HostConfig{
PortBindings: portBindings,
AutoRemove: !cfg.Debug,
Privileged: cfg.Privileged,
Mounts: mounts,
ExtraHosts: cfg.ExtraHosts,
}
resp, err := d.client.ContainerCreate(ctx, containerConfig, hostConfig, nil, nil, cfg.ContainerName)
if err == nil {
return &resp, nil
}
matches := duplicateContainerRegexp.FindStringSubmatch(err.Error())
if len(matches) == 2 {
d.log.Infow("duplicate container found, stopping", "container", matches[1])
err = d.client.ContainerRemove(ctx, matches[1], types.ContainerRemoveOptions{
Force: true,
})
if err != nil {
return nil, fmt.Errorf("can't remove existing container: %w", err)
}
|
resp, err := d.createContainer(ctx, image, ports, cfg)
if err != nil {
return nil, fmt.Errorf("can't create container: %w", err)
|
random_line_split
|
docker.go
|
); err == nil {
sidecarChan <- sc.ID
}
}()
return sidecarChan
}
func (d *docker) prepareContainer(
ctx context.Context,
image string,
ports NamedPorts,
cfg *Options,
) (*container.CreateResponse, error) {
pullImage := true
if cfg.UseLocalImagesFirst {
isExisting, err := d.isExistingLocalImage(ctx, image)
if err != nil {
return nil, fmt.Errorf("can't list image: %w", err)
}
if isExisting {
pullImage = false
}
}
if pullImage {
if err := d.pullImage(ctx, image, cfg); err != nil {
return nil, fmt.Errorf("can't pull image: %w", err)
}
}
resp, err := d.createContainer(ctx, image, ports, cfg)
if err != nil {
return nil, fmt.Errorf("can't create container: %w", err)
}
return resp, err
}
func (d *docker) waitForContainerNetwork(ctx context.Context, id string, ports NamedPorts) (*Container, error) {
d.log.Infow("waiting for container network", "container", id)
tick := time.NewTicker(time.Millisecond * 250)
defer tick.Stop()
for {
select {
case <-ctx.Done():
return nil, fmt.Errorf("container network is unavailable after timeout")
case <-tick.C:
containerJSON, err := d.client.ContainerInspect(ctx, id)
if err != nil {
return nil, fmt.Errorf("can't inspect container %s: %w", id, err)
}
boundNamedPorts, err := d.boundNamedPorts(containerJSON, ports)
if err != nil {
return nil, fmt.Errorf("can't find bound ports: %w", err)
}
d.log.Infow("waiting for port allocation", "container", id)
if len(boundNamedPorts) == len(ports) {
return &Container{
ID: id,
Host: d.hostAddr(),
Ports: boundNamedPorts,
gateway: containerJSON.NetworkSettings.Gateway,
}, nil
}
}
}
}
func (d *docker) exposedPorts(namedPorts NamedPorts) nat.PortSet {
exposedPorts := make(nat.PortSet)
for _, port := range namedPorts {
containerPort := fmt.Sprintf("%d/%s", port.Port, port.Protocol)
exposedPorts[nat.Port(containerPort)] = struct{}{}
}
return exposedPorts
}
func (d *docker) portBindings(exposedPorts nat.PortSet, ports NamedPorts) nat.PortMap {
portBindings := make(nat.PortMap)
// for the container to be accessible from another container, it cannot
// listen on 127.0.0.1 as it will be accessed by gateway address (e.g
// 172.17.0.1), so its port should be exposed everywhere
hostAddr := d.hostAddr()
if isInDocker() {
hostAddr = "0.0.0.0"
}
for port := range exposedPorts {
binding := nat.PortBinding{
HostIP: hostAddr,
}
if pName, err := ports.Find(port.Proto(), port.Int()); err == nil {
namedPort := ports.Get(pName)
if namedPort.HostPort > 0 {
binding.HostPort = strconv.Itoa(namedPort.HostPort)
}
}
portBindings[port] = []nat.PortBinding{binding}
}
return portBindings
}
func (d *docker) createContainer(
ctx context.Context,
image string,
ports NamedPorts,
cfg *Options,
) (*container.CreateResponse, error) {
exposedPorts := d.exposedPorts(ports)
containerConfig := &container.Config{
Image: image,
ExposedPorts: exposedPorts,
Env: cfg.Env,
}
if len(cfg.Cmd) > 0 {
containerConfig.Cmd = cfg.Cmd
}
if len(cfg.Entrypoint) > 0 {
containerConfig.Entrypoint = cfg.Entrypoint
}
mounts := []mount.Mount{}
for src, dst := range cfg.HostMounts {
mounts = append(mounts, mount.Mount{
Type: mount.TypeBind,
Source: src,
Target: dst,
})
}
portBindings := d.portBindings(exposedPorts, ports)
hostConfig := &container.HostConfig{
PortBindings: portBindings,
AutoRemove: !cfg.Debug,
Privileged: cfg.Privileged,
Mounts: mounts,
ExtraHosts: cfg.ExtraHosts,
}
resp, err := d.client.ContainerCreate(ctx, containerConfig, hostConfig, nil, nil, cfg.ContainerName)
if err == nil {
return &resp, nil
}
matches := duplicateContainerRegexp.FindStringSubmatch(err.Error())
if len(matches) == 2 {
d.log.Infow("duplicate container found, stopping", "container", matches[1])
err = d.client.ContainerRemove(ctx, matches[1], types.ContainerRemoveOptions{
Force: true,
})
if err != nil {
return nil, fmt.Errorf("can't remove existing container: %w", err)
}
resp, err = d.client.ContainerCreate(ctx, containerConfig, hostConfig, nil, nil, cfg.ContainerName)
}
return &resp, err
}
func (d *docker) findReusableContainer(
ctx context.Context,
image string,
ports NamedPorts,
cfg *Options,
) (*Container, bool, error) {
if cfg.ContainerName == "" {
return nil, false, fmt.Errorf("container name is required when container reuse is enabled")
}
list, err := d.client.ContainerList(ctx, types.ContainerListOptions{
Filters: filters.NewArgs(
filters.Arg("name", cfg.ContainerName),
filters.Arg("ancestor", image),
filters.Arg("status", "running"),
),
})
if err != nil || len(list) < 1 {
return nil, false, err
}
container, err := d.waitForContainerNetwork(ctx, list[0].ID, ports)
if err != nil {
return nil, false, err
}
return container, true, nil
}
func (d *docker) boundNamedPorts(json types.ContainerJSON, namedPorts NamedPorts) (NamedPorts, error) {
boundNamedPorts := make(NamedPorts)
for containerPort, bindings := range json.NetworkSettings.Ports {
if len(bindings) == 0 {
continue
}
hostPortNum, err := strconv.Atoi(bindings[0].HostPort)
if err != nil {
return nil, fmt.Errorf("invalid host port value '%s': %w", bindings[0].HostPort, err)
}
proto, intPort := containerPort.Proto(), containerPort.Int()
portName, err := namedPorts.Find(proto, intPort)
if err != nil {
return nil, fmt.Errorf("can't find port %s/%d: %w", proto, intPort, err)
}
boundNamedPorts[portName] = Port{
Protocol: proto,
Port: hostPortNum,
}
}
return boundNamedPorts, nil
}
func (d *docker) readLogs(ctx context.Context, id string) (io.ReadCloser, error) {
d.log.Info("starting container logs forwarder")
logsOptions := types.ContainerLogsOptions{
ShowStderr: true, ShowStdout: true, Follow: true,
}
rc, err := d.client.ContainerLogs(ctx, id, logsOptions)
if err != nil {
return nil, fmt.Errorf("can't read logs: %w", err)
}
d.log.Info("container logs forwarder ready")
return rc, nil
}
func (d *docker) stopContainer(ctx context.Context, id string) error {
d.lock.Lock()
defer d.lock.Unlock()
stopTimeout := defaultStopTimeoutSec
err := d.client.ContainerStop(ctx, id, container.StopOptions{
Timeout: &stopTimeout,
})
if err != nil && !client.IsErrNotFound(err) {
return fmt.Errorf("can't stop container %s: %w", id, err)
}
return nil
}
func (d *docker) removeContainer(ctx context.Context, id string) error {
d.lock.Lock()
defer d.lock.Unlock()
err := d.client.ContainerRemove(ctx, id, types.ContainerRemoveOptions{Force: true})
if err != nil && !client.IsErrNotFound(err) && !isDeletionAlreadyInProgessError(err, id) {
return fmt.Errorf("can't remove container %s: %w", id, err)
}
return nil
}
// hostAddr returns the address of the host that runs the containers. If the
// DOCKER_HOST environment variable is not set, its value is an invalid URL,
// or it points to a `unix:///` socket, the local address is returned.
func (d *docker) hostAddr() string {
if dh := os.Getenv("DOCKER_HOST"); dh != "" {
u, err := url.Parse(dh)
if err == nil {
if host := u.Hostname(); host != "" {
return host
}
}
}
return localhostAddr
}
func
|
isDeletionAlreadyInProgessError
|
identifier_name
|
|
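hostAddr in the row above only trusts DOCKER_HOST when it parses as a URL with a non-empty hostname, which is what makes unix:// socket values fall back to the local address. A standalone sketch of the same decision follows; localhostAddr here is a stand-in for the package-level constant.

package main

import (
	"fmt"
	"net/url"
	"os"
)

const localhostAddr = "127.0.0.1" // stand-in for the package constant

// hostFromDockerHost mirrors the fallback logic: anything without a usable
// hostname (unset, unparsable, or unix:///var/run/docker.sock) maps to localhost.
func hostFromDockerHost() string {
	if dh := os.Getenv("DOCKER_HOST"); dh != "" {
		if u, err := url.Parse(dh); err == nil {
			if host := u.Hostname(); host != "" {
				return host
			}
		}
	}
	return localhostAddr
}

func main() {
	os.Setenv("DOCKER_HOST", "tcp://192.168.99.100:2376")
	fmt.Println(hostFromDockerHost()) // 192.168.99.100

	os.Setenv("DOCKER_HOST", "unix:///var/run/docker.sock")
	fmt.Println(hostFromDockerHost()) // 127.0.0.1
}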
docker.go
|
(g *g) dockerConnect() (*docker, error) {
g.log.Info("connecting to docker engine")
cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
if err != nil {
return nil, errors.Join(ErrEnvClient, err)
}
g.log.Info("connected to docker engine")
return &docker{client: cli, log: g.log}, nil
}
func (d *docker) isExistingLocalImage(ctx context.Context, image string) (bool, error) {
images, err := d.client.ImageList(ctx, types.ImageListOptions{All: true})
if err != nil {
return false, fmt.Errorf("can't list image: %w", err)
}
for _, img := range images {
for _, repoTag := range img.RepoTags {
if image == repoTag {
return true, nil
}
if !strings.Contains(repoTag, "/") {
repoTag = "library/" + repoTag
}
if strings.HasSuffix(image, repoTag) {
return true, nil
}
}
}
return false, nil
}
func (d *docker) pullImage(ctx context.Context, image string, cfg *Options) error {
d.log.Info("pulling image")
reader, err := d.client.ImagePull(ctx, image, types.ImagePullOptions{
RegistryAuth: cfg.Auth,
})
if err != nil {
return fmt.Errorf("can't pull image: %w", err)
}
defer func() {
closeErr := reader.Close()
if err == nil {
err = closeErr
}
}()
_, err = io.ReadAll(reader)
if err != nil {
return fmt.Errorf("can't read server output: %w", err)
}
d.log.Info("image pulled")
return nil
}
func (d *docker) startContainer(ctx context.Context, image string, ports NamedPorts, cfg *Options) (*Container, error)
|
sidecarChan := d.setupContainerCleanup(resp.ID, cfg)
err = d.client.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{})
if err != nil {
return nil, fmt.Errorf("can't start container %s: %w", resp.ID, err)
}
container, err := d.waitForContainerNetwork(ctx, resp.ID, ports)
if err != nil {
return nil, fmt.Errorf("container network isn't ready: %w", err)
}
if sidecar, ok := <-sidecarChan; ok {
container.ID = generateID(container.ID, sidecar)
}
d.log.Infow("container started", "container", container)
return container, nil
}
func (d *docker) setupContainerCleanup(id string, cfg *Options) chan string {
sidecarChan := make(chan string)
go func() {
defer close(sidecarChan)
if cfg.DisableAutoCleanup || cfg.Reuse || cfg.Debug {
return
}
opts := []Option{
WithDisableAutoCleanup(),
WithHostMounts(dockerSockAddr, dockerSockAddr),
WithHealthCheck(func(ctx context.Context, c *Container) error {
return health.HTTPGet(ctx, c.DefaultAddress())
}),
WithInit(func(ctx context.Context, c *Container) error {
return cleaner.Notify(context.Background(), c.DefaultAddress(), id)
}),
}
if cfg.UseLocalImagesFirst {
opts = append(opts, WithUseLocalImagesFirst())
}
if sc, err := StartCustom(
cleaner.Image, DefaultTCP(cleaner.Port),
opts...,
); err == nil {
sidecarChan <- sc.ID
}
}()
return sidecarChan
}
func (d *docker) prepareContainer(
ctx context.Context,
image string,
ports NamedPorts,
cfg *Options,
) (*container.CreateResponse, error) {
pullImage := true
if cfg.UseLocalImagesFirst {
isExisting, err := d.isExistingLocalImage(ctx, image)
if err != nil {
return nil, fmt.Errorf("can't list image: %w", err)
}
if isExisting {
pullImage = false
}
}
if pullImage {
if err := d.pullImage(ctx, image, cfg); err != nil {
return nil, fmt.Errorf("can't pull image: %w", err)
}
}
resp, err := d.createContainer(ctx, image, ports, cfg)
if err != nil {
return nil, fmt.Errorf("can't create container: %w", err)
}
return resp, err
}
func (d *docker) waitForContainerNetwork(ctx context.Context, id string, ports NamedPorts) (*Container, error) {
d.log.Infow("waiting for container network", "container", id)
tick := time.NewTicker(time.Millisecond * 250)
defer tick.Stop()
for {
select {
case <-ctx.Done():
return nil, fmt.Errorf("container network is unavailable after timeout")
case <-tick.C:
containerJSON, err := d.client.ContainerInspect(ctx, id)
if err != nil {
return nil, fmt.Errorf("can't inspect container %s: %w", id, err)
}
boundNamedPorts, err := d.boundNamedPorts(containerJSON, ports)
if err != nil {
return nil, fmt.Errorf("can't find bound ports: %w", err)
}
d.log.Infow("waiting for port allocation", "container", id)
if len(boundNamedPorts) == len(ports) {
return &Container{
ID: id,
Host: d.hostAddr(),
Ports: boundNamedPorts,
gateway: containerJSON.NetworkSettings.Gateway,
}, nil
}
}
}
}
func (d *docker) exposedPorts(namedPorts NamedPorts) nat.PortSet {
exposedPorts := make(nat.PortSet)
for _, port := range namedPorts {
containerPort := fmt.Sprintf("%d/%s", port.Port, port.Protocol)
exposedPorts[nat.Port(containerPort)] = struct{}{}
}
return exposedPorts
}
func (d *docker) portBindings(exposedPorts nat.PortSet, ports NamedPorts) nat.PortMap {
portBindings := make(nat.PortMap)
// for the container to be accessible from another container, it cannot
// listen on 127.0.0.1 as it will be accessed by gateway address (e.g
// 172.17.0.1), so its port should be exposed everywhere
hostAddr := d.hostAddr()
if isInDocker() {
hostAddr = "0.0.0.0"
}
for port := range exposedPorts {
binding := nat.PortBinding{
HostIP: hostAddr,
}
if pName, err := ports.Find(port.Proto(), port.Int()); err == nil {
namedPort := ports.Get(pName)
if namedPort.HostPort > 0 {
binding.HostPort = strconv.Itoa(namedPort.HostPort)
}
}
portBindings[port] = []nat.PortBinding{binding}
}
return portBindings
}
func (d *docker) createContainer(
ctx context.Context,
image string,
ports NamedPorts,
cfg *Options,
) (*container.CreateResponse, error) {
exposedPorts := d.exposedPorts(ports)
containerConfig := &container.Config{
Image: image,
ExposedPorts: exposedPorts,
Env: cfg.Env,
}
if len(cfg.Cmd) > 0 {
containerConfig.Cmd = cfg.Cmd
}
if len(cfg.Entrypoint) > 0 {
containerConfig.Entrypoint = cfg.Entrypoint
}
mounts := []mount.Mount{}
for src, dst := range cfg.HostMounts {
mounts = append(mounts, mount.Mount{
Type: mount.TypeBind,
Source: src,
Target: dst,
})
}
portBindings := d.portBindings(exposedPorts, ports)
hostConfig := &container.HostConfig{
PortBindings: portBindings,
AutoRemove: !cfg.Debug,
Privileged: cfg.Privileged,
Mounts: mounts,
ExtraHosts: cfg.ExtraHosts,
}
resp, err := d.client.ContainerCreate(ctx, containerConfig, hostConfig, nil, nil, cfg.ContainerName)
if err == nil {
return &resp, nil
}
matches := duplicateContainerRegexp.FindStringSubmatch(err.Error())
if len(matches) == 2 {
d.log.Infow("duplicate container found, stopping", "container", matches[1])
err = d.client.ContainerRemove(ctx, matches[1], types.ContainerRemoveOptions{
Force: true,
})
if err != nil {
return nil, fmt.Errorf("can't remove existing container: %w", err)
}
|
{
if cfg.Reuse {
container, ok, err := d.findReusableContainer(ctx, image, ports, cfg)
if err != nil {
return nil, err
}
if ok {
d.log.Info("re-using container")
return container, nil
}
}
d.log.Info("starting container")
resp, err := d.prepareContainer(ctx, image, ports, cfg)
if err != nil {
return nil, fmt.Errorf("can't prepare container: %w", err)
}
|
identifier_body
|
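waitForContainerNetwork in the row above is a classic poll-with-deadline loop: a ticker paces the inspection calls and the context bounds the total wait. A stripped-down, dependency-free sketch of the same shape is shown below; checkReady is a hypothetical stand-in for ContainerInspect plus the bound-port comparison.

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// pollUntilReady re-runs checkReady every 250ms until it succeeds or ctx expires.
func pollUntilReady(ctx context.Context, checkReady func(context.Context) (bool, error)) error {
	tick := time.NewTicker(250 * time.Millisecond)
	defer tick.Stop()
	for {
		select {
		case <-ctx.Done():
			return errors.New("not ready before timeout")
		case <-tick.C:
			ok, err := checkReady(ctx)
			if err != nil {
				return err
			}
			if ok {
				return nil
			}
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	start := time.Now()
	err := pollUntilReady(ctx, func(context.Context) (bool, error) {
		// hypothetical readiness probe: reports ready after roughly 600ms
		return time.Since(start) > 600*time.Millisecond, nil
	})
	fmt.Println("ready:", err == nil)
}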
docker.go
|
(g *g) dockerConnect() (*docker, error) {
g.log.Info("connecting to docker engine")
cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
if err != nil {
return nil, errors.Join(ErrEnvClient, err)
}
g.log.Info("connected to docker engine")
return &docker{client: cli, log: g.log}, nil
}
func (d *docker) isExistingLocalImage(ctx context.Context, image string) (bool, error) {
images, err := d.client.ImageList(ctx, types.ImageListOptions{All: true})
if err != nil {
return false, fmt.Errorf("can't list image: %w", err)
}
for _, img := range images {
for _, repoTag := range img.RepoTags {
if image == repoTag {
return true, nil
}
if !strings.Contains(repoTag, "/") {
repoTag = "library/" + repoTag
}
if strings.HasSuffix(image, repoTag) {
return true, nil
}
}
}
return false, nil
}
func (d *docker) pullImage(ctx context.Context, image string, cfg *Options) error {
d.log.Info("pulling image")
reader, err := d.client.ImagePull(ctx, image, types.ImagePullOptions{
RegistryAuth: cfg.Auth,
})
if err != nil {
return fmt.Errorf("can't pull image: %w", err)
}
defer func() {
closeErr := reader.Close()
if err == nil {
err = closeErr
}
}()
_, err = io.ReadAll(reader)
if err != nil {
return fmt.Errorf("can't read server output: %w", err)
}
d.log.Info("image pulled")
return nil
}
func (d *docker) startContainer(ctx context.Context, image string, ports NamedPorts, cfg *Options) (*Container, error) {
if cfg.Reuse {
container, ok, err := d.findReusableContainer(ctx, image, ports, cfg)
if err != nil {
return nil, err
}
if ok {
d.log.Info("re-using container")
return container, nil
}
}
d.log.Info("starting container")
resp, err := d.prepareContainer(ctx, image, ports, cfg)
if err != nil {
return nil, fmt.Errorf("can't prepare container: %w", err)
}
sidecarChan := d.setupContainerCleanup(resp.ID, cfg)
err = d.client.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{})
if err != nil {
return nil, fmt.Errorf("can't start container %s: %w", resp.ID, err)
}
container, err := d.waitForContainerNetwork(ctx, resp.ID, ports)
if err != nil {
return nil, fmt.Errorf("container network isn't ready: %w", err)
}
if sidecar, ok := <-sidecarChan; ok {
container.ID = generateID(container.ID, sidecar)
}
d.log.Infow("container started", "container", container)
return container, nil
}
func (d *docker) setupContainerCleanup(id string, cfg *Options) chan string {
sidecarChan := make(chan string)
go func() {
defer close(sidecarChan)
if cfg.DisableAutoCleanup || cfg.Reuse || cfg.Debug {
return
}
opts := []Option{
WithDisableAutoCleanup(),
WithHostMounts(dockerSockAddr, dockerSockAddr),
WithHealthCheck(func(ctx context.Context, c *Container) error {
return health.HTTPGet(ctx, c.DefaultAddress())
}),
WithInit(func(ctx context.Context, c *Container) error {
return cleaner.Notify(context.Background(), c.DefaultAddress(), id)
}),
}
if cfg.UseLocalImagesFirst {
opts = append(opts, WithUseLocalImagesFirst())
}
if sc, err := StartCustom(
cleaner.Image, DefaultTCP(cleaner.Port),
opts...,
); err == nil {
sidecarChan <- sc.ID
}
}()
return sidecarChan
}
func (d *docker) prepareContainer(
ctx context.Context,
image string,
ports NamedPorts,
cfg *Options,
) (*container.CreateResponse, error) {
pullImage := true
if cfg.UseLocalImagesFirst {
isExisting, err := d.isExistingLocalImage(ctx, image)
if err != nil {
return nil, fmt.Errorf("can't list image: %w", err)
}
if isExisting {
pullImage = false
}
}
if pullImage {
if err := d.pullImage(ctx, image, cfg); err != nil {
return nil, fmt.Errorf("can't pull image: %w", err)
}
}
resp, err := d.createContainer(ctx, image, ports, cfg)
if err != nil {
return nil, fmt.Errorf("can't create container: %w", err)
}
return resp, err
}
func (d *docker) waitForContainerNetwork(ctx context.Context, id string, ports NamedPorts) (*Container, error) {
d.log.Infow("waiting for container network", "container", id)
tick := time.NewTicker(time.Millisecond * 250)
defer tick.Stop()
for {
select {
case <-ctx.Done():
return nil, fmt.Errorf("container network is unavailable after timeout")
case <-tick.C:
containerJSON, err := d.client.ContainerInspect(ctx, id)
if err != nil {
return nil, fmt.Errorf("can't inspect container %s: %w", id, err)
}
boundNamedPorts, err := d.boundNamedPorts(containerJSON, ports)
if err != nil {
return nil, fmt.Errorf("can't find bound ports: %w", err)
}
d.log.Infow("waiting for port allocation", "container", id)
if len(boundNamedPorts) == len(ports) {
return &Container{
ID: id,
Host: d.hostAddr(),
Ports: boundNamedPorts,
gateway: containerJSON.NetworkSettings.Gateway,
}, nil
}
}
}
}
func (d *docker) exposedPorts(namedPorts NamedPorts) nat.PortSet {
exposedPorts := make(nat.PortSet)
for _, port := range namedPorts {
containerPort := fmt.Sprintf("%d/%s", port.Port, port.Protocol)
exposedPorts[nat.Port(containerPort)] = struct{}{}
}
return exposedPorts
}
func (d *docker) portBindings(exposedPorts nat.PortSet, ports NamedPorts) nat.PortMap {
portBindings := make(nat.PortMap)
// for the container to be accessible from another container, it cannot
// listen on 127.0.0.1 as it will be accessed by gateway address (e.g
// 172.17.0.1), so its port should be exposed everywhere
hostAddr := d.hostAddr()
if isInDocker() {
hostAddr = "0.0.0.0"
}
for port := range exposedPorts {
binding := nat.PortBinding{
HostIP: hostAddr,
}
if pName, err := ports.Find(port.Proto(), port.Int()); err == nil {
namedPort := ports.Get(pName)
if namedPort.HostPort > 0 {
binding.HostPort = strconv.Itoa(namedPort.HostPort)
}
}
portBindings[port] = []nat.PortBinding{binding}
}
return portBindings
}
func (d *docker) createContainer(
ctx context.Context,
image string,
ports NamedPorts,
cfg *Options,
) (*container.CreateResponse, error) {
exposedPorts := d.exposedPorts(ports)
containerConfig := &container.Config{
Image: image,
ExposedPorts: exposedPorts,
Env: cfg.Env,
}
if len(cfg.Cmd) > 0 {
containerConfig.Cmd = cfg.Cmd
}
if len(cfg.Entrypoint) > 0 {
containerConfig.Entrypoint = cfg.Entrypoint
}
mounts := []mount.Mount{}
for src, dst := range cfg.HostMounts {
mounts = append(mounts, mount.Mount{
Type: mount.TypeBind,
Source: src,
Target: dst,
})
}
portBindings := d.portBindings(exposedPorts, ports)
hostConfig := &container.HostConfig{
PortBindings: portBindings,
AutoRemove: !cfg.Debug,
Privileged: cfg.Privileged,
Mounts: mounts,
ExtraHosts: cfg.ExtraHosts,
}
resp, err := d.client.ContainerCreate(ctx, containerConfig, hostConfig, nil, nil, cfg.ContainerName)
if err == nil
|
matches := duplicateContainerRegexp.FindStringSubmatch(err.Error())
if len(matches) == 2 {
d.log.Infow("duplicate container found, stopping", "container", matches[1])
err = d.client.ContainerRemove(ctx, matches[1], types.ContainerRemoveOptions{
Force: true,
})
if err != nil {
return nil, fmt.Errorf("can't remove existing container: %w", err)
}
|
{
return &resp, nil
}
|
conditional_block
|
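The local-image check in the preceding row normalizes bare repo tags (no registry or namespace) with a library/ prefix and then does a suffix match, so a request such as docker.io/library/redis:6 is satisfied by a locally tagged redis:6. A self-contained sketch of just that matching rule:

package main

import (
	"fmt"
	"strings"
)

// matchesLocalTag reports whether a locally stored repo tag satisfies the
// requested image reference, using the same normalization as isExistingLocalImage.
func matchesLocalTag(image, repoTag string) bool {
	if image == repoTag {
		return true
	}
	if !strings.Contains(repoTag, "/") {
		repoTag = "library/" + repoTag
	}
	return strings.HasSuffix(image, repoTag)
}

func main() {
	fmt.Println(matchesLocalTag("docker.io/library/redis:6", "redis:6"))     // true
	fmt.Println(matchesLocalTag("ghcr.io/acme/tool:1.0", "acme/tool:1.0"))   // true
	fmt.Println(matchesLocalTag("docker.io/library/redis:6", "postgres:13")) // false
}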
amqp_transport.py
|
max permissible number of channels per
connection. Defaults to 128.
"""
__slots__ = [
'host', 'port', 'secure', 'vhost', 'reconnect_attempts', 'retry_delay',
'timeout', 'heartbeat_timeout', 'blocked_connection_timeout', 'creds'
]
def __init__(self, host='127.0.0.1', port='5672', creds=None,
secure=False, vhost='/', reconnect_attempts=5,
retry_delay=2.0, timeout=120, blocked_connection_timeout=None,
heartbeat_timeout=60, channel_max=128):
"""Constructor."""
self.host = host
self.port = port
self.secure = secure
self.vhost = vhost
self.reconnect_attempts = reconnect_attempts
self.retry_delay = retry_delay
self.timeout = timeout
self.blocked_connection_timeout = blocked_connection_timeout
self.heartbeat_timeout = heartbeat_timeout
self.channel_max = channel_max
if creds is None:
creds = Credentials()
super(ConnectionParameters, self).__init__(
host=host,
port=str(port),
credentials=creds,
connection_attempts=reconnect_attempts,
retry_delay=retry_delay,
blocked_connection_timeout=blocked_connection_timeout,
socket_timeout=timeout,
virtual_host=vhost,
heartbeat=heartbeat_timeout,
channel_max=channel_max)
def __str__(self):
_properties = {
'host': self.host,
'port': self.port,
'vhost': self.vhost,
'reconnect_attempts': self.reconnect_attempts,
'retry_delay': self.retry_delay,
'timeout': self.timeout,
'blocked_connection_timeout': self.blocked_connection_timeout,
'heartbeat_timeout': self.heartbeat_timeout,
'channel_max': self.channel_max
}
_str = json.dumps(_properties)
return _str
class AMQPConnection(pika.BlockingConnection):
"""Connection. Thin wrapper around pika.BlockingConnection"""
def __init__(self, conn_params):
self._connection_params = conn_params
self._pika_connection = None
super(AMQPConnection, self).__init__(
parameters=self._connection_params)
class ExchangeTypes(object):
"""AMQP Exchange Types."""
Topic = 'topic'
Direct = 'direct'
Fanout = 'fanout'
Default = ''
class Credentials(pika.PlainCredentials):
"""Connection credentials for authn/authz.
Args:
username (str): The username.
password (str): The password (Basic Authentication).
"""
__slots__ = ['username', 'password']
def __init__(self, username='guest', password='guest'):
"""Constructor."""
super(Credentials, self).__init__(username=username, password=password)
class AMQPTransportSync(object):
"""Broker Interface.
Implements commonly used functionalities. Base class of high-level
implementations such as SubscriberSync and RpcServer.
"""
def __init__(self, *args, **kwargs):
"""Constructor."""
self._connection = None
self._channel = None
self._closing = False
self._debug = False
self.logger = None
if 'logger' in kwargs:
self.logger = kwargs.pop('logger')
else:
self.logger = create_logger('{}-{}'.format(
self.__class__.__name__, self._name))
if 'debug' in kwargs:
self.debug = kwargs.pop('debug')
else:
self.debug = False
if 'connection_params' in kwargs:
self.connection_params = kwargs.pop('connection_params')
else:
# Default Connection Parameters
self.connection_params = ConnectionParameters()
if 'creds' in kwargs:
self.credentials = kwargs.pop('creds')
|
self.connection_params.credentials = self.credentials
else:
self.credentials = self.connection_params.credentials
# So that connections do not go zombie
atexit.register(self._graceful_shutdown)
@property
def channel(self):
return self._channel
@property
def connection(self):
return self._connection
@property
def debug(self):
"""Debug mode flag."""
return self._debug
@debug.setter
def debug(self, val):
if not isinstance(val, bool):
raise TypeError('Value should be boolean')
self._debug = val
if self._debug is True:
self.logger.setLevel(LoggingLevel.DEBUG)
else:
self.logger.setLevel(LoggingLevel.INFO)
def connect(self):
"""Connect to the AMQP broker. Creates a new channel."""
if self._connection is not None:
self.logger.debug('Using already existing connection [{}]'.format(
self._connection))
# Create a new communication channel
self._channel = self._connection.channel()
return True
try:
# Create a new connection
self.logger.debug(
'Connecting to AMQP broker @ [{}:{}, vhost={}]...'.format(
self.connection_params.host,
self.connection_params.port,
self.connection_params.vhost))
self.logger.debug('Connection parameters:')
self.logger.debug(self.connection_params)
self._connection = AMQPConnection(self.connection_params)
# Create a new communication channel
self._channel = self._connection.channel()
self.logger.info(
'Connected to AMQP broker @ [{}:{}, vhost={}]'.format(
self.connection_params.host,
self.connection_params.port,
self.connection_params.vhost))
except pika.exceptions.ConnectionClosed:
self.logger.debug('Connection timed out. Reconnecting...')
return self.connect()
except pika.exceptions.AMQPConnectionError:
self.logger.debug('Connection error. Reconnecting...')
return self.connect()
except Exception as exc:
self.logger.exception('')
raise (exc)
return self._channel
def process_amqp_events(self):
"""Force process amqp events, such as heartbeat packages."""
self.connection.process_data_events()
def _signal_handler(self, signum, frame):
self.logger.info('Signal received: {}'.format(signum))
self._graceful_shutdown()
def _graceful_shutdown(self):
if not self.connection:
return
if self._channel.is_closed:
# self.logger.warning('Channel is already closed')
return
self.logger.debug('Invoking a graceful shutdown...')
self._channel.stop_consuming()
self._channel.close()
self.logger.debug('Channel closed!')
def exchange_exists(self, exchange_name):
resp = self._channel.exchange_declare(
exchange=exchange_name,
passive=True, # Only check that the exchange exists; do not declare it
)
self.logger.debug('Exchange exists result: {}'.format(resp))
return resp
def create_exchange(self, exchange_name, exchange_type, internal=None):
"""
Create a new exchange.
@param exchange_name: The name of the exchange (e.g. com.logging).
@type exchange_name: string
@param exchange_type: The type of the exchange (e.g. 'topic').
@type exchange_type: string
"""
self._channel.exchange_declare(
exchange=exchange_name,
durable=True, # Survive reboot
passive=False, # Actually declare the exchange, not just an existence check
internal=internal, # Can only be published to by other exchanges
exchange_type=exchange_type
)
self.logger.debug('Created exchange: [name={}, type={}]'.format(
exchange_name, exchange_type))
def create_queue(self, queue_name='', exclusive=True, queue_size=10,
message_ttl=60000, overflow_behaviour='drop-head',
expires=600000):
"""
Create a new queue.
@param queue_name: The name of the queue.
@type queue_name: string
@param exclusive: Only allow access by the current connection.
@type exclusive: bool
@param queue_size: The size of the queue
@type queue_size: int
@param message_ttl: Per-queue message time-to-live
(https://www.rabbitmq.com/ttl.html#per-queue-message-ttl)
@type message_ttl: int
@param overflow_behaviour: Overflow behaviour - 'drop-head' ||
'reject-publish'.
https://www.rabbitmq.com/maxlength.html#overflow-behaviour
@type overflow_behaviour: str
@param expires: Queues will expire after a period of time only
when they are not used (e.g. do not have consumers).
This feature can be used together with the auto-delete
queue property. The value is expressed in milliseconds (ms).
Default value is 10 minutes.
https://www.rabbitmq.com/ttl.html#queue-ttl
"""
args = {
'x-max-length': queue_size,
'x-overflow': overflow_behaviour,
'x-message-ttl': message_ttl,
'x-expires': expires
}
result = self._channel.queue_declare(
exclusive=exclusive,
queue=queue_name,
durable=False,
auto_delete=True,
arguments=args)
queue_name = result.method.queue
self.logger.debug('Created queue [{}] [size={}, ttl={}]'.format(
queue_name, queue_size, message_ttl))
return queue_name
def delete_queue(self, queue_name):
self._channel.queue_delete(queue=queue_name)
def _queue_exists
|
random_line_split
|
|
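create_queue in the row above passes the queue-length, overflow, TTL, and expiry policies as x- arguments on queue_declare. For comparison, here is a rough Go analogue; the amqp091-go client, broker URL, and credentials are assumptions made for the sketch (the original code uses Python's pika), but the declared arguments mirror the Python defaults.

package main

import (
	"log"

	amqp "github.com/rabbitmq/amqp091-go"
)

func main() {
	// Assumed local broker with default credentials, matching the defaults above.
	conn, err := amqp.Dial("amqp://guest:guest@127.0.0.1:5672/")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	ch, err := conn.Channel()
	if err != nil {
		log.Fatal(err)
	}
	defer ch.Close()

	// Same per-queue policies as the Python create_queue defaults.
	args := amqp.Table{
		"x-max-length":  10,
		"x-overflow":    "drop-head",
		"x-message-ttl": 60000,  // ms
		"x-expires":     600000, // ms: queue is dropped after 10 idle minutes
	}
	q, err := ch.QueueDeclare(
		"",    // server-generated name
		false, // durable
		true,  // auto-delete
		true,  // exclusive
		false, // no-wait
		args,
	)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("declared queue %s", q.Name)
}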
amqp_transport.py
|
"""Constructor."""
self.host = host
self.port = port
self.secure = secure
self.vhost = vhost
self.reconnect_attempts = reconnect_attempts
self.retry_delay = retry_delay
self.timeout = timeout
self.blocked_connection_timeout = blocked_connection_timeout
self.heartbeat_timeout = heartbeat_timeout
self.channel_max = channel_max
if creds is None:
creds = Credentials()
super(ConnectionParameters, self).__init__(
host=host,
port=str(port),
credentials=creds,
connection_attempts=reconnect_attempts,
retry_delay=retry_delay,
blocked_connection_timeout=blocked_connection_timeout,
socket_timeout=timeout,
virtual_host=vhost,
heartbeat=heartbeat_timeout,
channel_max=channel_max)
def __str__(self):
_properties = {
'host': self.host,
'port': self.port,
'vhost': self.vhost,
'reconnect_attempts': self.reconnect_attempts,
'retry_delay': self.retry_delay,
'timeout': self.timeout,
'blocked_connection_timeout': self.blocked_connection_timeout,
'heartbeat_timeout': self.heartbeat_timeout,
'channel_max': self.channel_max
}
_str = json.dumps(_properties)
return _str
class AMQPConnection(pika.BlockingConnection):
"""Connection. Thin wrapper around pika.BlockingConnection"""
def __init__(self, conn_params):
self._connection_params = conn_params
self._pika_connection = None
super(AMQPConnection, self).__init__(
parameters=self._connection_params)
class ExchangeTypes(object):
"""AMQP Exchange Types."""
Topic = 'topic'
Direct = 'direct'
Fanout = 'fanout'
Default = ''
class Credentials(pika.PlainCredentials):
"""Connection credentials for authn/authz.
Args:
username (str): The username.
password (str): The password (Basic Authentication).
"""
__slots__ = ['username', 'password']
def __init__(self, username='guest', password='guest'):
"""Constructor."""
super(Credentials, self).__init__(username=username, password=password)
class AMQPTransportSync(object):
"""Broker Interface.
Implements commonly used functionalities. Base class of high-level
implementations such as SubscriberSync and RpcServer.
"""
def __init__(self, *args, **kwargs):
"""Constructor."""
self._connection = None
self._channel = None
self._closing = False
self._debug = False
self.logger = None
if 'logger' in kwargs:
self.logger = kwargs.pop('logger')
else:
self.logger = create_logger('{}-{}'.format(
self.__class__.__name__, self._name))
if 'debug' in kwargs:
self.debug = kwargs.pop('debug')
else:
self.debug = False
if 'connection_params' in kwargs:
self.connection_params = kwargs.pop('connection_params')
else:
# Default Connection Parameters
self.connection_params = ConnectionParameters()
if 'creds' in kwargs:
self.credentials = kwargs.pop('creds')
self.connection_params.credentials = self.credentials
else:
self.credentials = self.connection_params.credentials
# So that connections do not go zombie
atexit.register(self._graceful_shutdown)
@property
def channel(self):
return self._channel
@property
def connection(self):
return self._connection
@property
def debug(self):
"""Debug mode flag."""
return self._debug
@debug.setter
def debug(self, val):
if not isinstance(val, bool):
raise TypeError('Value should be boolean')
self._debug = val
if self._debug is True:
self.logger.setLevel(LoggingLevel.DEBUG)
else:
self.logger.setLevel(LoggingLevel.INFO)
def connect(self):
"""Connect to the AMQP broker. Creates a new channel."""
if self._connection is not None:
self.logger.debug('Using already existing connection [{}]'.format(
self._connection))
# Create a new communication channel
self._channel = self._connection.channel()
return True
try:
# Create a new connection
self.logger.debug(
'Connecting to AMQP broker @ [{}:{}, vhost={}]...'.format(
self.connection_params.host,
self.connection_params.port,
self.connection_params.vhost))
self.logger.debug('Connection parameters:')
self.logger.debug(self.connection_params)
self._connection = AMQPConnection(self.connection_params)
# Create a new communication channel
self._channel = self._connection.channel()
self.logger.info(
'Connected to AMQP broker @ [{}:{}, vhost={}]'.format(
self.connection_params.host,
self.connection_params.port,
self.connection_params.vhost))
except pika.exceptions.ConnectionClosed:
self.logger.debug('Connection timed out. Reconnecting...')
return self.connect()
except pika.exceptions.AMQPConnectionError:
self.logger.debug('Connection error. Reconnecting...')
return self.connect()
except Exception as exc:
self.logger.exception('')
raise (exc)
return self._channel
def process_amqp_events(self):
"""Force process amqp events, such as heartbeat packages."""
self.connection.process_data_events()
def _signal_handler(self, signum, frame):
self.logger.info('Signal received: {}'.format(signum))
self._graceful_shutdown()
def _graceful_shutdown(self):
if not self.connection:
return
if self._channel.is_closed:
# self.logger.warning('Channel is already closed')
return
self.logger.debug('Invoking a graceful shutdown...')
self._channel.stop_consuming()
self._channel.close()
self.logger.debug('Channel closed!')
def exchange_exists(self, exchange_name):
resp = self._channel.exchange_declare(
exchange=exchange_name,
passive=True, # Only check that the exchange exists; do not declare it
)
self.logger.debug('Exchange exists result: {}'.format(resp))
return resp
def create_exchange(self, exchange_name, exchange_type, internal=None):
"""
Create a new exchange.
@param exchange_name: The name of the exchange (e.g. com.logging).
@type exchange_name: string
@param exchange_type: The type of the exchange (e.g. 'topic').
@type exchange_type: string
"""
self._channel.exchange_declare(
exchange=exchange_name,
durable=True, # Survive reboot
passive=False, # Actually declare the exchange, not just an existence check
internal=internal, # Can only be published to by other exchanges
exchange_type=exchange_type
)
self.logger.debug('Created exchange: [name={}, type={}]'.format(
exchange_name, exchange_type))
def create_queue(self, queue_name='', exclusive=True, queue_size=10,
message_ttl=60000, overflow_behaviour='drop-head',
expires=600000):
"""
Create a new queue.
@param queue_name: The name of the queue.
@type queue_name: string
@param exclusive: Only allow access by the current connection.
@type exclusive: bool
@param queue_size: The size of the queue
@type queue_size: int
@param message_ttl: Per-queue message time-to-live
(https://www.rabbitmq.com/ttl.html#per-queue-message-ttl)
@type message_ttl: int
@param overflow_behaviour: Overflow behaviour - 'drop-head' ||
'reject-publish'.
https://www.rabbitmq.com/maxlength.html#overflow-behaviour
@type overflow_behaviour: str
@param expires: Queues will expire after a period of time only
when they are not used (e.g. do not have consumers).
This feature can be used together with the auto-delete
queue property. The value is expressed in milliseconds (ms).
Default value is 10 minutes.
https://www.rabbitmq.com/ttl.html#queue-ttl
"""
args = {
'x-max-length': queue_size,
'x-overflow': overflow_behaviour,
'x-message-ttl': message_ttl,
'x-expires': expires
}
result = self._channel.queue_declare(
exclusive=exclusive,
queue=queue_name,
durable=False,
auto_delete=True,
arguments=args)
queue_name = result.method.queue
self.logger.debug('Created queue [{}] [size={}, ttl={}]'.format(
queue_name, queue_size, message_ttl))
return queue_name
def delete_queue(self, queue_name):
self._channel.queue_delete(queue=queue_name)
def _queue_exists_clb(self, arg):
print(arg)
def queue_exists(self, queue_name):
"""Check if a queue exists, given its name.
Args:
queue_name (str): The name of the queue.
Returns:
bool: True if the queue exists, False otherwise.
"""
# resp = self._channel.queue_declare(queue_name, passive=True,
# callback=self._queue_exists_clb)
try:
resp = self._channel.queue_declare(queue_name, passive=True)
except pika.exceptions.ChannelClosedByBroker as exc:
self.connect()
if exc.reply_code == 404: # Not Found
|
return False
|
conditional_block
|
|
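The synchronous transport above leans on atexit and a signal handler to make sure channels are closed before the process exits. A Go sketch of the equivalent shutdown pattern with os/signal is shown below; closeChannel is a hypothetical stand-in for stop_consuming plus channel.close.

package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

// closeChannel stands in for the real channel/connection teardown.
func closeChannel() {
	fmt.Println("channel closed, shutting down gracefully")
}

func main() {
	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)

	done := make(chan struct{})
	go func() {
		sig := <-sigs
		fmt.Println("signal received:", sig)
		closeChannel()
		close(done)
	}()

	fmt.Println("consuming... press Ctrl+C to stop")
	<-done
}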
amqp_transport.py
|
Args:
username (str): The username.
password (str): The password (Basic Authentication).
"""
__slots__ = ['username', 'password']
def __init__(self, username='guest', password='guest'):
"""Constructor."""
super(Credentials, self).__init__(username=username, password=password)
class AMQPTransportSync(object):
"""Broker Interface.
Implements commonly used functionalities. Base class of high-level
implementations such as SubscriberSync and RpcServer.
"""
def __init__(self, *args, **kwargs):
"""Constructor."""
self._connection = None
self._channel = None
self._closing = False
self._debug = False
self.logger = None
if 'logger' in kwargs:
self.logger = kwargs.pop('logger')
else:
self.logger = create_logger('{}-{}'.format(
self.__class__.__name__, self._name))
if 'debug' in kwargs:
self.debug = kwargs.pop('debug')
else:
self.debug = False
if 'connection_params' in kwargs:
self.connection_params = kwargs.pop('connection_params')
else:
# Default Connection Parameters
self.connection_params = ConnectionParameters()
if 'creds' in kwargs:
self.credentials = kwargs.pop('creds')
self.connection_params.credentials = self.credentials
else:
self.credentials = self.connection_params.credentials
# So that connections do not go zombie
atexit.register(self._graceful_shutdown)
@property
def channel(self):
return self._channel
@property
def connection(self):
return self._connection
@property
def debug(self):
"""Debug mode flag."""
return self._debug
@debug.setter
def debug(self, val):
if not isinstance(val, bool):
raise TypeError('Value should be boolean')
self._debug = val
if self._debug is True:
self.logger.setLevel(LoggingLevel.DEBUG)
else:
self.logger.setLevel(LoggingLevel.INFO)
def connect(self):
"""Connect to the AMQP broker. Creates a new channel."""
if self._connection is not None:
self.logger.debug('Using already existing connection [{}]'.format(
self._connection))
# Create a new communication channel
self._channel = self._connection.channel()
return True
try:
# Create a new connection
self.logger.debug(
'Connecting to AMQP broker @ [{}:{}, vhost={}]...'.format(
self.connection_params.host,
self.connection_params.port,
self.connection_params.vhost))
self.logger.debug('Connection parameters:')
self.logger.debug(self.connection_params)
self._connection = AMQPConnection(self.connection_params)
# Create a new communication channel
self._channel = self._connection.channel()
self.logger.info(
'Connected to AMQP broker @ [{}:{}, vhost={}]'.format(
self.connection_params.host,
self.connection_params.port,
self.connection_params.vhost))
except pika.exceptions.ConnectionClosed:
self.logger.debug('Connection timed out. Reconnecting...')
return self.connect()
except pika.exceptions.AMQPConnectionError:
self.logger.debug('Connection error. Reconnecting...')
return self.connect()
except Exception as exc:
self.logger.exception('')
raise (exc)
return self._channel
def process_amqp_events(self):
"""Force process amqp events, such as heartbeat packages."""
self.connection.process_data_events()
def _signal_handler(self, signum, frame):
self.logger.info('Signal received: {}'.format(signum))
self._graceful_shutdown()
def _graceful_shutdown(self):
if not self.connection:
return
if self._channel.is_closed:
# self.logger.warning('Channel is already closed')
return
self.logger.debug('Invoking a graceful shutdown...')
self._channel.stop_consuming()
self._channel.close()
self.logger.debug('Channel closed!')
def exchange_exists(self, exchange_name):
resp = self._channel.exchange_declare(
exchange=exchange_name,
passive=True, # Only check that the exchange exists; do not declare it
)
self.logger.debug('Exchange exists result: {}'.format(resp))
return resp
def create_exchange(self, exchange_name, exchange_type, internal=None):
"""
Create a new exchange.
@param exchange_name: The name of the exchange (e.g. com.logging).
@type exchange_name: string
@param exchange_type: The type of the exchange (e.g. 'topic').
@type exchange_type: string
"""
self._channel.exchange_declare(
exchange=exchange_name,
durable=True, # Survive reboot
passive=False, # Actually declare the exchange, not just an existence check
internal=internal, # Can only be published to by other exchanges
exchange_type=exchange_type
)
self.logger.debug('Created exchange: [name={}, type={}]'.format(
exchange_name, exchange_type))
def create_queue(self, queue_name='', exclusive=True, queue_size=10,
message_ttl=60000, overflow_behaviour='drop-head',
expires=600000):
"""
Create a new queue.
@param queue_name: The name of the queue.
@type queue_name: string
@param exclusive: Only allow access by the current connection.
@type exclusive: bool
@param queue_size: The size of the queue
@type queue_size: int
@param message_ttl: Per-queue message time-to-live
(https://www.rabbitmq.com/ttl.html#per-queue-message-ttl)
@type message_ttl: int
@param overflow_behaviour: Overflow behaviour - 'drop-head' ||
'reject-publish'.
https://www.rabbitmq.com/maxlength.html#overflow-behaviour
@type overflow_behaviour: str
@param expires: Queues will expire after a period of time only
when they are not used (e.g. do not have consumers).
This feature can be used together with the auto-delete
queue property. The value is expressed in milliseconds (ms).
Default value is 10 minutes.
https://www.rabbitmq.com/ttl.html#queue-ttl
"""
args = {
'x-max-length': queue_size,
'x-overflow': overflow_behaviour,
'x-message-ttl': message_ttl,
'x-expires': expires
}
result = self._channel.queue_declare(
exclusive=exclusive,
queue=queue_name,
durable=False,
auto_delete=True,
arguments=args)
queue_name = result.method.queue
self.logger.debug('Created queue [{}] [size={}, ttl={}]'.format(
queue_name, queue_size, message_ttl))
return queue_name
def delete_queue(self, queue_name):
self._channel.queue_delete(queue=queue_name)
def _queue_exists_clb(self, arg):
print(arg)
def queue_exists(self, queue_name):
"""Check if a queue exists, given its name.
Args:
queue_name (str): The name of the queue.
Returns:
bool: True if the queue exists, False otherwise.
"""
# resp = self._channel.queue_declare(queue_name, passive=True,
# callback=self._queue_exists_clb)
try:
resp = self._channel.queue_declare(queue_name, passive=True)
except pika.exceptions.ChannelClosedByBroker as exc:
self.connect()
if exc.reply_code == 404: # Not Found
return False
else:
self.logger.warning('Queue exists <{}>'.format(queue_name))
return True
def bind_queue(self, exchange_name, queue_name, bind_key):
"""
Bind a queue to an exchange using a bind key.
@param exchange_name: The name of the exchange (e.g. com.logging).
@type exchange_name: string
@param queue_name: The name of the queue.
@type queue_name: string
@param bind_key: The binding key name.
@type bind_key: string
"""
self.logger.info('Subscribed to topic: {}'.format(bind_key))
try:
self._channel.queue_bind(
exchange=exchange_name, queue=queue_name, routing_key=bind_key)
except Exception as exc:
raise exc
def close(self):
self._graceful_shutdown()
def disconnect(self):
self._graceful_shutdown()
def __del__(self):
self._graceful_shutdown()
class AMQPTransportAsync(object):
|
CONNECTION_TIMEOUT_SEC = 5
def __init__(self, host='127.0.0.1', port='5672', exchange='amq.topic'):
self._connection = None
self._channel = None
self._closing = False
self.logger = create_logger(self.__class__.__name__)
self._exchange = exchange
self._host = host
self._port = port
super(AMQPTransportAsync, self).__init__()
def connect(self):
"""This method connects to RabbitMQ, returning the connection handle.
When the connection is established, the on_connection_open method
will be invoked by pika.
:rtype: pika.SelectConnection
"""
self.logger.info("Connecting to AMQP broker @ [{}:{}] ...".format(
|
identifier_body
|
|
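Both transports are configured with reconnect_attempts and retry_delay, and the synchronous connect() above simply recurses on connection errors. A bounded, iterative version of that retry policy in Go follows; dial is a hypothetical stand-in for the actual broker connection call.

package main

import (
	"errors"
	"fmt"
	"time"
)

// connectWithRetry retries dial up to attempts times, sleeping retryDelay in
// between, mirroring the reconnect_attempts/retry_delay connection parameters.
func connectWithRetry(attempts int, retryDelay time.Duration, dial func() error) error {
	var lastErr error
	for i := 1; i <= attempts; i++ {
		if lastErr = dial(); lastErr == nil {
			return nil
		}
		fmt.Printf("attempt %d failed: %v; retrying in %s\n", i, lastErr, retryDelay)
		time.Sleep(retryDelay)
	}
	return fmt.Errorf("giving up after %d attempts: %w", attempts, lastErr)
}

func main() {
	calls := 0
	err := connectWithRetry(5, 200*time.Millisecond, func() error {
		calls++
		if calls < 3 {
			return errors.New("broker not reachable yet") // hypothetical transient failure
		}
		return nil
	})
	fmt.Println("connected:", err == nil)
}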
amqp_transport.py
|
(pika.BasicProperties):
"""Message Properties/Attribures used for sending and receiving messages.
Args:
content_type (str):
content_encoding (str):
timestamp (str):
"""
def __init__(self, content_type=None, content_encoding=None,
timestamp=None, correlation_id=None, reply_to=None,
message_id=None, user_id=None, app_id=None):
"""Constructor."""
if timestamp is None:
timestamp = (time.time() + 0.5) * 1000
timestamp = int(timestamp)
super(MessageProperties, self).__init__(
content_type=content_type,
content_encoding=content_encoding,
timestamp=timestamp,
correlation_id=correlation_id,
reply_to=reply_to,
message_id=str(message_id) if message_id is not None else None,
user_id=str(user_id) if user_id is not None else None,
app_id=str(app_id) if app_id is not None else None
)
class ConnectionParameters(pika.ConnectionParameters):
"""AMQP Connection parameters.
Args:
host (str): Hostname of AMQP broker to connect to.
port (int|str): AMQP broker listening port.
creds (object): Auth Credentials - Credentials instance.
secure (bool): Enable SSL/TLS (AMQPS) - Not supported!!
reconnect_attempts (int): The reconnection attempts to make before
dropping and raising an Exception.
retry_delay (float): Time delay between reconnect attempts.
timeout (float): Socket connection timeout value.
blocked_connection_timeout (float): Blocked connection timeout value.
Set the timeout, in seconds, that the connection may remain blocked
(triggered by Connection.Blocked from broker). If the timeout
expires before connection becomes unblocked, the connection will
be torn down.
heartbeat_timeout (int): Controls AMQP heartbeat
timeout negotiation during connection tuning. An integer value
always overrides the value proposed by broker. Use 0 to deactivate
heartbeats and None to always accept the broker's proposal.
The value passed for timeout is also used to calculate an interval
at which a heartbeat frame is sent to the broker. The interval is
equal to the timeout value divided by two.
channel_max (int): The max permissible number of channels per
connection. Defaults to 128.
"""
__slots__ = [
'host', 'port', 'secure', 'vhost', 'reconnect_attempts', 'retry_delay',
'timeout', 'heartbeat_timeout', 'blocked_connection_timeout', 'creds'
]
def __init__(self, host='127.0.0.1', port='5672', creds=None,
secure=False, vhost='/', reconnect_attempts=5,
retry_delay=2.0, timeout=120, blocked_connection_timeout=None,
heartbeat_timeout=60, channel_max=128):
"""Constructor."""
self.host = host
self.port = port
self.secure = secure
self.vhost = vhost
self.reconnect_attempts = reconnect_attempts
self.retry_delay = retry_delay
self.timeout = timeout
self.blocked_connection_timeout = blocked_connection_timeout
self.heartbeat_timeout = heartbeat_timeout
self.channel_max = channel_max
if creds is None:
creds = Credentials()
super(ConnectionParameters, self).__init__(
host=host,
port=str(port),
credentials=creds,
connection_attempts=reconnect_attempts,
retry_delay=retry_delay,
blocked_connection_timeout=blocked_connection_timeout,
socket_timeout=timeout,
virtual_host=vhost,
heartbeat=heartbeat_timeout,
channel_max=channel_max)
def __str__(self):
_properties = {
'host': self.host,
'port': self.port,
'vhost': self.vhost,
'reconnect_attempts': self.reconnect_attempts,
'retry_delay': self.retry_delay,
'timeout': self.timeout,
'blocked_connection_timeout': self.blocked_connection_timeout,
'heartbeat_timeout': self.heartbeat_timeout,
'channel_max': self.channel_max
}
_str = json.dumps(_properties)
return _str
class AMQPConnection(pika.BlockingConnection):
"""Connection. Thin wrapper around pika.BlockingConnection"""
def __init__(self, conn_params):
self._connection_params = conn_params
self._pika_connection = None
super(AMQPConnection, self).__init__(
parameters=self._connection_params)
class ExchangeTypes(object):
"""AMQP Exchange Types."""
Topic = 'topic'
Direct = 'direct'
Fanout = 'fanout'
Default = ''
class Credentials(pika.PlainCredentials):
"""Connection credentials for authn/authz.
Args:
username (str): The username.
password (str): The password (Basic Authentication).
"""
__slots__ = ['username', 'password']
def __init__(self, username='guest', password='guest'):
"""Constructor."""
super(Credentials, self).__init__(username=username, password=password)
class AMQPTransportSync(object):
"""Broker Interface.
Implements commonly used functionalities. Base class of high-level
implementations such as SubscriberSync and RpcServer.
"""
def __init__(self, *args, **kwargs):
"""Constructor."""
self._connection = None
self._channel = None
self._closing = False
self._debug = False
self.logger = None
if 'logger' in kwargs:
self.logger = kwargs.pop('logger')
else:
self.logger = create_logger('{}-{}'.format(
self.__class__.__name__, self._name))
if 'debug' in kwargs:
self.debug = kwargs.pop('debug')
else:
self.debug = False
if 'connection_params' in kwargs:
self.connection_params = kwargs.pop('connection_params')
else:
# Default Connection Parameters
self.connection_params = ConnectionParameters()
if 'creds' in kwargs:
self.credentials = kwargs.pop('creds')
self.connection_params.credentials = self.credentials
else:
self.credentials = self.connection_params.credentials
# So that connections do not go zombie
atexit.register(self._graceful_shutdown)
@property
def channel(self):
return self._channel
@property
def connection(self):
return self._connection
@property
def debug(self):
"""Debug mode flag."""
return self._debug
@debug.setter
def debug(self, val):
if not isinstance(val, bool):
raise TypeError('Value should be boolean')
self._debug = val
if self._debug is True:
self.logger.setLevel(LoggingLevel.DEBUG)
else:
self.logger.setLevel(LoggingLevel.INFO)
def connect(self):
"""Connect to the AMQP broker. Creates a new channel."""
if self._connection is not None:
self.logger.debug('Using already existing connection [{}]'.format(
self._connection))
# Create a new communication channel
self._channel = self._connection.channel()
return True
try:
# Create a new connection
self.logger.debug(
'Connecting to AMQP broker @ [{}:{}, vhost={}]...'.format(
self.connection_params.host,
self.connection_params.port,
self.connection_params.vhost))
self.logger.debug('Connection parameters:')
self.logger.debug(self.connection_params)
self._connection = AMQPConnection(self.connection_params)
# Create a new communication channel
self._channel = self._connection.channel()
self.logger.info(
'Connected to AMQP broker @ [{}:{}, vhost={}]'.format(
self.connection_params.host,
self.connection_params.port,
self.connection_params.vhost))
except pika.exceptions.ConnectionClosed:
self.logger.debug('Connection timed out. Reconnecting...')
return self.connect()
except pika.exceptions.AMQPConnectionError:
self.logger.debug('Connection error. Reconnecting...')
return self.connect()
except Exception as exc:
self.logger.exception('')
raise (exc)
return self._channel
def process_amqp_events(self):
"""Force process amqp events, such as heartbeat packages."""
self.connection.process_data_events()
def _signal_handler(self, signum, frame):
self.logger.info('Signal received: {}'.format(signum))
self._graceful_shutdown()
def _graceful_shutdown(self):
if not self.connection:
return
if self._channel.is_closed:
# self.logger.warning('Channel is already closed')
return
self.logger.debug('Invoking a graceful shutdown...')
self._channel.stop_consuming()
self._channel.close()
self.logger.debug('Channel closed!')
def exchange_exists(self, exchange_name):
resp = self._channel.exchange_declare(
exchange=exchange_name,
passive=True, # Only check that the exchange exists; do not declare it
)
self.logger.debug('Exchange exists result: {}'.format(resp))
return resp
def create_exchange(self, exchange_name, exchange_type, internal=None):
"""
Create a new exchange.
@param exchange_name: The name of the exchange (e.g. com.logging).
@type exchange_name: string
@param exchange_type: The type of the exchange (e.g. 'topic').
@type exchange_type: string
"""
self._channel.exchange_declare(
exchange=exchange_name,
durable=True, # Survive reboot
passive=False,
|
MessageProperties
|
identifier_name
|
|
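Two small numeric conventions from the row above are easy to get wrong: message timestamps are carried in milliseconds, and heartbeat frames are sent at half the negotiated heartbeat timeout. A tiny Go illustration of both calculations:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Millisecond timestamp, as used for MessageProperties.timestamp.
	tsMillis := time.Now().UnixMilli()
	fmt.Println("timestamp (ms):", tsMillis)

	// Heartbeat frames go out at half the negotiated timeout.
	heartbeatTimeout := 60 * time.Second
	sendInterval := heartbeatTimeout / 2
	fmt.Println("heartbeat send interval:", sendInterval) // 30s
}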
path.rs
|
.split_terminator(DELIMITER)
.map(|s| PathPart(s.to_string()))
.collect(),
}
}
/// For use when receiving a path from a filesystem directly, not
/// when building a path. Uses the standard library's path splitting
/// implementation to separate into parts.
pub fn from_path_buf_unchecked(path: impl Into<PathBuf>) -> Self {
let path = path.into();
Self {
parts: path
.iter()
.flat_map(|s| s.to_os_string().into_string().map(PathPart))
.collect(),
}
}
/// Add a part to the end of the path, encoding any restricted characters.
pub fn push(&mut self, part: impl Into<String>) {
let part = part.into();
self.parts.push((&*part).into());
}
/// Add a `PathPart` to the end of the path. Infallible because the
/// `PathPart` should already have been checked for restricted
/// characters.
pub fn push_part(&mut self, part: &PathPart) {
self.parts.push(part.to_owned());
}
/// Add the parts of `ObjectStorePath` to the end of the path. Notably does
/// *not* behave as `PathBuf::push` does: no existing part of `self`
/// will be replaced as part of this call.
pub fn push_path(&mut self, path: &Self) {
self.parts.extend_from_slice(&path.parts);
}
/// Push a bunch of parts in one go.
pub fn push_all<'a>(&mut self, parts: impl AsRef<[&'a str]>) {
self.parts.extend(parts.as_ref().iter().map(|&v| v.into()));
}
/// Return the component parts of the path.
pub fn as_parts(&self) -> &[PathPart] {
self.parts.as_ref()
}
/// Pops a part from the path and returns it, or `None` if it's empty.
pub fn pop(&mut self) -> Option<&PathPart> {
unimplemented!()
}
/// Determines whether `prefix` is a prefix of `self`.
pub fn starts_with(&self, prefix: &Self) -> bool {
let diff = itertools::diff_with(self.parts.iter(), prefix.parts.iter(), |a, b| a == b);
match diff {
None => true,
Some(itertools::Diff::Shorter(..)) => true,
Some(itertools::Diff::FirstMismatch(_, mut remaining_self, mut remaining_prefix)) => {
let first_prefix = remaining_prefix.next().expect("must be at least one value");
// there must not be any other remaining parts in the prefix
remaining_prefix.next().is_none()
// and the next item in self must start with the last item in the prefix
&& remaining_self
.next()
.expect("must be at least one value")
.0
.starts_with(&first_prefix.0)
}
_ => false,
}
}
/// Returns delimiter-separated parts contained in `self` after `prefix`.
pub fn parts_after_prefix(&self, _prefix: &Self) -> &[PathPart] {
unimplemented!()
}
}
// TODO: I made these structs rather than functions because I could see
// `convert` being part of a trait, possibly, but that seemed a bit overly
// complex for now.
/// Converts `ObjectStorePath`s to `String`s that are appropriate for use as
/// locations in cloud storage.
#[derive(Debug, Clone, Copy)]
pub struct CloudConverter {}
impl CloudConverter {
/// Creates a cloud storage location by joining this `ObjectStorePath`'s
/// parts with `DELIMITER`
pub fn convert(object_store_path: &ObjectStorePath) -> String {
object_store_path.parts.iter().map(|p| &p.0).join(DELIMITER)
}
}
/// Converts `ObjectStorePath`s to `String`s that are appropriate for use as
/// locations in filesystem storage.
#[derive(Debug, Clone, Copy)]
pub struct
|
{}
impl FileConverter {
/// Creates a filesystem `PathBuf` location by using the standard library's
/// `PathBuf` building implementation appropriate for the current
/// platform.
pub fn convert(object_store_path: &ObjectStorePath) -> PathBuf {
object_store_path.parts.iter().map(|p| &p.0).collect()
}
}
/// The delimiter to separate object namespaces, creating a directory structure.
pub const DELIMITER: &str = "/";
// percent_encode's API needs this as a byte
const DELIMITER_BYTE: u8 = DELIMITER.as_bytes()[0];
/// The PathPart type exists to validate the directory/file names that form part
/// of a path.
///
/// A PathPart instance is guaranteed to contain no `/` characters as it can
/// only be constructed by going through the `try_from` impl.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Default)]
pub struct PathPart(String);
/// Characters we want to encode.
const INVALID: &AsciiSet = &CONTROLS
// The delimiter we are reserving for internal hierarchy
.add(DELIMITER_BYTE)
// Characters AWS recommends avoiding for object keys
// https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
.add(b'\\')
.add(b'{')
// TODO: Non-printable ASCII characters (128–255 decimal characters)
.add(b'^')
.add(b'}')
.add(b'%')
.add(b'`')
.add(b']')
.add(b'"') // " <-- my editor is confused about double quotes within single quotes
.add(b'>')
.add(b'[')
.add(b'~')
.add(b'<')
.add(b'#')
.add(b'|')
// Characters Google Cloud Storage recommends avoiding for object names
// https://cloud.google.com/storage/docs/naming-objects
.add(b'\r')
.add(b'\n')
.add(b'*')
.add(b'?');
impl From<&str> for PathPart {
fn from(v: &str) -> Self {
match v {
// We don't want to encode `.` generally, but we do want to disallow parts of paths
// to be equal to `.` or `..` to prevent file system traversal shenanigans.
"." => Self(String::from("%2E")),
".." => Self(String::from("%2E%2E")),
other => Self(percent_encode(other.as_bytes(), INVALID).to_string()),
}
}
}
impl std::fmt::Display for PathPart {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
percent_decode_str(&self.0)
.decode_utf8()
.expect("Valid UTF-8 that came from String")
.fmt(f)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn path_part_delimiter_gets_encoded() {
let part: PathPart = "foo/bar".into();
assert_eq!(part, PathPart(String::from("foo%2Fbar")));
}
#[test]
fn path_part_gets_decoded_for_display() {
let part: PathPart = "foo/bar".into();
assert_eq!(part.to_string(), "foo/bar");
}
#[test]
fn path_part_given_already_encoded_string() {
let part: PathPart = "foo%2Fbar".into();
assert_eq!(part, PathPart(String::from("foo%252Fbar")));
assert_eq!(part.to_string(), "foo%2Fbar");
}
#[test]
fn path_part_cant_be_one_dot() {
let part: PathPart = ".".into();
assert_eq!(part, PathPart(String::from("%2E")));
assert_eq!(part.to_string(), ".");
}
#[test]
fn path_part_cant_be_two_dots() {
let part: PathPart = "..".into();
assert_eq!(part, PathPart(String::from("%2E%2E")));
assert_eq!(part.to_string(), "..");
}
// Invariants to maintain/document/test:
//
// - always ends in DELIMITER if it's a directory. If it's the end object, it
// should have some sort of file extension like .parquet, .json, or .segment
// - does not contain unencoded DELIMITER
// - for file paths: does not escape root dir
// - for object storage: looks like directories
// - Paths that come from object stores directly don't need to be
// parsed/validated
// - Within a process, the same backing store will always be used
//
#[test]
fn cloud_prefix_no_trailing_delimiter_or_filename() {
// Use case: a file named `test_file.json` exists in object storage and it
// should be returned for a search on prefix `test`, so the prefix path
// should not get a trailing delimiter automatically added
let mut prefix = ObjectStorePath::default();
prefix.push("test");
let converted = CloudConverter::convert(&prefix);
assert_eq!(converted, "test");
}
#[test]
fn cloud_prefix_with_trailing_delimiter() {
//
|
FileConverter
|
identifier_name
|
path.rs
|
parts: path
.iter()
.flat_map(|s| s.to_os_string().into_string().map(PathPart))
.collect(),
}
}
/// Add a part to the end of the path, encoding any restricted characters.
pub fn push(&mut self, part: impl Into<String>) {
let part = part.into();
self.parts.push((&*part).into());
}
/// Add a `PathPart` to the end of the path. Infallible because the
/// `PathPart` should already have been checked for restricted
/// characters.
pub fn push_part(&mut self, part: &PathPart) {
self.parts.push(part.to_owned());
}
/// Add the parts of `ObjectStorePath` to the end of the path. Notably does
/// *not* behave as `PathBuf::push` does: no existing part of `self`
/// will be replaced as part of this call.
pub fn push_path(&mut self, path: &Self) {
self.parts.extend_from_slice(&path.parts);
}
/// Push a bunch of parts in one go.
pub fn push_all<'a>(&mut self, parts: impl AsRef<[&'a str]>) {
self.parts.extend(parts.as_ref().iter().map(|&v| v.into()));
}
/// Return the component parts of the path.
pub fn as_parts(&self) -> &[PathPart] {
self.parts.as_ref()
}
/// Pops a part from the path and returns it, or `None` if it's empty.
pub fn pop(&mut self) -> Option<&PathPart> {
unimplemented!()
}
/// Determines whether `prefix` is a prefix of `self`.
pub fn starts_with(&self, prefix: &Self) -> bool {
let diff = itertools::diff_with(self.parts.iter(), prefix.parts.iter(), |a, b| a == b);
match diff {
None => true,
Some(itertools::Diff::Shorter(..)) => true,
Some(itertools::Diff::FirstMismatch(_, mut remaining_self, mut remaining_prefix)) => {
let first_prefix = remaining_prefix.next().expect("must be at least one value");
// there must not be any other remaining parts in the prefix
remaining_prefix.next().is_none()
// and the next item in self must start with the last item in the prefix
&& remaining_self
.next()
.expect("must be at least one value")
.0
.starts_with(&first_prefix.0)
}
_ => false,
}
}
/// Returns delimiter-separated parts contained in `self` after `prefix`.
pub fn parts_after_prefix(&self, _prefix: &Self) -> &[PathPart] {
unimplemented!()
}
}
// TODO: I made these structs rather than functions because I could see
// `convert` being part of a trait, possibly, but that seemed a bit overly
// complex for now.
/// Converts `ObjectStorePath`s to `String`s that are appropriate for use as
/// locations in cloud storage.
#[derive(Debug, Clone, Copy)]
pub struct CloudConverter {}
impl CloudConverter {
/// Creates a cloud storage location by joining this `ObjectStorePath`'s
/// parts with `DELIMITER`
pub fn convert(object_store_path: &ObjectStorePath) -> String {
object_store_path.parts.iter().map(|p| &p.0).join(DELIMITER)
}
}
/// Converts `ObjectStorePath`s to `String`s that are appropriate for use as
/// locations in filesystem storage.
#[derive(Debug, Clone, Copy)]
pub struct FileConverter {}
impl FileConverter {
/// Creates a filesystem `PathBuf` location by using the standard library's
/// `PathBuf` building implementation appropriate for the current
/// platform.
pub fn convert(object_store_path: &ObjectStorePath) -> PathBuf {
object_store_path.parts.iter().map(|p| &p.0).collect()
}
}
/// The delimiter to separate object namespaces, creating a directory structure.
pub const DELIMITER: &str = "/";
// percent_encode's API needs this as a byte
const DELIMITER_BYTE: u8 = DELIMITER.as_bytes()[0];
/// The PathPart type exists to validate the directory/file names that form part
/// of a path.
///
/// A PathPart instance is guaranteed to contain no `/` characters as it can
/// only be constructed by going through the `try_from` impl.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Default)]
pub struct PathPart(String);
/// Characters we want to encode.
const INVALID: &AsciiSet = &CONTROLS
// The delimiter we are reserving for internal hierarchy
.add(DELIMITER_BYTE)
// Characters AWS recommends avoiding for object keys
// https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
.add(b'\\')
.add(b'{')
// TODO: Non-printable ASCII characters (128–255 decimal characters)
.add(b'^')
.add(b'}')
.add(b'%')
.add(b'`')
.add(b']')
.add(b'"') // " <-- my editor is confused about double quotes within single quotes
.add(b'>')
.add(b'[')
.add(b'~')
.add(b'<')
.add(b'#')
.add(b'|')
// Characters Google Cloud Storage recommends avoiding for object names
// https://cloud.google.com/storage/docs/naming-objects
.add(b'\r')
.add(b'\n')
.add(b'*')
.add(b'?');
impl From<&str> for PathPart {
fn from(v: &str) -> Self {
match v {
// We don't want to encode `.` generally, but we do want to disallow parts of paths
// to be equal to `.` or `..` to prevent file system traversal shenanigans.
"." => Self(String::from("%2E")),
".." => Self(String::from("%2E%2E")),
other => Self(percent_encode(other.as_bytes(), INVALID).to_string()),
}
}
}
impl std::fmt::Display for PathPart {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
percent_decode_str(&self.0)
.decode_utf8()
.expect("Valid UTF-8 that came from String")
.fmt(f)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn path_part_delimiter_gets_encoded() {
let part: PathPart = "foo/bar".into();
assert_eq!(part, PathPart(String::from("foo%2Fbar")));
}
#[test]
fn path_part_gets_decoded_for_display() {
let part: PathPart = "foo/bar".into();
assert_eq!(part.to_string(), "foo/bar");
}
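// Added illustrative test (not from the original suite): besides the delimiter,
// other characters in the INVALID set should also be escaped on construction and
// decoded again for display. '#' is assumed here purely as an example character;
// it percent-encodes to %23.
#[test]
fn path_part_other_invalid_characters_get_encoded() {
    let part: PathPart = "foo#bar".into();
    assert_eq!(part, PathPart(String::from("foo%23bar")));
    assert_eq!(part.to_string(), "foo#bar");
}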
#[test]
fn path_part_given_already_encoded_string() {
let part: PathPart = "foo%2Fbar".into();
assert_eq!(part, PathPart(String::from("foo%252Fbar")));
assert_eq!(part.to_string(), "foo%2Fbar");
}
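// Added illustrative test: `push` routes through `PathPart::from`, so a part
// containing the delimiter is stored encoded and a cloud location never gains an
// extra hierarchy level. Sketch layered on the existing API, not original code.
#[test]
fn push_encodes_delimiter_in_part() {
    let mut path = ObjectStorePath::default();
    path.push("foo/bar");
    assert_eq!(CloudConverter::convert(&path), "foo%2Fbar");
}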
#[test]
fn path_part_cant_be_one_dot() {
let part: PathPart = ".".into();
assert_eq!(part, PathPart(String::from("%2E")));
assert_eq!(part.to_string(), ".");
}
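// Added illustrative test: unlike `PathBuf::push`, `push_path` never replaces
// existing parts of `self`; the other path's parts are simply appended in order.
// The part names used here are arbitrary examples.
#[test]
fn push_path_appends_parts() {
    let mut base = ObjectStorePath::default();
    base.push("foo");

    let mut rest = ObjectStorePath::default();
    rest.push_all(&["bar", "baz.json"]);

    base.push_path(&rest);
    assert_eq!(CloudConverter::convert(&base), "foo/bar/baz.json");
}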
#[test]
fn path_part_cant_be_two_dots() {
let part: PathPart = "..".into();
assert_eq!(part, PathPart(String::from("%2E%2E")));
assert_eq!(part.to_string(), "..");
}
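// Added illustrative test of `starts_with`: the last prefix part only needs to be
// a string prefix of the corresponding part of `self`, mirroring how object
// stores match key prefixes. Assumption-based sketch of the behavior implemented
// above, not an original test.
#[test]
fn starts_with_matches_partial_last_part() {
    let mut path = ObjectStorePath::default();
    path.push_all(&["foo", "barbell", "baz.json"]);

    let mut prefix = ObjectStorePath::default();
    prefix.push_all(&["foo", "bar"]);
    assert!(path.starts_with(&prefix));

    let mut other = ObjectStorePath::default();
    other.push_all(&["foo", "qux"]);
    assert!(!path.starts_with(&other));
}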
// Invariants to maintain/document/test:
//
// - always ends in DELIMITER if it's a directory. If it's the end object, it
// should have some sort of file extension like .parquet, .json, or .segment
// - does not contain unencoded DELIMITER
// - for file paths: does not escape root dir
// - for object storage: looks like directories
// - Paths that come from object stores directly don't need to be
// parsed/validated
// - Within a process, the same backing store will always be used
//
#[test]
fn cloud_prefix_no_trailing_delimiter_or_filename() {
// Use case: a file named `test_file.json` exists in object storage and it
// should be returned for a search on prefix `test`, so the prefix path
// should not get a trailing delimiter automatically added
let mut prefix = ObjectStorePath::default();
prefix.push("test");
let converted = CloudConverter::convert(&prefix);
assert_eq!(converted, "test");
}
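// Added illustrative test contrasting the two converters on one path:
// CloudConverter always joins with DELIMITER, while FileConverter builds a
// platform PathBuf from the same parts. The example part names are arbitrary.
#[test]
fn cloud_and_file_converters_use_same_parts() {
    let mut path = ObjectStorePath::default();
    path.push_all(&["data", "2024", "file.parquet"]);

    assert_eq!(CloudConverter::convert(&path), "data/2024/file.parquet");

    let expected: std::path::PathBuf = ["data", "2024", "file.parquet"].iter().collect();
    assert_eq!(FileConverter::convert(&path), expected);
}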
#[test]
fn cloud_prefix_with_trailing_delimiter() {
// Use case: files exist in object storage named `foo/bar.json` and
// `foo_test.json`. A search for the prefix `foo/` should return
// `foo/bar.json` but not `foo_test.json`.
let mut prefix = ObjectStorePath::default();
prefix.push_all(&["test", ""]);
let converted = CloudConverter::convert(&prefix);
assert_eq!(converted, "test/");
|
}
#[test]
|
random_line_split
|