| column | type | stats |
| --- | --- | --- |
| file_name | large_string | lengths 4–140 |
| prefix | large_string | lengths 0–12.1k |
| suffix | large_string | lengths 0–12k |
| middle | large_string | lengths 0–7.51k |
| fim_type | large_string | 4 classes: conditional_block, identifier_body, identifier_name, random_line_split |
**bastion_test.go**

```go
const (
securityGroupSuffix = "-sg"
imageID = "m-gw8c603eae9ygxgt2ig6"
)
var myPublicIP = ""
var (
accessKeyID = flag.String("access-key-id", "", "Alicloud access key id")
accessKeySecret = flag.String("access-key-secret", "", "Alicloud access key secret")
region = flag.String("region", "", "Alicloud region")
)
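// validateFlags ensures the Alicloud credentials and region were passed on
// the command line; the suite cannot run against the real API without them.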
func validateFlags() {
if len(*accessKeyID) == 0 {
panic("need an Alicloud access key id")
}
if len(*accessKeySecret) == 0 {
panic("need an Alicloud access key secret")
}
if len(*region) == 0 {
panic("need an Alicloud region")
}
}
type infrastructureIdentifiers struct {
vpcID *string
vswitchID *string
natGatewayID *string
securityGroupIDs *string
zone *string
}
var (
ctx = context.Background()
log logr.Logger
extensionscluster *extensionsv1alpha1.Cluster
controllercluster *controller.Cluster
options *bastionctrl.Options
bastion *extensionsv1alpha1.Bastion
secret *corev1.Secret
clientFactory alicloudclient.ClientFactory
testEnv *envtest.Environment
mgrCancel context.CancelFunc
c client.Client
internalChartsPath string
name string
vpcName string
)
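// BeforeSuite wires the suite together: it points envtest at an existing
// cluster, installs the extension CRDs, registers the bastion controller
// with a controller-runtime manager, and starts that manager in the background.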
var _ = BeforeSuite(func() {
flag.Parse()
validateFlags()
internalChartsPath = alicloud.InternalChartsPath
repoRoot := filepath.Join("..", "..", "..")
alicloud.InternalChartsPath = filepath.Join(repoRoot, alicloud.InternalChartsPath)
// enable manager logs
logf.SetLogger(logger.MustNewZapLogger(logger.DebugLevel, logger.FormatJSON, zap.WriteTo(GinkgoWriter)))
log = logf.Log.WithName("bastion-test")
randString, err := randomString()
Expect(err).NotTo(HaveOccurred())
// bastion name prefix
name = fmt.Sprintf("alicloud-it-bastion-%s", randString)
vpcName = fmt.Sprintf("%s-vpc", name)
myPublicIP, err = getMyPublicIPWithMask()
Expect(err).NotTo(HaveOccurred())
By("starting test environment")
testEnv = &envtest.Environment{
UseExistingCluster: pointer.Bool(true),
CRDInstallOptions: envtest.CRDInstallOptions{
Paths: []string{
filepath.Join(repoRoot, "example", "20-crd-extensions.gardener.cloud_clusters.yaml"),
filepath.Join(repoRoot, "example", "20-crd-extensions.gardener.cloud_bastions.yaml"),
filepath.Join(repoRoot, "example", "20-crd-extensions.gardener.cloud_workers.yaml"),
},
},
}
cfg, err := testEnv.Start()
Expect(err).NotTo(HaveOccurred())
Expect(cfg).NotTo(BeNil())
By("setup manager")
mgr, err := manager.New(cfg, manager.Options{
MetricsBindAddress: "0",
})
Expect(err).NotTo(HaveOccurred())
Expect(extensionsv1alpha1.AddToScheme(mgr.GetScheme())).To(Succeed())
Expect(alicloudinstall.AddToScheme(mgr.GetScheme())).To(Succeed())
Expect(bastionctrl.AddToManager(ctx, mgr)).To(Succeed())
var mgrContext context.Context
mgrContext, mgrCancel = context.WithCancel(ctx)
By("start manager")
go func() {
err := mgr.Start(mgrContext)
Expect(err).NotTo(HaveOccurred())
}()
c = mgr.GetClient()
Expect(c).NotTo(BeNil())
extensionscluster, controllercluster = createClusters(name)
secret = &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: v1beta1constants.SecretNameCloudProvider,
Namespace: name,
},
Data: map[string][]byte{
alicloud.AccessKeyID: []byte(*accessKeyID),
alicloud.AccessKeySecret: []byte(*accessKeySecret),
},
}
clientFactory = alicloudclient.NewClientFactory()
})
var _ = AfterSuite(func() {
defer func() {
By("stopping manager")
mgrCancel()
}()
By("running cleanup actions")
framework.RunCleanupActions()
By("stopping test environment")
Expect(testEnv.Stop()).To(Succeed())
alicloud.InternalChartsPath = internalChartsPath
})
var _ = Describe("Bastion tests", func() {
It("should successfully create and delete", func() {
By("setup Infrastructure ")
identifiers := prepareVPCandShootSecurityGroup(ctx, clientFactory, name, vpcName, *region, vpcCIDR, natGatewayCIDR)
framework.AddCleanupAction(func() {
cleanupVPC(ctx, clientFactory, identifiers)
})
By("create namespace for test execution")
worker := createWorker(name, *identifiers.vpcID, *identifiers.vswitchID, *identifiers.zone, imageID, *identifiers.securityGroupIDs)
setupEnvironmentObjects(ctx, c, namespace(name), secret, extensionscluster, worker)
framework.AddCleanupAction(func() {
teardownShootEnvironment(ctx, c, namespace(name), secret, extensionscluster, worker)
})
bastion, options = createBastion(controllercluster, name)
By("setup bastion")
err := c.Create(ctx, bastion)
Expect(err).NotTo(HaveOccurred())
framework.AddCleanupAction(func() {
teardownBastion(ctx, log, c, bastion)
By("verify bastion deletion")
verifyDeletion(clientFactory, options)
})
By("wait until bastion is reconciled")
Expect(extensions.WaitUntilExtensionObjectReady(
ctx,
c,
log,
bastion,
extensionsv1alpha1.BastionResource,
60*time.Second,
60*time.Second,
10*time.Minute,
nil,
)).To(Succeed())
time.Sleep(60 * time.Second)
verifyPort22IsOpen(ctx, c, bastion)
verifyPort42IsClosed(ctx, c, bastion)
By("verify cloud resources")
verifyCreation(clientFactory, options)
})
})
func randomString() (string, error) {
suffix, err := gardenerutils.GenerateRandomStringFromCharset(5, "0123456789abcdefghijklmnopqrstuvwxyz")
if err != nil {
return "", err
}
return suffix, nil
}
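// getMyPublicIPWithMask looks up the caller's public IPv4 address via
// api.ipify.org and returns the CIDR of its enclosing /24 network.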
func getMyPublicIPWithMask() (string, error) {
resp, err := http.Get("https://api.ipify.org")
if err != nil {
return "", err
}
defer func() {
err := resp.Body.Close()
if err != nil {
Expect(err).NotTo(HaveOccurred())
}
}()
body, err := io.ReadAll(resp.Body)
if err != nil {
return "", err
}
ip := net.ParseIP(string(body))
var mask net.IPMask
	if ip.To4() != nil {
		mask = net.CIDRMask(24, 32) // use a /24 net for IPv4
	} else {
		return "", fmt.Errorf("not a valid IPv4 address")
	}
cidr := net.IPNet{
IP: ip,
Mask: mask,
}
full := cidr.String()
_, ipnet, _ := net.ParseCIDR(full)
return ipnet.String(), nil
}
func verifyPort22IsOpen(ctx context.Context, c client.Client, bastion *extensionsv1alpha1.Bastion) {
By("check connection to port 22 open should not error")
bastionUpdated := &extensionsv1alpha1.Bastion{}
Expect(c.Get(ctx, client.ObjectKey{Namespace: bastion.Namespace, Name: bastion.Name}, bastionUpdated)).To(Succeed())
ipAddress := bastionUpdated.Status.Ingress.IP
address := net.JoinHostPort(ipAddress, "22")
conn, err := net.DialTimeout("tcp", address, 60*time.Second)
Expect(err).ShouldNot(HaveOccurred())
Expect(conn).NotTo(BeNil())
}
func verifyPort42IsClosed(ctx context.Context, c client.Client, bastion *extensionsv1alpha1.Bastion) {
By("check connection to port 42 which should fail")
bastionUpdated := &extensionsv1alpha1.Bastion{}
Expect(c.Get(ctx, client.ObjectKey{Namespace: bastion.Namespace, Name: bastion.Name}, bastionUpdated)).To(Succeed())
ipAddress := bastionUpdated.Status.Ingress.IP
address := net.JoinHostPort(ipAddress, "42")
conn, err := net.DialTimeout("tcp", address, 3*time.Second)
Expect(err).Should(HaveOccurred())
Expect(conn).To(BeNil())
}
func createClusters(name string) (*extensionsv1alpha1.Cluster, *controller.Cluster) {
infrastructureConfig := createInfrastructureConfig()
infrastructureConfigJSON, _ := json.Marshal(&infrastructureConfig)
shoot := createShoot(infrastructureConfigJSON)
shootJSON, _ := json.Marshal(shoot)
cloudProfile := createCloudProfile()
cloudProfileJSON, _ := json.Marshal(cloudProfile)
extensionscluster := &extensionsv1alpha1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
		Spec: extensionsv1alpha1.ClusterSpec{
			CloudProfile: runtime.RawExtension{
				Object: cloudProfile,
				Raw:    cloudProfileJSON,
			},
			Seed: runtime.RawExtension{
				Raw: []byte("{}"),
			},
			Shoot: runtime.RawExtension{
				Object: shoot,
				Raw:    shootJSON,
			},
		},
	}
	cluster := &controller.Cluster{
		ObjectMeta: metav1.ObjectMeta{Name: name},
```

```go
			Ingress: []extensionsv1alpha1.BastionIngressPolicy{
{IPBlock: networkingv1.IPBlock{
CIDR: myPublicIP,
}},
},
},
}
options, err := bastionctrl.DetermineOptions(bastion, cluster)
Expect(err).NotTo(HaveOccurred())
return bastion, options
}
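// prepareVPCandShootSecurityGroup creates the VPC, VSwitch, NAT gateway and
// shoot security group directly via the Alicloud APIs, polling each resource
// until it reports the available status.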
func prepareVPCandShootSecurityGroup(ctx context.Context, clientFactory alicloudclient.ClientFactory, name, vpcName, region, vpcCIDR, natGatewayCIDR string) infrastructureIdentifiers {
vpcClient, err := clientFactory.NewVPCClient(region, *accessKeyID, *accessKeySecret)
Expect(err).NotTo(HaveOccurred())
// vpc
createVpcReq := vpc.CreateCreateVpcRequest()
createVpcReq.CidrBlock = vpcCIDR
createVpcReq.RegionId = region
createVpcReq.VpcName = vpcName
createVpcReq.Description = name
createVPCsResp, err := vpcClient.CreateVpc(createVpcReq)
Expect(err).NotTo(HaveOccurred())
describeVpcsReq := vpc.CreateDescribeVpcsRequest()
describeVpcsReq.VpcId = createVPCsResp.VpcId
err = wait.PollUntil(5*time.Second, func() (bool, error) {
describeVpcsResp, err := vpcClient.DescribeVpcs(describeVpcsReq)
if err != nil {
return false, err
}
if describeVpcsResp.Vpcs.Vpc[0].Status != availableStatus {
return false, nil
}
return true, nil
}, ctx.Done())
Expect(err).NotTo(HaveOccurred())
// vswitch
createVSwitchsReq := vpc.CreateCreateVSwitchRequest()
createVSwitchsReq.VpcId = createVPCsResp.VpcId
createVSwitchsReq.RegionId = region
createVSwitchsReq.CidrBlock = natGatewayCIDR
createVSwitchsReq.ZoneId = region + "a"
createVSwitchsReq.Description = name
createVSwitchsResp, err := vpcClient.CreateVSwitch(createVSwitchsReq)
Expect(err).NotTo(HaveOccurred())
describeVSwitchesReq := vpc.CreateDescribeVSwitchesRequest()
describeVSwitchesReq.VSwitchId = createVSwitchsResp.VSwitchId
err = wait.PollUntil(5*time.Second, func() (bool, error) {
describeVSwitchesResp, err := vpcClient.DescribeVSwitches(describeVSwitchesReq)
if err != nil {
return false, err
}
if describeVSwitchesResp.VSwitches.VSwitch[0].Status != availableStatus {
return false, nil
}
return true, nil
}, ctx.Done())
Expect(err).NotTo(HaveOccurred())
// natgateway
createNatGatewayReq := vpc.CreateCreateNatGatewayRequest()
createNatGatewayReq.VpcId = createVPCsResp.VpcId
createNatGatewayReq.RegionId = region
createNatGatewayReq.VSwitchId = createVSwitchsResp.VSwitchId
createNatGatewayReq.NatType = natGatewayType
createNatGatewayReq.Description = name
createNatGatewayResp, err := vpcClient.CreateNatGateway(createNatGatewayReq)
Expect(err).NotTo(HaveOccurred())
describeNatGatewaysReq := vpc.CreateDescribeNatGatewaysRequest()
describeNatGatewaysReq.NatGatewayId = createNatGatewayResp.NatGatewayId
err = wait.PollUntil(5*time.Second, func() (bool, error) {
describeNatGatewaysResp, err := vpcClient.DescribeNatGateways(describeNatGatewaysReq)
if err != nil {
return false, err
}
if describeNatGatewaysResp.NatGateways.NatGateway[0].Status != availableStatus {
return false, nil
}
return true, nil
}, ctx.Done())
Expect(err).NotTo(HaveOccurred())
// shoot security group
ecsClient, err := clientFactory.NewECSClient(region, *accessKeyID, *accessKeySecret)
Expect(err).NotTo(HaveOccurred())
createSecurityGroupsResp, err := ecsClient.CreateSecurityGroups(createVPCsResp.VpcId, name+securityGroupSuffix)
Expect(err).NotTo(HaveOccurred())
return infrastructureIdentifiers{
vpcID: pointer.String(createVPCsResp.VpcId),
vswitchID: pointer.String(createVSwitchsResp.VSwitchId),
natGatewayID: pointer.String(createNatGatewayResp.NatGatewayId),
securityGroupIDs: pointer.String(createSecurityGroupsResp.SecurityGroupId),
zone: pointer.String(createVSwitchsReq.ZoneId),
}
}
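// cleanupVPC tears the test infrastructure down in reverse order (NAT
// gateway, security group, VSwitch, VPC), polling until each resource is gone.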
func cleanupVPC(ctx context.Context, clientFactory alicloudclient.ClientFactory, identifiers infrastructureIdentifiers) {
vpcClient, err := clientFactory.NewVPCClient(*region, *accessKeyID, *accessKeySecret)
Expect(err).NotTo(HaveOccurred())
ecsClient, err := clientFactory.NewECSClient(*region, *accessKeyID, *accessKeySecret)
Expect(err).NotTo(HaveOccurred())
// cleanup - natGateWay
deleteNatGatewayReq := vpc.CreateDeleteNatGatewayRequest()
deleteNatGatewayReq.NatGatewayId = *identifiers.natGatewayID
_, err = vpcClient.DeleteNatGateway(deleteNatGatewayReq)
Expect(err).NotTo(HaveOccurred())
describeNatGatewaysReq := vpc.CreateDescribeNatGatewaysRequest()
describeNatGatewaysReq.NatGatewayId = *identifiers.natGatewayID
err = wait.PollUntil(5*time.Second, func() (bool, error) {
describeNatGatewaysResp, err := vpcClient.DescribeNatGateways(describeNatGatewaysReq)
if err != nil {
return false, err
}
if len(describeNatGatewaysResp.NatGateways.NatGateway) == 0 {
return true, nil
}
return false, nil
}, ctx.Done())
Expect(err).NotTo(HaveOccurred())
err = ecsClient.DeleteSecurityGroups(*identifiers.securityGroupIDs)
Expect(err).NotTo(HaveOccurred())
// cleanup - vswitch
deleteVSwitchReq := vpc.CreateDeleteVSwitchRequest()
deleteVSwitchReq.VSwitchId = *identifiers.vswitchID
_, err = vpcClient.DeleteVSwitch(deleteVSwitchReq)
Expect(err).NotTo(HaveOccurred())
describeVSwitchesReq := vpc.CreateDescribeVSwitchesRequest()
describeVSwitchesReq.VSwitchId = *identifiers.vswitchID
err = wait.PollUntil(5*time.Second, func() (bool, error) {
describeVSwitchesResp, err := vpcClient.DescribeVSwitches(describeVSwitchesReq)
if err != nil {
return false, err
}
if len(describeVSwitchesResp.VSwitches.VSwitch) == 0 {
return true, nil
}
return false, nil
}, ctx.Done())
Expect(err).NotTo(HaveOccurred())
// cleanup - vpc
deleteVpcReq := vpc.CreateDeleteVpcRequest()
deleteVpcReq.VpcId = *identifiers.vpcID
_, err = vpcClient.DeleteVpc(deleteVpcReq)
Expect(err).NotTo(HaveOccurred())
describeVpcsReq := vpc.CreateDescribeVpcsRequest()
describeVpcsReq.VpcId = *identifiers.vpcID
err = wait.PollUntil(5*time.Second, func() (bool, error) {
describeVpcsResp, err := vpcClient.DescribeVpcs(describeVpcsReq)
if err != nil {
return false, err
}
if len(describeVpcsResp.Vpcs.Vpc) == 0 {
return true, nil
}
return false, nil
}, ctx.Done())
Expect(err).NotTo(HaveOccurred())
}
func namespace(name string) *corev1.Namespace {
return &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
}
}
func setupEnvironmentObjects(ctx context.Context, c client.Client, namespace *corev1.Namespace, secret *corev1.Secret, cluster *extensionsv1alpha1.Cluster, worker *extensionsv1alpha1.Worker) {
Expect(c.Create(ctx, namespace)).To(Succeed())
Expect(c.Create(ctx, cluster)).To(Succeed())
Expect(c.Create(ctx, secret)).To(Succeed())
Expect(c.Create(ctx, worker)).To(Succeed())
}
func teardownShootEnvironment(ctx context.Context, c client.Client, namespace *corev1.Namespace, secret *corev1.Secret, cluster *extensionsv1alpha1.Cluster, worker *extensionsv1alpha1.Worker) {
Expect(client.IgnoreNotFound(c.Delete(ctx, worker))).To(Succeed())
Expect(client.IgnoreNotFound(c.Delete(ctx, secret))).To(Succeed())
Expect(client.IgnoreNotFound(c.Delete(ctx, cluster))).To(Succeed())
Expect(client.IgnoreNotFound(c.Delete(ctx, namespace))).To(Succeed())
}
func teardownBastion(ctx context.Context, logger logr.Logger, c client.Client, bastion *extensionsv1alpha1.Bastion) {
By("delete bastion")
Expect(client.IgnoreNotFound(c.Delete(ctx, bastion))).To(Succeed())
By("wait until bastion is deleted")
err := extensions.WaitUntilExtensionObjectDeleted(ctx, c, logger, bastion, extensionsv1alpha1.BastionResource, 20*time.Second, 15*time.Minute)
Expect(err).NotTo(HaveOccurred())
}
```
**WarningAlarm.js**

```js
V;
loadHistory();
}
});*/
});
function setPumpName(name, id) {
$(".pumpName input").val(name);
$(".pumpName input").attr("data-id", id);
}
function setJzName(name, id) {
$(".machName input").val(name);
$(".machName input").attr("data-id", id);
}
function parseUrl() {
var url = window.location.href;
// alert(url);
var i = url.indexOf('?');
if (i == -1) { return };
var queryStr = url.substr(i + 1);
var arr1 = queryStr.split('&');
var arr2 = {};
for (var j in arr1) {
var tar = arr1[j].split('=');
arr2[tar[0]] = tar[1];
};
return arr2;
}
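// Example (hypothetical URL): for "page.html?pumpID=3&pumpName=A",
// parseUrl() returns { pumpID: "3", pumpName: "A" }. Values are taken
// verbatim from the query string and are not URL-decoded.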
laydate({
elem: "#startTime",
format: "YYYY-MM-DD hh:mm:ss",
istime: true,
istoday: false,
issure: true,
});
laydate({
elem: "#endTime",
format: "YYYY-MM-DD hh:mm:ss",
istime: true,
istoday: false,
issure: true,
});
// date formatting (YYYY-MM-DD)
function formatDate() {
var d = new Date();
var dM = (d.getMonth() + 1).toString().replace(/^(\d)$/, '0$1');
var dD = d.getDate().toString().replace(/^(\d)$/, '0$1');
var dateTemp = d.getFullYear() + "-" + (dM) + "-" + dD;
return dateTemp;
}
function dealPage(total) {
totalPage = Math.ceil(total / pageSize);
}
function dealRealAlarm(realAlarmData) {
$(".ul_alarmList").emp
|
$.ajax({
url: '/V_YCJK/SearchAlarm',
data: {
"pumpID": pumpId,
"pumpJZID": jzId,
"pageIndex": raPageIndex,
"pageSize": 5, //ๆ็ดขๆฏpageSizeๅผๆ ๆ
"StartDate": '2016-01-01',//startDate
"EndDate": '2017-05-09'//endDate
},
dataType: 'JSON',
beforeSend: loadingFunction,
success: function (data) {
console.log(typeof data);
console.log(data);
if (data.obj.data.length > 0) {
dealRealAlarm(data.obj.data);
}
scrollOnoff = true;
},
complete: loadingMiss,
error: function (data) {
console.log('Error: ' + data.responseText);
}
});
}
function loadingFunction() {
var $div = $('<div class="loading" style="position:absolute;left: 50%;top:50%;margin-left: -150px;margin-top: -70px;width: 300px;color:black;text-align:center;line-height: 140px;height: 140px;background: rgba(255,255,0);color: white;border-radius: 8px;"><img style="position: relative;top: 56%;left: 15%;" src="/res/YCJK/img/load1.gif" alt="loading....">ๆญฃๅจๅ ่ฝฝไธญ...</div>');
$('body').append($div);
};
function loadingMiss() {
$('.loading').remove();
};
// real-time alarm list scroll handler (listItemScroll)
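// Infinite scroll: when the bottom of the scrolled content comes within
// 10px of the viewport bottom, the next-page fetch (realAlarmScrollGet)
// would fire; the actual call is currently commented out.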
function realAlarmListScroll() {
$('.alarmListScroll_wrap ').mCustomScrollbar({
scrollbarPosition: "inside",
theme: "minimal-dark",
callbacks: {
whileScrolling: function () {
var $that = this.mcs.left;
// console.log($('#mCSB_2_container').height());
// console.log($('.alarmListScroll_wrap #mCSB_2_container').position().top);
var jian = $('#mCSB_2_container').height() + $('.alarmListScroll_wrap #mCSB_2_container').position().top;
// console.log($('#mCSB_1_container').height() + $('.listBox #mCSB_1_container').position().top);
// console.log($('.listBox').height());
if (jian - 10 <= $('.alarmListScroll_wrap').height()) {
console.log('fetch');
if (scrollOnoff) {
// realAlarmScrollGet();
}
}
// console.log('-----------------------------');
}
}
});
}
//leftListDataScrollGet
function realAlarmScrollGet() {
scrollOnoff = false;
raPageIndex++;
alert(raPageIndex);
loadRealAlarm();
// scrollOnoff = true;
}
// history alarms
function getHistoryAlarm() {
$(".ul_alarmList").on("click", "li", function () {
$(this).addClass("active").siblings().removeClass("active");
jzId = $(this).attr("data-id");
fKey = $(this).attr("data-fKey");
startDate = '';
endDate = '';
loadHistory();
});
}
function loadHistory() {
//SearchAlarmHistory
$.ajax({
url: '/V_YCJK/SearchAlarmHistory',
data: {
"pumpID": pumpId,
"pumpJZID": jzId,
"pageIndex": pageIndex,
"pageSize": pageSize,
"StartDate": startDate,
"EndDate": endDate,
"FKey": fKey
},
dataType: 'JSON',
beforeSend: loadingFunction,
success: function (data) {
console.log(typeof data);
console.log(data);
console.log('history alarms');
console.log(data.obj.data);
historyTableBody(data.obj.data);
dealPage(data.obj.total)
tableClone();
checkPageType();
layout();
// dealWidth();
},
complete: loadingMiss,
error: function (data) {
console.log('Error: ' + data.responseText);
}
});
}
function historyTableBody(historyData) {
$(".table2 tbody").empty();
var trStr = '';
for (var i = 0; i < historyData.length; i++) {
var tempStr = '<tr><td></td>\
<td data-filed="PName" class="PName">' + historyData[i].PName + '</td>\
<td data-field="PumpJZName" class="PumpJZName">' + historyData[i].PumpJZName + '</td>\
<td data-field="FSetMsg">' + historyData[i].FSetMsg + '</td>\
<td data-field="FAlarmTime">' + historyData[i].FAlarmTime.replace('T', ' ') + '</td>\
<td data-field="FEndAlarmTime">' + historyData[i].FEndAlarmTime.replace('T', ' ') + '</td>\
<td data-field="CXTime">' + historyData[i].CXTime + '</td>\
<td data-field="TypeName">' + historyData[i].TypeName + '</td></tr>';
trStr += tempStr;
}
$(".table2 tbody").append(trStr);
}
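// Note: this second declaration of dealPage overrides the shorter one
// earlier in the file; with two function declarations of the same name,
// the later one wins.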
function dealPage(total) {
totalPage = Math.ceil(total / pageSize);
$("#totalNum").html(total);
$("#totalPage").html(totalPage);
if (total == 0) {
$("#currentPage").html(0);
} else {
$("#currentPage").html(pageIndex + 1);
}
}
// select pump station and unit
function selectPumpAndJz() {
$(".pumpName .selectBtn").click(function () {
//alert('select pump station');
var index = layer.open({
type: 2,
anim: 3,
shade: .6,
title: ['Pump station list', 'text-align: center;color: #909090'],
shadeClose: true,
area: ['800px', '620px'],
content: '/YCJK/Window/pumpWindow?pumpID=' + urlJson["pumpID"] + '&pumpName=' + urlJson["pumpName"],
success: function () {
// alert('OK');
}
});
	});
```
**host_segfault.rs**

```rust
use std::env;
use std::future::Future;
use std::io::{self, Write};
use std::pin::Pin;
use std::process::{Command, ExitStatus};
use std::task::{Context, Poll, RawWaker, RawWakerVTable, Waker};
use wasmtime::*;
const VAR_NAME: &str = "__TEST_TO_RUN";
const CONFIRM: &str = "well at least we ran up to the crash";
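// segfault deliberately writes through a near-null pointer (0x4) after
// printing CONFIRM, so the parent process can verify the child got this
// far before crashing with SIGSEGV.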
fn segfault() -> ! {
unsafe {
println!("{}", CONFIRM);
io::stdout().flush().unwrap();
*(0x4 as *mut i32) = 3;
unreachable!()
}
}
fn allocate_stack_space() -> ! {
let _a = [0u8; 1024];
for _ in 0..100000 {
allocate_stack_space();
}
unreachable!()
}
fn overrun_the_stack() -> ! {
println!("{}", CONFIRM);
io::stdout().flush().unwrap();
allocate_stack_space();
}
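// run_future is a minimal busy-poll executor: it polls the future in a loop
// with a no-op waker until it resolves, avoiding a dependency on a real
// async runtime.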
fn run_future<F: Future>(future: F) -> F::Output {
let mut f = Pin::from(Box::new(future));
let waker = dummy_waker();
let mut cx = Context::from_waker(&waker);
loop {
match f.as_mut().poll(&mut cx) {
Poll::Ready(val) => break val,
Poll::Pending => {}
}
}
}
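// dummy_waker builds a Waker whose vtable functions do nothing; the bogus
// pointer value 5 is only used as a sanity check in each callback.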
fn dummy_waker() -> Waker {
return unsafe { Waker::from_raw(clone(5 as *const _)) };
unsafe fn clone(ptr: *const ()) -> RawWaker {
assert_eq!(ptr as usize, 5);
const VTABLE: RawWakerVTable = RawWakerVTable::new(clone, wake, wake_by_ref, drop);
RawWaker::new(ptr, &VTABLE)
}
unsafe fn wake(ptr: *const ()) {
assert_eq!(ptr as usize, 5);
}
unsafe fn wake_by_ref(ptr: *const ()) {
assert_eq!(ptr as usize, 5);
}
unsafe fn drop(ptr: *const ()) {
assert_eq!(ptr as usize, 5);
}
}
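// Each test must crash the process, so main doubles as both the driver and
// the test body: without __TEST_TO_RUN set it spawns itself once per test
// (run_test), and with it set it runs the single named test to its crash.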
fn main() {
if cfg!(miri) {
return;
}
// Skip this tests if it looks like we're in a cross-compiled situation and
// we're emulating this test for a different platform. In that scenario
// emulators (like QEMU) tend to not report signals the same way and such.
if std::env::vars()
.filter(|(k, _v)| k.starts_with("CARGO_TARGET") && k.ends_with("RUNNER"))
.count()
> 0
{
return;
}
let tests: &[(&str, fn(), bool)] = &[
("normal segfault", || segfault(), false),
(
"make instance then segfault",
|| {
let engine = Engine::default();
let mut store = Store::new(&engine, ());
let module = Module::new(&engine, "(module)").unwrap();
let _instance = Instance::new(&mut store, &module, &[]).unwrap();
segfault();
},
false,
),
(
"make instance then overrun the stack",
|| {
let engine = Engine::default();
let mut store = Store::new(&engine, ());
let module = Module::new(&engine, "(module)").unwrap();
let _instance = Instance::new(&mut store, &module, &[]).unwrap();
overrun_the_stack();
},
true,
),
(
"segfault in a host function",
|| {
let engine = Engine::default();
let mut store = Store::new(&engine, ());
let module = Module::new(&engine, r#"(import "" "" (func)) (start 0)"#).unwrap();
let segfault = Func::wrap(&mut store, || segfault());
Instance::new(&mut store, &module, &[segfault.into()]).unwrap();
unreachable!();
},
false,
),
(
"hit async stack guard page",
|| {
let mut config = Config::default();
config.async_support(true);
let engine = Engine::new(&config).unwrap();
let mut store = Store::new(&engine, ());
let f = Func::wrap0_async(&mut store, |_| {
Box::new(async {
overrun_the_stack();
})
});
run_future(f.call_async(&mut store, &[], &mut [])).unwrap();
unreachable!();
},
true,
),
(
"overrun 8k with misconfigured host",
|| overrun_with_big_module(8 << 10),
true,
),
(
"overrun 32k with misconfigured host",
|| overrun_with_big_module(32 << 10),
true,
),
#[cfg(not(any(target_arch = "riscv64")))]
// Due to `InstanceAllocationStrategy::pooling()` trying to allocate more than 6000G of memory address space.
// https://gitlab.com/qemu-project/qemu/-/issues/1214
// https://gitlab.com/qemu-project/qemu/-/issues/290
(
"hit async stack guard page with pooling allocator",
|| {
let mut config = Config::default();
config.async_support(true);
config.allocation_strategy(InstanceAllocationStrategy::pooling());
let engine = Engine::new(&config).unwrap();
let mut store = Store::new(&engine, ());
let f = Func::wrap0_async(&mut store, |_| {
Box::new(async {
overrun_the_stack();
})
});
run_future(f.call_async(&mut store, &[], &mut [])).unwrap();
unreachable!();
},
true,
),
];
match env::var(VAR_NAME) {
Ok(s) => {
let test = tests
.iter()
.find(|p| p.0 == s)
.expect("failed to find test")
.1;
test();
}
Err(_) => {
for (name, _test, stack_overflow) in tests {
println!("running {name}");
run_test(name, *stack_overflow);
}
}
}
}
fn run_test(name: &str, stack_overflow: bool) {
let me = env::current_exe().unwrap();
let mut cmd = Command::new(me);
cmd.env(VAR_NAME, name);
let output = cmd.output().expect("failed to spawn subprocess");
let stdout = String::from_utf8_lossy(&output.stdout);
let stderr = String::from_utf8_lossy(&output.stderr);
let mut desc = format!("got status: {}", output.status);
if !stdout.trim().is_empty() {
desc.push_str("\nstdout: ----\n");
desc.push_str(" ");
desc.push_str(&stdout.replace("\n", "\n "));
}
if !stderr.trim().is_empty() {
desc.push_str("\nstderr: ----\n");
desc.push_str(" ");
|
desc.push_str(&stderr.replace("\n", "\n "));
}
if stack_overflow {
if is_stack_overflow(&output.status, &stderr) {
assert!(
stdout.trim().ends_with(CONFIRM),
"failed to find confirmation in test `{}`\n{}",
name,
desc
);
} else {
panic!("\n\nexpected a stack overflow on `{}`\n{}\n\n", name, desc);
}
} else {
if is_segfault(&output.status) {
assert!(
stdout.trim().ends_with(CONFIRM) && stderr.is_empty(),
"failed to find confirmation in test `{}`\n{}",
name,
desc
);
} else {
panic!("\n\nexpected a segfault on `{}`\n{}\n\n", name, desc);
}
}
}
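// Harness note (descriptive): run_test re-executes the current binary with
// __TEST_TO_RUN set, so the child process runs exactly one crashing test while
// the parent asserts on the child's exit status and captured output.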
#[cfg(unix)]
fn is_segfault(status: &ExitStatus) -> bool {
use std::os::unix::prelude::*;
match status.signal() {
Some(libc::SIGSEGV) => true,
_ => false,
}
}
#[cfg(unix)]
fn is_stack_overflow(status: &ExitStatus, stderr: &str) -> bool {
use std::os::unix::prelude::*;
// The main thread might overflow or it might be from a fiber stack (SIGSEGV/SIGBUS)
stderr.contains("has overflowed its stack")
|| match status.signal() {
Some(libc::SIGSEGV) | Some(libc::SIGBUS) => true,
_ => false,
}
}
#[cfg(windows)]
fn is_segfault(status: &ExitStatus) -> bool {
match status.code().map(|s| s as u32) {
Some(0xc0000005) => true,
_ => false,
}
}
#[cfg(windows)]
fn is_stack_overflow(status: &ExitStatus, _stderr: &str) -> bool {
match status.code().map(|s| s as u32) {
Some(0xc00000fd) => true,
_ => false,
}
}
fn overrun_with_big_module(approx_stack: usize) {
// Each call to `$get` produces ten 8-byte values which need to be saved
// onto the stack, so divide `approx_stack` by 80 to get a rough number of
// calls needed to consume `approx_stack` bytes of stack.
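// The rest of this function is elided by the dataset split below; as a hedged
// sketch (the helper name is illustrative, not from the source), the
// arithmetic above amounts to:
fn approx_call_count(approx_stack: usize) -> usize {
    approx_stack / 80 // ten 8-byte values per call => ~80 bytes of stack
}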
|
random_line_split
|
builtins.go
|
.Sprintf("The gopherbot install directory is: %s", installPath))
msg = append(msg, fmt.Sprintf("My home directory ($GOPHER_HOME) is: %s", homePath))
if custom, ok := os.LookupEnv("GOPHER_CUSTOM_REPOSITORY"); ok {
msg = append(msg, fmt.Sprintf("My git repository is: %s", custom))
}
}
msg = append(msg, fmt.Sprintf("My software version is: Gopherbot %s, commit: %s", botVersion.Version, botVersion.Commit))
msg = append(msg, fmt.Sprintf("The administrators for this robot are: %s", admins))
adminContact := r.GetBotAttribute("contact")
if len(adminContact.Attribute) > 0 {
msg = append(msg, fmt.Sprintf("The administrative contact for this robot is: %s", adminContact))
}
r.MessageFormat(robot.Variable).SayThread(strings.Join(msg, "\n"))
}
if command == "help" || command == "help-all" {
tasks := r.tasks
var term, helpOutput string
hasKeyword := false
lineSeparator := "\n\n"
if len(args) == 1 && len(args[0]) > 0 {
hasKeyword = true
term = args[0]
Log(robot.Trace, "Help requested for term '%s'", term)
}
// Nothing we need will ever change for a worker.
w := getLockedWorker(r.tid)
w.Unlock()
helpLines := make([]string, 0, 14)
if command == "help" {
if !hasKeyword {
defaultHelpLines := interfaces.DefaultHelp()
if len(defaultHelpLines) == 0 {
defaultHelpLines = defaultHelp()
}
for _, line := range defaultHelpLines {
helpLines = append(helpLines, r.formatHelpLine(line))
}
}
}
want_specific := command == "help" || hasKeyword
for _, t := range tasks.t[1:] {
task, plugin, _ := getTask(t)
if plugin == nil {
continue
}
// If a keyword was supplied, give help for all matching commands along with
// their channels; without a keyword, show help for all commands available in
// the channel (see the formatting sketch after this function).
available, specific := w.pluginAvailable(task, hasKeyword, true)
if !available
|
if want_specific && !specific {
continue
}
Log(robot.Trace, "Checking help for plugin %s (term: %s)", task.name, term)
if !hasKeyword { // if you ask for help without a term, you just get help for whatever commands are available to you
for _, phelp := range plugin.Help {
for _, helptext := range phelp.Helptext {
if len(phelp.Keywords) > 0 && phelp.Keywords[0] == "*" {
// * signifies help that should be prepended
prepend := make([]string, 1, len(helpLines)+1)
prepend[0] = r.formatHelpLine(helptext)
helpLines = append(prepend, helpLines...)
} else {
helpLines = append(helpLines, r.formatHelpLine(helptext))
}
}
}
} else { // when there's a search term, give all help for that term, but add (channels: xxx) at the end
for _, phelp := range plugin.Help {
for _, keyword := range phelp.Keywords {
if term == keyword {
chantext := ""
if task.DirectOnly {
// Look: the right paren gets added below
chantext = " (direct message only"
} else {
if len(task.Channels) > tooManyChannels {
chantext += " (channels: (many) "
} else {
for _, pchan := range task.Channels {
if len(chantext) == 0 {
chantext += " (channels: " + pchan
} else {
chantext += ", " + pchan
}
}
}
}
if len(chantext) != 0 {
chantext += ")"
}
for _, helptext := range phelp.Helptext {
helpLines = append(helpLines, r.formatHelpLine(helptext)+chantext)
}
}
}
}
}
}
if len(helpLines) == 0 {
// Unless builtins are disabled or reconfigured, 'ping' is available in all channels
if r.Incoming.ThreadedMessage {
r.Reply("Sorry, I didn't find any commands matching your keyword")
} else {
r.SayThread("Sorry, I didn't find any commands matching your keyword")
}
} else {
if hasKeyword {
helpOutput = "Command(s) matching keyword: " + term + "\n" + strings.Join(helpLines, lineSeparator)
} else {
helpOutput = "Command(s) available in this channel:\n" + strings.Join(helpLines, lineSeparator)
}
if r.Incoming.ThreadedMessage {
r.Reply(helpOutput)
} else {
r.SayThread(helpOutput)
}
}
}
return
}
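// Formatting sketch (illustrative; tooManyChannels = 4 is an assumption, not
// from the source): the channel-suffix logic in the help loop above yields
// strings like:
//
//	task.DirectOnly                 -> " (direct message only)"
//	Channels = ["general"]          -> " (channels: general)"
//	Channels = ["dev", "ops"]       -> " (channels: dev, ops)"
//	len(Channels) > tooManyChannels -> " (channels: (many) )"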
func dmadmin(m robot.Robot, command string, args ...string) (retval robot.TaskRetVal) {
r := m.(Robot)
if command == "init" {
return // ignore init
}
switch command {
case "dumprobot":
if r.Protocol != robot.Terminal && r.Protocol != robot.Test {
r.Say("This command is only valid with the 'terminal' connector")
return
}
confLock.RLock()
c, _ := yaml.Marshal(config)
confLock.RUnlock()
r.Fixed().Say("Here's how I've been configured, irrespective of interactive changes:\n%s", c)
case "dumpplugdefault":
if plug, ok := pluginHandlers[args[0]]; ok {
r.Fixed().Say("Here's the default configuration for \"%s\":\n%s", args[0], plug.DefaultConfig)
} else { // look for an external plugin
found := false
for _, t := range r.tasks.t[1:] {
task, plugin, _ := getTask(t)
if args[0] == task.name {
if plugin == nil {
r.Say("No default configuration available for task type 'job'")
return
}
if plugin.taskType == taskExternal {
found = true
if cfg, err := getExtDefCfg(plugin.Task); err == nil {
r.Fixed().Say("Here's the default configuration for \"%s\":\n%s", args[0], *cfg)
} else {
r.Say("I had a problem looking that up - somebody should check my logs")
}
}
}
}
if !found {
r.Say("Didn't find a plugin named " + args[0])
}
}
case "dumpplugin":
if r.Protocol != robot.Terminal && r.Protocol != robot.Test {
r.Say("This command is only valid with the 'terminal' connector")
return
}
found := false
for _, t := range r.tasks.t[1:] {
task, plugin, _ := getTask(t)
if args[0] == task.name {
if plugin == nil {
r.Say("Task '%s' is a job, not a plugin", task.name)
return
}
found = true
c, _ := yaml.Marshal(plugin)
r.Fixed().Say("%s", c)
}
}
if !found {
r.Say("Didn't find a plugin named " + args[0])
}
case "listplugins":
joiner := ", "
message := "Here are the plugins I have configured:\n%s"
wantDisabled := false
if len(args[0]) > 0 {
wantDisabled = true
joiner = "\n"
message = "Here's a list of all disabled plugins:\n%s"
}
plist := make([]string, 0, len(r.tasks.t))
for _, t := range r.tasks.t[1:] {
task, plugin, _ := getTask(t)
if plugin == nil {
continue
}
ptext := task.name
if wantDisabled {
if task.Disabled {
ptext += "; reason: " + task.reason
plist = append(plist, ptext)
}
} else {
if task.Disabled {
ptext += " (disabled)"
}
plist = append(plist, ptext)
}
}
if len(plist) > 0 {
r.Say(message, strings.Join(plist, joiner))
} else { // note because of builtin plugins, plist is ALWAYS > 0 if disabled wasn't specified
r.Say("There are no disabled plugins")
}
}
return
}
var byebye = []string{
"Sayonara!",
"Adios",
"Hasta la vista!",
"Later gator!",
}
var rightback =
|
{
continue
}
|
conditional_block
|
server.go
|
err)
}
return c
}
// Server is a wrapper for docker's client.ContainerAPIClient which operates on a specific container.
type Server struct {
client.ContainerAPIClient
ContainerName, ContainerID string
}
// New creates a new craft server container and returns a docker client for it.
// It is the equivalent of the following docker command:
//
// docker run -d -e EULA=TRUE -p <HOST_PORT>:19132/udp <imageName>
//
// If mountVolume is true, a local volume will also be mounted and autoremove will be disabled.
func New(hostPort int, name string, mountVolume bool) (*Server, error) {
if hostPort == 0 {
hostPort = nextAvailablePort()
}
c, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
if err != nil {
logger.Error.Fatalf("Error: Failed to create new docker client: %s", err)
}
ctx := context.Background()
hostBinding := nat.PortBinding{
HostIP: anyIP,
HostPort: strconv.Itoa(hostPort),
}
// -p <HOST_PORT>:19132/udp
containerPort, err := nat.NewPort(protocol, strconv.Itoa(defaultPort))
if err != nil {
return nil, fmt.Errorf("creating container port: %s", err)
}
portBinding := nat.PortMap{containerPort: []nat.PortBinding{hostBinding}}
var mounts []mount.Mount
if mountVolume {
volName := fmt.Sprintf("%s-%s", volumeLabel, name)
vol, err := c.VolumeCreate(ctx, volume.VolumeCreateBody{
Name: volName,
})
if err != nil {
return nil, fmt.Errorf("creating vol '%s': %w", volName, err)
}
mounts = []mount.Mount{{
Type: mount.TypeVolume,
Source: vol.Name,
Target: files.Directory,
}}
}
// docker run -d -e EULA=TRUE
createResp, err := c.ContainerCreate(
ctx,
&container.Config{
Image: ImageName,
Env: []string{"EULA=TRUE"},
ExposedPorts: nat.PortSet{containerPort: struct{}{}},
AttachStdin: true, AttachStdout: true, AttachStderr: true,
Tty: true,
OpenStdin: true,
Labels: map[string]string{CraftLabel: ""},
},
&container.HostConfig{
PortBindings: portBinding,
AutoRemove: !mountVolume,
Mounts: mounts,
},
nil, nil, name,
)
if err != nil {
return nil, fmt.Errorf("creating docker container: %s", err)
}
err = c.ContainerStart(ctx, createResp.ID, docker.ContainerStartOptions{})
if err != nil {
return nil, fmt.Errorf("starting container: %s", err)
}
s := Server{
ContainerAPIClient: c,
ContainerName: name,
ContainerID: createResp.ID,
}
return &s, nil
}
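// Usage sketch (illustrative; the name, volume flag, and error handling are
// assumptions, not from the source):
//
//	srv, err := New(0, "my-craft-server", true) // 0 picks the next available host port
//	if err != nil {
//		// handle error
//	}
//	defer func() { _ = srv.Stop() }()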
// Get searches for a server with the given name (stopped or running) and checks that it has a label identifying it as
// a craft server. If no craft server with that name exists, an error of type NotFoundError is returned. If the server
// is found but is not a craft server, an error of type NotCraftError is returned.
func Get(cl client.ContainerAPIClient, containerName string) (*Server, error) {
id, err := containerID(containerName, cl)
if err != nil {
return nil, err
}
c := Server{
ContainerAPIClient: cl,
ContainerName: containerName,
ContainerID: id,
}
containerJSON, err := cl.ContainerInspect(context.Background(), c.ContainerID)
if err != nil {
return nil, fmt.Errorf("inspecting container: %s", err)
}
_, ok := containerJSON.Config.Labels[CraftLabel]
if !ok {
return nil, &NotCraftError{Name: containerName}
}
return &c, nil
}
// Stop executes a stop command, first in the server process cli and then on the container itself, stopping the
// server. The server must be saved separately to persist the world and settings.
func (s *Server) Stop() error {
if err := s.Command([]string{"stop"}); err != nil {
return fmt.Errorf("%s: running 'stop' command in server cli to stop server process: %s", s.ContainerName, err)
}
logger.Info.Printf("stopping %s\n", s.ContainerName)
timeout := time.Duration(stopTimeout)
err := s.ContainerStop(
context.Background(),
s.ContainerID,
&timeout,
)
if err != nil {
return fmt.Errorf("%s: stopping docker container: %s", s.ContainerName, err)
}
return nil
}
func (s *Server) IsRunning() bool {
inspect, err := s.ContainerInspect(context.Background(), s.ContainerID)
if err != nil {
logger.Error.Panic(err)
}
return inspect.State.Running
}
func (s *Server) HasVolume() bool {
inspect, err := s.ContainerInspect(context.Background(), s.ContainerID)
if err != nil {
logger.Error.Panic(err)
}
return len(inspect.Mounts) > 0
}
// RunBedrock runs the bedrock server process and waits for confirmation from the server that the process has started.
// The server should be join-able when this function returns.
func (s *Server) RunBedrock() error {
// Start the bedrock_server process
if err := s.Command(strings.Split(RunMCCommand, " ")); err != nil {
s.StopOrPanic()
return err
}
logs, err := s.LogReader(1)
if err != nil {
s.StopOrPanic()
return err
}
scanner := bufio.NewScanner(logs)
scanner.Split(bufio.ScanLines)
for scanner.Scan() {
if scanner.Text() == "[INFO] Server started." {
// Server has finished starting
return nil
}
}
return fmt.Errorf("reached end of log reader without finding the 'Server started' message")
}
// StopOrPanic stops the server's container. The server process may not be stopped gracefully, call Server.Stop() to
// safely stop the server. If an error occurs while attempting to stop the server the program exits with a panic.
func (s *Server) StopOrPanic() {
logger.Info.Printf("stopping %s\n", s.ContainerName)
timeout := time.Duration(stopTimeout)
err := s.ContainerStop(
context.Background(),
s.ContainerID,
&timeout,
)
if err != nil {
logger.Error.Panicf("while stopping %s another error occurred: %s\n", s.ContainerName, err)
}
}
// Command attaches to the container and runs the given arguments, joined by spaces, as a single cli command.
func (s *Server) Command(args []string) error {
conn, err := s.CommandWriter()
if err != nil {
return err
}
commandString := strings.Join(args, " ") + "\n"
_, err = conn.Write([]byte(commandString))
if err != nil {
return err
}
return nil
}
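// Example (illustrative; the cli command itself is an assumption):
//
//	if err := s.Command([]string{"list"}); err != nil {
//		// handle error
//	}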
// CommandWriter returns a net.Conn which streams to the container process stdin.
func (s *Server)
|
() (net.Conn, error) {
waiter, err := s.ContainerAttach(
context.Background(),
s.ContainerID,
docker.ContainerAttachOptions{
Stdin: true,
Stream: true,
},
)
if err != nil {
return nil, err
}
return waiter.Conn, err
}
// LogReader returns a buffer with the stdout and stderr from the running mc server process. New output will continually
// be sent to the buffer. A negative tail value will result in the 'all' value being used.
func (s *Server) LogReader(tail int) (*bufio.Reader, error) {
logs, err := s.ContainerLogs(
context.Background(),
s.ContainerID,
docker.ContainerLogsOptions{
ShowStdout: true,
ShowStderr: true,
Tail: strconv.Itoa(tail),
Follow: true,
},
)
if err != nil {
return nil, fmt.Errorf("getting docker container logs: %s", err)
}
return bufio.NewReader(logs), nil
}
// Port returns the port players use to connect to this server.
func (s *Server) Port() (int, error) {
cj, err := s.ContainerInspect(context.Background(), s.ContainerID)
if err != nil {
return 0, err
}
portBindings := cj.HostConfig.PortBindings
if len(portBindings) == 0 {
return 0, fmt.Errorf("no ports bound for container %s", s.ContainerName)
}
var port int
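// Note: Go map iteration order is unspecified, so with multiple bindings this
// returns whichever HostPort happens to be visited last.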
for _, v := range portBindings {
p, err := strconv.Atoi(v[0].HostPort)
if err != nil {
return 0, fmt.Errorf("error reading container port: %s", err)
}
port = p
}
if port == 0 {
panic("port is 0")
}
return port, nil
}
// All returns a client for each active server.
func All(c client.ContainerAPIClient
|
CommandWriter
|
identifier_name
|
server.go
|
err)
}
return c
}
// Server is a wrapper for docker's client.ContainerAPIClient which operates on a specific container.
type Server struct {
client.ContainerAPIClient
ContainerName, ContainerID string
}
// New creates a new craft server container and returns a docker client for it.
// It is the equivalent of the following docker command:
//
// docker run -d -e EULA=TRUE -p <HOST_PORT>:19132/udp <imageName>
//
// If mountVolume is true, a local volume will also be mounted and autoremove will be disabled.
func New(hostPort int, name string, mountVolume bool) (*Server, error) {
if hostPort == 0 {
hostPort = nextAvailablePort()
}
c, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
if err != nil {
logger.Error.Fatalf("Error: Failed to create new docker client: %s", err)
}
ctx := context.Background()
hostBinding := nat.PortBinding{
HostIP: anyIP,
HostPort: strconv.Itoa(hostPort),
}
// -p <HOST_PORT>:19132/udp
containerPort, err := nat.NewPort(protocol, strconv.Itoa(defaultPort))
if err != nil {
return nil, fmt.Errorf("creating container port: %s", err)
}
portBinding := nat.PortMap{containerPort: []nat.PortBinding{hostBinding}}
var mounts []mount.Mount
if mountVolume {
volName := fmt.Sprintf("%s-%s", volumeLabel, name)
vol, err := c.VolumeCreate(ctx, volume.VolumeCreateBody{
Name: volName,
})
if err != nil {
return nil, fmt.Errorf("creating vol '%s': %w", volName, err)
}
mounts = []mount.Mount{{
Type: mount.TypeVolume,
Source: vol.Name,
Target: files.Directory,
}}
}
// docker run -d -e EULA=TRUE
createResp, err := c.ContainerCreate(
ctx,
&container.Config{
Image: ImageName,
Env: []string{"EULA=TRUE"},
ExposedPorts: nat.PortSet{containerPort: struct{}{}},
AttachStdin: true, AttachStdout: true, AttachStderr: true,
Tty: true,
OpenStdin: true,
Labels: map[string]string{CraftLabel: ""},
},
&container.HostConfig{
PortBindings: portBinding,
AutoRemove: !mountVolume,
Mounts: mounts,
},
nil, nil, name,
)
if err != nil {
return nil, fmt.Errorf("creating docker container: %s", err)
}
err = c.ContainerStart(ctx, createResp.ID, docker.ContainerStartOptions{})
if err != nil {
return nil, fmt.Errorf("starting container: %s", err)
}
s := Server{
ContainerAPIClient: c,
ContainerName: name,
ContainerID: createResp.ID,
}
return &s, nil
}
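// Sketch (illustrative, not part of the original file): creating a server on
// the next free host port with a mounted volume, then waiting for the bedrock
// process to come up. "demo" is a hypothetical container name.
func exampleNewAndRun() error {
srv, err := New(0, "demo", true)
if err != nil {
return err
}
return srv.RunBedrock()
}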
// Get searches for a server with the given name (stopped or running) and checks that it has a label identifying it as
// a craft server. If no craft server with that name exists, an error of type NotFoundError is returned. If the server is found but
// is not a craft server, an error of type NotCraftError is returned.
func Get(cl client.ContainerAPIClient, containerName string) (*Server, error) {
id, err := containerID(containerName, cl)
if err != nil {
return nil, err
}
c := Server{
ContainerAPIClient: cl,
ContainerName: containerName,
ContainerID: id,
}
containerJSON, err := cl.ContainerInspect(context.Background(), c.ContainerID)
if err != nil {
return nil, fmt.Errorf("inspecting container: %s", err)
}
_, ok := containerJSON.Config.Labels[CraftLabel]
if !ok {
return nil, &NotCraftError{Name: containerName}
}
return &c, nil
}
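// Sketch (illustrative, not part of the original file): because Get returns
// the documented error types directly, callers can branch with a type
// assertion. "demo" is a hypothetical container name.
func exampleGet(cl client.ContainerAPIClient) {
if _, err := Get(cl, "demo"); err != nil {
if _, ok := err.(*NotCraftError); ok {
logger.Info.Printf("container exists but is not a craft server: %s\n", err)
}
}
}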
// Stop executes a stop command first in the server process cli then on the container itself, stopping the
// server. The server must be saved separately to persist the world and settings.
func (s *Server) Stop() error {
if err := s.Command([]string{"stop"}); err != nil {
return fmt.Errorf("%s: running 'stop' command in server cli to stop server process: %s", s.ContainerName, err)
}
logger.Info.Printf("stopping %s\n", s.ContainerName)
timeout := time.Duration(stopTimeout)
err := s.ContainerStop(
context.Background(),
s.ContainerID,
&timeout,
)
if err != nil {
return fmt.Errorf("%s: stopping docker container: %s", s.ContainerName, err)
}
return nil
}
func (s *Server) IsRunning() bool {
inspect, err := s.ContainerInspect(context.Background(), s.ContainerID)
if err != nil {
logger.Error.Panic(err)
}
return inspect.State.Running
}
func (s *Server) HasVolume() bool {
inspect, err := s.ContainerInspect(context.Background(), s.ContainerID)
if err != nil {
logger.Error.Panic(err)
}
return len(inspect.Mounts) > 0
}
// RunBedrock runs the bedrock server process and waits for confirmation from the server that the process has started.
// The server should be join-able when this function returns.
func (s *Server) RunBedrock() error {
// Run the bedrock_server process
if err := s.Command(strings.Split(RunMCCommand, " ")); err != nil {
s.StopOrPanic()
return err
}
logs, err := s.LogReader(1)
if err != nil {
s.StopOrPanic()
return err
}
scanner := bufio.NewScanner(logs)
scanner.Split(bufio.ScanLines)
for scanner.Scan() {
if scanner.Text() == "[INFO] Server started." {
// Server has finished starting
return nil
}
}
return fmt.Errorf("reached end of log reader without finding the 'Server started' message")
}
// StopOrPanic stops the server's container. The server process may not be stopped gracefully, call Server.Stop() to
// safely stop the server. If an error occurs while attempting to stop the server the program exits with a panic.
func (s *Server) StopOrPanic() {
logger.Info.Printf("stopping %s\n", s.ContainerName)
timeout := time.Duration(stopTimeout)
err := s.ContainerStop(
context.Background(),
s.ContainerID,
&timeout,
)
|
logger.Error.Panicf("while stopping %s another error occurred: %s\n", s.ContainerName, err)
}
}
// Command attaches to the container and runs the given arguments separated by spaces.
func (s *Server) Command(args []string) error {
conn, err := s.CommandWriter()
if err != nil {
return err
}
commandString := strings.Join(args, " ") + "\n"
_, err = conn.Write([]byte(commandString))
if err != nil {
return err
}
return nil
}
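// Sketch (illustrative, not part of the original file): Command joins the
// arguments with spaces and appends the newline itself, so an in-game
// broadcast is a single call.
func exampleSay(s *Server) error {
return s.Command([]string{"say", "hello world"})
}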
// CommandWriter returns a net.Conn which streams to the container process stdin.
func (s *Server) CommandWriter() (net.Conn, error) {
waiter, err := s.ContainerAttach(
context.Background(),
s.ContainerID,
docker.ContainerAttachOptions{
Stdin: true,
Stream: true,
},
)
if err != nil {
return nil, err
}
return waiter.Conn, err
}
// LogReader returns a buffer with the stdout and stderr from the running mc server process. New output will continually
// be sent to the buffer. A negative tail value will result in the 'all' value being used.
func (s *Server) LogReader(tail int) (*bufio.Reader, error) {
logs, err := s.ContainerLogs(
context.Background(),
s.ContainerID,
docker.ContainerLogsOptions{
ShowStdout: true,
ShowStderr: true,
Tail: strconv.Itoa(tail),
Follow: true,
},
)
if err != nil {
return nil, fmt.Errorf("getting docker container logs: %s", err)
}
return bufio.NewReader(logs), nil
}
// Port returns the port players use to connect to this server.
func (s *Server) Port() (int, error) {
cj, err := s.ContainerInspect(context.Background(), s.ContainerID)
if err != nil {
return 0, err
}
portBindings := cj.HostConfig.PortBindings
if len(portBindings) == 0 {
return 0, fmt.Errorf("no ports bound for container %s", s.ContainerName)
}
var port int
for _, v := range portBindings {
p, err := strconv.Atoi(v[0].HostPort)
if err != nil {
return 0, fmt.Errorf("error reading container port: %s", err)
}
port = p
}
if port == 0 {
panic("port is 0")
}
return port, nil
}
// All returns a client for each active server.
func All(c client.ContainerAPIClient)
|
if err != nil {
|
random_line_split
|
server.go
|
err)
}
return c
}
// Server is a wrapper for docker's client.ContainerAPIClient which operates on a specific container.
type Server struct {
client.ContainerAPIClient
ContainerName, ContainerID string
}
// New creates a new craft server container and returns a docker client for it.
// It is the equivalent of the following docker command:
//
// docker run -d -e EULA=TRUE -p <HOST_PORT>:19132/udp <imageName>
//
// If mountVolume is true, a local volume will also be mounted and autoremove will be disabled.
func New(hostPort int, name string, mountVolume bool) (*Server, error) {
if hostPort == 0 {
hostPort = nextAvailablePort()
}
c, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
if err != nil {
logger.Error.Fatalf("Error: Failed to create new docker client: %s", err)
}
ctx := context.Background()
hostBinding := nat.PortBinding{
HostIP: anyIP,
HostPort: strconv.Itoa(hostPort),
}
// -p <HOST_PORT>:19132/udp
containerPort, err := nat.NewPort(protocol, strconv.Itoa(defaultPort))
if err != nil {
return nil, fmt.Errorf("creating container port: %s", err)
}
portBinding := nat.PortMap{containerPort: []nat.PortBinding{hostBinding}}
var mounts []mount.Mount
if mountVolume {
volName := fmt.Sprintf("%s-%s", volumeLabel, name)
vol, err := c.VolumeCreate(ctx, volume.VolumeCreateBody{
Name: volName,
})
if err != nil {
return nil, fmt.Errorf("creating vol '%s': %w", volName, err)
}
mounts = []mount.Mount{{
Type: mount.TypeVolume,
Source: vol.Name,
Target: files.Directory,
}}
}
// docker run -d -e EULA=TRUE
createResp, err := c.ContainerCreate(
ctx,
&container.Config{
Image: ImageName,
Env: []string{"EULA=TRUE"},
ExposedPorts: nat.PortSet{containerPort: struct{}{}},
AttachStdin: true, AttachStdout: true, AttachStderr: true,
Tty: true,
OpenStdin: true,
Labels: map[string]string{CraftLabel: ""},
},
&container.HostConfig{
PortBindings: portBinding,
AutoRemove: !mountVolume,
Mounts: mounts,
},
nil, nil, name,
)
if err != nil {
return nil, fmt.Errorf("creating docker container: %s", err)
}
err = c.ContainerStart(ctx, createResp.ID, docker.ContainerStartOptions{})
if err != nil {
return nil, fmt.Errorf("starting container: %s", err)
}
s := Server{
ContainerAPIClient: c,
ContainerName: name,
ContainerID: createResp.ID,
}
return &s, nil
}
// Get searches for a server with the given name (stopped or running) and checks that it has a label identifying it as
// a craft server. If no craft server with that name exists, an error of type NotFoundError is returned. If the server is found but
// is not a craft server, an error of type NotCraftError is returned.
func Get(cl client.ContainerAPIClient, containerName string) (*Server, error)
|
return nil, &NotCraftError{Name: containerName}
}
return &c, nil
}
// Stop executes a stop command first in the server process cli then on the container itself, stopping the
// server. The server must be saved separately to persist the world and settings.
func (s *Server) Stop() error {
if err := s.Command([]string{"stop"}); err != nil {
return fmt.Errorf("%s: running 'stop' command in server cli to stop server process: %s", s.ContainerName, err)
}
logger.Info.Printf("stopping %s\n", s.ContainerName)
timeout := time.Duration(stopTimeout)
err := s.ContainerStop(
context.Background(),
s.ContainerID,
&timeout,
)
if err != nil {
return fmt.Errorf("%s: stopping docker container: %s", s.ContainerName, err)
}
return nil
}
func (s *Server) IsRunning() bool {
inspect, err := s.ContainerInspect(context.Background(), s.ContainerID)
if err != nil {
logger.Error.Panic(err)
}
return inspect.State.Running
}
func (s *Server) HasVolume() bool {
inspect, err := s.ContainerInspect(context.Background(), s.ContainerID)
if err != nil {
logger.Error.Panic(err)
}
return len(inspect.Mounts) > 0
}
// RunBedrock runs the bedrock server process and waits for confirmation from the server that the process has started.
// The server should be join-able when this function returns.
func (s *Server) RunBedrock() error {
// Run the bedrock_server process
if err := s.Command(strings.Split(RunMCCommand, " ")); err != nil {
s.StopOrPanic()
return err
}
logs, err := s.LogReader(1)
if err != nil {
s.StopOrPanic()
return err
}
scanner := bufio.NewScanner(logs)
scanner.Split(bufio.ScanLines)
for scanner.Scan() {
if scanner.Text() == "[INFO] Server started." {
// Server has finished starting
return nil
}
}
return fmt.Errorf("reached end of log reader without finding the 'Server started' message")
}
// StopOrPanic stops the server's container. The server process may not be stopped gracefully, call Server.Stop() to
// safely stop the server. If an error occurs while attempting to stop the server the program exits with a panic.
func (s *Server) StopOrPanic() {
logger.Info.Printf("stopping %s\n", s.ContainerName)
timeout := time.Duration(stopTimeout)
err := s.ContainerStop(
context.Background(),
s.ContainerID,
&timeout,
)
if err != nil {
logger.Error.Panicf("while stopping %s another error occurred: %s\n", s.ContainerName, err)
}
}
// Command attaches to the container and runs the given arguments separated by spaces.
func (s *Server) Command(args []string) error {
conn, err := s.CommandWriter()
if err != nil {
return err
}
commandString := strings.Join(args, " ") + "\n"
_, err = conn.Write([]byte(commandString))
if err != nil {
return err
}
return nil
}
// CommandWriter returns a net.Conn which streams to the container process stdin.
func (s *Server) CommandWriter() (net.Conn, error) {
waiter, err := s.ContainerAttach(
context.Background(),
s.ContainerID,
docker.ContainerAttachOptions{
Stdin: true,
Stream: true,
},
)
if err != nil {
return nil, err
}
return waiter.Conn, err
}
// LogReader returns a buffer with the stdout and stderr from the running mc server process. New output will continually
// be sent to the buffer. A negative tail value will result in the 'all' value being used.
func (s *Server) LogReader(tail int) (*bufio.Reader, error) {
logs, err := s.ContainerLogs(
context.Background(),
s.ContainerID,
docker.ContainerLogsOptions{
ShowStdout: true,
ShowStderr: true,
Tail: strconv.Itoa(tail),
Follow: true,
},
)
if err != nil {
return nil, fmt.Errorf("getting docker container logs: %s", err)
}
return bufio.NewReader(logs), nil
}
// Port returns the port players use to connect to this server.
func (s *Server) Port() (int, error) {
cj, err := s.ContainerInspect(context.Background(), s.ContainerID)
if err != nil {
return 0, err
}
portBindings := cj.HostConfig.PortBindings
if len(portBindings) == 0 {
return 0, fmt.Errorf("no ports bound for container %s", s.ContainerName)
}
var port int
for _, v := range portBindings {
p, err := strconv.Atoi(v[0].HostPort)
if err != nil {
return 0, fmt.Errorf("error reading container port: %s", err)
}
port = p
}
if port == 0 {
panic("port is 0")
}
return port, nil
}
// All returns a client for each active server.
func All(c client.ContainerAPIClient
|
{
id, err := containerID(containerName, cl)
if err != nil {
return nil, err
}
c := Server{
ContainerAPIClient: cl,
ContainerName: containerName,
ContainerID: id,
}
containerJSON, err := cl.ContainerInspect(context.Background(), c.ContainerID)
if err != nil {
return nil, fmt.Errorf("inspecting container: %s", err)
}
_, ok := containerJSON.Config.Labels[CraftLabel]
if !ok {
|
identifier_body
|
server.go
|
)
}
return c
}
// Server is a wrapper for docker's client.ContainerAPIClient which operates on a specific container.
type Server struct {
client.ContainerAPIClient
ContainerName, ContainerID string
}
// New creates a new craft server container and returns a docker client for it.
// It is the equivalent of the following docker command:
//
// docker run -d -e EULA=TRUE -p <HOST_PORT>:19132/udp <imageName>
//
// If mountVolume is true, a local volume will also be mounted and autoremove will be disabled.
func New(hostPort int, name string, mountVolume bool) (*Server, error) {
if hostPort == 0 {
hostPort = nextAvailablePort()
}
c, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
if err != nil {
logger.Error.Fatalf("Error: Failed to create new docker client: %s", err)
}
ctx := context.Background()
hostBinding := nat.PortBinding{
HostIP: anyIP,
HostPort: strconv.Itoa(hostPort),
}
// -p <HOST_PORT>:19132/udp
containerPort, err := nat.NewPort(protocol, strconv.Itoa(defaultPort))
if err != nil
|
portBinding := nat.PortMap{containerPort: []nat.PortBinding{hostBinding}}
var mounts []mount.Mount
if mountVolume {
volName := fmt.Sprintf("%s-%s", volumeLabel, name)
vol, err := c.VolumeCreate(ctx, volume.VolumeCreateBody{
Name: volName,
})
if err != nil {
return nil, fmt.Errorf("creating vol '%s': %w", volName, err)
}
mounts = []mount.Mount{{
Type: mount.TypeVolume,
Source: vol.Name,
Target: files.Directory,
}}
}
// docker run -d -e EULA=TRUE
createResp, err := c.ContainerCreate(
ctx,
&container.Config{
Image: ImageName,
Env: []string{"EULA=TRUE"},
ExposedPorts: nat.PortSet{containerPort: struct{}{}},
AttachStdin: true, AttachStdout: true, AttachStderr: true,
Tty: true,
OpenStdin: true,
Labels: map[string]string{CraftLabel: ""},
},
&container.HostConfig{
PortBindings: portBinding,
AutoRemove: !mountVolume,
Mounts: mounts,
},
nil, nil, name,
)
if err != nil {
return nil, fmt.Errorf("creating docker container: %s", err)
}
err = c.ContainerStart(ctx, createResp.ID, docker.ContainerStartOptions{})
if err != nil {
return nil, fmt.Errorf("starting container: %s", err)
}
s := Server{
ContainerAPIClient: c,
ContainerName: name,
ContainerID: createResp.ID,
}
return &s, nil
}
// Get searches for a server with the given name (stopped or running) and checks that it has a label identifying it as
// a craft server. If no craft server with that name exists, an error of type NotFoundError is returned. If the server is found but
// is not a craft server, an error of type NotCraftError is returned.
func Get(cl client.ContainerAPIClient, containerName string) (*Server, error) {
id, err := containerID(containerName, cl)
if err != nil {
return nil, err
}
c := Server{
ContainerAPIClient: cl,
ContainerName: containerName,
ContainerID: id,
}
containerJSON, err := cl.ContainerInspect(context.Background(), c.ContainerID)
if err != nil {
return nil, fmt.Errorf("inspecting container: %s", err)
}
_, ok := containerJSON.Config.Labels[CraftLabel]
if !ok {
return nil, &NotCraftError{Name: containerName}
}
return &c, nil
}
// Stop executes a stop command first in the server process cli then on the container itself, stopping the
// server. The server must be saved separately to persist the world and settings.
func (s *Server) Stop() error {
if err := s.Command([]string{"stop"}); err != nil {
return fmt.Errorf("%s: running 'stop' command in server cli to stop server process: %s", s.ContainerName, err)
}
logger.Info.Printf("stopping %s\n", s.ContainerName)
timeout := time.Duration(stopTimeout)
err := s.ContainerStop(
context.Background(),
s.ContainerID,
&timeout,
)
if err != nil {
return fmt.Errorf("%s: stopping docker container: %s", s.ContainerName, err)
}
return nil
}
func (s *Server) IsRunning() bool {
inspect, err := s.ContainerInspect(context.Background(), s.ContainerID)
if err != nil {
logger.Error.Panic(err)
}
return inspect.State.Running
}
func (s *Server) HasVolume() bool {
inspect, err := s.ContainerInspect(context.Background(), s.ContainerID)
if err != nil {
logger.Error.Panic(err)
}
return len(inspect.Mounts) > 0
}
// RunBedrock runs the bedrock server process and waits for confirmation from the server that the process has started.
// The server should be join-able when this function returns.
func (s *Server) RunBedrock() error {
// Run the bedrock_server process
if err := s.Command(strings.Split(RunMCCommand, " ")); err != nil {
s.StopOrPanic()
return err
}
logs, err := s.LogReader(1)
if err != nil {
s.StopOrPanic()
return err
}
scanner := bufio.NewScanner(logs)
scanner.Split(bufio.ScanLines)
for scanner.Scan() {
if scanner.Text() == "[INFO] Server started." {
// Server has finished starting
return nil
}
}
return fmt.Errorf("reached end of log reader without finding the 'Server started' message")
}
// StopOrPanic stops the server's container. The server process may not be stopped gracefully, call Server.Stop() to
// safely stop the server. If an error occurs while attempting to stop the server the program exits with a panic.
func (s *Server) StopOrPanic() {
logger.Info.Printf("stopping %s\n", s.ContainerName)
timeout := time.Duration(stopTimeout)
err := s.ContainerStop(
context.Background(),
s.ContainerID,
&timeout,
)
if err != nil {
logger.Error.Panicf("while stopping %s another error occurred: %s\n", s.ContainerName, err)
}
}
// Command attaches to the container and runs the given arguments separated by spaces.
func (s *Server) Command(args []string) error {
conn, err := s.CommandWriter()
if err != nil {
return err
}
commandString := strings.Join(args, " ") + "\n"
_, err = conn.Write([]byte(commandString))
if err != nil {
return err
}
return nil
}
// CommandWriter returns a net.Conn which streams to the container process stdin.
func (s *Server) CommandWriter() (net.Conn, error) {
waiter, err := s.ContainerAttach(
context.Background(),
s.ContainerID,
docker.ContainerAttachOptions{
Stdin: true,
Stream: true,
},
)
if err != nil {
return nil, err
}
return waiter.Conn, err
}
// LogReader returns a buffer with the stdout and stderr from the running mc server process. New output will continually
// be sent to the buffer. A negative tail value will result in the 'all' value being used.
func (s *Server) LogReader(tail int) (*bufio.Reader, error) {
logs, err := s.ContainerLogs(
context.Background(),
s.ContainerID,
docker.ContainerLogsOptions{
ShowStdout: true,
ShowStderr: true,
Tail: strconv.Itoa(tail),
Follow: true,
},
)
if err != nil {
return nil, fmt.Errorf("getting docker container logs: %s", err)
}
return bufio.NewReader(logs), nil
}
// Port returns the port players use to connect to this server.
func (s *Server) Port() (int, error) {
cj, err := s.ContainerInspect(context.Background(), s.ContainerID)
if err != nil {
return 0, err
}
portBindings := cj.HostConfig.PortBindings
if len(portBindings) == 0 {
return 0, fmt.Errorf("no ports bound for container %s", s.ContainerName)
}
var port int
for _, v := range portBindings {
p, err := strconv.Atoi(v[0].HostPort)
if err != nil {
return 0, fmt.Errorf("error reading container port: %s", err)
}
port = p
}
if port == 0 {
panic("port is 0")
}
return port, nil
}
// All returns a client for each active server.
func All(c client.ContainerAPIClient
|
{
return nil, fmt.Errorf("creating container port: %s", err)
}
|
conditional_block
|
common.js
|
dd;
break;
default:
return yyyy + "-" + mm + "-" + dd ;
break;
}
}
catch(e){
return("")
}
}
}
//Jump from the current page to another page
function JumpUrl(url){
window.location.href=url;
}
//Open a pop-up window
function winOpen(tourl)
{
window.open(encodeURIComponent(tourl));
}
//Get a parameter from the URL
fu
|
getUrlParam(name){
var reg = new RegExp("(^|&)"+ name +"=([^&]*)(&|$)");
var r = window.location.search.substr(1).match(reg);
if (r!=null)
{return decodeURIComponent(r[2]);}
else
{return ""; }
}
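// Example (illustrative): for a page opened as page.html?uid=42&lang=en,
// getUrlParam("uid") returns "42" and getUrlParam("missing") returns "".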
function Trim(ss)
{
// Use a regular expression to match leading and trailing whitespace
// and replace it with an empty string.
return ss.replace(/(^\s*)|(\s*$)/g, "");
}
Array.prototype.inArray = function (value) {
var i;
for (i=0; i < this.length; i++) {
if (this[i] === value) {
return true;
}
}
return false;
};
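// Example (illustrative): inArray uses strict comparison, so the types must
// match: [1, 2, 3].inArray(2) is true, but [1, 2, 3].inArray("2") is false.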
function addEvent( obj, type, fn ) {
if (obj.addEventListener) {
obj.addEventListener( type, fn, false );
EventCache.add(obj, type, fn);
}
else if (obj.attachEvent) {
obj["e"+type+fn] = fn;
obj[type+fn] = function() { obj["e"+type+fn]( window.event ); }
obj.attachEvent( "on"+type, obj[type+fn] );
EventCache.add(obj, type, fn);
}
else {
obj["on"+type] = obj["e"+type+fn];
}
}
var EventCache = function(){
var listEvents = [];
return {
listEvents : listEvents,
add : function(node, sEventName, fHandler){
listEvents.push(arguments);
},
flush : function(){
var i, item;
for(i = listEvents.length - 1; i >= 0; i = i - 1){
item = listEvents[i];
if(item[0].removeEventListener){
item[0].removeEventListener(item[1], item[2], item[3]);
};
if(item[1].substring(0, 2) != "on"){
item[1] = "on" + item[1];
};
if(item[0].detachEvent){
item[0].detachEvent(item[1], item[2]);
};
item[0][item[1]] = null;
};
}
};
}();
addEvent(window,'unload',EventCache.flush);
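// Example (illustrative sketch): addEvent works across the standard and old-IE
// event models, and the unload hook above flushes every handler registered this way:
// addEvent(document.getElementById("btn"), "click", function(){ alert("hi"); });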
function getTop(e){
var offset=e.offsetTop;
if(e.offsetParent!=null) offset+=getTop(e.offsetParent);
return offset;
}
function getLeft(e){
var offset=e.offsetLeft;
if(e.offsetParent!=null) offset+=getLeft(e.offsetParent);
return offset;
}
function getParent(el){
return el.parentNode ? el.parentNode : el.parentElement;
}
//When logged in, jump from the public platform to the member management platform; the parameter tourl names the target, e.g. to modify member info call hqenmanger(CompanyInfo/MemberModify.aspx)
//CompanyInfo is the section and MemberModify.aspx is the file
function hqenmanger(tourl)
{
window.open("/Web/Hqen/"+encodeURIComponent(tourl));
}
//When logged in, jump from the public platform to the member management platform; the parameter tourl names the target, e.g. g_ibs('CompanyInfo/MemberModify.aspx'), blank
var g_Ibs_url = "http://ibs.hqew.com";
var g_Main_url = "http://www.hqew.com";
function g_ibs(tourl,blank)
{
Check_CurrentDomain();
if(blank==undefined ){
window.open(g_Ibs_url+tourl);
}else if(blank==true){
if(tourl.toString().toLowerCase().indexOf("/ibs/")>-1 || tourl.toString().toLowerCase().indexOf("/web/")>-1){
window.open(g_Ibs_url+tourl);
}else{
window.open(g_Main_url+tourl);
}
}else{
window.location.href = g_Ibs_url+tourl;
}
}
function Check_CurrentDomain(){
if(typeof(currentDomain)!='undefined' && currentDomain!=null){
g_Ibs_url = currentDomain.HqewIBSSite;
g_Main_url = currentDomain.HqewMainSite;
}
else if(Cookie.read("HqewIBSSite")!="" && Cookie.read("HqewMainSite")!=""){
//Try to read the values from cookies
g_Ibs_url = Cookie.read("HqewIBSSite");
g_Main_url = Cookie.read("HqewMainSite");
}
}
//Clear the input's default content and restore the editing style
function g_on_setvalue(obj,value,cname)
{
if(obj.value!=value){
return false;
}
obj.value="";
obj.className = cname;
}
function g_logout()
{
g_ibs("/Web/Hqen/Logout.aspx", false);
}
function HTMLEnCode(str)
{
var s = "";
if(str.length == 0)
{
return "";
}
s = str.replace(/&/g,"&amp;");
s = s.replace(/</g,"&lt;");
s = s.replace(/>/g,"&gt;");
s = s.replace(/ /g,"&nbsp;");
s = s.replace(/\'/g,"&#39;");
s = s.replace(/\"/g,"&quot;");
s = s.replace(/\n/g,"<br>");
return s;
}
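// Example (illustrative): HTMLEnCode('<b>"hi"</b>') returns
// "&lt;b&gt;&quot;hi&quot;&lt;/b&gt;", and HTMLDeCode below reverses it for
// typical text that does not itself contain literal entity strings.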
function HTMLDeCode(str)
{
var s = "";
if(str.length == 0)
{
return "";
}
s = str.replace(/&amp;/g,"&");
s = s.replace(/&lt;/g,"<");
s = s.replace(/&gt;/g,">");
s = s.replace(/&nbsp;/g," ");
s = s.replace(/&#39;/g,"\'");
s = s.replace(/&quot;/g,"\"");
s = s.replace(/<br>/g,"\n");
return s;
}
//Truncate a string by display width
function CountCharacters(str,size){
var totalCount = 0;
var newStr = "";
for (var i=0; i<str.length; i++) {
var c = str.charCodeAt(i);
if ((c >= 0x0001 && c <= 0x007e) || (0xff60<=c && c<=0xff9f)) {
totalCount++;
}else {
totalCount+=2;
}
if(totalCount<=size){
newStr = str.substring(0,i+1);
}else{
return newStr;
}
}
return newStr;
}
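// Example (illustrative): ASCII counts as width 1 and CJK as width 2, so
// CountCharacters("ab\u4e2d\u6587", 5) returns "ab\u4e2d" (widths 1+1+2 fit,
// and the next 2-width character would exceed 5).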
//For URL address parameters:
//if a parameter contains symbols other than English letters, Chinese characters and digits, it is encoded and "==" is appended as a marker; otherwise it is returned unchanged
//when decoding, the presence of the "==" marker decides whether to decode
var base64EncodeChars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
var base64Encode = new Array(
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 62, -1, -1, -1, 63,
52, 53, 54, 55, 56, 57, 58, 59, 60, 61, -1, -1, -1, -1, -1, -1,
-1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, -1, -1, -1, -1, -1,
-1, 26, 27, 28, 29, 30, 31, 32, 33
|
nction
|
identifier_name
|
common.js
|
dd;
break;
default:
return yyyy + "-" + mm + "-" + dd ;
break;
}
}
catch(e){
return("")
}
}
}
//Jump from the current page to another page
function JumpUrl(url){
window.location.href=url;
}
//Open a pop-up window
function winOpen(tourl)
{
window.open(encodeURIComponent(tourl));
}
//Get a parameter from the URL
function getUrlParam(name){
var reg = new RegExp("(^|&)"+ name +"=([^&]*)(&|$)");
var r = window.location.search.substr(1).match(reg);
if (r!=null)
{return decodeURIComponent(r[2]);}
else
{return ""; }
}
function Trim(ss)
{
// Use a regular expression to match leading and trailing whitespace
// and replace it with an empty string.
return ss.replace(/(^\s*)|(\s*$)/g, "");
}
Array.prototype.inArray = function (value) {
var i;
for (i=0; i < this.length; i++) {
if (this[i] === value) {
return true;
}
}
return false;
};
function addEvent( obj, type, fn ) {
if (obj.addEventListener) {
obj.addEventListener( type, fn, false );
EventCache.add(obj, type, fn);
}
else if (obj.attachEvent) {
obj["e"+type+fn] = fn;
obj[type+fn] = function() { obj["e"+type+fn]( window.event ); }
obj.attachEvent( "on"+type, obj[type+fn] );
EventCache.add(obj, type, fn);
}
else {
obj["on"+type] = obj["e"+type+fn];
}
}
var EventCache = function(){
var listEvents = [];
return {
listEvents : listEvents,
add : function(node, sEventName, fHandler){
listEvents.push(arguments);
},
flush : function(){
var i, item;
for(i = listEvents.length - 1; i >= 0; i = i - 1){
item = listEvents[i];
if(item[0].removeEventListener){
item[0].removeEventListener(item[1], item[2], item[3]);
};
if(item[1].substring(0, 2) != "on"){
item[1] = "on" + item[1];
};
if(item[0].detachEvent){
item[0].detachEvent(item[1], item[2]);
};
item[0][item[1]] = null;
};
}
};
}();
addEvent(window,'unload',EventCache.flush);
function getTop(e){
var offset=e.offsetTop;
if(e.offsetParent!=null) offset+=getTop(e.offsetParent);
return offset;
}
function
|
rn offset;
}
function getParent(el){
return el.parentNode ? el.parentNode : el.parentElement;
}
//When logged in, jump from the public platform to the member management platform; the parameter tourl names the target, e.g. to modify member info call hqenmanger(CompanyInfo/MemberModify.aspx)
//CompanyInfo is the section and MemberModify.aspx is the file
function hqenmanger(tourl)
{
window.open("/Web/Hqen/"+encodeURIComponent(tourl));
}
//When logged in, jump from the public platform to the member management platform; the parameter tourl names the target, e.g. g_ibs('CompanyInfo/MemberModify.aspx'), blank
var g_Ibs_url = "http://ibs.hqew.com";
var g_Main_url = "http://www.hqew.com";
function g_ibs(tourl,blank)
{
Check_CurrentDomain();
if(blank==undefined ){
window.open(g_Ibs_url+tourl);
}else if(blank==true){
if(tourl.toString().toLowerCase().indexOf("/ibs/")>-1 || tourl.toString().toLowerCase().indexOf("/web/")>-1){
window.open(g_Ibs_url+tourl);
}else{
window.open(g_Main_url+tourl);
}
}else{
window.location.href = g_Ibs_url+tourl;
}
}
function Check_CurrentDomain(){
if(typeof(currentDomain)!='undefined' && currentDomain!=null){
g_Ibs_url = currentDomain.HqewIBSSite;
g_Main_url = currentDomain.HqewMainSite;
}
else if(Cookie.read("HqewIBSSite")!="" && Cookie.read("HqewMainSite")!=""){
//Try to read the values from cookies
g_Ibs_url = Cookie.read("HqewIBSSite");
g_Main_url = Cookie.read("HqewMainSite");
}
}
//Clear the input's default content and restore the editing style
function g_on_setvalue(obj,value,cname)
{
if(obj.value!=value){
return false;
}
obj.value="";
obj.className = cname;
}
function g_logout()
{
g_ibs("/Web/Hqen/Logout.aspx", false);
}
function HTMLEnCode(str)
{
var s = "";
if(str.length == 0)
{
return "";
}
s = str.replace(/&/g,"&amp;");
s = s.replace(/</g,"&lt;");
s = s.replace(/>/g,"&gt;");
s = s.replace(/ /g,"&nbsp;");
s = s.replace(/\'/g,"&#39;");
s = s.replace(/\"/g,"&quot;");
s = s.replace(/\n/g,"<br>");
return s;
}
function HTMLDeCode(str)
{
var s = "";
if(str.length == 0)
{
return "";
}
s = str.replace(/&amp;/g,"&");
s = s.replace(/&lt;/g,"<");
s = s.replace(/&gt;/g,">");
s = s.replace(/&nbsp;/g," ");
s = s.replace(/&#39;/g,"\'");
s = s.replace(/&quot;/g,"\"");
s = s.replace(/<br>/g,"\n");
return s;
}
//Truncate a string by display width
function CountCharacters(str,size){
var totalCount = 0;
var newStr = "";
for (var i=0; i<str.length; i++) {
var c = str.charCodeAt(i);
if ((c >= 0x0001 && c <= 0x007e) || (0xff60<=c && c<=0xff9f)) {
totalCount++;
}else {
totalCount+=2;
}
if(totalCount<=size){
newStr = str.substring(0,i+1);
}else{
return newStr;
}
}
return newStr;
}
//For URL address parameters:
//if a parameter contains symbols other than English letters, Chinese characters and digits, it is encoded and "==" is appended as a marker; otherwise it is returned unchanged
//when decoding, the presence of the "==" marker decides whether to decode
var base64EncodeChars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
var base64Encode = new Array(
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 62, -1, -1, -1, 63,
52, 53, 54, 55, 56, 57, 58, 59, 60, 61, -1, -1, -1, -1, -1, -1,
-1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, -1, -1, -1, -1, -1,
-1, 26, 27, 28, 29, 30, 31, 32, 33
|
getLeft(e){
var offset=e.offsetLeft;
if(e.offsetParent!=null) offset+=getLeft(e.offsetParent);
retu
|
identifier_body
|
common.js
|
dd;
break;
default:
return yyyy + "-" + mm + "-" + dd ;
break;
}
}
catch(e){
return("")
}
}
}
//Jump from the current page to another page
function JumpUrl(url){
window.location.href=url;
}
//Open a pop-up window
function winOpen(tourl)
{
window.open(encodeURIComponent(tourl));
}
//Get a parameter from the URL
function getUrlParam(name){
var reg = new RegExp("(^|&)"+ name +"=([^&]*)(&|$)");
var r = window.location.search.substr(1).match(reg);
if (r!=null)
{return decodeURIComponent(r[2]);}
else
{return ""; }
}
function Trim(ss)
{
// Use a regular expression to match leading and trailing whitespace
// and replace it with an empty string.
return ss.replace(/(^\s*)|(\s*$)/g, "");
}
Array.prototype.inArray = function (value) {
var i;
for (i=0; i < this.length; i++) {
if (this[i] === value) {
return true;
}
}
return false;
};
function addEvent( obj, type, fn ) {
if (obj.addEventListener) {
obj.addEventListener( type, fn, false );
EventCache.add(obj, type, fn);
}
else if (obj.attachEvent) {
obj["e"+type+fn] = fn;
obj[type+fn] = function() { obj["e"+type+fn]( window.event ); }
obj.attachEvent( "on"+type, obj[type+fn] );
EventCache.add(obj, type, fn);
}
else {
obj["on"+type] = obj["e"+type+fn];
}
}
var EventCache = function(){
var listEvents = [];
return {
listEvents : listEvents,
add : function(node, sEventName, fHandler){
listEvents.push(arguments);
},
flush : function(){
var i, item;
for(i = listEvents.length - 1; i >= 0; i = i - 1){
item = listEvents[i];
if(item[0].removeEventListener){
item[0].removeEventListener(item[1], item[2], item[3]);
};
if(item[1].substring(0, 2) != "on"){
item[1] = "on" + item[1];
};
if(item[0].detachEvent){
item[0].detachEvent(item[1], item[2]);
};
item[0][item[1]] = null;
};
}
};
}();
addEvent(window,'unload',EventCache.flush);
function getTop(e){
var offset=e.offsetTop;
if(e.offsetParent!=null) offset+=getTop(e.offsetParent);
return offset;
}
function getLeft(e){
var offset=e.offsetLeft;
if(e.offsetParent!=null) offset+=getLeft(e.offsetParent);
return offset;
}
function getParent(el){
return el.parentNode ? el.parentNode : el.parentElement;
}
//When logged in, jump from the public platform to the member management platform; the parameter tourl names the target, e.g. to modify member info call hqenmanger(CompanyInfo/MemberModify.aspx)
//CompanyInfo is the section and MemberModify.aspx is the file
function hqenmanger(tourl)
{
window.open("/Web/Hqen/"+encodeURIComponent(tourl));
}
//When logged in, jump from the public platform to the member management platform; the parameter tourl names the target, e.g. g_ibs('CompanyInfo/MemberModify.aspx'), blank
var g_Ibs_url = "http://ibs.hqew.com";
var g_Main_url = "http://www.hqew.com";
function g_ibs(tourl,blank)
{
Check_CurrentDomain();
if(blank==undefined ){
window.open(g_Ibs_url+tourl);
}else if(blank==true){
if(tourl.toString().toLowerCase().indexOf("/ibs/")>-1 || tourl.toString().toLowerCase().indexOf("/web/")>-1){
window.open(g_Ibs_url+tourl);
}else{
window.open(g_Main_url+tourl);
}
}else{
window.location.href = g_Ibs_url+tourl;
}
}
function Check_CurrentDomain(){
if(typeof(currentDomain)!='undefined' && currentDomain!=null){
g_Ibs_url = currentDomain.HqewIBSSite;
g_Main_url = currentDomain.HqewMainSite;
}
else if(Cookie.read("HqewIBSSite")!="" && Cookie.read("HqewMainSite")!=""){
//Try to read the values from cookies
g_Ibs_url = Cookie.read("HqewIBSSite");
g_Main_url = Cookie.read("HqewMainSite");
}
}
|
if(obj.value!=value){
return false;
}
obj.value="";
obj.className = cname;
}
function g_logout()
{
g_ibs("/Web/Hqen/Logout.aspx", false);
}
function HTMLEnCode(str)
{
var s = "";
if(str.length == 0)
{
return "";
}
s = str.replace(/&/g,"&amp;");
s = s.replace(/</g,"&lt;");
s = s.replace(/>/g,"&gt;");
s = s.replace(/ /g,"&nbsp;");
s = s.replace(/\'/g,"&#39;");
s = s.replace(/\"/g,"&quot;");
s = s.replace(/\n/g,"<br>");
return s;
}
function HTMLDeCode(str)
{
var s = "";
if(str.length == 0)
{
return "";
}
s = str.replace(/&amp;/g,"&");
s = s.replace(/&lt;/g,"<");
s = s.replace(/&gt;/g,">");
s = s.replace(/&nbsp;/g," ");
s = s.replace(/&#39;/g,"\'");
s = s.replace(/&quot;/g,"\"");
s = s.replace(/<br>/g,"\n");
return s;
}
//Truncate a string by display width
function CountCharacters(str,size){
var totalCount = 0;
var newStr = "";
for (var i=0; i<str.length; i++) {
var c = str.charCodeAt(i);
if ((c >= 0x0001 && c <= 0x007e) || (0xff60<=c && c<=0xff9f)) {
totalCount++;
}else {
totalCount+=2;
}
if(totalCount<=size){
newStr = str.substring(0,i+1);
}else{
return newStr;
}
}
return newStr;
}
//For URL address parameters:
//if a parameter contains symbols other than English letters, Chinese characters and digits, it is encoded and "==" is appended as a marker; otherwise it is returned unchanged
//when decoding, the presence of the "==" marker decides whether to decode
var base64EncodeChars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
var base64Encode = new Array(
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 62, -1, -1, -1, 63,
52, 53, 54, 55, 56, 57, 58, 59, 60, 61, -1, -1, -1, -1, -1, -1,
-1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, -1, -1, -1, -1, -1,
-1, 26, 27, 28, 29, 30, 31, 32, 33, 3
|
//Clear the input's default content and restore the editing style
function g_on_setvalue(obj,value,cname)
{
|
random_line_split
|
gateway-sia-cachelayer.go
|
// Sys returns system interface
func (o SiaFileInfo) Sys() interface{} {
return o.FileSys
}
// newSiaCacheLayer creates a new Sia cache layer
func newSiaCacheLayer(siadAddress string, cacheDir string, dbFile string, debug bool) (*SiaCacheLayer, error) {
cache := &SiaCacheLayer{
SiadAddress: siadAddress,
CacheDir: cacheDir,
DbFile: dbFile,
DebugMode: debug,
ManagerDelaySec: 30,
UploadCheckFreqMs: 3000,
MaxCacheSizeBytes: 10000000000,
CacheTicker: nil,
Db: nil,
DbMutex: &sync.Mutex{},
}
cache.loadSiaEnv()
return cache, nil
}
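// Sketch (illustrative, not part of the original file): a typical construction
// against a local siad; the address and paths are hypothetical.
func exampleStartCacheLayer() (*SiaCacheLayer, *SiaServiceError) {
// newSiaCacheLayer currently never returns a non-nil error, so it is ignored here.
cache, _ := newSiaCacheLayer("127.0.0.1:9980", ".sia_cache", "sia.db", false)
if serr := cache.Start(); serr != nil {
return nil, serr
}
return cache, nil
}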
// Start will start running the Cache Layer
func (cache *SiaCacheLayer) Start() *SiaServiceError {
cache.debugmsg("SiaCacheLayer.Start")
cache.DbMutex = &sync.Mutex{}
cache.ensureCacheDirExists()
// Open and initialize database
err := cache.dbOpenDatabase()
if err != nil {
return err
}
// Start the cache management process
cache.CacheTicker = time.NewTicker(time.Second * time.Duration(cache.ManagerDelaySec))
go func() {
for range cache.CacheTicker.C {
cache.manager()
}
}()
return nil
}
// Stop will stop the SiaCacheLayer
func (cache *SiaCacheLayer) Stop() {
cache.debugmsg("SiaCacheLayer.Stop")
// Stop cache management process
cache.CacheTicker.Stop()
// Close the database
cache.dbCloseDatabase()
}
// InsertBucket will attempt to insert a new bucket
func (cache *SiaCacheLayer) InsertBucket(bucket string) *SiaServiceError {
cache.debugmsg("SiaCacheLayer.InsertBucket")
return cache.dbInsertBucket(bucket)
}
// DeleteBucket will attempt to delete an existing bucket
func (cache *SiaCacheLayer) DeleteBucket(bucket string) *SiaServiceError {
cache.debugmsg("SiaCacheLayer.DeleteBucket")
// Do NOT delete if objects exist in bucket.
objects, serr := cache.ListObjects(bucket)
if serr != nil {
return serr
}
if len(objects) > 0 {
return siaErrorBucketNotEmpty
}
return cache.dbDeleteBucket(bucket)
}
// ListBuckets will return a list of all existing buckets
func (cache *SiaCacheLayer) ListBuckets() (buckets []SiaBucketInfo, e *SiaServiceError) {
cache.debugmsg("SiaCacheLayer.ListBuckets")
return cache.dbListBuckets()
}
// DeleteObject will attempt to delete the object from Sia
func (cache *SiaCacheLayer) DeleteObject(bucket string, objectName string) *SiaServiceError {
cache.debugmsg("SiaCacheLayer.DeleteObject")
err := cache.dbUpdateObjectDeletedStatus(bucket, objectName, 1)
if err != nil {
return err
}
// Tell Sia daemon to delete the object
var siaObj = cache.getSiaObjectName(bucket, objectName)
derr := post(cache.SiadAddress, "/renter/delete/"+siaObj, "")
if derr != nil {
return &SiaServiceError{Code: "SiaErrorDaemon", Message: derr.Error()}
}
return cache.dbDeleteObject(bucket, objectName)
}
// PutObject will attempt to put an object on Sia
func (cache *SiaCacheLayer) PutObject(bucket string, objectName string, size int64, purgeAfter int64, srcFile string) *SiaServiceError {
cache.debugmsg("SiaCacheLayer.PutObject")
// Before inserting to the DB, there is a very rare chance that the object already exists there
// from a failed upload where Minio crashed or was killed before the DB was updated to reflect it.
// So just in case, we check whether the object exists with a not-uploaded status. If so, we delete
// that record and then continue as normal.
objInfo, e := cache.GetObjectInfo(bucket, objectName)
if e == nil {
// Object does exist. If uploaded, return error. If not uploaded, delete it and continue.
if objInfo.Uploaded.Unix() > 0 {
return siaErrorObjectAlreadyExists
}
e = cache.dbDeleteObject(bucket, objectName)
if e != nil {
return e
}
}
err := cache.dbInsertObject(bucket, objectName, size, time.Now().Unix(), 0, purgeAfter, srcFile, 1)
if err != nil {
return err
}
// Tell Sia daemon to upload the object
siaObj := cache.getSiaObjectName(bucket, objectName)
derr := post(cache.SiadAddress, "/renter/upload/"+siaObj, "source="+srcFile)
if derr != nil {
cache.dbDeleteObject(bucket, objectName)
return &SiaServiceError{Code: "SiaErrorDaemon", Message: derr.Error()}
}
// Need to wait for upload to complete unless background uploading is enabled
if !cache.BackgroundUpload {
err = cache.waitTillSiaUploadCompletes(siaObj)
if err != nil {
cache.dbDeleteObject(bucket, objectName)
return err
}
// Mark object as uploaded
err = cache.dbUpdateObjectUploadedStatus(bucket, objectName, 1)
if err != nil {
cache.dbDeleteObject(bucket, objectName)
return err
}
}
return nil
}
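// Sketch (illustrative, not part of the original file): uploading a local file
// and letting the cache purge its local copy after an hour of inactivity.
// The bucket, object name and source path are hypothetical.
func examplePutObject(cache *SiaCacheLayer, size int64) *SiaServiceError {
return cache.PutObject("backups", "db.tar.gz", size, 3600, "/tmp/db.tar.gz")
}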
// ListObjects will return a list of existing objects in the bucket provided
func (cache *SiaCacheLayer) ListObjects(bucket string) (objects []SiaObjectInfo, e *SiaServiceError) {
cache.debugmsg("SiaCacheLayer.ListObjects")
return cache.dbListObjects(bucket)
}
// GuaranteeObjectIsInCache will guarantee that the specified object exists in the local cache
func (cache *SiaCacheLayer) GuaranteeObjectIsInCache(bucket string, objectName string) *SiaServiceError {
defer cache.timeTrack(time.Now(), "GuaranteeObjectIsInCache")
cache.debugmsg("SiaCacheLayer.GuaranteeObjectIsInCache")
// Minio filesystem layer may request files from .minio.sys bucket
// If we get a request for Minio, we'll pass back success and let Minio deal with it.
if bucket == ".minio.sys" {
return nil
}
// Make sure object exists in database
objInfo, err := cache.GetObjectInfo(bucket, objectName)
if err != nil {
return err
}
// Is file already in cache?
_, serr := os.Stat(objInfo.SrcFile)
if serr == nil {
// File exists in cache
err = cache.dbUpdateCachedStatus(bucket, objectName, 1)
if err != nil {
return err
}
// Increment cached fetch count and update last_fetch
return cache.dbUpdateCachedFetches(bucket, objectName, objInfo.CachedFetches+1)
}
// Object not in cache, must download from Sia.
// First, though, make sure the file was completely uploaded to Sia.
if objInfo.Uploaded == time.Unix(0, 0) {
// File never completed uploading, or was never marked as uploaded in database.
// Neither of these cases should happen, but just in case.
return siaErrorUnknown
}
// Make sure bucket path exists in cache directory
cache.ensureCacheBucketDirExists(bucket)
// Make sure enough space exists in cache
err = cache.guaranteeCacheSpace(objInfo.Size)
if err != nil {
return err
}
// Increment fetch count and update last_fetch BEFORE requesting the download from Sia.
// This will prevent the cache manager from removing the partially downloaded file.
err = cache.dbUpdateSiaFetches(bucket, objectName, objInfo.SiaFetches+1)
if err != nil {
return err
}
var siaObj = cache.getSiaObjectName(bucket, objectName)
derr := get(cache.SiadAddress, "/renter/download/"+siaObj+"?destination="+url.QueryEscape(objInfo.SrcFile))
if derr != nil {
cache.debugmsg(fmt.Sprintf("Error: %s", derr))
return &SiaServiceError{Code: "SiaErrorDaemon", Message: derr.Error()}
}
// After successfully downloading to the cache, make sure the cached flag of the object is set.
return cache.dbUpdateCachedStatus(bucket, objectName, 1)
}
// GetObjectInfo will return object information for the object specified
func (cache *SiaCacheLayer) GetObjectInfo(bucket string, objectName string) (objInfo SiaObjectInfo, e *SiaServiceError) {
cache.debugmsg("SiaCacheLayer.GetObjectInfo")
return cache.dbGetObjectInfo(bucket, objectName)
}
// SetBucketPolicies sets policy on bucket
func (cache *SiaCacheLayer) SetBucketPolicies(bucket string, policyInfo policy.BucketAccessPolicy) *SiaServiceError {
cache.debugmsg("SiaCacheLayer.SetBucketPolicies")
res, _ := json.Marshal(&policyInfo)
return cache.dbUpdateBucketPolicies(bucket, string(res))
}
// GetBucketPolicies will get policy on bucket
func (
|
{
return o.FileIsDir
}
|
identifier_body
|
|
gateway-sia-cachelayer.go
|
range buckets {
objects, err := cache.ListObjects(bucket.Name)
if err != nil {
return err
}
for _, object := range objects {
// Only remove an object from cache here if:
// 1. Object is cached
// 2. Object was uploaded over PurgeAfter seconds ago
// 3. Object hasn't been fetched in over PurgeAfter seconds
if object.Cached == 1 && object.Uploaded != time.Unix(0, 0) {
sinceUploaded := time.Now().Unix() - object.Uploaded.Unix()
sinceFetched := time.Now().Unix() - object.LastFetch.Unix()
if sinceUploaded > object.PurgeAfter && sinceFetched > object.PurgeAfter {
err = cache.removeFromCache(object)
if err != nil {
return err
}
}
}
}
}
return nil
}
func (cache *SiaCacheLayer) removeFromCache(objInfo SiaObjectInfo) *SiaServiceError {
cache.debugmsg(fmt.Sprintf("removeFromCache: %s", objInfo.SrcFile))
// If file doesn't exist in cache, it's falsely labelled. Update and return.
_, err := os.Stat(objInfo.SrcFile)
if err != nil {
return cache.dbUpdateCachedStatus(objInfo.Bucket, objInfo.Name, 0)
}
err = os.Remove(objInfo.SrcFile)
if err != nil {
// File exists but couldn't be deleted. Permission issue?
return siaErrorFailedToDeleteCachedFile
}
return cache.dbUpdateCachedStatus(objInfo.Bucket, objInfo.Name, 0)
}
func (cache *SiaCacheLayer) checkSiaUploads() *SiaServiceError {
cache.debugmsg("SiaCacheLayer.checkSiaUploads")
// Get list of all uploading objects
objs, err := cache.dbListUploadingObjects()
if err != nil {
return err
}
// Get list of all renter files
var rf api.RenterFiles
derr := getAPI(cache.SiadAddress, "/renter/files", &rf)
if derr != nil {
return &SiaServiceError{Code: "SiaErrorDaemon", Message: derr.Error()}
}
// If uploading object is available on Sia, update database
for _, obj := range objs {
var siaObj = cache.getSiaObjectName(obj.Bucket, obj.Name)
for _, file := range rf.Files {
if file.SiaPath == siaObj && file.Available {
cache.debugmsg(fmt.Sprintf(" Upload to Sia completed: %s", obj.Name))
err = cache.dbUpdateObjectUploadedStatus(obj.Bucket, obj.Name, 1)
if err != nil {
return err
}
}
}
}
return nil
}
func (cache *SiaCacheLayer) waitTillSiaUploadCompletes(siaObj string) *SiaServiceError {
cache.debugmsg("SiaCacheLayer.waitTillSiaUploadCompletes")
complete := false
for !complete {
avail, e := cache.isSiaFileAvailable(siaObj)
if e != nil {
return e
}
if avail {
return nil
}
time.Sleep(time.Duration(cache.UploadCheckFreqMs) * time.Millisecond)
}
return nil
}
func (cache *SiaCacheLayer) isSiaFileAvailable(siaObj string) (bool, *SiaServiceError) {
cache.debugmsg(fmt.Sprintf("SiaCacheLayer.isSiaFileAvailable: %s", siaObj))
var rf api.RenterFiles
err := getAPI(cache.SiadAddress, "/renter/files", &rf)
if err != nil {
return false, &SiaServiceError{Code: "SiaErrorDaemon", Message: err.Error()}
}
for _, file := range rf.Files {
cache.debugmsg(fmt.Sprintf(" Renter file: %s", file.SiaPath))
if file.SiaPath == siaObj {
return file.Available, nil
}
}
return false, &SiaServiceError{Code: "SiaErrorDaemon", Message: "File not in Sia renter list"}
}
func (cache *SiaCacheLayer) guaranteeCacheSpace(cacheNeeded int64) *SiaServiceError {
cache.debugmsg("SiaCacheLayer.guaranteeCacheSpace")
avail, e := cache.getCacheAvailable()
if e != nil {
return e
}
cache.debugmsg(fmt.Sprintf(" Cache space available: %d\n", avail))
for avail < cacheNeeded {
e = cache.forceDeleteOldestCacheFile()
if e != nil {
return e
}
avail, e = cache.getCacheAvailable()
if e != nil {
return e
}
cache.debugmsg(fmt.Sprintf(" Cache space available: %d\n", avail))
}
return nil
}
func (cache *SiaCacheLayer) getSiaObjectName(bucket string, objectName string) string {
reg := regexp.MustCompile("[^a-zA-Z0-9.]+")
cleanedName := reg.ReplaceAllString(objectName, "+")
return bucket + "/" + cleanedName
}
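// For reference (derived from the regexp above): every run of characters
// outside [a-zA-Z0-9.] collapses to a single "+", so for example
// getSiaObjectName("bucket", "my file!.txt") yields "bucket/my+file+.txt".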
func (cache *SiaCacheLayer) forceDeleteOldestCacheFile() *SiaServiceError {
cache.debugmsg("SiaCacheLayer.forceDeleteOldestCacheFile")
buckets, serr := cache.dbListBuckets()
if serr != nil {
return serr
}
var objToDelete *SiaObjectInfo
for _, bkt := range buckets {
objs, serr := cache.dbListObjects(bkt.Name)
if serr != nil {
return serr
}
for _, obj := range objs {
if obj.Uploaded.Unix() > 0 && obj.Cached == 1 {
if objToDelete == nil || obj.LastFetch.Unix() < objToDelete.LastFetch.Unix() {
// Copy before taking the address: &obj would alias the loop variable
// and always end up pointing at the last element iterated.
candidate := obj
objToDelete = &candidate
}
}
}
}
if objToDelete == nil {
return siaErrorUnableToClearAnyCachedFiles
}
// Make certain cached item exists, then delete it.
_, err := os.Stat(objToDelete.SrcFile)
if err != nil {
// Item does NOT exist in cache. Could have been deleted manually by user.
// Update the cached flag and return. (Returning failure would stop cache manager.)
return cache.dbUpdateCachedStatus(objToDelete.Bucket, objToDelete.Name, 0)
}
err = os.Remove(objToDelete.SrcFile)
if err != nil {
return siaErrorUnableToClearAnyCachedFiles
}
err = cache.dbUpdateCachedStatus(objToDelete.Bucket, objToDelete.Name, 0)
if err != nil {
return siaErrorUnableToClearAnyCachedFiles
}
return nil
}
func (cache *SiaCacheLayer) getCacheUsed() (int64, *SiaServiceError) {
cache.debugmsg("SiaCacheLayer.getCacheUsed")
var size int64
err := filepath.Walk(cache.CacheDir, func(_ string, info os.FileInfo, e error) error {
if e != nil {
// Bail out before touching info, which may be nil when e is non-nil.
return e
}
if !info.IsDir() {
cache.debugmsg(fmt.Sprintf(" %s: %d", info.Name(), info.Size()))
size += info.Size()
}
return nil
})
if err != nil {
return 0, siaErrorDeterminingCacheSize
}
return size, nil
}
func (cache *SiaCacheLayer) getCacheAvailable() (int64, *SiaServiceError) {
cache.debugmsg("SiaCacheLayer.getCacheAvailable")
used, serr := cache.getCacheUsed()
return (cache.MaxCacheSizeBytes - used), serr
}
func (cache *SiaCacheLayer) ensureCacheDirExists() {
cache.debugmsg("SiaCacheLayer.ensureCacheDirExists")
// Make sure cache directory exists
os.Mkdir(cache.CacheDir, 0744)
}
func (cache *SiaCacheLayer) ensureCacheBucketDirExists(bucket string) {
cache.debugmsg("SiaCacheLayer.ensureCacheBucketDirExists")
os.Mkdir(filepath.Join(cache.CacheDir, bucket), 0744)
}
func (cache *SiaCacheLayer) debugmsg(str string) {
if cache.DebugMode {
fmt.Println(str)
}
}
func (cache *SiaCacheLayer) timeTrack(start time.Time, name string) {
if cache.DebugMode {
elapsed := time.Since(start)
fmt.Printf("%s took %s\n", name, elapsed)
}
}
// Attempt to load Sia config from ENV
func (cache *SiaCacheLayer) loadSiaEnv() {
tmp := os.Getenv("SIA_MANAGER_DELAY_SEC")
if tmp != "" {
i, err := strconv.ParseInt(tmp, 10, 64)
if err == nil {
cache.ManagerDelaySec = i
}
}
tmp = os.Getenv("SIA_UPLOAD_CHECK_FREQ_MS")
if tmp != "" {
i, err := strconv.ParseInt(tmp, 10, 64)
if err == nil {
cache.UploadCheckFreqMs = i
}
}
tmp = os.Getenv("SIA_CACHE_MAX_SIZE_BYTES")
if tmp != "" {
i, err := strconv.ParseInt(tmp, 10, 64)
if err == nil
|
{
cache.MaxCacheSizeBytes = i
}
|
conditional_block
|
|
gateway-sia-cachelayer.go
|
, srcFile, 1)
if err != nil {
return err
}
// Tell Sia daemon to upload the object
siaObj := cache.getSiaObjectName(bucket, objectName)
derr := post(cache.SiadAddress, "/renter/upload/"+siaObj, "source="+srcFile)
if derr != nil {
cache.dbDeleteObject(bucket, objectName)
return &SiaServiceError{Code: "SiaErrorDaemon", Message: derr.Error()}
}
// Need to wait for upload to complete unless background uploading is enabled
if !cache.BackgroundUpload {
err = cache.waitTillSiaUploadCompletes(siaObj)
if err != nil {
cache.dbDeleteObject(bucket, objectName)
return err
}
// Mark object as uploaded
err = cache.dbUpdateObjectUploadedStatus(bucket, objectName, 1)
if err != nil {
cache.dbDeleteObject(bucket, objectName)
return err
}
}
return nil
}
// ListObjects will return a list of existing objects in the bucket provided
func (cache *SiaCacheLayer) ListObjects(bucket string) (objects []SiaObjectInfo, e *SiaServiceError) {
cache.debugmsg("SiaCacheLayer.ListObjects")
return cache.dbListObjects(bucket)
}
// GuaranteeObjectIsInCache will guarantee that the specified object exists in the local cache
func (cache *SiaCacheLayer) GuaranteeObjectIsInCache(bucket string, objectName string) *SiaServiceError {
defer cache.timeTrack(time.Now(), "GuaranteeObjectIsInCache")
cache.debugmsg("SiaCacheLayer.GuaranteeObjectIsInCache")
// Minio filesystem layer may request files from .minio.sys bucket
// If we get a request for Minio, we'll pass back success and let Minio deal with it.
if bucket == ".minio.sys" {
return nil
}
// Make sure object exists in database
objInfo, err := cache.GetObjectInfo(bucket, objectName)
if err != nil {
return err
}
// Is file already in cache?
_, serr := os.Stat(objInfo.SrcFile)
if serr == nil {
// File exists in cache
err = cache.dbUpdateCachedStatus(bucket, objectName, 1)
if err != nil {
return err
}
// Increment cached fetch count and update last_fetch
return cache.dbUpdateCachedFetches(bucket, objectName, objInfo.CachedFetches+1)
}
// Object not in cache, must download from Sia.
// First, though, make sure the file was completely uploaded to Sia.
if objInfo.Uploaded == time.Unix(0, 0) {
// File never completed uploading, or was never marked as uploaded in database.
// Neither of these cases should happen, but just in case.
return siaErrorUnknown
}
// Make sure bucket path exists in cache directory
cache.ensureCacheBucketDirExists(bucket)
// Make sure enough space exists in cache
err = cache.guaranteeCacheSpace(objInfo.Size)
if err != nil {
return err
}
// Increment fetch count and update last_fetch BEFORE requesting the download from Sia.
// This will prevent the cache manager from removing the partially downloaded file.
err = cache.dbUpdateSiaFetches(bucket, objectName, objInfo.SiaFetches+1)
if err != nil {
return err
}
var siaObj = cache.getSiaObjectName(bucket, objectName)
derr := get(cache.SiadAddress, "/renter/download/"+siaObj+"?destination="+url.QueryEscape(objInfo.SrcFile))
if derr != nil {
cache.debugmsg(fmt.Sprintf("Error: %s", derr))
return &SiaServiceError{Code: "SiaErrorDaemon", Message: derr.Error()}
}
// After successfully downloading to the cache, make sure the cached flag of the object is set.
return cache.dbUpdateCachedStatus(bucket, objectName, 1)
}
// GetObjectInfo will return object information for the object specified
func (cache *SiaCacheLayer) GetObjectInfo(bucket string, objectName string) (objInfo SiaObjectInfo, e *SiaServiceError) {
cache.debugmsg("SiaCacheLayer.GetObjectInfo")
return cache.dbGetObjectInfo(bucket, objectName)
}
// SetBucketPolicies sets policy on bucket
func (cache *SiaCacheLayer) SetBucketPolicies(bucket string, policyInfo policy.BucketAccessPolicy) *SiaServiceError {
cache.debugmsg("SiaCacheLayer.SetBucketPolicies")
res, _ := json.Marshal(&policyInfo)
return cache.dbUpdateBucketPolicies(bucket, string(res))
}
// GetBucketPolicies will get policy on bucket
func (cache *SiaCacheLayer) GetBucketPolicies(bucket string) (bal policy.BucketAccessPolicy, e *SiaServiceError) {
cache.debugmsg("SiaCacheLayer.GetBucketPolicies")
return cache.dbGetBucketPolicies(bucket)
}
// DeleteBucketPolicies deletes all policies on bucket
func (cache *SiaCacheLayer) DeleteBucketPolicies(bucket string) *SiaServiceError {
cache.debugmsg("SiaCacheLayer.DeleteBucketPolicies")
return cache.dbUpdateBucketPolicies(bucket, "")
}
// Runs periodically to manage the database and cache
func (cache *SiaCacheLayer) manager() {
cache.debugmsg("SiaCacheLayer.manager")
// Check to see if any files in database have completed uploading to Sia.
// If so, update uploaded timestamp in database.
err := cache.checkSiaUploads()
if err != nil {
fmt.Println("Error in DB/Cache Management Process:")
fmt.Println(err)
}
// Remove files from cache that have not been uploaded or fetched in purge_after seconds.
err = cache.purgeCache()
if err != nil {
fmt.Println("Error in DB/Cache Management Process:")
fmt.Println(err)
}
// Check cache disk usage
err = cache.guaranteeCacheSpace(0)
if err != nil {
fmt.Println("Error in DB/Cache Management Process:")
fmt.Println(err)
}
}
// Purge older, infrequently accessed files from cache.
// This function is less strict and doesn't consider max space quota.
func (cache *SiaCacheLayer) purgeCache() *SiaServiceError {
cache.debugmsg("SiaCacheLayer.purgeCache")
buckets, err := cache.ListBuckets()
if err != nil {
return err
}
for _, bucket := range buckets {
objects, err := cache.ListObjects(bucket.Name)
if err != nil {
return err
}
for _, object := range objects {
// Only remove an object from cache here if:
// 1. Object is cached
// 2. Object was uploaded over PurgeAfter seconds ago
// 3. Object hasn't been fetched in over PurgeAfter seconds
if object.Cached == 1 && object.Uploaded != time.Unix(0, 0) {
sinceUploaded := time.Now().Unix() - object.Uploaded.Unix()
sinceFetched := time.Now().Unix() - object.LastFetch.Unix()
if sinceUploaded > object.PurgeAfter && sinceFetched > object.PurgeAfter {
err = cache.removeFromCache(object)
if err != nil {
return err
}
}
}
}
}
return nil
}
func (cache *SiaCacheLayer) removeFromCache(objInfo SiaObjectInfo) *SiaServiceError {
cache.debugmsg(fmt.Sprintf("removeFromCache: %s", objInfo.SrcFile))
// If file doesn't exist in cache, it's falsely labelled. Update and return.
_, err := os.Stat(objInfo.SrcFile)
if err != nil {
return cache.dbUpdateCachedStatus(objInfo.Bucket, objInfo.Name, 0)
}
err = os.Remove(objInfo.SrcFile)
if err != nil {
// File exists but couldn't be deleted. Permission issue?
return siaErrorFailedToDeleteCachedFile
}
return cache.dbUpdateCachedStatus(objInfo.Bucket, objInfo.Name, 0)
}
func (cache *SiaCacheLayer) checkSiaUploads() *SiaServiceError {
cache.debugmsg("SiaCacheLayer.checkSiaUploads")
// Get list of all uploading objects
objs, err := cache.dbListUploadingObjects()
if err != nil {
return err
}
// Get list of all renter files
var rf api.RenterFiles
derr := getAPI(cache.SiadAddress, "/renter/files", &rf)
if derr != nil {
return &SiaServiceError{Code: "SiaErrorDaemon", Message: derr.Error()}
}
// If uploading object is available on Sia, update database
for _, obj := range objs {
var siaObj = cache.getSiaObjectName(obj.Bucket, obj.Name)
for _, file := range rf.Files {
if file.SiaPath == siaObj && file.Available {
cache.debugmsg(fmt.Sprintf(" Upload to Sia completed: %s", obj.Name))
err = cache.dbUpdateObjectUploadedStatus(obj.Bucket, obj.Name, 1)
if err != nil {
return err
}
}
}
}
return nil
}
func (cache *SiaCacheLayer)
|
waitTillSiaUploadCompletes
|
identifier_name
|
|
gateway-sia-cachelayer.go
|
Layer{
|
UploadCheckFreqMs: 3000,
MaxCacheSizeBytes: 10000000000,
CacheTicker: nil,
Db: nil,
DbMutex: &sync.Mutex{},
}
cache.loadSiaEnv()
return cache, nil
}
// Start will start running the Cache Layer
func (cache *SiaCacheLayer) Start() *SiaServiceError {
cache.debugmsg("SiaCacheLayer.Start")
cache.DbMutex = &sync.Mutex{}
cache.ensureCacheDirExists()
// Open and initialize database
err := cache.dbOpenDatabase()
if err != nil {
return err
}
// Start the cache management process
cache.CacheTicker = time.NewTicker(time.Second * time.Duration(cache.ManagerDelaySec))
go func() {
for range cache.CacheTicker.C {
cache.manager()
}
}()
return nil
}
// Stop will stop the SiaCacheLayer
func (cache *SiaCacheLayer) Stop() {
cache.debugmsg("SiaCacheLayer.Stop")
// Stop cache management process
cache.CacheTicker.Stop()
// Close the database
cache.dbCloseDatabase()
}
// InsertBucket will attempt to insert a new bucket
func (cache *SiaCacheLayer) InsertBucket(bucket string) *SiaServiceError {
cache.debugmsg("SiaCacheLayer.InsertBucket")
return cache.dbInsertBucket(bucket)
}
// DeleteBucket will attempt to delete an existing bucket
func (cache *SiaCacheLayer) DeleteBucket(bucket string) *SiaServiceError {
cache.debugmsg("SiaCacheLayer.DeleteBucket")
// Do NOT delete if objects exist in bucket.
objects, serr := cache.ListObjects(bucket)
if serr != nil {
return serr
}
if len(objects) > 0 {
return siaErrorBucketNotEmpty
}
return cache.dbDeleteBucket(bucket)
}
// ListBuckets will return a list of all existing buckets
func (cache *SiaCacheLayer) ListBuckets() (buckets []SiaBucketInfo, e *SiaServiceError) {
cache.debugmsg("SiaCacheLayer.ListBuckets")
return cache.dbListBuckets()
}
// DeleteObject will attempt to delete the object from Sia
func (cache *SiaCacheLayer) DeleteObject(bucket string, objectName string) *SiaServiceError {
cache.debugmsg("SiaCacheLayer.DeleteObject")
err := cache.dbUpdateObjectDeletedStatus(bucket, objectName, 1)
if err != nil {
return err
}
// Tell Sia daemon to delete the object
var siaObj = cache.getSiaObjectName(bucket, objectName)
derr := post(cache.SiadAddress, "/renter/delete/"+siaObj, "")
if derr != nil {
return &SiaServiceError{Code: "SiaErrorDaemon", Message: derr.Error()}
}
return cache.dbDeleteObject(bucket, objectName)
}
// PutObject will attempt to put an object on Sia
func (cache *SiaCacheLayer) PutObject(bucket string, objectName string, size int64, purgeAfter int64, srcFile string) *SiaServiceError {
cache.debugmsg("SiaCacheLayer.PutObject")
// Before inserting to DB, there is a very rare chance that the object already exists in DB
// from a failed upload and Minio crashed or was killed before DB updated to reflect. So just in case
// we will check if the object exists and has a not uploaded status. If so, we will delete that
// record and then continue as normal.
objInfo, e := cache.GetObjectInfo(bucket, objectName)
if e == nil {
// Object does exist. If uploaded, return error. If not uploaded, delete it and continue.
if objInfo.Uploaded.Unix() > 0 {
return siaErrorObjectAlreadyExists
}
e = cache.dbDeleteObject(bucket, objectName)
if e != nil {
return e
}
}
err := cache.dbInsertObject(bucket, objectName, size, time.Now().Unix(), 0, purgeAfter, srcFile, 1)
if err != nil {
return err
}
// Tell Sia daemon to upload the object
siaObj := cache.getSiaObjectName(bucket, objectName)
derr := post(cache.SiadAddress, "/renter/upload/"+siaObj, "source="+srcFile)
if derr != nil {
cache.dbDeleteObject(bucket, objectName)
return &SiaServiceError{Code: "SiaErrorDaemon", Message: derr.Error()}
}
// Need to wait for upload to complete unless background uploading is enabled
if !cache.BackgroundUpload {
err = cache.waitTillSiaUploadCompletes(siaObj)
if err != nil {
cache.dbDeleteObject(bucket, objectName)
return err
}
// Mark object as uploaded
err = cache.dbUpdateObjectUploadedStatus(bucket, objectName, 1)
if err != nil {
cache.dbDeleteObject(bucket, objectName)
return err
}
}
return nil
}
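// A minimal usage sketch for PutObject (illustrative; the bucket, object
// name, sizes, and source path are made up, not taken from the original):
//
// err := cache.PutObject("photos", "cat.png", 524288, 86400, "/tmp/cache/photos/cat.png")
// if err != nil {
// 	// With BackgroundUpload disabled this only returns after the Sia
// 	// daemon reports the file Available; otherwise it returns as soon
// 	// as the upload has been handed to siad.
// 	fmt.Println(err)
// }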
// ListObjects will return a list of existing objects in the bucket provided
func (cache *SiaCacheLayer) ListObjects(bucket string) (objects []SiaObjectInfo, e *SiaServiceError) {
cache.debugmsg("SiaCacheLayer.ListObjects")
return cache.dbListObjects(bucket)
}
// GuaranteeObjectIsInCache will guarantee that the specified object exists in the local cache
func (cache *SiaCacheLayer) GuaranteeObjectIsInCache(bucket string, objectName string) *SiaServiceError {
defer cache.timeTrack(time.Now(), "GuaranteeObjectIsInCache")
cache.debugmsg("SiaCacheLayer.GuaranteeObjectIsInCache")
// Minio filesystem layer may request files from .minio.sys bucket
// If we get a request for Minio, we'll pass back success and let Minio deal with it.
if bucket == ".minio.sys" {
return nil
}
// Make sure object exists in database
objInfo, err := cache.GetObjectInfo(bucket, objectName)
if err != nil {
return err
}
// Is file already in cache?
_, serr := os.Stat(objInfo.SrcFile)
if serr == nil {
// File exists in cache
err = cache.dbUpdateCachedStatus(bucket, objectName, 1)
if err != nil {
return err
}
// Increment cached fetch count and update last_fetch
return cache.dbUpdateCachedFetches(bucket, objectName, objInfo.CachedFetches+1)
}
// Object not in cache, must download from Sia.
// First, though, make sure the file was completely uploaded to Sia.
if objInfo.Uploaded == time.Unix(0, 0) {
// File never completed uploading, or was never marked as uploaded in database.
// Neither of these cases should happen, but just in case.
return siaErrorUnknown
}
// Make sure bucket path exists in cache directory
cache.ensureCacheBucketDirExists(bucket)
// Make sure enough space exists in cache
err = cache.guaranteeCacheSpace(objInfo.Size)
if err != nil {
return err
}
// Increment fetch count and update last_fetch BEFORE requesting d/l from Sia.
// This will prevent the cache manager from removing the partially downloaded file.
err = cache.dbUpdateSiaFetches(bucket, objectName, objInfo.SiaFetches+1)
if err != nil {
return err
}
var siaObj = cache.getSiaObjectName(bucket, objectName)
derr := get(cache.SiadAddress, "/renter/download/"+siaObj+"?destination="+url.QueryEscape(objInfo.SrcFile))
if derr != nil {
cache.debugmsg(fmt.Sprintf("Error: %s", derr))
return &SiaServiceError{Code: "SiaErrorDaemon", Message: derr.Error()}
}
// After successfully downloading to the cache, make sure the cached flag of the object is set.
return cache.dbUpdateCachedStatus(bucket, objectName, 1)
}
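// Typical read path (a sketch, not original code): callers are expected to
// guarantee cache residency before opening the local file.
//
// if err := cache.GuaranteeObjectIsInCache("photos", "cat.png"); err != nil {
// 	return err
// }
// objInfo, err := cache.GetObjectInfo("photos", "cat.png")
// // ...then read objInfo.SrcFile from the local cache directory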
// GetObjectInfo will return object information for the object specified
func (cache *SiaCacheLayer) GetObjectInfo(bucket string, objectName string) (objInfo SiaObjectInfo, e *SiaServiceError) {
cache.debugmsg("SiaCacheLayer.GetObjectInfo")
return cache.dbGetObjectInfo(bucket, objectName)
}
// SetBucketPolicies sets policy on bucket
func (cache *SiaCacheLayer) SetBucketPolicies(bucket string, policyInfo policy.BucketAccessPolicy) *SiaServiceError {
cache.debugmsg("SiaCacheLayer.SetBucketPolicies")
res, _ := json.Marshal(&policyInfo)
return cache.dbUpdateBucketPolicies(bucket, string(res))
}
// GetBucketPolicies will get policy on bucket
func (cache *SiaCacheLayer) GetBucketPolicies(bucket string) (bal policy.BucketAccessPolicy, e *SiaServiceError) {
cache.debugmsg("SiaCacheLayer.GetBucketPolicies")
return cache.dbGetBucketPolicies(bucket)
}
// DeleteBucketPolicies deletes all policies on bucket
func (cache *SiaCacheLayer) DeleteBucketPolicies(bucket string) *SiaServiceError {
cache.debugmsg("SiaCacheLayer.DeleteBucketPolicies")
return cache.dbUpdateBucketPolicies(bucket, "")
}
basic_unet.py
###########################################################
# Define parameters
###########################################################
DATA = 'processed_data'
TRAIN = 'train'
MASKS = 'masks'
TEST = 'test'
OUTPUT = 'output'
SEED = 'some'
ids = np.array([f'image_{i}.png' for i in range(1,31)])
###########################################################
# Set seed
###########################################################
def seed_all(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.cuda.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def seed_some(seed):
random.seed(seed)
torch.manual_seed(seed)
if SEED == 'all':
print("[ Seed setting : slow and reproducible ]")
seed_all(2001)
else:
print("[ Seed setting : fast and random ]")
seed_some(2001)
###########################################################
# Define dataset
###########################################################
class ISBI_Dataset(Dataset):
def __init__(self, train=True, tfms=None):
self.fnames = ids
self.tfms = tfms
def __len__(self):
return len(self.fnames)
def __getitem__(self, idx):
fname = self.fnames[idx]
img = cv2.imread(os.path.join(DATA,TRAIN,fname), cv2.IMREAD_GRAYSCALE)
mask = cv2.imread(os.path.join(DATA,MASKS,fname),cv2.IMREAD_GRAYSCALE)
if self.tfms is not None:
augmented = self.tfms(image=img,mask=mask)
img,mask = augmented['image'],augmented['mask']
img = img/255.0
img = np.expand_dims(img, 0)
img = torch.from_numpy(img.astype(np.float32, copy=False))
mask = mask/255.0
mask = simulation.center_crop(mask)
mask = simulation.oned_to_twod(mask)
mask = torch.from_numpy(mask.astype(np.float32, copy=False))
return img, mask
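# Note: masks are center-cropped (presumably to the U-Net's valid-convolution
# output size, 388x388 for 572x572 inputs) and converted to a two-channel
# background/foreground target, while images stay single-channel.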
###########################################################
# Test if dataset load works
###########################################################
#ds = ISBI_Dataset(tfms = simulation.get_aug_train())
#dl = DataLoader(ds,batch_size=4)
#imgs,masks = next(iter(dl))
#print(imgs.shape, masks.shape)
#print(imgs.dtype, masks.dtype)
#for x in [imgs.numpy(), masks.numpy()]:
# print(x.min(), x.max(), x.mean(), x.std())
# Convert tensors back to arrays
#imgs = imgs.numpy()
#masks = masks.numpy()
#masks = [mask[1] for mask in masks]
#for image, mask in zip(imgs,masks):
# plt.imshow(np.squeeze(image), cmap='gray')
# plt.show()
# plt.clf()
# plt.imshow(mask, cmap='gray')
# plt.show()
# plt.clf()
###########################################################
# Load test and validation dataset
###########################################################
train_set = ISBI_Dataset(train=True, tfms=simulation.get_aug_train())
val_set = ISBI_Dataset(train=False, tfms=simulation.get_aug_train())
image_datasets = {
'train': train_set, 'val': val_set
}
batch_size = 1
dataloaders = {
'train': DataLoader(train_set, batch_size=batch_size, shuffle=True, num_workers=0),
'val': DataLoader(val_set, batch_size=batch_size, shuffle=True, num_workers=0)
}
dataset_sizes = {
x: len(image_datasets[x]) for x in image_datasets.keys()
}
print(dataset_sizes)
###########################################################
# Load U-net
###########################################################
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = pytorch_unet.UNet()
model = model.to(device)
summary(model, input_size=(1, 572, 572))
###########################################################
# Define loss calculation
###########################################################
def calc_loss(pred, target, metrics, bce_weight=0.5):
bce = F.binary_cross_entropy_with_logits(pred, target)
pred = torch.sigmoid(pred)
dice = dice_loss(pred, target)
loss = bce * bce_weight + dice * (1 - bce_weight)
metrics['bce'] += bce.data.cpu().numpy() * target.size(0)
metrics['dice'] += dice.data.cpu().numpy() * target.size(0)
metrics['loss'] += loss.data.cpu().numpy() * target.size(0)
return loss
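# calc_loss blends the two objectives as loss = w*BCE + (1 - w)*Dice with
# w = bce_weight (0.5 by default, i.e. an equal-weight average). BCE is
# computed on raw logits via binary_cross_entropy_with_logits for numerical
# stability, so pred is only passed through sigmoid afterwards, for the
# Dice term.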
def print_metrics(metrics, epoch_samples, phase):
outputs = []
for k in metrics.keys():
outputs.append("{}: {:.4f}".format(k, metrics[k] / epoch_samples))
print("{}: {}".format(phase, ", ".join(outputs)))
###########################################################
# Define training
###########################################################
def train_model(model, optimizer, scheduler, num_epochs=25):
best_model_wts = copy.deepcopy(model.state_dict())
best_loss = 1e10
early_stopping = False
# for figure
epochs = []
train_loss = []
val_loss = []
for epoch in range(num_epochs):
print('-' * 10)
print('Epoch {}/{}'.format(epoch + 1, num_epochs))
epochs.append(epoch+1)
since = time.time()
# Each epoch has a training and validation phase
for phase in ['train', 'val']:
if phase == 'train':
for param_group in optimizer.param_groups:
print("LR", param_group['lr'])
model.train() # Set model to training mode
else:
model.eval() # Set model to evaluate mode
metrics = defaultdict(float)
epoch_samples = 0
for inputs, labels in dataloaders[phase]:
inputs = inputs.to(device)
labels = labels.to(device)
# zero the parameter gradients
optimizer.zero_grad()
# forward
# track history if only in train
with torch.set_grad_enabled(phase == 'train'):
outputs = model(inputs)
loss = calc_loss(outputs, labels, metrics)
# backward + optimize only if in training phase
if phase == 'train':
loss.backward()
optimizer.step()
# statistics
epoch_samples += inputs.size(0)
print_metrics(metrics, epoch_samples, phase)
epoch_loss = metrics['loss'] / epoch_samples
# collect statistics for figure and take lr step
if phase == 'train':
train_loss.append(metrics['loss']/epoch_samples)
scheduler.step()
else:
val_loss.append(metrics['loss']/epoch_samples)
# deep copy the model
if phase == 'val' and epoch_loss < best_loss:
print("saving best model")
best_loss = epoch_loss
best_model_wts = copy.deepcopy(model.state_dict())
epochs_no_improve = 0
elif phase == 'val' and epoch_loss >= best_loss:
epochs_no_improve += 1
if epochs_no_improve == 500:
print('Early stopping!')
early_stopping = True
time_elapsed = time.time() - since
print('{:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
if early_stopping:
break
else:
continue
print('Best val loss: {:.4f}'.format(best_loss))
# Save loss figure
plt.plot(epochs, train_loss, color='g', label = 'train')
plt.plot(epochs, val_loss, color='orange', label = 'test')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.title('Losses')
plt.legend(loc="upper left")
plt.savefig(os.path.join(OUTPUT, 'losses.png'))
#plt.show()
plt.clf()
# load best model weights
model.load_state_dict(best_model_wts)
torch.save(model.state_dict(), os.path.join(OUTPUT, 'bst_unet.model'))
return model
###########################################################
# Run model
###########################################################
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
model = pytorch_unet.UNet().to(device)
# Observe that all parameters are being optimized
optimizer_ft = optim.SGD(model.parameters(), lr=1e-2, momentum = 0.99)
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=1000, gamma=0.1)
model = train_model(model, optimizer_ft, exp_lr_scheduler, num_epochs=1000)
###########################################################
# Predict
###########################################################
class ISBI_Dataset_test(Dataset):
def __init__(self, tfms=None):
self.fnames = np.array([f'image_{i}.png' for i in range(1,4)])
self.tfms = tfms
def __len__(self):
return len(self.fnames)
def __getitem__(self, idx):
fname = self.fnames[idx]
img = cv2.imread(os.path.join(DATA,TEST,fname), cv2.IMREAD_GRAYSCALE)
if self.tfms is not None:
augmented = self.tfms(image=img)
img = augmented['image']
img = img/255.0
return img
model.eval() # Set model to evaluate mode
test_dataset = ISBI_Dataset_test(tfms=simulation.get_aug_test())
# Important to keep batch size equal to one, as each image gets
# split into several tiles
shared.go
URL)
serviceRuntime = runtime.NewServiceRuntime(configStore, dns, hostIP)
apps, err := configStore.ListAssignments(env, pool)
if err != nil {
log.Fatalf("ERROR: Could not retrieve service configs for /%s/%s: %s", env, pool, err)
}
workerChans = make(map[string]chan string)
for _, app := range apps {
appCfg, err := configStore.GetApp(app, env)
if err != nil {
log.Fatalf("ERROR: Could not retrieve service config for /%s/%s: %s", env, pool, err)
}
workerChans[appCfg.Name()] = make(chan string)
}
signalsChan = make(chan os.Signal, 1)
signal.Notify(signalsChan, os.Interrupt, os.Kill, syscall.SIGTERM)
go deregisterHost(signalsChan)
}
func ensureEnv() {
envs, err := configStore.ListEnvs()
if err != nil {
log.Fatalf("ERROR: Could not check envs: %s", err)
}
if strings.TrimSpace(env) == "" {
log.Fatalf("ERROR: Need an env. Use '-env <env>'. Existing envs are: %s.", strings.Join(envs, ","))
}
}
func ensurePool() {
pools, err := configStore.ListPools(env)
if err != nil {
log.Fatalf("ERROR: Could not check pools: %s", err)
}
if strings.TrimSpace(pool) == "" {
log.Fatalf("ERROR: Need a pool. Use '-pool <pool>'. Existing pools are: %s", strings.Join(pools, ","))
}
}
func pullImageAsync(appCfg config.App, errChan chan error) {
// err logged via pullImage
_, err := pullImage(appCfg)
if err != nil {
errChan <- err
return
}
errChan <- nil
}
func pullImage(appCfg config.App) (*docker.Image, error) {
image, err := serviceRuntime.PullImage(appCfg.Version(), appCfg.VersionID())
if image == nil || err != nil {
log.Errorf("ERROR: Could not pull image %s: %s", appCfg.Version(), err)
return nil, err
}
log.Printf("Pulled %s version %s\n", appCfg.Name(), appCfg.Version())
return image, nil
}
func startService(appCfg config.App, logStatus bool) {
desired, err := commander.Balanced(configStore, hostIP, appCfg.Name(), env, pool)
if err != nil {
log.Errorf("ERROR: Could not determine instance count: %s", err)
return
}
running, err := serviceRuntime.InstanceCount(appCfg.Name(), strconv.FormatInt(appCfg.ID(), 10))
if err != nil {
log.Errorf("ERROR: Could not determine running instance count: %s", err)
return
}
for i := 0; i < desired-running; i++ {
container, err := serviceRuntime.Start(env, pool, appCfg)
if err != nil {
log.Errorf("ERROR: Could not start containers: %s", err)
return
}
log.Printf("Started %s version %s as %s\n", appCfg.Name(), appCfg.Version(), container.ID[0:12])
err = serviceRuntime.StopOldVersion(appCfg, 1)
if err != nil {
log.Errorf("ERROR: Could not stop containers: %s", err)
}
}
running, err = serviceRuntime.InstanceCount(appCfg.Name(), strconv.FormatInt(appCfg.ID(), 10))
if err != nil {
log.Errorf("ERROR: Could not determine running instance count: %s", err)
return
}
for i := 0; i < running-desired; i++ {
err := serviceRuntime.Stop(appCfg)
if err != nil {
log.Errorf("ERROR: Could not stop container: %s", err)
}
}
err = serviceRuntime.StopOldVersion(appCfg, -1)
if err != nil {
log.Errorf("ERROR: Could not stop old containers: %s", err)
}
// check the image version, and log any inconsistencies
inspectImage(appCfg)
}
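// startService reconciles running instances toward the balanced count in two
// passes: start desired-running containers, recount, then stop
// running-desired containers. A sketch of the invariant (illustrative values
// only):
//
// desired := 3 // from commander.Balanced
// running := 1 // from serviceRuntime.InstanceCount
// // pass 1 starts desired-running = 2 containers (a no-op when negative);
// // pass 2 stops any surplus after a recount, so the loop converges on
// // exactly `desired` instances of the current version on this host.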
func heartbeatHost() {
_, err := configStore.CreatePool(pool, env)
if err != nil {
log.Fatalf("ERROR: Unabled to create pool %s: %s", pool, err)
}
defer wg.Done()
for {
configStore.UpdateHost(env, pool, config.HostInfo{
HostIP: hostIP,
})
time.Sleep(45 * time.Second)
}
}
func deregisterHost(signals chan os.Signal) {
<-signals
configStore.DeleteHost(env, pool, config.HostInfo{
HostIP: hostIP,
})
discovery.Unregister(serviceRuntime, configStore, env, pool, hostIP, shuttleAddr)
os.Exit(0)
}
func appAssigned(app string) (bool, error) {
assignments, err := configStore.ListAssignments(env, pool)
if err != nil {
return false, err
}
if !utils.StringInSlice(app, assignments) {
return false, nil
}
return true, nil
}
// inspectImage checks that the running image matches the config.
// We only use this to print warnings, since we likely need to deploy a new
// config version to fix the inconsistency.
func inspectImage(appCfg config.App) {
image, err := serviceRuntime.InspectImage(appCfg.Version())
if err != nil {
log.Println("error inspecting image", appCfg.Version())
return
}
if utils.StripSHA(image.ID) != appCfg.VersionID() {
log.Printf("warning: %s image ID does not match config", appCfg.Name())
}
}
func restartContainers(app string, cmdChan chan string) {
defer wg.Done()
logOnce := true
ticker := time.NewTicker(10 * time.Second)
for {
select {
case cmd := <-cmdChan:
assigned, err := appAssigned(app)
if err != nil {
log.Errorf("ERROR: Error retrieving assignments for %s: %s", app, err)
if !loop {
return
}
continue
}
if !assigned {
continue
}
appCfg, err := configStore.GetApp(app, env)
if err != nil {
log.Errorf("ERROR: Error retrieving service config for %s: %s", app, err)
if !loop {
return
}
continue
}
if appCfg.Version() == "" {
if !loop {
return
}
continue
}
if cmd == "deploy" {
_, err = pullImage(appCfg)
if err != nil {
log.Errorf("ERROR: Error pulling image for %s: %s", app, err)
if !loop {
return
}
continue
}
startService(appCfg, logOnce)
}
if cmd == "restart" {
err := serviceRuntime.Stop(appCfg)
if err != nil {
log.Errorf("ERROR: Could not stop %s: %s",
appCfg.Version(), err)
if !loop {
return
}
startService(appCfg, logOnce)
continue
}
}
logOnce = false
case <-ticker.C:
appCfg, err := configStore.GetApp(app, env)
if err != nil {
log.Errorf("ERROR: Error retrieving service config for %s: %s", app, err)
continue
}
assigned, err := appAssigned(app)
if err != nil {
log.Errorf("ERROR: Error retrieving service config for %s: %s", app, err)
if !loop {
return
}
continue
}
if appCfg == nil || !assigned {
log.Errorf("%s no longer exists. Stopping worker.", app)
serviceRuntime.StopAllMatching(app)
delete(workerChans, app)
return
}
if appCfg.Version() == "" {
continue
}
startService(appCfg, logOnce)
}
if !loop {
return
}
}
}
func monitorService(changedConfigs chan *config.ConfigChange) {
for {
var changedConfig *config.ConfigChange
select {
case changedConfig = <-changedConfigs:
if changedConfig.Error != nil {
log.Errorf("ERROR: Error watching changes: %s", changedConfig.Error)
continue
}
if changedConfig.AppConfig == nil {
continue
}
assigned, err := appAssigned(changedConfig.AppConfig.Name())
if err != nil {
log.Errorf("ERROR: Error retrieving service config for %s: %s", changedConfig.AppConfig.Name(), err)
if !loop {
return
}
continue
}
if !assigned {
continue
}
ch, ok := workerChans[changedConfig.AppConfig.Name()]
if !ok {
name := changedConfig.AppConfig.Name()
ch := make(chan string)
workerChans[name] = ch
wg.Add(1)
go restartContainers(name, ch)
ch <- "deploy"
log.Printf("Started new worker for %s\n", name)
continue
}
if changedConfig.Restart {
log.Printf("Restarting %s", changed
|
ensurePool
|
identifier_name
|
shared.go
|
)
serviceRuntime = runtime.NewServiceRuntime(configStore, dns, hostIP)
apps, err := configStore.ListAssignments(env, pool)
if err != nil {
log.Fatalf("ERROR: Could not retrieve service configs for /%s/%s: %s", env, pool, err)
}
workerChans = make(map[string]chan string)
for _, app := range apps {
appCfg, err := configStore.GetApp(app, env)
if err != nil {
log.Fatalf("ERROR: Could not retrieve service config for /%s/%s: %s", env, pool, err)
}
workerChans[appCfg.Name()] = make(chan string)
}
signalsChan = make(chan os.Signal, 1)
signal.Notify(signalsChan, os.Interrupt, os.Kill, syscall.SIGTERM)
go deregisterHost(signalsChan)
}
func ensureEnv()
|
func ensurePool() {
pools, err := configStore.ListPools(env)
if err != nil {
log.Fatalf("ERROR: Could not check pools: %s", err)
}
if strings.TrimSpace(pool) == "" {
log.Fatalf("ERROR: Need a pool. Use '-pool <pool>'. Existing pools are: %s", strings.Join(pools, ","))
}
}
func pullImageAsync(appCfg config.App, errChan chan error) {
// err logged via pullImage
_, err := pullImage(appCfg)
if err != nil {
errChan <- err
return
}
errChan <- nil
}
func pullImage(appCfg config.App) (*docker.Image, error) {
image, err := serviceRuntime.PullImage(appCfg.Version(), appCfg.VersionID())
if image == nil || err != nil {
log.Errorf("ERROR: Could not pull image %s: %s", appCfg.Version(), err)
return nil, err
}
log.Printf("Pulled %s version %s\n", appCfg.Name(), appCfg.Version())
return image, nil
}
func startService(appCfg config.App, logStatus bool) {
desired, err := commander.Balanced(configStore, hostIP, appCfg.Name(), env, pool)
if err != nil {
log.Errorf("ERROR: Could not determine instance count: %s", err)
return
}
running, err := serviceRuntime.InstanceCount(appCfg.Name(), strconv.FormatInt(appCfg.ID(), 10))
if err != nil {
log.Errorf("ERROR: Could not determine running instance count: %s", err)
return
}
for i := 0; i < desired-running; i++ {
container, err := serviceRuntime.Start(env, pool, appCfg)
if err != nil {
log.Errorf("ERROR: Could not start containers: %s", err)
return
}
log.Printf("Started %s version %s as %s\n", appCfg.Name(), appCfg.Version(), container.ID[0:12])
err = serviceRuntime.StopOldVersion(appCfg, 1)
if err != nil {
log.Errorf("ERROR: Could not stop containers: %s", err)
}
}
running, err = serviceRuntime.InstanceCount(appCfg.Name(), strconv.FormatInt(appCfg.ID(), 10))
if err != nil {
log.Errorf("ERROR: Could not determine running instance count: %s", err)
return
}
for i := 0; i < running-desired; i++ {
err := serviceRuntime.Stop(appCfg)
if err != nil {
log.Errorf("ERROR: Could not stop container: %s", err)
}
}
err = serviceRuntime.StopOldVersion(appCfg, -1)
if err != nil {
log.Errorf("ERROR: Could not stop old containers: %s", err)
}
// check the image version, and log any inconsistencies
inspectImage(appCfg)
}
func heartbeatHost() {
_, err := configStore.CreatePool(pool, env)
if err != nil {
log.Fatalf("ERROR: Unabled to create pool %s: %s", pool, err)
}
defer wg.Done()
for {
configStore.UpdateHost(env, pool, config.HostInfo{
HostIP: hostIP,
})
time.Sleep(45 * time.Second)
}
}
func deregisterHost(signals chan os.Signal) {
<-signals
configStore.DeleteHost(env, pool, config.HostInfo{
HostIP: hostIP,
})
discovery.Unregister(serviceRuntime, configStore, env, pool, hostIP, shuttleAddr)
os.Exit(0)
}
func appAssigned(app string) (bool, error) {
assignments, err := configStore.ListAssignments(env, pool)
if err != nil {
return false, err
}
if !utils.StringInSlice(app, assignments) {
return false, nil
}
return true, nil
}
// inspectImage checks that the running image matches the config.
// We only use this to print warnings, since we likely need to deploy a new
// config version to fix the inconsistency.
func inspectImage(appCfg config.App) {
image, err := serviceRuntime.InspectImage(appCfg.Version())
if err != nil {
log.Println("error inspecting image", appCfg.Version())
return
}
if utils.StripSHA(image.ID) != appCfg.VersionID() {
log.Printf("warning: %s image ID does not match config", appCfg.Name())
}
}
func restartContainers(app string, cmdChan chan string) {
defer wg.Done()
logOnce := true
ticker := time.NewTicker(10 * time.Second)
for {
select {
case cmd := <-cmdChan:
assigned, err := appAssigned(app)
if err != nil {
log.Errorf("ERROR: Error retrieving assignments for %s: %s", app, err)
if !loop {
return
}
continue
}
if !assigned {
continue
}
appCfg, err := configStore.GetApp(app, env)
if err != nil {
log.Errorf("ERROR: Error retrieving service config for %s: %s", app, err)
if !loop {
return
}
continue
}
if appCfg.Version() == "" {
if !loop {
return
}
continue
}
if cmd == "deploy" {
_, err = pullImage(appCfg)
if err != nil {
log.Errorf("ERROR: Error pulling image for %s: %s", app, err)
if !loop {
return
}
continue
}
startService(appCfg, logOnce)
}
if cmd == "restart" {
err := serviceRuntime.Stop(appCfg)
if err != nil {
log.Errorf("ERROR: Could not stop %s: %s",
appCfg.Version(), err)
if !loop {
return
}
startService(appCfg, logOnce)
continue
}
}
logOnce = false
case <-ticker.C:
appCfg, err := configStore.GetApp(app, env)
if err != nil {
log.Errorf("ERROR: Error retrieving service config for %s: %s", app, err)
continue
}
assigned, err := appAssigned(app)
if err != nil {
log.Errorf("ERROR: Error retrieving service config for %s: %s", app, err)
if !loop {
return
}
continue
}
if appCfg == nil || !assigned {
log.Errorf("%s no longer exists. Stopping worker.", app)
serviceRuntime.StopAllMatching(app)
delete(workerChans, app)
return
}
if appCfg.Version() == "" {
continue
}
startService(appCfg, logOnce)
}
if !loop {
return
}
}
}
func monitorService(changedConfigs chan *config.ConfigChange) {
for {
var changedConfig *config.ConfigChange
select {
case changedConfig = <-changedConfigs:
if changedConfig.Error != nil {
log.Errorf("ERROR: Error watching changes: %s", changedConfig.Error)
continue
}
if changedConfig.AppConfig == nil {
continue
}
assigned, err := appAssigned(changedConfig.AppConfig.Name())
if err != nil {
log.Errorf("ERROR: Error retrieving service config for %s: %s", changedConfig.AppConfig.Name(), err)
if !loop {
return
}
continue
}
if !assigned {
continue
}
ch, ok := workerChans[changedConfig.AppConfig.Name()]
if !ok {
name := changedConfig.AppConfig.Name()
ch := make(chan string)
workerChans[name] = ch
wg.Add(1)
go restartContainers(name, ch)
ch <- "deploy"
log.Printf("Started new worker for %s\n", name)
continue
}
if changedConfig.Restart {
log.Printf("Restarting %s", changed
|
{
envs, err := configStore.ListEnvs()
if err != nil {
log.Fatalf("ERROR: Could not check envs: %s", err)
}
if strings.TrimSpace(env) == "" {
log.Fatalf("ERROR: Need an env. Use '-env <env>'. Existing envs are: %s.", strings.Join(envs, ","))
}
}
|
identifier_body
|
shared.go
|
; i < desired-running; i++ {
container, err := serviceRuntime.Start(env, pool, appCfg)
if err != nil {
log.Errorf("ERROR: Could not start containers: %s", err)
return
}
log.Printf("Started %s version %s as %s\n", appCfg.Name(), appCfg.Version(), container.ID[0:12])
err = serviceRuntime.StopOldVersion(appCfg, 1)
if err != nil {
log.Errorf("ERROR: Could not stop containers: %s", err)
}
}
running, err = serviceRuntime.InstanceCount(appCfg.Name(), strconv.FormatInt(appCfg.ID(), 10))
if err != nil {
log.Errorf("ERROR: Could not determine running instance count: %s", err)
return
}
for i := 0; i < running-desired; i++ {
err := serviceRuntime.Stop(appCfg)
if err != nil {
log.Errorf("ERROR: Could not stop container: %s", err)
}
}
err = serviceRuntime.StopOldVersion(appCfg, -1)
if err != nil {
log.Errorf("ERROR: Could not stop old containers: %s", err)
}
// check the image version, and log any inconsistencies
inspectImage(appCfg)
}
func heartbeatHost() {
_, err := configStore.CreatePool(pool, env)
if err != nil {
log.Fatalf("ERROR: Unabled to create pool %s: %s", pool, err)
}
defer wg.Done()
for {
configStore.UpdateHost(env, pool, config.HostInfo{
HostIP: hostIP,
})
time.Sleep(45 * time.Second)
}
}
func deregisterHost(signals chan os.Signal) {
<-signals
configStore.DeleteHost(env, pool, config.HostInfo{
HostIP: hostIP,
})
discovery.Unregister(serviceRuntime, configStore, env, pool, hostIP, shuttleAddr)
os.Exit(0)
}
func appAssigned(app string) (bool, error) {
assignments, err := configStore.ListAssignments(env, pool)
if err != nil {
return false, err
}
if !utils.StringInSlice(app, assignments) {
return false, nil
}
return true, nil
}
// inspectImage checks that the running image matches the config.
// We only use this to print warnings, since we likely need to deploy a new
// config version to fix the inconsistency.
func inspectImage(appCfg config.App) {
image, err := serviceRuntime.InspectImage(appCfg.Version())
if err != nil {
log.Println("error inspecting image", appCfg.Version())
return
}
if utils.StripSHA(image.ID) != appCfg.VersionID() {
log.Printf("warning: %s image ID does not match config", appCfg.Name())
}
}
func restartContainers(app string, cmdChan chan string) {
defer wg.Done()
logOnce := true
ticker := time.NewTicker(10 * time.Second)
for {
select {
case cmd := <-cmdChan:
assigned, err := appAssigned(app)
if err != nil {
log.Errorf("ERROR: Error retrieving assignments for %s: %s", app, err)
if !loop {
return
}
continue
}
if !assigned {
continue
}
appCfg, err := configStore.GetApp(app, env)
if err != nil {
log.Errorf("ERROR: Error retrieving service config for %s: %s", app, err)
if !loop {
return
}
continue
}
if appCfg.Version() == "" {
if !loop {
return
}
continue
}
if cmd == "deploy" {
_, err = pullImage(appCfg)
if err != nil {
log.Errorf("ERROR: Error pulling image for %s: %s", app, err)
if !loop {
return
}
continue
}
startService(appCfg, logOnce)
}
if cmd == "restart" {
err := serviceRuntime.Stop(appCfg)
if err != nil {
log.Errorf("ERROR: Could not stop %s: %s",
appCfg.Version(), err)
if !loop {
return
}
startService(appCfg, logOnce)
continue
}
}
logOnce = false
case <-ticker.C:
appCfg, err := configStore.GetApp(app, env)
if err != nil {
log.Errorf("ERROR: Error retrieving service config for %s: %s", app, err)
continue
}
assigned, err := appAssigned(app)
if err != nil {
log.Errorf("ERROR: Error retrieving service config for %s: %s", app, err)
if !loop {
return
}
continue
}
if appCfg == nil || !assigned {
log.Errorf("%s no longer exists. Stopping worker.", app)
serviceRuntime.StopAllMatching(app)
delete(workerChans, app)
return
}
if appCfg.Version() == "" {
continue
}
startService(appCfg, logOnce)
}
if !loop {
return
}
}
}
func monitorService(changedConfigs chan *config.ConfigChange) {
for {
var changedConfig *config.ConfigChange
select {
case changedConfig = <-changedConfigs:
if changedConfig.Error != nil {
log.Errorf("ERROR: Error watching changes: %s", changedConfig.Error)
continue
}
if changedConfig.AppConfig == nil {
continue
}
assigned, err := appAssigned(changedConfig.AppConfig.Name())
if err != nil {
log.Errorf("ERROR: Error retrieving service config for %s: %s", changedConfig.AppConfig.Name(), err)
if !loop {
return
}
continue
}
if !assigned {
continue
}
ch, ok := workerChans[changedConfig.AppConfig.Name()]
if !ok {
name := changedConfig.AppConfig.Name()
ch := make(chan string)
workerChans[name] = ch
wg.Add(1)
go restartContainers(name, ch)
ch <- "deploy"
log.Printf("Started new worker for %s\n", name)
continue
}
if changedConfig.Restart {
log.Printf("Restarting %s", changedConfig.AppConfig.Name())
ch <- "restart"
} else {
ch <- "deploy"
}
}
}
}
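// The fan-out above follows a common Go pattern: one goroutine plus one
// command channel per app. A minimal standalone sketch of the same idea
// (names and the worker body are hypothetical):
//
//	workers := make(map[string]chan string)
//	startWorker := func(app string) chan string {
//	    ch := make(chan string)
//	    go func() {
//	        for cmd := range ch {
//	            fmt.Printf("%s: %s\n", app, cmd)
//	        }
//	    }()
//	    return ch
//	}
//	if _, ok := workers["web"]; !ok {
//	    workers["web"] = startWorker("web")
//	}
//	workers["web"] <- "deploy"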
type dumpConfig struct {
Pools []string
Hosts []config.HostInfo
Configs []config.AppDefinition
Regs []config.ServiceRegistration
}
// Dump everything related to a single environment from galaxy to stdout,
// including current runtime config, hosts, IPs etc.
// This isn't really useful other than to sync between config backends, but we
// can probably convert this to a better backup once we stabilize the code some
// more.
func dump(env string) {
envDump := &dumpConfig{
Configs: []config.AppDefinition{},
Regs: []config.ServiceRegistration{},
}
pools, err := configStore.ListPools(env)
if err != nil {
log.Fatal(err)
}
envDump.Pools = pools
for _, pool := range pools {
hosts, err := configStore.ListHosts(env, pool)
if err != nil {
log.Fatal(err)
}
for _, host := range hosts {
host.Pool = pool
envDump.Hosts = append(envDump.Hosts, host)
}
}
apps, err := configStore.ListApps(env)
if err != nil {
log.Fatal(err)
}
for _, app := range apps {
// AppDefinition is intended to be serializable itself
if ad, ok := app.(*config.AppDefinition); ok {
envDump.Configs = append(envDump.Configs, *ad)
continue
}
// otherwise, manually convert the App to an AppDefinition
ad := config.AppDefinition{
AppName: app.Name(),
Image: app.Version(),
ImageID: app.VersionID(),
Environment: app.Env(),
}
for _, pool := range app.RuntimePools() {
ad.SetProcesses(pool, app.GetProcesses(pool))
ad.SetMemory(pool, app.GetMemory(pool))
ad.SetCPUShares(pool, app.GetCPUShares(pool))
}
envDump.Configs = append(envDump.Configs, ad)
}
// The registrations are temporary, but dump them anyway, so we can try and
// convert an environment by keeping the runtime config in sync.
regs, err := configStore.ListRegistrations(env)
if err != nil {
log.Fatal(err)
}
envDump.Regs = append(envDump.Regs, regs...)
js, err := json.MarshalIndent(envDump, "", " ")
if err != nil {
log.Fatal(err)
}
os.Stdout.Write(js)
}
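// dump pairs with restore below. A hedged example of wiring the two together
// from a shell (the "galaxy" CLI name and flags are assumptions):
//
//	galaxy dump -env production > production.json
//	galaxy restore -env staging < production.json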
// Restore everything we can from a Galaxy dump on stdin.
// This will probably panic if not using consul.
func restore(env string) {
js, err := ioutil.ReadAll(os.Stdin)
if err != nil {
log.Fatal(err)
}
envDump := dumpConfig{}
|
err = json.Unmarshal(js, &envDump)
|
random_line_split
|
|
shared.go
|
)
serviceRuntime = runtime.NewServiceRuntime(configStore, dns, hostIP)
apps, err := configStore.ListAssignments(env, pool)
if err != nil {
log.Fatalf("ERROR: Could not retrieve service configs for /%s/%s: %s", env, pool, err)
}
workerChans = make(map[string]chan string)
for _, app := range apps {
appCfg, err := configStore.GetApp(app, env)
if err != nil {
log.Fatalf("ERROR: Could not retrieve service config for /%s/%s: %s", env, pool, err)
}
workerChans[appCfg.Name()] = make(chan string)
}
signalsChan = make(chan os.Signal, 1)
signal.Notify(signalsChan, os.Interrupt, os.Kill, syscall.SIGTERM)
go deregisterHost(signalsChan)
}
func ensureEnv() {
envs, err := configStore.ListEnvs()
if err != nil {
log.Fatalf("ERROR: Could not check envs: %s", err)
}
if strings.TrimSpace(env) == "" {
log.Fatalf("ERROR: Need an env. Use '-env <env>'. Existing envs are: %s.", strings.Join(envs, ","))
}
}
func ensurePool() {
pools, err := configStore.ListPools(env)
if err != nil {
log.Fatalf("ERROR: Could not check pools: %s", err)
}
if strings.TrimSpace(pool) == "" {
log.Fatalf("ERROR: Need a pool. Use '-pool <pool>'. Existing pools are: %s", strings.Join(pools, ","))
}
}
func pullImageAsync(appCfg config.App, errChan chan error) {
// err logged via pullImage
_, err := pullImage(appCfg)
if err != nil {
errChan <- err
return
}
errChan <- nil
}
func pullImage(appCfg config.App) (*docker.Image, error) {
image, err := serviceRuntime.PullImage(appCfg.Version(), appCfg.VersionID())
if image == nil || err != nil {
log.Errorf("ERROR: Could not pull image %s: %s", appCfg.Version(), err)
return nil, err
}
log.Printf("Pulled %s version %s\n", appCfg.Name(), appCfg.Version())
return image, nil
}
func startService(appCfg config.App, logStatus bool) {
desired, err := commander.Balanced(configStore, hostIP, appCfg.Name(), env, pool)
if err != nil {
log.Errorf("ERROR: Could not determine instance count: %s", err)
return
}
running, err := serviceRuntime.InstanceCount(appCfg.Name(), strconv.FormatInt(appCfg.ID(), 10))
if err != nil {
log.Errorf("ERROR: Could not determine running instance count: %s", err)
return
}
for i := 0; i < desired-running; i++ {
container, err := serviceRuntime.Start(env, pool, appCfg)
if err != nil {
log.Errorf("ERROR: Could not start containers: %s", err)
return
}
log.Printf("Started %s version %s as %s\n", appCfg.Name(), appCfg.Version(), container.ID[0:12])
err = serviceRuntime.StopOldVersion(appCfg, 1)
if err != nil {
log.Errorf("ERROR: Could not stop containers: %s", err)
}
}
running, err = serviceRuntime.InstanceCount(appCfg.Name(), strconv.FormatInt(appCfg.ID(), 10))
if err != nil {
log.Errorf("ERROR: Could not determine running instance count: %s", err)
return
}
for i := 0; i < running-desired; i++ {
err := serviceRuntime.Stop(appCfg)
if err != nil {
log.Errorf("ERROR: Could not stop container: %s", err)
}
}
err = serviceRuntime.StopOldVersion(appCfg, -1)
if err != nil {
log.Errorf("ERROR: Could not stop old containers: %s", err)
}
// check the image version, and log any inconsistencies
inspectImage(appCfg)
}
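// startService converges the running instance count toward the balanced
// target: start (desired - running) containers, then stop the surplus. A
// minimal sketch of the same converge step in isolation (pure ints, no
// Docker; the helper name is hypothetical):
//
//	func converge(desired, running int) (toStart, toStop int) {
//	    if desired > running {
//	        return desired - running, 0
//	    }
//	    return 0, running - desired
//	}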
func heartbeatHost() {
_, err := configStore.CreatePool(pool, env)
if err != nil {
log.Fatalf("ERROR: Unabled to create pool %s: %s", pool, err)
}
defer wg.Done()
for {
configStore.UpdateHost(env, pool, config.HostInfo{
HostIP: hostIP,
})
time.Sleep(45 * time.Second)
}
}
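// A ticker-based equivalent of the heartbeat loop above (sketch only; the
// original discards UpdateHost's result, so its error behavior is an
// assumption):
//
//	t := time.NewTicker(45 * time.Second)
//	defer t.Stop()
//	for range t.C {
//	    configStore.UpdateHost(env, pool, config.HostInfo{HostIP: hostIP})
//	}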
func deregisterHost(signals chan os.Signal) {
<-signals
configStore.DeleteHost(env, pool, config.HostInfo{
HostIP: hostIP,
})
discovery.Unregister(serviceRuntime, configStore, env, pool, hostIP, shuttleAddr)
os.Exit(0)
}
func appAssigned(app string) (bool, error) {
assignments, err := configStore.ListAssignments(env, pool)
if err != nil {
return false, err
}
if !utils.StringInSlice(app, assignments) {
return false, nil
}
return true, nil
}
// inspectImage checks that the running image matches the config.
// We only use this to print warnings, since we likely need to deploy a new
// config version to fix the inconsistency.
func inspectImage(appCfg config.App) {
image, err := serviceRuntime.InspectImage(appCfg.Version())
if err != nil {
log.Println("error inspecting image", appCfg.Version())
return
}
if utils.StripSHA(image.ID) != appCfg.VersionID() {
log.Printf("warning: %s image ID does not match config", appCfg.Name())
}
}
func restartContainers(app string, cmdChan chan string) {
defer wg.Done()
logOnce := true
ticker := time.NewTicker(10 * time.Second)
for {
select {
case cmd := <-cmdChan:
assigned, err := appAssigned(app)
if err != nil {
log.Errorf("ERROR: Error retrieving assignments for %s: %s", app, err)
if !loop {
return
}
continue
}
if !assigned {
continue
}
appCfg, err := configStore.GetApp(app, env)
if err != nil {
log.Errorf("ERROR: Error retrieving service config for %s: %s", app, err)
if !loop {
return
}
continue
}
if appCfg.Version() == "" {
if !loop {
return
}
continue
}
if cmd == "deploy" {
_, err = pullImage(appCfg)
if err != nil {
log.Errorf("ERROR: Error pulling image for %s: %s", app, err)
if !loop
|
continue
}
startService(appCfg, logOnce)
}
if cmd == "restart" {
err := serviceRuntime.Stop(appCfg)
if err != nil {
log.Errorf("ERROR: Could not stop %s: %s",
appCfg.Version(), err)
if !loop {
return
}
startService(appCfg, logOnce)
continue
}
}
logOnce = false
case <-ticker.C:
appCfg, err := configStore.GetApp(app, env)
if err != nil {
log.Errorf("ERROR: Error retrieving service config for %s: %s", app, err)
continue
}
assigned, err := appAssigned(app)
if err != nil {
log.Errorf("ERROR: Error retrieving service config for %s: %s", app, err)
if !loop {
return
}
continue
}
if appCfg == nil || !assigned {
log.Errorf("%s no longer exists. Stopping worker.", app)
serviceRuntime.StopAllMatching(app)
delete(workerChans, app)
return
}
if appCfg.Version() == "" {
continue
}
startService(appCfg, logOnce)
}
if !loop {
return
}
}
}
func monitorService(changedConfigs chan *config.ConfigChange) {
for {
var changedConfig *config.ConfigChange
select {
case changedConfig = <-changedConfigs:
if changedConfig.Error != nil {
log.Errorf("ERROR: Error watching changes: %s", changedConfig.Error)
continue
}
if changedConfig.AppConfig == nil {
continue
}
assigned, err := appAssigned(changedConfig.AppConfig.Name())
if err != nil {
log.Errorf("ERROR: Error retrieving service config for %s: %s", changedConfig.AppConfig.Name(), err)
if !loop {
return
}
continue
}
if !assigned {
continue
}
ch, ok := workerChans[changedConfig.AppConfig.Name()]
if !ok {
name := changedConfig.AppConfig.Name()
ch := make(chan string)
workerChans[name] = ch
wg.Add(1)
go restartContainers(name, ch)
ch <- "deploy"
log.Printf("Started new worker for %s\n", name)
continue
}
if changedConfig.Restart {
log.Printf("Restarting %s", changed
|
{
return
}
|
conditional_block
|
board.rs
|
: usize,
// _creature_rank_metric: usize,
// Fields relevant for time or history
year: f64,
// Fields relevant for temperature
pub climate: Climate,
// Miscellaneous
pub selected_creature: SelectedCreature<B>,
}
impl<B: NeuralNet + GenerateRandom> Default for Board<B> {
fn default() -> Self {
let board_size = DEFAULT_BOARD_SIZE;
let noise_step_size = DEFAULT_NOISE_STEP_SIZE;
let creature_minimum = DEFAULT_CREATURE_MINIMUM;
let min_temp = DEFAULT_MIN_TEMP;
let max_temp = DEFAULT_MAX_TEMP;
return Board::new_random(
board_size,
noise_step_size,
creature_minimum,
min_temp,
max_temp,
);
}
}
impl<B: NeuralNet> Board<B> {
pub fn new(board_width: usize, board_height: usize, terrain: Terrain, creature_minimum: usize, soft_bodies_in_positions: SoftBodiesInPositions<B>,
creatures: Vec<HLSoftBody<B>>, creature_id_up_to: usize, year: f64, climate: Climate, selected_creature: SelectedCreature<B>) -> Board<B> {
Board {
board_width,
board_height,
terrain,
creature_minimum,
soft_bodies_in_positions,
creatures,
creature_id_up_to,
year,
climate,
selected_creature,
}
}
}
impl<B: NeuralNet + GenerateRandom> Board<B> {
/// Randomly generates a new `Board`.
pub fn new_random(
board_size: BoardSize,
noise_step_size: f64,
creature_minimum: usize,
min_temp: f64,
max_temp: f64,
) -> Self {
let creatures = Vec::with_capacity(creature_minimum);
// Initialize climate.
let mut climate = Climate::new(min_temp, max_temp);
climate.update(0.0);
let mut board = Board {
board_width: board_size.0,
board_height: board_size.1,
terrain: Terrain::generate_perlin(board_size, noise_step_size),
creature_minimum,
soft_bodies_in_positions: SoftBodiesInPositions::new_allocated(board_size),
creatures,
creature_id_up_to: 0,
year: 0.0,
climate,
selected_creature: SelectedCreature::default(),
};
// Initialize creatures.
board.maintain_creature_minimum();
return board;
}
/// Maintains the creature minimum by adding random creatures until there are at least `self.creature_minimum` creatures.
///
/// # Processing equivalent
/// This function is the equivalent of *Board.pde/maintainCreatureMinimum* with *choosePreexisting* set to false.
fn maintain_creature_minimum(&mut self) {
while self.creatures.len() < self.creature_minimum {
let board_size = self.get_board_size();
let creature = HLSoftBody::from(SoftBody::new_random(board_size, self.year));
// Initialize in `SoftBodiesInPositions` as well.
creature.set_sbip(&mut self.soft_bodies_in_positions, board_size);
// Just to set the prevSBIP variables.
creature.set_sbip(&mut self.soft_bodies_in_positions, board_size);
self.creatures.push(creature);
self.creature_id_up_to += 1;
}
}
}
impl<B: NeuralNet + RecombinationInfinite + GenerateRandom> Board<B> {
pub fn update(&mut self, time_step: f64) {
self.year += time_step;
self.climate.update(self.year);
let temp_change_into_frame =
self.climate.get_temperature() - self.climate.get_growth_rate(self.year - time_step);
let temp_change_out_of_frame =
self.climate.get_growth_rate(self.year + time_step) - self.climate.get_temperature();
if temp_change_into_frame * temp_change_out_of_frame < 0.0 {
// Temperature change flipped direction
self.terrain.update_all(self.year, &self.climate);
}
self.update_creatures(time_step);
// Kill weak creatures.
self.remove_dead_creatures();
// Let creatures reproduce
self.creatures_reproduce();
// Experimental: this was moved from above to always keep the creature minimum.
self.maintain_creature_minimum();
// Move the creatures around on the board
self.move_creatures(time_step);
}
}
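// A minimal sketch of a driver loop for `Board::update` (assumptions: the
// default `Brain` satisfies the trait bounds and a fixed step in years):
//
// let mut board: Board<Brain> = Board::default();
// let time_step = 0.001;
// for _ in 0..10_000 {
//     board.update(time_step);
// }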
impl<B: NeuralNet + RecombinationInfinite> Board<B> {
fn creatures_reproduce(&mut self) {
let mut babies = Vec::new();
// Keep the borrow checker happy
{
let time = self.get_time();
let board_size = self.get_board_size();
let sbip = &mut self.soft_bodies_in_positions;
for c in &mut self.creatures {
let maybe_baby = c.try_reproduce(time, sbip, board_size);
if let Some(baby) = maybe_baby {
babies.push(baby);
}
}
}
babies.into_iter().for_each(|c| self.creatures.push(c));
}
}
impl<B: NeuralNet> Board<B> {
/// Selects the oldest creature still alive.
pub fn select_oldest(&mut self) {
let oldest = self.creatures.iter().fold(&self.creatures[0], |c_old, c| {
if c.borrow().get_birth_time() < c_old.borrow().get_birth_time() {
c
} else {
c_old
}
});
self.selected_creature.select(oldest.clone());
}
/// Selects the biggest creature.
pub fn select_biggest(&mut self) {
let biggest = self.creatures.iter().fold(&self.creatures[0], |c_old, c| {
if c.borrow().get_energy() > c_old.borrow().get_energy() {
c
} else {
c_old
}
});
self.selected_creature.select(biggest.clone());
}
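// Sketch: each fold above can also be written with iterator adapters, e.g.
// the oldest creature via `min_by` (assumption: birth times are never NaN,
// so `partial_cmp(..).unwrap()` is safe):
//
// let oldest = self.creatures.iter().min_by(|a, b| {
//     a.borrow().get_birth_time()
//         .partial_cmp(&b.borrow().get_birth_time())
//         .unwrap()
// });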
#[cfg(not(multithreading))]
fn update_brains(&mut self) {
self.creatures
.iter()
.map(|c| c.borrow_mut())
.for_each(|mut c| {
let creature: &mut SoftBody<B> = &mut c;
let env = crate::brain::Environment::new(&self.terrain, &creature.base);
creature.brain.run_with(&env);
});
}
#[cfg(multithreading)]
fn update_brains(&mut self) {
self.creatures
.par_iter()
.map(|c| c.borrow_mut())
.for_each(|mut c| {
let env = crate::brain::Environment::new(&self.terrain, &c.base);
c.brain.run_with(&env);
});
}
pub fn update_creatures(&mut self, time_step: f64) {
use crate::brain::EnvironmentMut;
let time = self.year;
let board_size = self.get_board_size();
for c_rc in &self.creatures {
// These functions call `borrow_mut()`
c_rc.collide(&self.soft_bodies_in_positions);
let mut c = c_rc.borrow_mut();
c.record_energy();
c.metabolize(time_step, time);
}
self.update_brains();
let use_output = true;
if use_output {
for c_rc in &self.creatures {
let creature: &mut SoftBody<B> = &mut c_rc.borrow_mut();
let mut env = EnvironmentMut::new(
&mut self.terrain,
&mut creature.base,
board_size,
time,
&self.climate,
&self.soft_bodies_in_positions,
c_rc.clone(),
);
creature.brain.use_output(&mut env, time_step);
}
}
}
// #[cfg(multithreading)]
pub fn move_creatures(&mut self, time_step: f64) {
let board_size = self.get_board_size();
for c in &self.creatures {
c.apply_motions(
time_step * OBJECT_TIMESTEPS_PER_YEAR,
board_size,
&self.terrain,
&mut self.soft_bodies_in_positions,
);
}
}
pub fn prepare_for_drawing(&mut self) {
self.terrain.update_all(self.year, &self.climate);
}
/// Checks for all creatures whether they are fit enough to live and kills them off if they're not.
///
/// Utilizes the `should_die` function of `SoftBody`.
fn remove_dead_creatures(&mut self) {
let time = self.get_time();
let board_size = self.get_board_size();
let terrain = &mut self.terrain;
let climate = &self.climate;
let sbip = &mut self.soft_bodies_in_positions;
// TODO: possibly optimise code
let mut i = 0;
while i < self.creatures.len() {
// let creature = &mut self.creatures[i];
if self.creatures[i].borrow().should_die() {
self.creatures[i].return_to_earth(time, board_size, terrain, climate, sbip);
self.selected_creature
.unselect_if_dead(self.creatures[i].clone());
self.creatures.remove(i);
// println!("Dead!");
} else
|
{
i += 1;
}
|
conditional_block
|
|
board.rs
|
so, removes it by setting `self.0` to `None`.
pub fn unselect_if_dead(&mut self, creature: HLSoftBody<B>) {
if let Some(sel_creature) = &self.0 {
// If `creature` isn't the same as `self.selected_creature`.
if *sel_creature != creature {
// Then don't change to `None`.
return;
}
// Else go on
}
self.0 = None;
}
pub fn select(&mut self, creature: HLSoftBody<B>) {
self.0 = Some(creature);
}
pub fn deselect(&mut self) {
self.0 = None;
}
}
pub struct Board<B: NeuralNet = Brain> {
// Fields relevant for the board itself.
board_width: usize,
board_height: usize,
pub terrain: Terrain,
// Fields relevant for the creatures.
creature_minimum: usize,
pub soft_bodies_in_positions: SoftBodiesInPositions<B>,
pub creatures: Vec<HLSoftBody<B>>,
creature_id_up_to: usize,
// _creature_rank_metric: usize,
// Fields relevant for time or history
year: f64,
// Fields relevant for temperature
pub climate: Climate,
// Miscellaneous
pub selected_creature: SelectedCreature<B>,
}
impl<B: NeuralNet + GenerateRandom> Default for Board<B> {
fn default() -> Self {
let board_size = DEFAULT_BOARD_SIZE;
let noise_step_size = DEFAULT_NOISE_STEP_SIZE;
let creature_minimum = DEFAULT_CREATURE_MINIMUM;
let min_temp = DEFAULT_MIN_TEMP;
let max_temp = DEFAULT_MAX_TEMP;
return Board::new_random(
board_size,
noise_step_size,
creature_minimum,
min_temp,
max_temp,
);
}
}
impl<B: NeuralNet> Board<B> {
pub fn new(board_width: usize, board_height: usize, terrain: Terrain, creature_minimum: usize, soft_bodies_in_positions: SoftBodiesInPositions<B>,
creatures: Vec<HLSoftBody<B>>, creature_id_up_to: usize, year: f64, climate: Climate, selected_creature: SelectedCreature<B>) -> Board<B> {
Board {
board_width,
board_height,
terrain,
creature_minimum,
soft_bodies_in_positions,
creatures,
creature_id_up_to,
year,
climate,
selected_creature,
}
}
}
impl<B: NeuralNet + GenerateRandom> Board<B> {
/// Randomly generates a new `Board`.
pub fn new_random(
board_size: BoardSize,
noise_step_size: f64,
creature_minimum: usize,
min_temp: f64,
max_temp: f64,
) -> Self {
let creatures = Vec::with_capacity(creature_minimum);
// Initialize climate.
let mut climate = Climate::new(min_temp, max_temp);
climate.update(0.0);
let mut board = Board {
board_width: board_size.0,
board_height: board_size.1,
terrain: Terrain::generate_perlin(board_size, noise_step_size),
creature_minimum,
soft_bodies_in_positions: SoftBodiesInPositions::new_allocated(board_size),
creatures,
creature_id_up_to: 0,
year: 0.0,
climate,
selected_creature: SelectedCreature::default(),
};
// Initialize creatures.
|
board.maintain_creature_minimum();
return board;
}
/// Maintains the creature minimum by adding random creatures until there are at least `self.creature_minimum` creatures.
///
/// # Processing equivalent
/// This function is the equivalent of *Board.pde/maintainCreatureMinimum* with *choosePreexisting* set to false.
fn maintain_creature_minimum(&mut self) {
while self.creatures.len() < self.creature_minimum {
let board_size = self.get_board_size();
let creature = HLSoftBody::from(SoftBody::new_random(board_size, self.year));
// Initialize in `SoftBodiesInPositions` as well.
creature.set_sbip(&mut self.soft_bodies_in_positions, board_size);
// Just to set the prevSBIP variables.
creature.set_sbip(&mut self.soft_bodies_in_positions, board_size);
self.creatures.push(creature);
self.creature_id_up_to += 1;
}
}
}
impl<B: NeuralNet + RecombinationInfinite + GenerateRandom> Board<B> {
pub fn update(&mut self, time_step: f64) {
self.year += time_step;
self.climate.update(self.year);
let temp_change_into_frame =
self.climate.get_temperature() - self.climate.get_growth_rate(self.year - time_step);
let temp_change_out_of_frame =
self.climate.get_growth_rate(self.year + time_step) - self.climate.get_temperature();
if temp_change_into_frame * temp_change_out_of_frame < 0.0 {
// Temperature change flipped direction
self.terrain.update_all(self.year, &self.climate);
}
self.update_creatures(time_step);
// Kill weak creatures.
self.remove_dead_creatures();
// Let creatures reproduce
self.creatures_reproduce();
// Experimental: this was moved from above to always keep the creature minimum.
self.maintain_creature_minimum();
// Move the creatures around on the board
self.move_creatures(time_step);
}
}
impl<B: NeuralNet + RecombinationInfinite> Board<B> {
fn creatures_reproduce(&mut self) {
let mut babies = Vec::new();
// Keep the borrow checker happy
{
let time = self.get_time();
let board_size = self.get_board_size();
let sbip = &mut self.soft_bodies_in_positions;
for c in &mut self.creatures {
let maybe_baby = c.try_reproduce(time, sbip, board_size);
if let Some(baby) = maybe_baby {
babies.push(baby);
}
}
}
babies.into_iter().for_each(|c| self.creatures.push(c));
}
}
impl<B: NeuralNet> Board<B> {
/// Selects the oldest creature still alive.
pub fn select_oldest(&mut self) {
let oldest = self.creatures.iter().fold(&self.creatures[0], |c_old, c| {
if c.borrow().get_birth_time() < c_old.borrow().get_birth_time() {
c
} else {
c_old
}
});
self.selected_creature.select(oldest.clone());
}
/// Selects the biggest creature.
pub fn select_biggest(&mut self) {
let biggest = self.creatures.iter().fold(&self.creatures[0], |c_old, c| {
if c.borrow().get_energy() > c_old.borrow().get_energy() {
c
} else {
c_old
}
});
self.selected_creature.select(biggest.clone());
}
#[cfg(not(multithreading))]
fn update_brains(&mut self) {
self.creatures
.iter()
.map(|c| c.borrow_mut())
.for_each(|mut c| {
let creature: &mut SoftBody<B> = &mut c;
let env = crate::brain::Environment::new(&self.terrain, &creature.base);
creature.brain.run_with(&env);
});
}
#[cfg(multithreading)]
fn update_brains(&mut self) {
self.creatures
.par_iter()
.map(|c| c.borrow_mut())
.for_each(|mut c| {
let env = crate::brain::Environment::new(&self.terrain, &c.base);
c.brain.run_with(&env);
});
}
pub fn update_creatures(&mut self, time_step: f64) {
use crate::brain::EnvironmentMut;
let time = self.year;
let board_size = self.get_board_size();
for c_rc in &self.creatures {
// These functions call `borrow_mut()`
c_rc.collide(&self.soft_bodies_in_positions);
let mut c = c_rc.borrow_mut();
c.record_energy();
c.metabolize(time_step, time);
}
self.update_brains();
let use_output = true;
if use_output {
for c_rc in &self.creatures {
let creature: &mut SoftBody<B> = &mut c_rc.borrow_mut();
let mut env = EnvironmentMut::new(
&mut self.terrain,
&mut creature.base,
board_size,
time,
&self.climate,
&self.soft_bodies_in_positions,
c_rc.clone(),
);
creature.brain.use_output(&mut env, time_step);
}
}
}
// #[cfg(multithreading)]
pub fn move_creatures(&mut self, time_step: f64) {
let board_size = self.get_board_size();
for c in &self.creatures {
c.apply_motions(
time_step * OBJECT_TIMESTEPS_PER_YEAR,
board_size,
&self.terrain,
&mut self.soft_bodies_in_positions,
);
}
}
pub fn prepare_for_drawing(&mut self) {
self.terrain.update_all(self.year
|
random_line_split
|
|
board.rs
|
ATURE_MINIMUM;
let min_temp = DEFAULT_MIN_TEMP;
let max_temp = DEFAULT_MAX_TEMP;
return Board::new_random(
board_size,
noise_step_size,
creature_minimum,
min_temp,
max_temp,
);
}
}
impl<B: NeuralNet> Board<B> {
pub fn new(board_width: usize, board_height: usize, terrain: Terrain, creature_minimum: usize, soft_bodies_in_positions: SoftBodiesInPositions<B>,
creatures: Vec<HLSoftBody<B>>, creature_id_up_to: usize, year: f64, climate: Climate, selected_creature: SelectedCreature<B>) -> Board<B> {
Board {
board_width,
board_height,
terrain,
creature_minimum,
soft_bodies_in_positions,
creatures,
creature_id_up_to,
year,
climate,
selected_creature,
}
}
}
impl<B: NeuralNet + GenerateRandom> Board<B> {
/// Randomly generates a new `Board`.
pub fn new_random(
board_size: BoardSize,
noise_step_size: f64,
creature_minimum: usize,
min_temp: f64,
max_temp: f64,
) -> Self {
let creatures = Vec::with_capacity(creature_minimum);
// Initialize climate.
let mut climate = Climate::new(min_temp, max_temp);
climate.update(0.0);
let mut board = Board {
board_width: board_size.0,
board_height: board_size.1,
terrain: Terrain::generate_perlin(board_size, noise_step_size),
creature_minimum,
soft_bodies_in_positions: SoftBodiesInPositions::new_allocated(board_size),
creatures,
creature_id_up_to: 0,
year: 0.0,
climate,
selected_creature: SelectedCreature::default(),
};
// Initialize creatures.
board.maintain_creature_minimum();
return board;
}
/// Maintains the creature minimum by adding random creatures until there are at least `self.creature_minimum` creatures.
///
/// # Processing equivalent
/// This function is the equivalent of *Board.pde/maintainCreatureMinimum* with *choosePreexisting* set to false.
fn maintain_creature_minimum(&mut self) {
while self.creatures.len() < self.creature_minimum {
let board_size = self.get_board_size();
let creature = HLSoftBody::from(SoftBody::new_random(board_size, self.year));
// Initialize in `SoftBodiesInPositions` as well.
creature.set_sbip(&mut self.soft_bodies_in_positions, board_size);
// Just to set the prevSBIP variables.
creature.set_sbip(&mut self.soft_bodies_in_positions, board_size);
self.creatures.push(creature);
self.creature_id_up_to += 1;
}
}
}
impl<B: NeuralNet + RecombinationInfinite + GenerateRandom> Board<B> {
pub fn update(&mut self, time_step: f64) {
self.year += time_step;
self.climate.update(self.year);
let temp_change_into_frame =
self.climate.get_temperature() - self.climate.get_growth_rate(self.year - time_step);
let temp_change_out_of_frame =
self.climate.get_growth_rate(self.year + time_step) - self.climate.get_temperature();
if temp_change_into_frame * temp_change_out_of_frame < 0.0 {
// Temperature change flipped direction
self.terrain.update_all(self.year, &self.climate);
}
self.update_creatures(time_step);
// Kill weak creatures.
self.remove_dead_creatures();
// Let creatures reproduce
self.creatures_reproduce();
// Experimental: this was moved from above to always keep the creature minimum.
self.maintain_creature_minimum();
// Move the creatures around on the board
self.move_creatures(time_step);
}
}
impl<B: NeuralNet + RecombinationInfinite> Board<B> {
fn creatures_reproduce(&mut self) {
let mut babies = Vec::new();
// Keep the borrow checker happy
{
let time = self.get_time();
let board_size = self.get_board_size();
let sbip = &mut self.soft_bodies_in_positions;
for c in &mut self.creatures {
let maybe_baby = c.try_reproduce(time, sbip, board_size);
if let Some(baby) = maybe_baby {
babies.push(baby);
}
}
}
babies.into_iter().for_each(|c| self.creatures.push(c));
}
}
impl<B: NeuralNet> Board<B> {
/// Selects the oldest creature still alive.
pub fn select_oldest(&mut self) {
let oldest = self.creatures.iter().fold(&self.creatures[0], |c_old, c| {
if c.borrow().get_birth_time() < c_old.borrow().get_birth_time() {
c
} else {
c_old
}
});
self.selected_creature.select(oldest.clone());
}
/// Selects the biggest creature.
pub fn select_biggest(&mut self) {
let biggest = self.creatures.iter().fold(&self.creatures[0], |c_old, c| {
if c.borrow().get_energy() > c_old.borrow().get_energy() {
c
} else {
c_old
}
});
self.selected_creature.select(biggest.clone());
}
#[cfg(not(multithreading))]
fn update_brains(&mut self) {
self.creatures
.iter()
.map(|c| c.borrow_mut())
.for_each(|mut c| {
let creature: &mut SoftBody<B> = &mut c;
let env = crate::brain::Environment::new(&self.terrain, &creature.base);
creature.brain.run_with(&env);
});
}
#[cfg(multithreading)]
fn update_brains(&mut self) {
self.creatures
.par_iter()
.map(|c| c.borrow_mut())
.for_each(|mut c| {
let env = crate::brain::Environment::new(&self.terrain, &c.base);
c.brain.run_with(&env);
});
}
pub fn update_creatures(&mut self, time_step: f64) {
use crate::brain::EnvironmentMut;
let time = self.year;
let board_size = self.get_board_size();
for c_rc in &self.creatures {
// These functions call `borrow_mut()`
c_rc.collide(&self.soft_bodies_in_positions);
let mut c = c_rc.borrow_mut();
c.record_energy();
c.metabolize(time_step, time);
}
self.update_brains();
let use_output = true;
if use_output {
for c_rc in &self.creatures {
let creature: &mut SoftBody<B> = &mut c_rc.borrow_mut();
let mut env = EnvironmentMut::new(
&mut self.terrain,
&mut creature.base,
board_size,
time,
&self.climate,
&self.soft_bodies_in_positions,
c_rc.clone(),
);
creature.brain.use_output(&mut env, time_step);
}
}
}
// #[cfg(multithreading)]
pub fn move_creatures(&mut self, time_step: f64) {
let board_size = self.get_board_size();
for c in &self.creatures {
c.apply_motions(
time_step * OBJECT_TIMESTEPS_PER_YEAR,
board_size,
&self.terrain,
&mut self.soft_bodies_in_positions,
);
}
}
pub fn prepare_for_drawing(&mut self) {
self.terrain.update_all(self.year, &self.climate);
}
/// Checks for all creatures whether they are fit enough to live and kills them off if they're not.
///
/// Utilizes the `should_die` function of `SoftBody`.
fn remove_dead_creatures(&mut self) {
let time = self.get_time();
let board_size = self.get_board_size();
let terrain = &mut self.terrain;
let climate = &self.climate;
let sbip = &mut self.soft_bodies_in_positions;
// TODO: possibly optimise code
let mut i = 0;
while i < self.creatures.len() {
// let creature = &mut self.creatures[i];
if self.creatures[i].borrow().should_die() {
self.creatures[i].return_to_earth(time, board_size, terrain, climate, sbip);
self.selected_creature
.unselect_if_dead(self.creatures[i].clone());
self.creatures.remove(i);
// println!("Dead!");
} else {
i += 1;
}
}
}
/// Performs the same function on `self.climate`, filling in `self.year`.
pub fn get_growth_since(&self, last_updated: f64) -> f64 {
return self
.climate
.get_growth_over_time_range(self.year, last_updated);
}
/// Returns the current growth rate (temperature) based on the season.
///
/// Performs the same function on `self.climate`, filling in `self.year`.
pub fn
|
get_current_growth_rate
|
identifier_name
|
|
board.rs
|
_minimum: usize,
min_temp: f64,
max_temp: f64,
) -> Self {
let creatures = Vec::with_capacity(creature_minimum);
// Initialize climate.
let mut climate = Climate::new(min_temp, max_temp);
climate.update(0.0);
let mut board = Board {
board_width: board_size.0,
board_height: board_size.1,
terrain: Terrain::generate_perlin(board_size, noise_step_size),
creature_minimum,
soft_bodies_in_positions: SoftBodiesInPositions::new_allocated(board_size),
creatures,
creature_id_up_to: 0,
year: 0.0,
climate,
selected_creature: SelectedCreature::default(),
};
// Initialize creatures.
board.maintain_creature_minimum();
return board;
}
/// Maintains the creature minimum by adding random creatures until there are at least `self.creature_minimum` creatures.
///
/// # Processing equivalent
/// This function is the equivalent of *Board.pde/maintainCreatureMinimum* with *choosePreexisting* set to false.
fn maintain_creature_minimum(&mut self) {
while self.creatures.len() < self.creature_minimum {
let board_size = self.get_board_size();
let creature = HLSoftBody::from(SoftBody::new_random(board_size, self.year));
// Initialize in `SoftBodiesInPositions` as well.
creature.set_sbip(&mut self.soft_bodies_in_positions, board_size);
// Just to set the prevSBIP variables.
creature.set_sbip(&mut self.soft_bodies_in_positions, board_size);
self.creatures.push(creature);
self.creature_id_up_to += 1;
}
}
}
impl<B: NeuralNet + RecombinationInfinite + GenerateRandom> Board<B> {
pub fn update(&mut self, time_step: f64) {
self.year += time_step;
self.climate.update(self.year);
let temp_change_into_frame =
self.climate.get_temperature() - self.climate.get_growth_rate(self.year - time_step);
let temp_change_out_of_frame =
self.climate.get_growth_rate(self.year + time_step) - self.climate.get_temperature();
if temp_change_into_frame * temp_change_out_of_frame < 0.0 {
// Temperature change flipped direction
self.terrain.update_all(self.year, &self.climate);
}
self.update_creatures(time_step);
// Kill weak creatures.
self.remove_dead_creatures();
// Let creatures reproduce
self.creatures_reproduce();
// Experimental: this was moved from above to always keep the creature minimum.
self.maintain_creature_minimum();
// Move the creatures around on the board
self.move_creatures(time_step);
}
}
impl<B: NeuralNet + RecombinationInfinite> Board<B> {
fn creatures_reproduce(&mut self) {
let mut babies = Vec::new();
// Keep the borrow checker happy
{
let time = self.get_time();
let board_size = self.get_board_size();
let sbip = &mut self.soft_bodies_in_positions;
for c in &mut self.creatures {
let maybe_baby = c.try_reproduce(time, sbip, board_size);
if let Some(baby) = maybe_baby {
babies.push(baby);
}
}
}
babies.into_iter().for_each(|c| self.creatures.push(c));
}
}
impl<B: NeuralNet> Board<B> {
/// Selects the oldest creature still alive.
pub fn select_oldest(&mut self) {
let oldest = self.creatures.iter().fold(&self.creatures[0], |c_old, c| {
if c.borrow().get_birth_time() < c_old.borrow().get_birth_time() {
c
} else {
c_old
}
});
self.selected_creature.select(oldest.clone());
}
/// Selects the biggest creature.
pub fn select_biggest(&mut self) {
let biggest = self.creatures.iter().fold(&self.creatures[0], |c_old, c| {
if c.borrow().get_energy() > c_old.borrow().get_energy() {
c
} else {
c_old
}
});
self.selected_creature.select(biggest.clone());
}
#[cfg(not(multithreading))]
fn update_brains(&mut self) {
self.creatures
.iter()
.map(|c| c.borrow_mut())
.for_each(|mut c| {
let creature: &mut SoftBody<B> = &mut c;
let env = crate::brain::Environment::new(&self.terrain, &creature.base);
creature.brain.run_with(&env);
});
}
#[cfg(multithreading)]
fn update_brains(&mut self) {
self.creatures
.par_iter()
.map(|c| c.borrow_mut())
.for_each(|mut c| {
let env = crate::brain::Environment::new(&self.terrain, &c.base);
c.brain.run_with(&env);
});
}
pub fn update_creatures(&mut self, time_step: f64) {
use crate::brain::EnvironmentMut;
let time = self.year;
let board_size = self.get_board_size();
for c_rc in &self.creatures {
// These functions call `borrow_mut()`
c_rc.collide(&self.soft_bodies_in_positions);
let mut c = c_rc.borrow_mut();
c.record_energy();
c.metabolize(time_step, time);
}
self.update_brains();
let use_output = true;
if use_output {
for c_rc in &self.creatures {
let creature: &mut SoftBody<B> = &mut c_rc.borrow_mut();
let mut env = EnvironmentMut::new(
&mut self.terrain,
&mut creature.base,
board_size,
time,
&self.climate,
&self.soft_bodies_in_positions,
c_rc.clone(),
);
creature.brain.use_output(&mut env, time_step);
}
}
}
// #[cfg(multithreading)]
pub fn move_creatures(&mut self, time_step: f64) {
let board_size = self.get_board_size();
for c in &self.creatures {
c.apply_motions(
time_step * OBJECT_TIMESTEPS_PER_YEAR,
board_size,
&self.terrain,
&mut self.soft_bodies_in_positions,
);
}
}
pub fn prepare_for_drawing(&mut self) {
self.terrain.update_all(self.year, &self.climate);
}
/// Checks for all creatures whether they are fit enough to live and kills them off if they're not.
///
/// Utilizes the `should_die` function of `SoftBody`.
fn remove_dead_creatures(&mut self) {
let time = self.get_time();
let board_size = self.get_board_size();
let terrain = &mut self.terrain;
let climate = &self.climate;
let sbip = &mut self.soft_bodies_in_positions;
// TODO: possibly optimise code
let mut i = 0;
while i < self.creatures.len() {
// let creature = &mut self.creatures[i];
if self.creatures[i].borrow().should_die() {
self.creatures[i].return_to_earth(time, board_size, terrain, climate, sbip);
self.selected_creature
.unselect_if_dead(self.creatures[i].clone());
self.creatures.remove(i);
// println!("Dead!");
} else {
i += 1;
}
}
}
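// One route for the TODO above: `Vec::swap_remove` is O(1) per removal but
// reorders the vector, while the side-effecting `return_to_earth` call makes
// `retain` awkward. Minimal sketch of the swap_remove variant on a stand-in
// Vec (hypothetical values):
//
// let mut v = vec![1, -2, 3, -4];
// let mut i = 0;
// while i < v.len() {
//     if v[i] < 0 { v.swap_remove(i); } else { i += 1; }
// }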
/// Performs the same function on `self.climate`, filling in `self.year`.
pub fn get_growth_since(&self, last_updated: f64) -> f64 {
return self
.climate
.get_growth_over_time_range(self.year, last_updated);
}
/// Returns the current growth rate (temperature) based on the season.
///
/// Performs the same function on `self.climate`, filling in `self.year`.
pub fn get_current_growth_rate(&self) -> f64 {
self.climate.get_growth_rate(self.year)
}
/// Returns the current time, i.e. `self.year`.
pub fn get_time(&self) -> f64 {
return self.year;
}
/// Returns a tuple with the width and height of this `Board`.
///
/// Equivalent to `(board.get_board_width(), board.get_board_height())`.
pub fn get_board_size(&self) -> (usize, usize) {
return (self.board_width, self.board_height);
}
/// Returns the width of the board.
pub fn get_board_width(&self) -> usize {
return self.board_width;
}
/// Returns the height of the board.
pub fn get_board_height(&self) -> usize {
return self.board_height;
}
/// Returns the minimum amount of creatures that should be on the `Board`
///
/// When the population drops below this `maintain_creature_minimum()` spawns new creatures to fill the gap.
pub fn get_creature_minimum(&self) -> usize
|
{
self.creature_minimum
}
|
identifier_body
|
|
stream.rs
|
_run
/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
/// # #[cfg(unix)] {
/// use interprocess::os::unix::udsocket::UdStream;
/// use std::io::prelude::*;
///
/// let mut conn = UdStream::connect("/tmp/example1.sock")?;
/// conn.write_all(b"Hello from client!")?;
/// let mut string_buffer = String::new();
/// conn.read_to_string(&mut string_buffer)?;
/// println!("Server answered: {}", string_buffer);
/// # }
/// # Ok(()) }
/// ```
pub struct UdStream {
fd: FdOps,
}
impl UdStream {
/// Connects to a Unix domain socket server at the specified path.
///
/// See [`ToUdSocketPath`] for an example of using various string types to specify socket paths.
///
/// # System calls
/// - `socket`
/// - `connect`
pub fn connect<'a>(path: impl ToUdSocketPath<'a>) -> io::Result<Self> {
Self::_connect(path.to_socket_path()?)
}
fn _connect(path: UdSocketPath<'_>) -> io::Result<Self>
|
if !success {
unsafe { return Err(handle_fd_error(socket)) };
}
unsafe { enable_passcred(socket).map_err(close_by_error(socket))? };
Ok(unsafe { Self::from_raw_fd(socket) })
}
/// Receives bytes from the socket stream.
///
/// # System calls
/// - `read`
pub fn recv(&self, buf: &mut [u8]) -> io::Result<usize> {
self.fd.read(buf)
}
/// Receives bytes from the socket stream, making use of [scatter input] for the main data.
///
/// # System calls
/// - `readv`
///
/// [scatter input]: https://en.wikipedia.org/wiki/Vectored_I/O " "
pub fn recv_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
self.fd.read_vectored(bufs)
}
/// Receives both bytes and ancillary data from the socket stream.
///
/// The ancillary data buffer is automatically converted from the supplied value, if possible. For that reason, mutable slices of bytes (`u8` values) can be passed directly.
///
/// # System calls
/// - `recvmsg`
pub fn recv_ancillary<'a: 'b, 'b>(
&self,
buf: &mut [u8],
abuf: &'b mut AncillaryDataBuf<'a>,
) -> io::Result<(usize, usize)> {
check_ancillary_unsound()?;
self.recv_ancillary_vectored(&mut [IoSliceMut::new(buf)], abuf)
}
/// Receives bytes and ancillary data from the socket stream, making use of [scatter input] for the main data.
///
/// The ancillary data buffer is automatically converted from the supplied value, if possible. For that reason, mutable slices of bytes (`u8` values) can be passed directly.
///
/// # System calls
/// - `recvmsg`
///
/// [scatter input]: https://en.wikipedia.org/wiki/Vectored_I/O " "
#[allow(clippy::useless_conversion)]
pub fn recv_ancillary_vectored<'a: 'b, 'b>(
&self,
bufs: &mut [IoSliceMut<'_>],
abuf: &'b mut AncillaryDataBuf<'a>,
) -> io::Result<(usize, usize)> {
check_ancillary_unsound()?;
let mut hdr = mk_msghdr_r(bufs, abuf.as_mut())?;
let (success, bytes_read) = unsafe {
let result = libc::recvmsg(self.as_raw_fd(), &mut hdr as *mut _, 0);
(result != -1, result as usize)
};
if success {
Ok((bytes_read, hdr.msg_controllen as _))
} else {
Err(io::Error::last_os_error())
}
}
/// Sends bytes into the socket stream.
///
/// # System calls
/// - `write`
pub fn send(&self, buf: &[u8]) -> io::Result<usize> {
self.fd.write(buf)
}
/// Sends bytes into the socket stream, making use of [gather output] for the main data.
///
/// # System calls
/// - `writev`
///
/// [gather output]: https://en.wikipedia.org/wiki/Vectored_I/O " "
pub fn send_vectored(&self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
self.fd.write_vectored(bufs)
}
/// Sends bytes and ancillary data into the socket stream.
///
/// The ancillary data buffer is automatically converted from the supplied value, if possible. For that reason, slices and `Vec`s of `AncillaryData` can be passed directly.
///
/// # System calls
/// - `sendmsg`
pub fn send_ancillary<'a>(
&self,
buf: &[u8],
ancillary_data: impl IntoIterator<Item = AncillaryData<'a>>,
) -> io::Result<(usize, usize)> {
check_ancillary_unsound()?;
self.send_ancillary_vectored(&[IoSlice::new(buf)], ancillary_data)
}
/// Sends bytes and ancillary data into the socket stream, making use of [gather output] for the main data.
///
/// The ancillary data buffer is automatically converted from the supplied value, if possible. For that reason, slices and `Vec`s of `AncillaryData` can be passed directly.
///
/// # System calls
/// - `sendmsg`
///
/// [gather output]: https://en.wikipedia.org/wiki/Vectored_I/O " "
#[allow(clippy::useless_conversion)]
pub fn send_ancillary_vectored<'a>(
&self,
bufs: &[IoSlice<'_>],
ancillary_data: impl IntoIterator<Item = AncillaryData<'a>>,
) -> io::Result<(usize, usize)> {
check_ancillary_unsound()?;
let abuf = ancillary_data
.into_iter()
.collect::<EncodedAncillaryData<'_>>();
let hdr = mk_msghdr_w(bufs, abuf.as_ref())?;
let (success, bytes_written) = unsafe {
let result = libc::sendmsg(self.as_raw_fd(), &hdr as *const _, 0);
(result != -1, result as usize)
};
if success {
Ok((bytes_written, hdr.msg_controllen as _))
} else {
Err(io::Error::last_os_error())
}
}
/// Shuts down the read, write, or both halves of the stream. See [`Shutdown`].
///
/// Attempting to call this method with the same `how` argument multiple times may return `Ok(())` every time or it may return an error the second time it is called, depending on the platform. You must either avoid using the same value twice or ignore the error entirely.
pub fn shutdown(&self, how: Shutdown) -> io::Result<()> {
unsafe { raw_shutdown(self.as_raw_fd(), how) }
}
/// Enables or disables the nonblocking mode for the stream. By default, it is disabled.
///
/// In nonblocking mode, calls to the `recv…` methods and the `Read` trait methods will never wait for at least one byte of data to become available; calls to `send…` methods and the `Write` trait methods will never wait for the other side to remove enough bytes from the buffer for the write operation to be performed. Those operations will instead return a [`WouldBlock`] error immediately, allowing the thread to perform other useful operations in the meantime.
///
/// [`accept`]: #method.accept " "
/// [`incoming`]: #method.incoming " "
/// [`WouldBlock`]: https://doc.rust-lang.org/std/io/enum.ErrorKind.html#variant.WouldBlock " "
pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
unsafe { raw_set_nonblocking(self.fd.0, nonblocking) }
}
/// Checks whether the stream is currently in nonblocking mode or not.
pub fn is_nonblocking(&self) -> io::Result<bool> {
unsafe { raw_get_nonblocking(self.fd.0) }
}
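// A hedged sketch of a nonblocking receive loop built on the two methods
// above, inside a hypothetical function returning io::Result (`conn`, `buf`,
// and `do_other_work` are assumed names):
//
// conn.set_nonblocking(true)?;
// let mut buf = [0u8; 1024];
// loop {
//     match conn.recv(&mut buf) {
//         Ok(n) => println!("got {} bytes", n),
//         Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => do_other_work(),
//         Err(e) => return Err(e),
//     }
// }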
/// Fetches the credentials of the other end of the connection without using ancillary data. The returned structure contains the process identifier, user identifier and group identifier of the peer.
#[cfg(any(doc, uds_peercred))]
#[cfg_attr( // uds_peercred template
feature = "doc_cfg",
doc
|
{
let addr = path.try_to::<sockaddr_un>()?;
let socket = {
let (success, fd) = unsafe {
let result = libc::socket(AF_UNIX, SOCK_STREAM, 0);
(result != -1, result)
};
if success {
fd
} else {
return Err(io::Error::last_os_error());
}
};
let success = unsafe {
libc::connect(
socket,
&addr as *const _ as *const _,
size_of::<sockaddr_un>() as u32,
)
} != -1;
|
identifier_body
|
stream.rs
|
_run
/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
/// # #[cfg(unix)] {
/// use interprocess::os::unix::udsocket::UdStream;
/// use std::io::prelude::*;
///
/// let mut conn = UdStream::connect("/tmp/example1.sock")?;
/// conn.write_all(b"Hello from client!")?;
/// let mut string_buffer = String::new();
/// conn.read_to_string(&mut string_buffer)?;
/// println!("Server answered: {}", string_buffer);
/// # }
/// # Ok(()) }
/// ```
pub struct UdStream {
fd: FdOps,
}
impl UdStream {
/// Connects to a Unix domain socket server at the specified path.
///
/// See [`ToUdSocketPath`] for an example of using various string types to specify socket paths.
///
/// # System calls
/// - `socket`
/// - `connect`
pub fn connect<'a>(path: impl ToUdSocketPath<'a>) -> io::Result<Self> {
Self::_connect(path.to_socket_path()?)
}
fn _connect(path: UdSocketPath<'_>) -> io::Result<Self> {
let addr = path.try_to::<sockaddr_un>()?;
let socket = {
let (success, fd) = unsafe {
let result = libc::socket(AF_UNIX, SOCK_STREAM, 0);
(result != -1, result)
};
if success {
fd
} else {
return Err(io::Error::last_os_error());
}
};
let success = unsafe {
libc::connect(
socket,
&addr as *const _ as *const _,
size_of::<sockaddr_un>() as u32,
)
} != -1;
if !success {
unsafe { return Err(handle_fd_error(socket)) };
}
unsafe { enable_passcred(socket).map_err(close_by_error(socket))? };
Ok(unsafe { Self::from_raw_fd(socket) })
}
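// Note on the pattern in `_connect`: libc calls report failure by returning
// -1 and setting errno, which `io::Error::last_os_error()` reads back.
// Minimal sketch of the same check (hypothetical standalone call):
//
// let fd = unsafe { libc::socket(libc::AF_UNIX, libc::SOCK_STREAM, 0) };
// if fd == -1 {
//     return Err(std::io::Error::last_os_error());
// }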
/// Receives bytes from the socket stream.
///
/// # System calls
/// - `read`
pub fn recv(&self, buf: &mut [u8]) -> io::Result<usize> {
self.fd.read(buf)
}
/// Receives bytes from the socket stream, making use of [scatter input] for the main data.
///
/// # System calls
/// - `readv`
///
/// [scatter input]: https://en.wikipedia.org/wiki/Vectored_I/O " "
pub fn
|
(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
self.fd.read_vectored(bufs)
}
/// Receives both bytes and ancillary data from the socket stream.
///
/// The ancillary data buffer is automatically converted from the supplied value, if possible. For that reason, mutable slices of bytes (`u8` values) can be passed directly.
///
/// # System calls
/// - `recvmsg`
pub fn recv_ancillary<'a: 'b, 'b>(
&self,
buf: &mut [u8],
abuf: &'b mut AncillaryDataBuf<'a>,
) -> io::Result<(usize, usize)> {
check_ancillary_unsound()?;
self.recv_ancillary_vectored(&mut [IoSliceMut::new(buf)], abuf)
}
/// Receives bytes and ancillary data from the socket stream, making use of [scatter input] for the main data.
///
/// The ancillary data buffer is automatically converted from the supplied value, if possible. For that reason, mutable slices of bytes (`u8` values) can be passed directly.
///
/// # System calls
/// - `recvmsg`
///
/// [scatter input]: https://en.wikipedia.org/wiki/Vectored_I/O " "
#[allow(clippy::useless_conversion)]
pub fn recv_ancillary_vectored<'a: 'b, 'b>(
&self,
bufs: &mut [IoSliceMut<'_>],
abuf: &'b mut AncillaryDataBuf<'a>,
) -> io::Result<(usize, usize)> {
check_ancillary_unsound()?;
let mut hdr = mk_msghdr_r(bufs, abuf.as_mut())?;
let (success, bytes_read) = unsafe {
let result = libc::recvmsg(self.as_raw_fd(), &mut hdr as *mut _, 0);
(result != -1, result as usize)
};
if success {
Ok((bytes_read, hdr.msg_controllen as _))
} else {
Err(io::Error::last_os_error())
}
}
/// Sends bytes into the socket stream.
///
/// # System calls
/// - `write`
pub fn send(&self, buf: &[u8]) -> io::Result<usize> {
self.fd.write(buf)
}
/// Sends bytes into the socket stream, making use of [gather output] for the main data.
///
/// # System calls
/// - `writev`
///
/// [gather output]: https://en.wikipedia.org/wiki/Vectored_I/O " "
pub fn send_vectored(&self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
self.fd.write_vectored(bufs)
}
/// Sends bytes and ancillary data into the socket stream.
///
/// The ancillary data buffer is automatically converted from the supplied value, if possible. For that reason, slices and `Vec`s of `AncillaryData` can be passed directly.
///
/// # System calls
/// - `sendmsg`
pub fn send_ancillary<'a>(
&self,
buf: &[u8],
ancillary_data: impl IntoIterator<Item = AncillaryData<'a>>,
) -> io::Result<(usize, usize)> {
check_ancillary_unsound()?;
self.send_ancillary_vectored(&[IoSlice::new(buf)], ancillary_data)
}
/// Sends bytes and ancillary data into the socket stream, making use of [gather output] for the main data.
///
/// The ancillary data buffer is automatically converted from the supplied value, if possible. For that reason, slices and `Vec`s of `AncillaryData` can be passed directly.
///
/// # System calls
/// - `sendmsg`
///
/// [gather output]: https://en.wikipedia.org/wiki/Vectored_I/O " "
#[allow(clippy::useless_conversion)]
pub fn send_ancillary_vectored<'a>(
&self,
bufs: &[IoSlice<'_>],
ancillary_data: impl IntoIterator<Item = AncillaryData<'a>>,
) -> io::Result<(usize, usize)> {
check_ancillary_unsound()?;
let abuf = ancillary_data
.into_iter()
.collect::<EncodedAncillaryData<'_>>();
let hdr = mk_msghdr_w(bufs, abuf.as_ref())?;
let (success, bytes_written) = unsafe {
let result = libc::sendmsg(self.as_raw_fd(), &hdr as *const _, 0);
(result != -1, result as usize)
};
if success {
Ok((bytes_written, hdr.msg_controllen as _))
} else {
Err(io::Error::last_os_error())
}
}
/// Shuts down the read, write, or both halves of the stream. See [`Shutdown`].
///
/// Attempting to call this method with the same `how` argument multiple times may return `Ok(())` every time or it may return an error the second time it is called, depending on the platform. You must either avoid using the same value twice or ignore the error entirely.
pub fn shutdown(&self, how: Shutdown) -> io::Result<()> {
unsafe { raw_shutdown(self.as_raw_fd(), how) }
}
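// A hedged usage sketch for `shutdown` (assuming `Shutdown` is
// `std::net::Shutdown`, as the signature suggests): closing only the write
// half signals EOF to the peer while reads remain usable:
//
// conn.send(b"done")?;
// conn.shutdown(Shutdown::Write)?;
// let mut reply = [0u8; 64];
// let n = conn.recv(&mut reply)?;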
/// Enables or disables the nonblocking mode for the stream. By default, it is disabled.
///
/// In nonblocking mode, calls to the `recv…` methods and the `Read` trait methods will never wait for at least one byte of data to become available; calls to `send…` methods and the `Write` trait methods will never wait for the other side to remove enough bytes from the buffer for the write operation to be performed. Those operations will instead return a [`WouldBlock`] error immediately, allowing the thread to perform other useful operations in the meantime.
///
/// [`accept`]: #method.accept " "
/// [`incoming`]: #method.incoming " "
/// [`WouldBlock`]: https://doc.rust-lang.org/std/io/enum.ErrorKind.html#variant.WouldBlock " "
pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
unsafe { raw_set_nonblocking(self.fd.0, nonblocking) }
}
/// Checks whether the stream is currently in nonblocking mode or not.
pub fn is_nonblocking(&self) -> io::Result<bool> {
unsafe { raw_get_nonblocking(self.fd.0) }
}
/// Fetches the credentials of the other end of the connection without using ancillary data. The returned structure contains the process identifier, user identifier and group identifier of the peer.
#[cfg(any(doc, uds_peercred))]
#[cfg_attr( // uds_peercred template
feature = "doc_cfg",
doc
|
recv_vectored
|
identifier_name
|
stream.rs
|
_run
/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
/// # #[cfg(unix)] {
/// use interprocess::os::unix::udsocket::UdStream;
/// use std::io::prelude::*;
///
/// let mut conn = UdStream::connect("/tmp/example1.sock")?;
/// conn.write_all(b"Hello from client!")?;
/// let mut string_buffer = String::new();
/// conn.read_to_string(&mut string_buffer)?;
/// println!("Server answered: {}", string_buffer);
/// # }
/// # Ok(()) }
/// ```
pub struct UdStream {
fd: FdOps,
}
impl UdStream {
/// Connects to a Unix domain socket server at the specified path.
///
/// See [`ToUdSocketPath`] for an example of using various string types to specify socket paths.
///
/// # System calls
/// - `socket`
/// - `connect`
pub fn connect<'a>(path: impl ToUdSocketPath<'a>) -> io::Result<Self> {
Self::_connect(path.to_socket_path()?)
}
fn _connect(path: UdSocketPath<'_>) -> io::Result<Self> {
let addr = path.try_to::<sockaddr_un>()?;
let socket = {
let (success, fd) = unsafe {
let result = libc::socket(AF_UNIX, SOCK_STREAM, 0);
(result != -1, result)
};
if success {
fd
} else
|
};
let success = unsafe {
libc::connect(
socket,
&addr as *const _ as *const _,
size_of::<sockaddr_un>() as u32,
)
} != -1;
if !success {
unsafe { return Err(handle_fd_error(socket)) };
}
unsafe { enable_passcred(socket).map_err(close_by_error(socket))? };
Ok(unsafe { Self::from_raw_fd(socket) })
}
/// Receives bytes from the socket stream.
///
/// # System calls
/// - `read`
pub fn recv(&self, buf: &mut [u8]) -> io::Result<usize> {
self.fd.read(buf)
}
/// Receives bytes from the socket stream, making use of [scatter input] for the main data.
///
/// # System calls
/// - `readv`
///
/// [scatter input]: https://en.wikipedia.org/wiki/Vectored_I/O " "
pub fn recv_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
self.fd.read_vectored(bufs)
}
/// Receives both bytes and ancillary data from the socket stream.
///
/// The ancillary data buffer is automatically converted from the supplied value, if possible. For that reason, mutable slices of bytes (`u8` values) can be passed directly.
///
/// # System calls
/// - `recvmsg`
pub fn recv_ancillary<'a: 'b, 'b>(
&self,
buf: &mut [u8],
abuf: &'b mut AncillaryDataBuf<'a>,
) -> io::Result<(usize, usize)> {
check_ancillary_unsound()?;
self.recv_ancillary_vectored(&mut [IoSliceMut::new(buf)], abuf)
}
/// Receives bytes and ancillary data from the socket stream, making use of [scatter input] for the main data.
///
/// The ancillary data buffer is automatically converted from the supplied value, if possible. For that reason, mutable slices of bytes (`u8` values) can be passed directly.
///
/// # System calls
/// - `recvmsg`
///
/// [scatter input]: https://en.wikipedia.org/wiki/Vectored_I/O " "
#[allow(clippy::useless_conversion)]
pub fn recv_ancillary_vectored<'a: 'b, 'b>(
&self,
bufs: &mut [IoSliceMut<'_>],
abuf: &'b mut AncillaryDataBuf<'a>,
) -> io::Result<(usize, usize)> {
check_ancillary_unsound()?;
let mut hdr = mk_msghdr_r(bufs, abuf.as_mut())?;
let (success, bytes_read) = unsafe {
let result = libc::recvmsg(self.as_raw_fd(), &mut hdr as *mut _, 0);
(result != -1, result as usize)
};
if success {
Ok((bytes_read, hdr.msg_controllen as _))
} else {
Err(io::Error::last_os_error())
}
}
/// Sends bytes into the socket stream.
///
/// # System calls
/// - `write`
pub fn send(&self, buf: &[u8]) -> io::Result<usize> {
self.fd.write(buf)
}
/// Sends bytes into the socket stream, making use of [gather output] for the main data.
///
/// # System calls
/// - `writev`
///
/// [gather output]: https://en.wikipedia.org/wiki/Vectored_I/O " "
pub fn send_vectored(&self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
self.fd.write_vectored(bufs)
}
/// Sends bytes and ancillary data into the socket stream.
///
/// The ancillary data buffer is automatically converted from the supplied value, if possible. For that reason, slices and `Vec`s of `AncillaryData` can be passed directly.
///
/// # System calls
/// - `sendmsg`
pub fn send_ancillary<'a>(
&self,
buf: &[u8],
ancillary_data: impl IntoIterator<Item = AncillaryData<'a>>,
) -> io::Result<(usize, usize)> {
check_ancillary_unsound()?;
self.send_ancillary_vectored(&[IoSlice::new(buf)], ancillary_data)
}
/// Sends bytes and ancillary data into the socket stream, making use of [gather output] for the main data.
///
/// The ancillary data buffer is automatically converted from the supplied value, if possible. For that reason, slices and `Vec`s of `AncillaryData` can be passed directly.
///
/// # System calls
/// - `sendmsg`
///
/// [gather output]: https://en.wikipedia.org/wiki/Vectored_I/O " "
#[allow(clippy::useless_conversion)]
pub fn send_ancillary_vectored<'a>(
&self,
bufs: &[IoSlice<'_>],
ancillary_data: impl IntoIterator<Item = AncillaryData<'a>>,
) -> io::Result<(usize, usize)> {
check_ancillary_unsound()?;
let abuf = ancillary_data
.into_iter()
.collect::<EncodedAncillaryData<'_>>();
let hdr = mk_msghdr_w(bufs, abuf.as_ref())?;
let (success, bytes_written) = unsafe {
let result = libc::sendmsg(self.as_raw_fd(), &hdr as *const _, 0);
(result != -1, result as usize)
};
if success {
Ok((bytes_written, hdr.msg_controllen as _))
} else {
Err(io::Error::last_os_error())
}
}
/// Shuts down the read, write, or both halves of the stream. See [`Shutdown`].
///
/// Attempting to call this method with the same `how` argument multiple times may return `Ok(())` every time or it may return an error the second time it is called, depending on the platform. You must either avoid using the same value twice or ignore the error entirely.
pub fn shutdown(&self, how: Shutdown) -> io::Result<()> {
unsafe { raw_shutdown(self.as_raw_fd(), how) }
}
/// Enables or disables the nonblocking mode for the stream. By default, it is disabled.
///
    /// In nonblocking mode, calls to the `recv…` methods and the `Read` trait methods will never wait for at least one byte of data to become available; calls to `send…` methods and the `Write` trait methods will never wait for the other side to remove enough bytes from the buffer for the write operation to be performed. Those operations will instead return a [`WouldBlock`] error immediately, allowing the thread to perform other useful operations in the meantime.
///
/// [`WouldBlock`]: https://doc.rust-lang.org/std/io/enum.ErrorKind.html#variant.WouldBlock " "
pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
unsafe { raw_set_nonblocking(self.fd.0, nonblocking) }
}
/// Checks whether the stream is currently in nonblocking mode or not.
pub fn is_nonblocking(&self) -> io::Result<bool> {
unsafe { raw_get_nonblocking(self.fd.0) }
}
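    // A sketch of a nonblocking receive loop (hedged: `conn` is a hypothetical
    // connected socket of this type; errors other than WouldBlock are propagated):
    //
    //     conn.set_nonblocking(true)?;
    //     let mut buf = [0u8; 1024];
    //     match conn.recv(&mut buf) {
    //         Ok(n) => println!("received {} bytes", n),
    //         Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => {
    //             // No data yet; do other useful work and poll again later.
    //         }
    //         Err(e) => return Err(e),
    //     }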
/// Fetches the credentials of the other end of the connection without using ancillary data. The returned structure contains the process identifier, user identifier and group identifier of the peer.
#[cfg(any(doc, uds_peercred))]
#[cfg_attr( // uds_peercred template
feature = "doc_cfg",
|
{
return Err(io::Error::last_os_error());
}
|
conditional_block
|
stream.rs
|
no_run
/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
/// # #[cfg(unix)] {
/// use interprocess::os::unix::udsocket::UdStream;
/// use std::io::prelude::*;
///
|
/// conn.read_to_string(&mut string_buffer)?;
/// println!("Server answered: {}", string_buffer);
/// # }
/// # Ok(()) }
/// ```
pub struct UdStream {
fd: FdOps,
}
impl UdStream {
/// Connects to a Unix domain socket server at the specified path.
///
/// See [`ToUdSocketPath`] for an example of using various string types to specify socket paths.
///
/// # System calls
/// - `socket`
/// - `connect`
pub fn connect<'a>(path: impl ToUdSocketPath<'a>) -> io::Result<Self> {
Self::_connect(path.to_socket_path()?)
}
fn _connect(path: UdSocketPath<'_>) -> io::Result<Self> {
let addr = path.try_to::<sockaddr_un>()?;
let socket = {
let (success, fd) = unsafe {
let result = libc::socket(AF_UNIX, SOCK_STREAM, 0);
(result != -1, result)
};
if success {
fd
} else {
return Err(io::Error::last_os_error());
}
};
let success = unsafe {
libc::connect(
socket,
&addr as *const _ as *const _,
size_of::<sockaddr_un>() as u32,
)
        } != -1;
if !success {
unsafe { return Err(handle_fd_error(socket)) };
}
unsafe { enable_passcred(socket).map_err(close_by_error(socket))? };
Ok(unsafe { Self::from_raw_fd(socket) })
}
/// Receives bytes from the socket stream.
///
/// # System calls
/// - `read`
pub fn recv(&self, buf: &mut [u8]) -> io::Result<usize> {
self.fd.read(buf)
}
/// Receives bytes from the socket stream, making use of [scatter input] for the main data.
///
/// # System calls
/// - `readv`
///
/// [scatter input]: https://en.wikipedia.org/wiki/Vectored_I/O " "
pub fn recv_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
self.fd.read_vectored(bufs)
}
/// Receives both bytes and ancillary data from the socket stream.
///
/// The ancillary data buffer is automatically converted from the supplied value, if possible. For that reason, mutable slices of bytes (`u8` values) can be passed directly.
///
/// # System calls
/// - `recvmsg`
pub fn recv_ancillary<'a: 'b, 'b>(
&self,
buf: &mut [u8],
abuf: &'b mut AncillaryDataBuf<'a>,
) -> io::Result<(usize, usize)> {
check_ancillary_unsound()?;
self.recv_ancillary_vectored(&mut [IoSliceMut::new(buf)], abuf)
}
/// Receives bytes and ancillary data from the socket stream, making use of [scatter input] for the main data.
///
/// The ancillary data buffer is automatically converted from the supplied value, if possible. For that reason, mutable slices of bytes (`u8` values) can be passed directly.
///
/// # System calls
/// - `recvmsg`
///
/// [scatter input]: https://en.wikipedia.org/wiki/Vectored_I/O " "
#[allow(clippy::useless_conversion)]
pub fn recv_ancillary_vectored<'a: 'b, 'b>(
&self,
bufs: &mut [IoSliceMut<'_>],
abuf: &'b mut AncillaryDataBuf<'a>,
) -> io::Result<(usize, usize)> {
check_ancillary_unsound()?;
let mut hdr = mk_msghdr_r(bufs, abuf.as_mut())?;
let (success, bytes_read) = unsafe {
let result = libc::recvmsg(self.as_raw_fd(), &mut hdr as *mut _, 0);
(result != -1, result as usize)
};
if success {
Ok((bytes_read, hdr.msg_controllen as _))
} else {
Err(io::Error::last_os_error())
}
}
/// Sends bytes into the socket stream.
///
/// # System calls
/// - `write`
pub fn send(&self, buf: &[u8]) -> io::Result<usize> {
self.fd.write(buf)
}
/// Sends bytes into the socket stream, making use of [gather output] for the main data.
///
/// # System calls
    /// - `writev`
///
/// [gather output]: https://en.wikipedia.org/wiki/Vectored_I/O " "
pub fn send_vectored(&self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
self.fd.write_vectored(bufs)
}
/// Sends bytes and ancillary data into the socket stream.
///
/// The ancillary data buffer is automatically converted from the supplied value, if possible. For that reason, slices and `Vec`s of `AncillaryData` can be passed directly.
///
/// # System calls
/// - `sendmsg`
pub fn send_ancillary<'a>(
&self,
buf: &[u8],
ancillary_data: impl IntoIterator<Item = AncillaryData<'a>>,
) -> io::Result<(usize, usize)> {
check_ancillary_unsound()?;
self.send_ancillary_vectored(&[IoSlice::new(buf)], ancillary_data)
}
/// Sends bytes and ancillary data into the socket stream, making use of [gather output] for the main data.
///
/// The ancillary data buffer is automatically converted from the supplied value, if possible. For that reason, slices and `Vec`s of `AncillaryData` can be passed directly.
///
/// # System calls
/// - `sendmsg`
///
/// [gather output]: https://en.wikipedia.org/wiki/Vectored_I/O " "
#[allow(clippy::useless_conversion)]
pub fn send_ancillary_vectored<'a>(
&self,
bufs: &[IoSlice<'_>],
ancillary_data: impl IntoIterator<Item = AncillaryData<'a>>,
) -> io::Result<(usize, usize)> {
check_ancillary_unsound()?;
let abuf = ancillary_data
.into_iter()
.collect::<EncodedAncillaryData<'_>>();
let hdr = mk_msghdr_w(bufs, abuf.as_ref())?;
let (success, bytes_written) = unsafe {
let result = libc::sendmsg(self.as_raw_fd(), &hdr as *const _, 0);
(result != -1, result as usize)
};
if success {
Ok((bytes_written, hdr.msg_controllen as _))
} else {
Err(io::Error::last_os_error())
}
}
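    // A hedged sketch of sending a file descriptor alongside regular bytes.
    // The `AncillaryData::FileDescriptors` variant name and the `Cow::Borrowed`
    // wrapper are assumptions about this module's ancillary-data API; `conn`
    // and `raw_fd` are hypothetical:
    //
    //     use std::borrow::Cow;
    //     let fds = [raw_fd];
    //     let (bytes_sent, ancillary_sent) = conn.send_ancillary(
    //         b"here is an fd",
    //         [AncillaryData::FileDescriptors(Cow::Borrowed(&fds[..]))],
    //     )?;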
/// Shuts down the read, write, or both halves of the stream. See [`Shutdown`].
///
/// Attempting to call this method with the same `how` argument multiple times may return `Ok(())` every time or it may return an error the second time it is called, depending on the platform. You must either avoid using the same value twice or ignore the error entirely.
pub fn shutdown(&self, how: Shutdown) -> io::Result<()> {
unsafe { raw_shutdown(self.as_raw_fd(), how) }
}
/// Enables or disables the nonblocking mode for the stream. By default, it is disabled.
///
    /// In nonblocking mode, calls to the `recv…` methods and the `Read` trait methods will never wait for at least one byte of data to become available; calls to `send…` methods and the `Write` trait methods will never wait for the other side to remove enough bytes from the buffer for the write operation to be performed. Those operations will instead return a [`WouldBlock`] error immediately, allowing the thread to perform other useful operations in the meantime.
///
/// [`WouldBlock`]: https://doc.rust-lang.org/std/io/enum.ErrorKind.html#variant.WouldBlock " "
pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
unsafe { raw_set_nonblocking(self.fd.0, nonblocking) }
}
/// Checks whether the stream is currently in nonblocking mode or not.
pub fn is_nonblocking(&self) -> io::Result<bool> {
unsafe { raw_get_nonblocking(self.fd.0) }
}
/// Fetches the credentials of the other end of the connection without using ancillary data. The returned structure contains the process identifier, user identifier and group identifier of the peer.
#[cfg(any(doc, uds_peercred))]
#[cfg_attr( // uds_peercred template
feature = "doc_cfg",
doc
|
/// let mut conn = UdStream::connect("/tmp/example1.sock")?;
/// conn.write_all(b"Hello from client!")?;
/// let mut string_buffer = String::new();
|
random_line_split
|
services.js
|
return null;
}
};
})
.factory('listPesonel',function(){
var listData=[
[{src:"img/1200.png",title:"ๆ็ๅกๅท", count:"6ๅผ "},
{src:"img/1005.png",title:"็งฏๅๅๅ", count:""},
{src:"img/10067.png",title:"ๆ็็งฏๅ", count:"110็งฏๅ"},
{src:"img/1008.png",title:"ๆ็ๅคบๅฎ", count:"0ๅ"},
{src:"img/1009.png",title:"ๆ็็คผๅ
", count:"3ๅผ "}
],
[
{src:"img/10001.png",title:"ๆ็ซ ๆถ่", count:"0็ฏ"},
{src:"img/10002.png",title:"ๆ็ซ ่ฏ่ฎบ", count:"0็ฏ"},
{src:"img/10003.png",title:"ๅๅ้ฎ็ญ", count:"0ๆก"},
{src:"img/10004.png",title:"ๅพ็ๆถ่", count:"0ๅฅ"}
],
[
{src:"img/10005.png",title:"่ฎพ็ฝฎ", count:""},
{src:"img/10006.png",title:"ๅฎขๆ็ต่ฏ", count:"400-801-0404"}
]
]
return {
all:function(){
return listData
}
}
})
.factory("homePage",function(){
var homeData={
exhibition:[
{Src:"img/11111.png",name:"ๅฐไธญๆตทๆฒๅ",priece:"๏ฟฅ4918.00"},
{Src:"img/11112.png",name:"ๅฐไธญๆตท่ถๅ ",priece:"๏ฟฅ1438.00"},
{Src:"img/11113.png",name:"ๅงๅฎคๅบๅคดๆ",priece:"๏ฟฅ622.00"},
{Src:"img/11114.png",name:"ๅฎๆจ้คๅ
ๆก",priece:"๏ฟฅ3100.00"},
{Src:"img/11115.png",name:"็พ้ฃๅไบบๅบ",priece:"๏ฟฅ2860.00"},
{Src:"img/11116.png",name:"ๆ้จๅคง่กฃๆ",priece:"๏ฟฅ2880.00"},
{Src:"img/11117.png",name:"็ฐๅญ้ซ็ฎฑๅบ",priece:"๏ฟฅ1830.00"},
{Src:"img/11118.png",name:"ๅฐๆทๅๆถ็บณๆ",priece:"๏ฟฅ1258.00"},
{Src:"img/11119.png",name:"ๅบๅคดๅจ็ฉๆ",priece:"๏ฟฅ427.00"},
],
exhibition2:[
{Src:"img/11121.png",name:"็ฐไปฃๅธ่บๆฒๅ",priece:"๏ฟฅ3680.00"},
{Src:"img/11122.png",name:"ๅๆจ่ฒ่ถๅ ",priece:"๏ฟฅ1380.00"},
{Src:"img/11123.png",name:"ๅฏไผธ็ผฉ็ต่งๆ",priece:"๏ฟฅ1220.00"},
{Src:"img/11124.png",name:"ๅฐๆทๅ้คๆก",priece:"๏ฟฅ2140.00"},
{Src:"img/11125.png",name:"ๅๆฌงๅไบบๅบ",priece:"๏ฟฅ1333.00"},
{Src:"img/11127.png",name:"็ฎ็บฆๅบๅคดๆ",priece:"๏ฟฅ395.00"},
{Src:"img/11128.png",name:"็ๅ
ณ็ปๅๆ",priece:"๏ฟฅ1612.00"},
{Src:"img/11129.png",name:"่ฟทไฝ ๅฆๅฐ็ปๅ",priece:"๏ฟฅ949.00"},
{Src:"img/11130.png",name:"ๆพๆจๅฟ็ซฅๅบ",priece:"๏ฟฅ1846.00"},
{Src:"img/11131.png",name:"ๅ
จๅฎๆจๅบๅคดๆ",priece:"๏ฟฅ576.00"},
{Src:"img/11132.png",name:"ๅ้จๅฎๆจ่กฃๆ",priece:"๏ฟฅ2058.00"}
],
activeData:[
{writeSrc:"img/30001.png",fullSrc:"img/30002.png",smallSrc1:"img/30003.png",
smallSrc2:"img/30004.png",smallSrc3:"img/30005.png",smallSrc4:"img/30006.png", detail:"ๆฅ็ๆดๅคๅฎขๅ
ๅฎถๅ
ท"},
{writeSrc:"img/401.png",fullSrc:"img/402.png",smallSrc1:"img/403.png",
smallSrc2:"img/404.png",smallSrc3:"img/405.png",smallSrc4:"img/406.png", detail:"ๆฅ็ๆดๅคๅงๅฎคๅฎถๅ
ท"},
{writeSrc:"img/501.png",fullSrc:"img/502.png",smallSrc1:"img/503.png",
smallSrc2:"img/504.png",smallSrc3:"img/505.png",smallSrc4:"img/506.png", detail:"ๆฅ็ๆดๅค้คๅ
ๅฎถๅ
ท"}
],
styleData:[
{title:"็ฐไปฃ",fullSrc:"img/341.png",smallSrc1:"img/432.png",smallSrc2:"img/433.png",smallSrc3:"img/434.png",
smallSrc4:"img/435.png",mark:"็ฐไปฃ้ฃๆ ผ",detail:"ๅฎถๅ
ทไปฅ็ฎ็บฆๆๅฟซ็้ ๅๆณจ้็ฐไปฃๆถๅฐ็ๆฐๆฏ๏ผไปฅๅฎ็จ็พๆญ็น็นๅๅคงๆนๅพไฝ็่ฎพ่ฎกๅธๅผๆถ่ดน่
็้็ใ"},
],
styleData1:[{title:"ๅๆฌง้ฆ",fullSrc:"img/351.png",nextSrc:"img/352.png",smallSrc3:"img/353.png",
smallSrc4:"img/354.png",mark:"ๅๆฌง้ฃๆ ผ",detail:"ๅฎถๅ
ทไปฅ็ฎ็บฆๆๅฟซ็้ ๅๆณจ้็ฐไปฃๆถๅฐ็ๆฐๆฏ๏ผไปฅๅฎ็จ็พๆญ็น็นๅๅคงๆนๅพไฝ็่ฎพ่ฎกๅธๅผๆถ่ดน่
็้็ใ"},
{title:"ไนกๆ้ฆ",fullSrc:"img/361.png", nextSrc:"img/362.png",smallSrc3:"img/363.png",
smallSrc4:"img/364.png",mark:"ไนกๆ้ฃๆ ผ",detail:"ๅฎถๅ
ทไปฅ็ฎ็บฆๆๅฟซ็้ ๅๆณจ้็ฐไปฃๆถๅฐ็ๆฐๆฏ๏ผไปฅๅฎ็จ็พๆญ็น็นๅๅคงๆนๅพไฝ็่ฎพ่ฎกๅธๅผๆถ่ดน่
็้็ใ"},
{title:"็พๅผ้ฆ",fullSrc:"img/371.png",nextSrc:"img/372.png",smallSrc3:"img/373.png",
smallSrc4:"img/375.png",mark:"็พๅผ้ฃๆ ผ",detail:"ๅฎถๅ
ทไปฅ็ฎ็บฆๆๅฟซ็้ ๅๆณจ้็ฐไปฃๆถๅฐ็ๆฐๆฏ๏ผไปฅๅฎ็จ็พๆญ็น็นๅๅคงๆนๅพไฝ็่ฎพ่ฎกๅธๅผๆถ่ดน่
็้็ใ"}
],
afflatus:{
guide:[
{ Src:"img/1401.png",title:"้ไฝ ไฟๅ
ป่ฏๆน๏ผ่ฎฉไฝ ็ๅฎๆจๅฎถๅ
ทๅค็จ10ๅนด", way:"่ถ
ๅผบๅฎๆจๅฎถๅ
ทไฟๅ
ปๆป็ฅ!",type:"ไฟๅ
ป",mold:"ๅฎถๅ
ท"},
{ Src:"img/1402.png",title:"ๅ
ไนฐๅฎถๅ
ท่ฟๆฏๅ
่ฃ
ไฟฎ๏ผ99%็ไบบ็ๅฎ้ฝๅๆไบ", way:"ๅ
ไนฐๅฎถๅ
ท่ฟๆฏๅ
่ฃ
ไฟฎๆฏไธชไธ็บช้พ้ข",type:"ๅฎถๅ
ท",mold:"่ฃ
ไฟฎ"},
{ Src:"img/1403.png",title:"้ไฝ ไฟๅ
ป่ฏๆน๏ผ่ฎฉไฝ ็ๅฎๆจๅฎถๅ
ทๅค็จ10ๅนด", way:"่ถ
ๅผบๅฎๆจๅฎถๅ
ทไฟๅ
ปๆป็ฅ!",type:"ไฟๅ
ป",mold:"ๅฎถๅ
ท"},
{ Src:"img/1404.png",title:"้ไฝ ไฟๅ
ป่ฏๆน๏ผ่ฎฉไฝ ็ๅฎๆจๅฎถๅ
ทๅค็จ10ๅนด", way:"่ถ
ๅผบๅฎๆจๅฎถๅ
ทไฟๅ
ปๆป็ฅ!",type:"ไฟๅ
ป",mold:"ๅฎถๅ
ท"},
{ Src:"img/1405.png",title:"้ไฝ ไฟๅ
ป่ฏๆน๏ผ่ฎฉไฝ ็ๅฎๆจๅฎถๅ
ทๅค็จ10ๅนด", way:"่ถ
ๅผบๅฎๆจๅฎถๅ
ทไฟๅ
ปๆป็ฅ!",type
|
{
if (chats[i].id === parseInt(chatId)) {
return chats[i];
}
}
|
conditional_block
|
|
services.js
|
remove: function(chat) {
chats.splice(chats.indexOf(chat), 1);
},
get: function(chatId) {
for (var i = 0; i < chats.length; i++) {
if (chats[i].id === parseInt(chatId)) {
return chats[i];
}
}
return null;
}
};
})
.factory('listPesonel',function(){
var listData=[
[{src:"img/1200.png",title:"ๆ็ๅกๅท", count:"6ๅผ "},
{src:"img/1005.png",title:"็งฏๅๅๅ", count:""},
{src:"img/10067.png",title:"ๆ็็งฏๅ", count:"110็งฏๅ"},
{src:"img/1008.png",title:"ๆ็ๅคบๅฎ", count:"0ๅ"},
{src:"img/1009.png",title:"ๆ็็คผๅ
", count:"3ๅผ "}
],
[
{src:"img/10001.png",title:"ๆ็ซ ๆถ่", count:"0็ฏ"},
{src:"img/10002.png",title:"ๆ็ซ ่ฏ่ฎบ", count:"0็ฏ"},
{src:"img/10003.png",title:"ๅๅ้ฎ็ญ", count:"0ๆก"},
{src:"img/10004.png",title:"ๅพ็ๆถ่", count:"0ๅฅ"}
],
[
{src:"img/10005.png",title:"่ฎพ็ฝฎ", count:""},
{src:"img/10006.png",title:"ๅฎขๆ็ต่ฏ", count:"400-801-0404"}
]
]
return {
all:function(){
return listData
}
}
})
.factory("homePage",function(){
var homeData={
exhibition:[
{Src:"img/11111.png",name:"ๅฐไธญๆตทๆฒๅ",priece:"๏ฟฅ4918.00"},
{Src:"img/11112.png",name:"ๅฐไธญๆตท่ถๅ ",priece:"๏ฟฅ1438.00"},
{Src:"img/11113.png",name:"ๅงๅฎคๅบๅคดๆ",priece:"๏ฟฅ622.00"},
{Src:"img/11114.png",name:"ๅฎๆจ้คๅ
ๆก",priece:"๏ฟฅ3100.00"},
{Src:"img/11115.png",name:"็พ้ฃๅไบบๅบ",priece:"๏ฟฅ2860.00"},
{Src:"img/11116.png",name:"ๆ้จๅคง่กฃๆ",priece:"๏ฟฅ2880.00"},
{Src:"img/11117.png",name:"็ฐๅญ้ซ็ฎฑๅบ",priece:"๏ฟฅ1830.00"},
{Src:"img/11118.png",name:"ๅฐๆทๅๆถ็บณๆ",priece:"๏ฟฅ1258.00"},
{Src:"img/11119.png",name:"ๅบๅคดๅจ็ฉๆ",priece:"๏ฟฅ427.00"},
],
exhibition2:[
{Src:"img/11121.png",name:"็ฐไปฃๅธ่บๆฒๅ",priece:"๏ฟฅ3680.00"},
{Src:"img/11122.png",name:"ๅๆจ่ฒ่ถๅ ",priece:"๏ฟฅ1380.00"},
{Src:"img/11123.png",name:"ๅฏไผธ็ผฉ็ต่งๆ",priece:"๏ฟฅ1220.00"},
{Src:"img/11124.png",name:"ๅฐๆทๅ้คๆก",priece:"๏ฟฅ2140.00"},
{Src:"img/11125.png",name:"ๅๆฌงๅไบบๅบ",priece:"๏ฟฅ1333.00"},
{Src:"img/11127.png",name:"็ฎ็บฆๅบๅคดๆ",priece:"๏ฟฅ395.00"},
{Src:"img/11128.png",name:"็ๅ
ณ็ปๅๆ",priece:"๏ฟฅ1612.00"},
{Src:"img/11129.png",name:"่ฟทไฝ ๅฆๅฐ็ปๅ",priece:"๏ฟฅ949.00"},
{Src:"img/11130.png",name:"ๆพๆจๅฟ็ซฅๅบ",priece:"๏ฟฅ1846.00"},
{Src:"img/11131.png",name:"ๅ
จๅฎๆจๅบๅคดๆ",priece:"๏ฟฅ576.00"},
{Src:"img/11132.png",name:"ๅ้จๅฎๆจ่กฃๆ",priece:"๏ฟฅ2058.00"}
],
activeData:[
{writeSrc:"img/30001.png",fullSrc:"img/30002.png",smallSrc1:"img/30003.png",
smallSrc2:"img/30004.png",smallSrc3:"img/30005.png",smallSrc4:"img/30006.png", detail:"ๆฅ็ๆดๅคๅฎขๅ
ๅฎถๅ
ท"},
{writeSrc:"img/401.png",fullSrc:"img/402.png",smallSrc1:"img/403.png",
smallSrc2:"img/404.png",smallSrc3:"img/405.png",smallSrc4:"img/406.png", detail:"ๆฅ็ๆดๅคๅงๅฎคๅฎถๅ
ท"},
{writeSrc:"img/501.png",fullSrc:"img/502.png",smallSrc1:"img/503.png",
smallSrc2:"img/504.png",smallSrc3:"img/505.png",smallSrc4:"img/506.png", detail:"ๆฅ็ๆดๅค้คๅ
ๅฎถๅ
ท"}
],
styleData:[
{title:"็ฐไปฃ",fullSrc:"img/341.png",smallSrc1:"img/432.png",smallSrc2:"img/433.png",smallSrc3:"img/434.png",
smallSrc4:"img/435.png",mark:"็ฐไปฃ้ฃๆ ผ",detail:"ๅฎถๅ
ทไปฅ็ฎ็บฆๆๅฟซ็้ ๅๆณจ้็ฐไปฃๆถๅฐ็ๆฐๆฏ๏ผไปฅๅฎ็จ็พๆญ็น็นๅๅคงๆนๅพไฝ็่ฎพ่ฎกๅธๅผๆถ่ดน่
็้็ใ"},
],
styleData1:[{title:"ๅๆฌง้ฆ",fullSrc:"img/351.png",nextSrc:"img/352.png",smallSrc3:"img/353.png",
smallSrc4:"img/354.png",mark:"ๅๆฌง้ฃๆ ผ",detail:"ๅฎถๅ
ทไปฅ็ฎ็บฆๆๅฟซ็้ ๅๆณจ้็ฐไปฃๆถๅฐ็ๆฐๆฏ๏ผไปฅๅฎ็จ็พๆญ็น็นๅๅคงๆนๅพไฝ็่ฎพ่ฎกๅธๅผๆถ่ดน่
็้็ใ"},
{title:"ไนกๆ้ฆ",fullSrc:"img/361.png", nextSrc:"img/362.png",smallSrc3:"img/363.png",
smallSrc4:"img/364.png",mark:"ไนกๆ้ฃๆ ผ",detail:"ๅฎถๅ
ทไปฅ็ฎ็บฆๆๅฟซ็้ ๅๆณจ้็ฐไปฃๆถๅฐ็ๆฐๆฏ๏ผไปฅๅฎ็จ็พๆญ็น็นๅๅคงๆนๅพไฝ็่ฎพ่ฎกๅธๅผๆถ่ดน่
็้็ใ"},
{title:"็พๅผ้ฆ",fullSrc:"img/371.png",nextSrc:"img/372.png",smallSrc3:"img/373.png",
smallSrc4:"img/375.png",mark:"็พๅผ้ฃๆ ผ",detail:"ๅฎถๅ
ทไปฅ็ฎ็บฆๆๅฟซ็้ ๅๆณจ้็ฐไปฃๆถๅฐ็ๆฐๆฏ๏ผไปฅๅฎ็จ็พๆญ็น็นๅๅคงๆนๅพไฝ็่ฎพ่ฎกๅธๅผๆถ่ดน่
็้็ใ"}
],
afflatus:{
guide:[
{ Src:"img/1401.png",title:"้ไฝ ไฟๅ
ป่ฏๆน๏ผ่ฎฉไฝ ็ๅฎๆจๅฎถๅ
ทๅค็จ10ๅนด", way:"่ถ
ๅผบๅฎๆจๅฎถๅ
ทไฟๅ
ปๆป็ฅ!",type:"ไฟๅ
ป",mold:"ๅฎถๅ
ท"},
{ Src:"img/1402.png",title:"ๅ
ไนฐๅฎถๅ
ท่ฟๆฏๅ
่ฃ
ไฟฎ๏ผ99%็ไบบ็ๅฎ้ฝๅๆไบ", way:"ๅ
ไนฐๅฎถๅ
ท่ฟๆฏๅ
่ฃ
ไฟฎๆฏไธชไธ็บช้พ้ข",type:"ๅฎถๅ
ท",mold:"่ฃ
ไฟฎ"},
{ Src:"img/1403.png",title:"้ไฝ ไฟๅ
ป่ฏๆน๏ผ่ฎฉไฝ ็ๅฎๆจๅฎถๅ
ทๅค็จ10ๅนด", way:"่ถ
ๅผบๅฎๆจๅฎถๅ
ทไฟๅ
ปๆป็ฅ!",type:"ไฟๅ
ป",mold:"ๅฎถๅ
ท"},
{ Src:"img/1404.png",title:"้ไฝ ไฟๅ
ป่ฏๆน๏ผ่ฎฉไฝ ็ๅฎๆจๅฎถๅ
ทๅค็จ10ๅนด", way:"่ถ
ๅผบๅฎๆจ
|
all: function() {
return chats;
},
|
random_line_split
|
|
olympics.py
|
participated in the given event. When used with -n, restricts the query to all athletes from a certain NOC who have also participated in the specified event. When used with -g, restricts the query to all medals won by an NOC in the specified event.'
    year_help = 'Queries the olympics database for every athlete that participated in the given year. When used with -n, restricts the query to all athletes from a certain NOC who participated in the given year. When used with -g, restricts the query to all medals won by each NOC in a certain year. When used with -e, restricts the query to all athletes from a certain event who participated in the given year.'
    medal_help = 'Queries the olympics database for every athlete that has medaled, sorted by the number of medals won. When used with -n, -e, or -y, restricts the query to all athletes who have medaled.'
    # Creates an argument parser and all flags for the program. The -g/--gold flag cannot be combined with -n/--noc or -m/--medal.
parser = argparse.ArgumentParser(description=arg_parse_description)
parser.add_argument('-n', '--noc', metavar='NOC_CODE', nargs=1, type=str, help=noc_help)
parser.add_argument('-g', '--gold', action='store_true', help=gold_help)
parser.add_argument('-e', '--event', metavar='EVENT_NAME', nargs=1, type=str, help=event_help)
parser.add_argument('-y', '--year', metavar='YEAR', nargs=1, type=int, help=year_help)
parser.add_argument('-m', '--medal', action='store_true', help=medal_help)
parsed_arguments = parser.parse_args()
    # Prevents the -g flag from being used with the -n or -m flag
if parsed_arguments.gold and (parsed_arguments.noc or parsed_arguments.medal):
parser.error('-g/--gold cannot be used with -n/--noc or -m/--medal')
return parsed_arguments
def form_variable_query(noc_code, event, medal, year):
query = 'SELECT '
fields = ['athletes.athlete_name', 'noc_regions.code', 'noc_regions.region', 'events.event', 'sports.sport', 'games.title', 'medals.medal']
tables = ['athletes', 'athletes_biometrics', 'athletes_super_table', 'noc_regions', 'events', 'sports', 'games', 'medals']
where_statements = ['athletes_biometrics.athletes_id = athletes.id', 'athletes_super_table.athletes_biometrics_id = athletes_biometrics.athletes_id', 'athletes_super_table.noc_id = noc_regions.id', 'athletes_super_table.event_id = events.id', 'events.sport_id = sports.id', 'athletes_super_table.games_id = games.id', 'athletes_super_table.medal_id = medals.id']
    # The commented-out lines in these if statements belong in the code and work correctly. They are left disabled here for clarity, so the output columns show that the query returns the correct results.
if noc_code:
#fields.remove('noc_regions.code')
fields.remove('noc_regions.region')
where_statements.append('noc_regions.code LIKE \'{noc_code}\'')
if event:
fields.remove('sports.sport')
#fields.remove('events.event')
where_statements.append('events.event LIKE \'{event_name}\'')
if year:
#fields.remove('games.title')
|
query += item + ', '
# Removes the last comma from the query string
query = query[:-2] + '\n'
query += 'FROM '
for item in tables:
query += item + ', '
# Removes the last comma from the query string
query = query[:-2] + '\n'
query += 'WHERE '
for item in where_statements:
query += item + '\nAND '
# Removes the last 'AND' from the query string
query = query[:-4]
# Orders the list by the type of medals won (adapted from https://stackoverflow.com/questions/6332043/sql-order-by-multiple-values-in-specific-order)
if medal:
query += '''ORDER BY
CASE
WHEN medals.medal = \'Gold\' THEN 1
WHEN medals.medal = \'Silver\' THEN 2
WHEN medals.medal = \'Bronze\' THEN 3
END'''
query += ';'
print(query)
return query
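# Illustrative shape of the assembled query for `-n USA -m` (the flag values
# are hypothetical; the exact string depends on which flags were passed, and
# the {noc_code} placeholder is filled later by str.format):
#
#   SELECT athletes.athlete_name, noc_regions.code, events.event, sports.sport, games.title, medals.medal
#   FROM athletes, athletes_biometrics, athletes_super_table, noc_regions, events, sports, games, medals
#   WHERE athletes_biometrics.athletes_id = athletes.id
#   AND ... (the remaining join conditions) ...
#   AND noc_regions.code LIKE '{noc_code}'
#   AND medals.medal NOT LIKE 'NA'
#   ORDER BY CASE WHEN medals.medal = 'Gold' THEN 1 ... END;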
def form_golden_query(event, year):
query = 'SELECT '
fields = ['COUNT(medals.medal)', 'noc_regions.region']
tables = ['medals', 'athletes_super_table', 'noc_regions']
where_statements = ['athletes_super_table.medal_id = medals.id', 'medals.medal LIKE \'Gold\'', 'athletes_super_table.noc_id = noc_regions.id']
if event:
tables.append('events')
tables.append('sports')
where_statements.append('athletes_super_table.event_id = events.id')
where_statements.append('events.sport_id = sports.id')
where_statements.append('events.event LIKE \'{event_name}\'')
if year:
tables.append('games')
where_statements.append('athletes_super_table.games_id = games.id')
where_statements.append('cast(games.year AS TEXT) LIKE cast(\'{games_year}\' AS TEXT)')
for item in fields:
query += item + ', '
# Removes the last comma from the query string
query = query[:-2] + '\n'
query += 'FROM '
for item in tables:
query += item + ', '
# Removes the last comma from the query string
query = query[:-2] + '\n'
query += 'WHERE '
for item in where_statements:
query += item + '\nAND '
# Removes the last 'AND' from the query string
query = query[:-4]
query += 'GROUP BY noc_regions.region\n'
query += 'ORDER BY COUNT(medals.medal) DESC, noc_regions.region;'
return query
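# With no extra flags, form_golden_query(False, False) assembles a query of
# exactly this shape:
#
#   SELECT COUNT(medals.medal), noc_regions.region
#   FROM medals, athletes_super_table, noc_regions
#   WHERE athletes_super_table.medal_id = medals.id
#   AND medals.medal LIKE 'Gold'
#   AND athletes_super_table.noc_id = noc_regions.id
#   GROUP BY noc_regions.region
#   ORDER BY COUNT(medals.medal) DESC, noc_regions.region;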
def run_variable_query(cursor, noc_code='', event_name='', medal=False, games_year=0):
noc = False
if noc_code != '':
noc = True
event = False
if event_name != '':
event = True
year = False
if games_year != 0:
year = True
query = form_variable_query(noc, event, medal, year)
try:
cursor.execute(query.format(noc_code=noc_code, event_name=event_name, games_year=games_year))
except Exception as e:
print(e)
exit()
def run_golden_query(cursor, event_name='', games_year=0):
event = False
if event_name != '':
event = True
year = False
if games_year != 0:
year = True
query = form_golden_query(event, year)
try:
cursor.execute(query.format(event_name=event_name, games_year=games_year))
except Exception as e:
print(e)
exit()
def fix_single_quotes(broken_string):
temp_string_array = broken_string.split('\'')
fixed_string = ''
for substring in temp_string_array:
fixed_string += substring + '%'
fixed_string = fixed_string[:-1]
return fixed_string
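# Worked example: fix_single_quotes("O'Brien") splits on the quote into
# ["O", "Brien"], appends '%' to each piece, and trims the trailing '%',
# yielding "O%Brien": a LIKE/ILIKE pattern that still matches the original
# name without embedding a literal single quote in the SQL string.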
def user_input_identifier(cursor, input_string, field, table):
query = 'SELECT {table}.{primary_field}'
if table == 'noc_regions':
query += ', noc_regions.region'
elif table == 'events':
query += ', sports.sport'
query += ' FROM {table}'
if table == 'events':
query += ', sports'
query += ' WHERE cast({table}.{primary_field} AS TEXT) ILIKE cast(\'%{input_string}%\' AS TEXT)'
if table == 'noc_regions':
query += ' OR noc_regions.region ILIKE \'%{input_string}%\''
query += ' GROUP BY noc_regions.code, noc_regions.region'
elif table == 'events':
query += ' AND events.sport_id = sports.id'
query += ';'
try:
cursor.execute(query.format(primary_field=field, table=table, input_string=input_string))
except Exception as e:
print(e)
exit()
""" print(query.format(primary_field=field, table=table, input_string=input_string))
print(cursor.rowcount)
exit() """
if cursor.rowcount == 0:
print('That string is not present in the appropriate table. Please run the program again.')
exit()
if cursor.rowcount == 1:
temp_query_list = cursor.fetchall()
if len(temp_query_list) == 2: # When the events or noc_regions table was queried
return temp_query_list[0][0]
return temp_query_list[0][0]
else:
print('Did you mean one of the following?')
if table == 'noc_regions':
print(' Code' + ' ' + 'Region')
print('=' * 30)
|
where_statements.append('cast(games.year AS TEXT) LIKE cast(\'{games_year}\' AS TEXT)')
if medal:
where_statements.append('medals.medal NOT LIKE \'NA\'')
for item in fields:
|
random_line_split
|
olympics.py
|
():
'''
Gets arguments from command line.
'''
# Help descriptions for each argument and the argparser.
arg_parse_description = '''Finds information about the athletes registered under a specific NOC (National Olympic Committee), the athletes who have participated in a given event, the athletes who participated in a certain year, and the athletes who have medaled.
Additionally, finds the number of gold medals each NOC has won.
The -e, -m, -n, and -y flags can all be combined in any order. The -g flag can only be modified by the -e and -y flags.'''
noc_help = 'Queries the olympics database for every athlete from a given NOC'
gold_help = 'Queries the olympics database for every NOC sorted by the number of gold medals won'
event_help = 'Queries the olympics database for every athlete that has participated in the given event. When used with -n, restricts the query to all athletes from a certain NOC who have also participated in the specified event. When used with -g, restricts the query to all medals won by an NOC in the specified event.'
    year_help = 'Queries the olympics database for every athlete that participated in the given year. When used with -n, restricts the query to all athletes from a certain NOC who participated in the given year. When used with -g, restricts the query to all medals won by each NOC in a certain year. When used with -e, restricts the query to all athletes from a certain event who participated in the given year.'
    medal_help = 'Queries the olympics database for every athlete that has medaled, sorted by the number of medals won. When used with -n, -e, or -y, restricts the query to all athletes who have medaled.'
    # Creates an argument parser and all flags for the program. The -g/--gold flag cannot be combined with -n/--noc or -m/--medal.
parser = argparse.ArgumentParser(description=arg_parse_description)
parser.add_argument('-n', '--noc', metavar='NOC_CODE', nargs=1, type=str, help=noc_help)
parser.add_argument('-g', '--gold', action='store_true', help=gold_help)
parser.add_argument('-e', '--event', metavar='EVENT_NAME', nargs=1, type=str, help=event_help)
parser.add_argument('-y', '--year', metavar='YEAR', nargs=1, type=int, help=year_help)
parser.add_argument('-m', '--medal', action='store_true', help=medal_help)
parsed_arguments = parser.parse_args()
    # Prevents the -g flag from being used with the -n or -m flag
if parsed_arguments.gold and (parsed_arguments.noc or parsed_arguments.medal):
parser.error('-g/--gold cannot be used with -n/--noc or -m/--medal')
return parsed_arguments
def form_variable_query(noc_code, event, medal, year):
query = 'SELECT '
fields = ['athletes.athlete_name', 'noc_regions.code', 'noc_regions.region', 'events.event', 'sports.sport', 'games.title', 'medals.medal']
tables = ['athletes', 'athletes_biometrics', 'athletes_super_table', 'noc_regions', 'events', 'sports', 'games', 'medals']
where_statements = ['athletes_biometrics.athletes_id = athletes.id', 'athletes_super_table.athletes_biometrics_id = athletes_biometrics.athletes_id', 'athletes_super_table.noc_id = noc_regions.id', 'athletes_super_table.event_id = events.id', 'events.sport_id = sports.id', 'athletes_super_table.games_id = games.id', 'athletes_super_table.medal_id = medals.id']
    # The commented-out lines in these if statements belong in the code and work correctly. They are left disabled here for clarity, so the output columns show that the query returns the correct results.
if noc_code:
#fields.remove('noc_regions.code')
fields.remove('noc_regions.region')
where_statements.append('noc_regions.code LIKE \'{noc_code}\'')
if event:
fields.remove('sports.sport')
#fields.remove('events.event')
where_statements.append('events.event LIKE \'{event_name}\'')
if year:
#fields.remove('games.title')
where_statements.append('cast(games.year AS TEXT) LIKE cast(\'{games_year}\' AS TEXT)')
if medal:
where_statements.append('medals.medal NOT LIKE \'NA\'')
for item in fields:
query += item + ', '
# Removes the last comma from the query string
query = query[:-2] + '\n'
query += 'FROM '
for item in tables:
query += item + ', '
# Removes the last comma from the query string
query = query[:-2] + '\n'
query += 'WHERE '
for item in where_statements:
query += item + '\nAND '
# Removes the last 'AND' from the query string
query = query[:-4]
# Orders the list by the type of medals won (adapted from https://stackoverflow.com/questions/6332043/sql-order-by-multiple-values-in-specific-order)
if medal:
query += '''ORDER BY
CASE
WHEN medals.medal = \'Gold\' THEN 1
WHEN medals.medal = \'Silver\' THEN 2
WHEN medals.medal = \'Bronze\' THEN 3
END'''
query += ';'
print(query)
return query
def form_golden_query(event, year):
query = 'SELECT '
fields = ['COUNT(medals.medal)', 'noc_regions.region']
tables = ['medals', 'athletes_super_table', 'noc_regions']
where_statements = ['athletes_super_table.medal_id = medals.id', 'medals.medal LIKE \'Gold\'', 'athletes_super_table.noc_id = noc_regions.id']
if event:
tables.append('events')
tables.append('sports')
where_statements.append('athletes_super_table.event_id = events.id')
where_statements.append('events.sport_id = sports.id')
where_statements.append('events.event LIKE \'{event_name}\'')
if year:
tables.append('games')
where_statements.append('athletes_super_table.games_id = games.id')
where_statements.append('cast(games.year AS TEXT) LIKE cast(\'{games_year}\' AS TEXT)')
for item in fields:
query += item + ', '
# Removes the last comma from the query string
query = query[:-2] + '\n'
query += 'FROM '
for item in tables:
query += item + ', '
# Removes the last comma from the query string
query = query[:-2] + '\n'
query += 'WHERE '
for item in where_statements:
query += item + '\nAND '
# Removes the last 'AND' from the query string
query = query[:-4]
query += 'GROUP BY noc_regions.region\n'
query += 'ORDER BY COUNT(medals.medal) DESC, noc_regions.region;'
return query
def run_variable_query(cursor, noc_code='', event_name='', medal=False, games_year=0):
noc = False
if noc_code != '':
noc = True
event = False
if event_name != '':
event = True
year = False
if games_year != 0:
year = True
query = form_variable_query(noc, event, medal, year)
try:
cursor.execute(query.format(noc_code=noc_code, event_name=event_name, games_year=games_year))
except Exception as e:
print(e)
exit()
def run_golden_query(cursor, event_name='', games_year=0):
event = False
if event_name != '':
event = True
year = False
if games_year != 0:
year = True
query = form_golden_query(event, year)
try:
cursor.execute(query.format(event_name=event_name, games_year=games_year))
except Exception as e:
print(e)
exit()
def fix_single_quotes(broken_string):
temp_string_array = broken_string.split('\'')
fixed_string = ''
for substring in temp_string_array:
fixed_string += substring + '%'
fixed_string = fixed_string[:-1]
return fixed_string
def user_input_identifier(cursor, input_string, field, table):
query = 'SELECT {table}.{primary_field}'
if table == 'noc_regions':
query += ', noc_regions.region'
elif table == 'events':
query += ', sports.sport'
query += ' FROM {table}'
if table == 'events':
query += ', sports'
query += ' WHERE cast({table}.{primary_field} AS TEXT) ILIKE cast(\'%{input_string}%\' AS TEXT)'
if table == 'noc_regions':
query += ' OR noc_regions.region ILIKE \'%{input_string}%\''
query += ' GROUP BY noc_regions.code, noc_regions.region'
elif table == 'events':
query += ' AND events.sport_id = sports.id'
query += ';'
try:
cursor.execute(query.format(primary
|
get_parsed_arguments
|
identifier_name
|
|
olympics.py
|
participated in the given event. When used with -n, restricts the query to all athletes from a certain NOC who have also participated in the specified event. When used with -g, restricts the query to all medals won by an NOC in the specified event.'
    year_help = 'Queries the olympics database for every athlete that participated in the given year. When used with -n, restricts the query to all athletes from a certain NOC who participated in the given year. When used with -g, restricts the query to all medals won by each NOC in a certain year. When used with -e, restricts the query to all athletes from a certain event who participated in the given year.'
    medal_help = 'Queries the olympics database for every athlete that has medaled, sorted by the number of medals won. When used with -n, -e, or -y, restricts the query to all athletes who have medaled.'
    # Creates an argument parser and all flags for the program. The -g/--gold flag cannot be combined with -n/--noc or -m/--medal.
parser = argparse.ArgumentParser(description=arg_parse_description)
parser.add_argument('-n', '--noc', metavar='NOC_CODE', nargs=1, type=str, help=noc_help)
parser.add_argument('-g', '--gold', action='store_true', help=gold_help)
parser.add_argument('-e', '--event', metavar='EVENT_NAME', nargs=1, type=str, help=event_help)
parser.add_argument('-y', '--year', metavar='YEAR', nargs=1, type=int, help=year_help)
parser.add_argument('-m', '--medal', action='store_true', help=medal_help)
parsed_arguments = parser.parse_args()
    # Prevents the -g flag from being used with the -n or -m flag
if parsed_arguments.gold and (parsed_arguments.noc or parsed_arguments.medal):
parser.error('-g/--gold cannot be used with -n/--noc or -m/--medal')
return parsed_arguments
def form_variable_query(noc_code, event, medal, year):
query = 'SELECT '
fields = ['athletes.athlete_name', 'noc_regions.code', 'noc_regions.region', 'events.event', 'sports.sport', 'games.title', 'medals.medal']
tables = ['athletes', 'athletes_biometrics', 'athletes_super_table', 'noc_regions', 'events', 'sports', 'games', 'medals']
where_statements = ['athletes_biometrics.athletes_id = athletes.id', 'athletes_super_table.athletes_biometrics_id = athletes_biometrics.athletes_id', 'athletes_super_table.noc_id = noc_regions.id', 'athletes_super_table.event_id = events.id', 'events.sport_id = sports.id', 'athletes_super_table.games_id = games.id', 'athletes_super_table.medal_id = medals.id']
    # The commented-out lines in these if statements belong in the code and work correctly. They are left disabled here for clarity, so the output columns show that the query returns the correct results.
if noc_code:
#fields.remove('noc_regions.code')
fields.remove('noc_regions.region')
where_statements.append('noc_regions.code LIKE \'{noc_code}\'')
if event:
fields.remove('sports.sport')
#fields.remove('events.event')
where_statements.append('events.event LIKE \'{event_name}\'')
if year:
#fields.remove('games.title')
where_statements.append('cast(games.year AS TEXT) LIKE cast(\'{games_year}\' AS TEXT)')
if medal:
where_statements.append('medals.medal NOT LIKE \'NA\'')
for item in fields:
query += item + ', '
# Removes the last comma from the query string
query = query[:-2] + '\n'
query += 'FROM '
for item in tables:
query += item + ', '
# Removes the last comma from the query string
query = query[:-2] + '\n'
query += 'WHERE '
for item in where_statements:
query += item + '\nAND '
# Removes the last 'AND' from the query string
query = query[:-4]
# Orders the list by the type of medals won (adapted from https://stackoverflow.com/questions/6332043/sql-order-by-multiple-values-in-specific-order)
if medal:
query += '''ORDER BY
CASE
WHEN medals.medal = \'Gold\' THEN 1
WHEN medals.medal = \'Silver\' THEN 2
WHEN medals.medal = \'Bronze\' THEN 3
END'''
query += ';'
print(query)
return query
def form_golden_query(event, year):
query = 'SELECT '
fields = ['COUNT(medals.medal)', 'noc_regions.region']
tables = ['medals', 'athletes_super_table', 'noc_regions']
where_statements = ['athletes_super_table.medal_id = medals.id', 'medals.medal LIKE \'Gold\'', 'athletes_super_table.noc_id = noc_regions.id']
if event:
tables.append('events')
tables.append('sports')
where_statements.append('athletes_super_table.event_id = events.id')
where_statements.append('events.sport_id = sports.id')
where_statements.append('events.event LIKE \'{event_name}\'')
if year:
tables.append('games')
where_statements.append('athletes_super_table.games_id = games.id')
where_statements.append('cast(games.year AS TEXT) LIKE cast(\'{games_year}\' AS TEXT)')
for item in fields:
query += item + ', '
# Removes the last comma from the query string
query = query[:-2] + '\n'
query += 'FROM '
for item in tables:
query += item + ', '
# Removes the last comma from the query string
query = query[:-2] + '\n'
query += 'WHERE '
for item in where_statements:
query += item + '\nAND '
# Removes the last 'AND' from the query string
query = query[:-4]
query += 'GROUP BY noc_regions.region\n'
query += 'ORDER BY COUNT(medals.medal) DESC, noc_regions.region;'
return query
def run_variable_query(cursor, noc_code='', event_name='', medal=False, games_year=0):
|
def run_golden_query(cursor, event_name='', games_year=0):
event = False
if event_name != '':
event = True
year = False
if games_year != 0:
year = True
query = form_golden_query(event, year)
try:
cursor.execute(query.format(event_name=event_name, games_year=games_year))
except Exception as e:
print(e)
exit()
def fix_single_quotes(broken_string):
temp_string_array = broken_string.split('\'')
fixed_string = ''
for substring in temp_string_array:
fixed_string += substring + '%'
fixed_string = fixed_string[:-1]
return fixed_string
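# A sketch of the parameterized alternative that would make fix_single_quotes
# unnecessary, assuming a psycopg2-style cursor (the ILIKE syntax used in this
# file suggests PostgreSQL); the driver fills the %s placeholder safely:
#
#   cursor.execute(
#       'SELECT events.event FROM events WHERE events.event ILIKE %s;',
#       ('%' + user_input + '%',),
#   )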
def user_input_identifier(cursor, input_string, field, table):
query = 'SELECT {table}.{primary_field}'
if table == 'noc_regions':
query += ', noc_regions.region'
elif table == 'events':
query += ', sports.sport'
query += ' FROM {table}'
if table == 'events':
query += ', sports'
query += ' WHERE cast({table}.{primary_field} AS TEXT) ILIKE cast(\'%{input_string}%\' AS TEXT)'
if table == 'noc_regions':
query += ' OR noc_regions.region ILIKE \'%{input_string}%\''
query += ' GROUP BY noc_regions.code, noc_regions.region'
elif table == 'events':
query += ' AND events.sport_id = sports.id'
query += ';'
try:
cursor.execute(query.format(primary_field=field, table=table, input_string=input_string))
except Exception as e:
print(e)
exit()
""" print(query.format(primary_field=field, table=table, input_string=input_string))
print(cursor.rowcount)
exit() """
if cursor.rowcount == 0:
print('That string is not present in the appropriate table. Please run the program again.')
exit()
if cursor.rowcount == 1:
temp_query_list = cursor.fetchall()
if len(temp_query_list) == 2: # When the events or noc_regions table was queried
return temp_query_list[0][0]
return temp_query_list[0][0]
else:
print('Did you mean one of the following?')
if table == 'noc_regions':
print(' Code' + ' ' + 'Region')
print('=' * 30)
elif
|
noc = False
if noc_code != '':
noc = True
event = False
if event_name != '':
event = True
year = False
if games_year != 0:
year = True
query = form_variable_query(noc, event, medal, year)
try:
cursor.execute(query.format(noc_code=noc_code, event_name=event_name, games_year=games_year))
except Exception as e:
print(e)
exit()
|
identifier_body
|
olympics.py
|
event. When used with -g, restricts the query to all medals won by an NOC in the specified event.'
    year_help = 'Queries the olympics database for every athlete that participated in the given year. When used with -n, restricts the query to all athletes from a certain NOC who participated in the given year. When used with -g, restricts the query to all medals won by each NOC in a certain year. When used with -e, restricts the query to all athletes from a certain event who participated in the given year.'
    medal_help = 'Queries the olympics database for every athlete that has medaled, sorted by the number of medals won. When used with -n, -e, or -y, restricts the query to all athletes who have medaled.'
    # Creates an argument parser and all flags for the program. The -g/--gold flag cannot be combined with -n/--noc or -m/--medal.
parser = argparse.ArgumentParser(description=arg_parse_description)
parser.add_argument('-n', '--noc', metavar='NOC_CODE', nargs=1, type=str, help=noc_help)
parser.add_argument('-g', '--gold', action='store_true', help=gold_help)
parser.add_argument('-e', '--event', metavar='EVENT_NAME', nargs=1, type=str, help=event_help)
parser.add_argument('-y', '--year', metavar='YEAR', nargs=1, type=int, help=year_help)
parser.add_argument('-m', '--medal', action='store_true', help=medal_help)
parsed_arguments = parser.parse_args()
    # Prevents the -g flag from being used with the -n or -m flag
if parsed_arguments.gold and (parsed_arguments.noc or parsed_arguments.medal):
parser.error('-g/--gold cannot be used with -n/--noc or -m/--medal')
return parsed_arguments
def form_variable_query(noc_code, event, medal, year):
query = 'SELECT '
fields = ['athletes.athlete_name', 'noc_regions.code', 'noc_regions.region', 'events.event', 'sports.sport', 'games.title', 'medals.medal']
tables = ['athletes', 'athletes_biometrics', 'athletes_super_table', 'noc_regions', 'events', 'sports', 'games', 'medals']
where_statements = ['athletes_biometrics.athletes_id = athletes.id', 'athletes_super_table.athletes_biometrics_id = athletes_biometrics.athletes_id', 'athletes_super_table.noc_id = noc_regions.id', 'athletes_super_table.event_id = events.id', 'events.sport_id = sports.id', 'athletes_super_table.games_id = games.id', 'athletes_super_table.medal_id = medals.id']
    # The commented-out lines in these if statements belong in the code and work correctly. They are left disabled here for clarity, so the output columns show that the query returns the correct results.
if noc_code:
#fields.remove('noc_regions.code')
fields.remove('noc_regions.region')
where_statements.append('noc_regions.code LIKE \'{noc_code}\'')
if event:
fields.remove('sports.sport')
#fields.remove('events.event')
where_statements.append('events.event LIKE \'{event_name}\'')
if year:
#fields.remove('games.title')
where_statements.append('cast(games.year AS TEXT) LIKE cast(\'{games_year}\' AS TEXT)')
if medal:
where_statements.append('medals.medal NOT LIKE \'NA\'')
for item in fields:
query += item + ', '
# Removes the last comma from the query string
query = query[:-2] + '\n'
query += 'FROM '
for item in tables:
query += item + ', '
# Removes the last comma from the query string
query = query[:-2] + '\n'
query += 'WHERE '
for item in where_statements:
query += item + '\nAND '
# Removes the last 'AND' from the query string
query = query[:-4]
# Orders the list by the type of medals won (adapted from https://stackoverflow.com/questions/6332043/sql-order-by-multiple-values-in-specific-order)
if medal:
query += '''ORDER BY
CASE
WHEN medals.medal = \'Gold\' THEN 1
WHEN medals.medal = \'Silver\' THEN 2
WHEN medals.medal = \'Bronze\' THEN 3
END'''
query += ';'
print(query)
return query
def form_golden_query(event, year):
query = 'SELECT '
fields = ['COUNT(medals.medal)', 'noc_regions.region']
tables = ['medals', 'athletes_super_table', 'noc_regions']
where_statements = ['athletes_super_table.medal_id = medals.id', 'medals.medal LIKE \'Gold\'', 'athletes_super_table.noc_id = noc_regions.id']
if event:
tables.append('events')
tables.append('sports')
where_statements.append('athletes_super_table.event_id = events.id')
where_statements.append('events.sport_id = sports.id')
where_statements.append('events.event LIKE \'{event_name}\'')
if year:
tables.append('games')
where_statements.append('athletes_super_table.games_id = games.id')
where_statements.append('cast(games.year AS TEXT) LIKE cast(\'{games_year}\' AS TEXT)')
for item in fields:
query += item + ', '
# Removes the last comma from the query string
query = query[:-2] + '\n'
query += 'FROM '
for item in tables:
query += item + ', '
# Removes the last comma from the query string
query = query[:-2] + '\n'
query += 'WHERE '
for item in where_statements:
query += item + '\nAND '
# Removes the last 'AND' from the query string
query = query[:-4]
query += 'GROUP BY noc_regions.region\n'
query += 'ORDER BY COUNT(medals.medal) DESC, noc_regions.region;'
return query
def run_variable_query(cursor, noc_code='', event_name='', medal=False, games_year=0):
noc = False
if noc_code != '':
noc = True
event = False
if event_name != '':
event = True
year = False
if games_year != 0:
year = True
query = form_variable_query(noc, event, medal, year)
try:
cursor.execute(query.format(noc_code=noc_code, event_name=event_name, games_year=games_year))
except Exception as e:
print(e)
exit()
def run_golden_query(cursor, event_name='', games_year=0):
event = False
if event_name != '':
event = True
year = False
if games_year != 0:
year = True
query = form_golden_query(event, year)
try:
cursor.execute(query.format(event_name=event_name, games_year=games_year))
except Exception as e:
print(e)
exit()
def fix_single_quotes(broken_string):
temp_string_array = broken_string.split('\'')
fixed_string = ''
for substring in temp_string_array:
fixed_string += substring + '%'
fixed_string = fixed_string[:-1]
return fixed_string
def user_input_identifier(cursor, input_string, field, table):
query = 'SELECT {table}.{primary_field}'
if table == 'noc_regions':
query += ', noc_regions.region'
elif table == 'events':
query += ', sports.sport'
query += ' FROM {table}'
if table == 'events':
query += ', sports'
query += ' WHERE cast({table}.{primary_field} AS TEXT) ILIKE cast(\'%{input_string}%\' AS TEXT)'
if table == 'noc_regions':
query += ' OR noc_regions.region ILIKE \'%{input_string}%\''
query += ' GROUP BY noc_regions.code, noc_regions.region'
elif table == 'events':
query += ' AND events.sport_id = sports.id'
query += ';'
try:
cursor.execute(query.format(primary_field=field, table=table, input_string=input_string))
except Exception as e:
print(e)
exit()
""" print(query.format(primary_field=field, table=table, input_string=input_string))
print(cursor.rowcount)
exit() """
if cursor.rowcount == 0:
print('That string is not present in the appropriate table. Please run the program again.')
exit()
if cursor.rowcount == 1:
temp_query_list = cursor.fetchall()
if len(temp_query_list) == 2: # When the events or noc_regions table was queried
return temp_query_list[0][0]
return temp_query_list[0][0]
else:
print('Did you mean one of the following?')
if table == 'noc_regions':
print(' Code' + ' ' + 'Region')
print('=' * 30)
elif table == 'events':
|
print(' Events' + ' ' * (54) + 'Sports')
print('=' * 100)
|
conditional_block
|
|
dbgap.go
|
return t, nil
}
pub, err := x509.ParsePKCS1PublicKey(block.Bytes)
if err != nil {
return nil, fmt.Errorf("parsing public key: %v", err)
}
t.publicKey = pub
return t, nil
}
// TranslateToken implements the ga4gh.Translator interface.
func (s *DbGapTranslator) TranslateToken(ctx context.Context, auth string) (*ga4gh.Identity, error) {
if err := ga4gh.VerifyTokenWithKey(s.publicKey, auth); err != nil {
return nil, fmt.Errorf("verifying user token signature: %v", err)
}
userInfo, err := s.getURL(dbGapUserInfoURL, auth)
if err != nil {
return nil, fmt.Errorf("getting dbGaP user info: %v", err)
}
if err := ga4gh.VerifyTokenWithKey(s.publicKey, userInfo); err != nil {
return nil, fmt.Errorf("verifying user info token signature: %v", err)
}
passport, err := s.getURL(dbGapPassportURL, auth)
if err != nil {
return nil, fmt.Errorf("getting dbGaP passport: %v", err)
}
if err := ga4gh.VerifyTokenWithKey(s.publicKey, passport); err != nil {
return nil, fmt.Errorf("verifying passport token signature: %v", err)
}
var claims dbGapClaims
var id dbGapIdToken
if err := s.extractClaims(auth, &id, &claims); err != nil {
return nil, fmt.Errorf("extracting user claims: %v", err)
}
if err := s.extractClaims(userInfo, &id, &claims); err != nil {
return nil, fmt.Errorf("extracting user info claims: %v", err)
}
if err := s.extractClaims(passport, &id, &claims); err != nil {
return nil, fmt.Errorf("extracting passport claims: %v", err)
}
return s.translateToken(ctx, convertToOIDCIDToken(id), claims, time.Now())
}
func (s *DbGapTranslator) getURL(url, userTok string) (string, error) {
url = strings.Replace(url, "${TOKEN}", userTok, -1)
	get, err := http.Get(url)
	if err != nil {
		return "", err
	}
	defer get.Body.Close()
	buf := new(bytes.Buffer)
	if _, err := buf.ReadFrom(get.Body); err != nil {
		return "", fmt.Errorf("reading response body: %v", err)
	}
	body := buf.String()
if get.StatusCode < 200 || get.StatusCode > 299 {
return "", fmt.Errorf("http status %d: %v", get.StatusCode, body)
}
return body, nil
}
func (s *DbGapTranslator) extractClaims(tok string, id *dbGapIdToken, claims *dbGapClaims) error {
parsed, err := jwt.ParseSigned(tok)
if err != nil {
return fmt.Errorf("parsing signed token: %v", err)
}
err = parsed.UnsafeClaimsWithoutVerification(id, claims)
if err != nil {
return fmt.Errorf("extracting claims from token: %v", err)
}
return nil
}
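// Note: UnsafeClaimsWithoutVerification is acceptable here only because
// TranslateToken verifies each token's signature with ga4gh.VerifyTokenWithKey
// before extractClaims is ever called on it.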
func (s *DbGapTranslator) translateToken(ctx context.Context, token *oidc.IDToken, claims dbGapClaims, now time.Time) (*ga4gh.Identity, error) {
id := ga4gh.Identity{
Issuer: token.Issuer,
Subject: token.Subject,
Expiry: token.Expiry.Unix(),
GivenName: claims.Vcard.GivenName,
FamilyName: claims.Vcard.FamilyName,
Name: strutil.JoinNonEmpty([]string{claims.Vcard.GivenName, claims.Vcard.FamilyName}, " "),
Email: claims.Vcard.Email,
VisaJWTs: []string{},
}
for _, ident := range claims.Identity {
if ident.Authority == eraCommonsAuthority {
if username, ok := ident.ID.(string); ok {
id.Username = username
}
}
}
accessions := make(map[string]dbGapAccess)
type source struct {
orgID string
by string
}
affiliations := make(map[string]source)
for _, p := range claims.DbGapPassport {
for _, a := range p.Access {
if a.Study.Accession == nil {
continue
}
			// TODO: Verify that the heuristic for de-duplicating access entries is correct.
ac := *a.Study.Accession
exp := a.Expires
if access, ok := accessions[ac]; ok {
// For duplicate accessions, only keep the one with the later expiry timestamp.
if access.Expires > exp {
continue
}
}
accessions[ac] = dbGapAccess{
Expires: exp,
Issued: a.Issued,
}
}
if p.Org == nil || len(*p.Org) == 0 || p.Role == nil || len(*p.Role) == 0 {
continue
}
var r string
if *p.Role == "pi" || *p.Role == "downloader" {
r = "nih.researcher"
} else {
r = "member"
}
o := removePunctuation.ReplaceAllString(*p.Org, "")
o = strings.ReplaceAll(o, " ", "-")
v := r + "@" + o + ".orgs.nih.gov"
// Does not deal with complex cases where multiple org_DUNS attest to the same
// "value" (v) for AffiliationAndRole.
if src, ok := affiliations[v]; !ok || src.by == "self" {
by := "so"
if p.SO == nil || *p.SO == "" {
by = "self"
}
affiliations[v] = source{
orgID: *p.OrgID,
by: by,
}
}
}
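	// Worked example of the value construction above (hypothetical inputs):
	// role "pi" with org "Example University, Inc." yields
	// "nih.researcher@Example-University-Inc.orgs.nih.gov" once punctuation is
	// stripped and spaces are replaced with hyphens.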
currUnixTime := now.Unix()
affiliationAsserted := now.Unix()
for a, val := range accessions {
visa := ga4gh.VisaData{
StdClaims: ga4gh.StdClaims{
Subject: token.Subject,
Issuer: s.visaIssuer,
ExpiresAt: val.Expires,
IssuedAt: val.Issued,
},
Assertion: ga4gh.Assertion{
Type: ga4gh.ControlledAccessGrants,
Value: ga4gh.Value("https://dac.nih.gov/datasets/" + a),
Source: dbGapIssuer,
By: ga4gh.DAC,
Asserted: affiliationAsserted,
},
Scope: visaScope,
}
v, err := ga4gh.NewVisaFromData(ctx, &visa, s.visaJKU, s.signer)
if err != nil {
return nil, fmt.Errorf("sign ControlledAccessGrants claim failed: %s", err)
}
id.VisaJWTs = append(id.VisaJWTs, string(v.JWT()))
// Keep the oldest Issued accession for use as affiliationAsserted.
if val.Issued > 0 && val.Issued < affiliationAsserted {
affiliationAsserted = val.Issued
}
}
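	// For a hypothetical accession "phs000123.v1.p1", the loop above signs a
	// ControlledAccessGrants visa whose value is
	// "https://dac.nih.gov/datasets/phs000123.v1.p1".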
for a, src := range affiliations {
// Claim for dbGap
visa := ga4gh.VisaData{
StdClaims: ga4gh.StdClaims{
Issuer: s.visaIssuer,
ExpiresAt: currUnixTime + validSec,
IssuedAt: affiliationAsserted,
},
Assertion: ga4gh.Assertion{
Type: ga4gh.AffiliationAndRole,
Value: ga4gh.Value(a),
Source: dbGapIssuer,
By: ga4gh.System,
Asserted: affiliationAsserted,
},
Scope: visaScope,
}
v, err := ga4gh.NewVisaFromData(ctx, &visa, s.visaJKU, s.signer)
if err != nil {
return nil, fmt.Errorf("sign dbGap ClaimAffiliationAndRole claim failed: %s", err)
}
id.VisaJWTs = append(id.VisaJWTs, string(v.JWT()))
// Claim for org
visa = ga4gh.VisaData{
StdClaims: ga4gh.StdClaims{
Issuer: s.visaIssuer,
ExpiresAt: currUnixTime + validSec,
IssuedAt: affiliationAsserted,
},
Assertion: ga4gh.Assertion{
Type: ga4gh.AffiliationAndRole,
Value: ga4gh.Value(a),
Source: ga4gh.Source(dbGapOrgURL + src.orgID),
By: ga4gh.By(src.by),
Asserted: affiliationAsserted,
},
Scope: visaScope,
}
v, err = ga4gh.NewVisaFromData(ctx, &visa, s.visaJKU, s.signer)
if err != nil
|
{
return nil, fmt.Errorf("sign org ClaimAffiliationAndRole claim failed: %s", err)
}
|
conditional_block
|
|
dbgap.go
|
_info.cgi?${TOKEN}"
dbGapPassportURL = "https://dbgap.ncbi.nlm.nih.gov/aa/jwt/user_passport.cgi?${TOKEN}"
eraCommonsAuthority = "eRA"
visaScope = "openid"
fixedKeyID = "kid"
)
// DbGapTranslator is a ga4gh.Translator that converts dbGap identities into GA4GH identities.
type DbGapTranslator struct {
publicKey *rsa.PublicKey
visaIssuer string
visaJKU string
signer kms.Signer
}
type dbGapStudy struct {
Accession *string `json:"accession"`
}
type dbGapAccess struct {
Study dbGapStudy `json:"study"`
Expires int64 `json:"expires"`
Issued int64 `json:"issued"`
}
type dbGapPassport struct {
Access []dbGapAccess `json:"access"`
Org *string `json:"org"`
OrgID *string `json:"org_DUNS"`
Role *string `json:"role"`
SO *string `json:"so"`
}
type dbGapIdentity struct {
Authority string `json:"authority"`
ID interface{} `json:"id"`
}
type vCard struct {
Email string `json:"email"`
GivenName string `json:"fname"`
FamilyName string `json:"lname"`
Orgs []string `json:"orgs"`
Roles []string `json:"roles"`
}
type dbGapClaims struct {
DbGapPassport []dbGapPassport `json:"dbgap_passport"`
Identity []dbGapIdentity `json:"identity"`
Vcard vCard `json:"vcard"`
}
// dbGapIdToken mocks the OIDC library's idToken implementation, except for minor differences in the types of
// the Audience, Expiry, and IssuedAt fields to facilitate JSON unmarshalling.
type dbGapIdToken struct {
Issuer string `json:"iss"`
Subject string `json:"sub"`
Audience string `json:"aud"`
Expiry int64 `json:"exp"`
IssuedAt int64 `json:"iat"`
Nonce string `json:"nonce"`
AtHash string `json:"at_hash"`
}
const validSec = 3600 * 24 * 60 // 60 days
var removePunctuation = regexp.MustCompile("[^a-zA-Z0-9 ]+")
func convertToOIDCIDToken(token dbGapIdToken) *oidc.IDToken {
return &oidc.IDToken{
Issuer: token.Issuer,
Subject: token.Subject,
Audience: []string{token.Audience},
Expiry: time.Unix(token.Expiry, 0),
IssuedAt: time.Unix(token.IssuedAt, 0),
Nonce: token.Nonce,
AccessTokenHash: token.AtHash,
}
}
// NewDbGapTranslator creates a new DbGapTranslator with the provided public key. If the tokens
// passed to this translator do not have an audience claim with a value equal to the
// clientID value then they will be rejected.
func
|
(publicKey, selfIssuer string, signer kms.Signer) (*DbGapTranslator, error) {
if len(selfIssuer) == 0 {
return nil, fmt.Errorf("NewDbGapTranslator failed, selfIssuer or signingPrivateKey is empty")
}
jku := strings.TrimSuffix(selfIssuer, "/") + "/.well-known/jwks.json"
t := &DbGapTranslator{
visaIssuer: selfIssuer,
visaJKU: jku,
signer: signer,
}
block, _ := pem.Decode([]byte(publicKey))
if block == nil {
return t, nil
}
pub, err := x509.ParsePKCS1PublicKey(block.Bytes)
if err != nil {
return nil, fmt.Errorf("parsing public key: %v", err)
}
t.publicKey = pub
return t, nil
}
// TranslateToken implements the ga4gh.Translator interface.
func (s *DbGapTranslator) TranslateToken(ctx context.Context, auth string) (*ga4gh.Identity, error) {
if err := ga4gh.VerifyTokenWithKey(s.publicKey, auth); err != nil {
return nil, fmt.Errorf("verifying user token signature: %v", err)
}
userInfo, err := s.getURL(dbGapUserInfoURL, auth)
if err != nil {
return nil, fmt.Errorf("getting dbGaP user info: %v", err)
}
if err := ga4gh.VerifyTokenWithKey(s.publicKey, userInfo); err != nil {
return nil, fmt.Errorf("verifying user info token signature: %v", err)
}
passport, err := s.getURL(dbGapPassportURL, auth)
if err != nil {
return nil, fmt.Errorf("getting dbGaP passport: %v", err)
}
if err := ga4gh.VerifyTokenWithKey(s.publicKey, passport); err != nil {
return nil, fmt.Errorf("verifying passport token signature: %v", err)
}
var claims dbGapClaims
var id dbGapIdToken
if err := s.extractClaims(auth, &id, &claims); err != nil {
return nil, fmt.Errorf("extracting user claims: %v", err)
}
if err := s.extractClaims(userInfo, &id, &claims); err != nil {
return nil, fmt.Errorf("extracting user info claims: %v", err)
}
if err := s.extractClaims(passport, &id, &claims); err != nil {
return nil, fmt.Errorf("extracting passport claims: %v", err)
}
return s.translateToken(ctx, convertToOIDCIDToken(id), claims, time.Now())
}
func (s *DbGapTranslator) getURL(url, userTok string) (string, error) {
url = strings.Replace(url, "${TOKEN}", userTok, -1)
get, err := http.Get(url)
if err != nil {
return "", err
}
defer get.Body.Close()
buf := new(bytes.Buffer)
if _, err := buf.ReadFrom(get.Body); err != nil {
return "", fmt.Errorf("reading response body: %v", err)
}
body := buf.String()
if get.StatusCode < 200 || get.StatusCode > 299 {
return "", fmt.Errorf("http status %d: %v", get.StatusCode, body)
}
return body, nil
}
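For readers following along outside Go, a rough equivalent of getURL (a sketch assuming the third-party requests library) looks like:

```python
import requests

def get_url(url_template: str, user_token: str) -> str:
    # Substitute the token placeholder, then treat any non-2xx status as an error.
    url = url_template.replace("${TOKEN}", user_token)
    resp = requests.get(url, timeout=30)
    if not 200 <= resp.status_code <= 299:
        raise RuntimeError(f"http status {resp.status_code}: {resp.text}")
    return resp.text
```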
func (s *DbGapTranslator) extractClaims(tok string, id *dbGapIdToken, claims *dbGapClaims) error {
parsed, err := jwt.ParseSigned(tok)
if err != nil {
return fmt.Errorf("parsing signed token: %v", err)
}
err = parsed.UnsafeClaimsWithoutVerification(id, claims)
if err != nil {
return fmt.Errorf("extracting claims from token: %v", err)
}
return nil
}
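extractClaims deliberately skips signature verification because each token was already verified with VerifyTokenWithKey; the equivalent unverified decode in Python (a sketch assuming the PyJWT package) is:

```python
import jwt  # PyJWT

def extract_claims(token: str) -> dict:
    # Safe only because the signature was checked earlier in TranslateToken.
    return jwt.decode(token, options={"verify_signature": False})
```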
func (s *DbGapTranslator) translateToken(ctx context.Context, token *oidc.IDToken, claims dbGapClaims, now time.Time) (*ga4gh.Identity, error) {
id := ga4gh.Identity{
Issuer: token.Issuer,
Subject: token.Subject,
Expiry: token.Expiry.Unix(),
GivenName: claims.Vcard.GivenName,
FamilyName: claims.Vcard.FamilyName,
Name: strutil.JoinNonEmpty([]string{claims.Vcard.GivenName, claims.Vcard.FamilyName}, " "),
Email: claims.Vcard.Email,
VisaJWTs: []string{},
}
for _, ident := range claims.Identity {
if ident.Authority == eraCommonsAuthority {
if username, ok := ident.ID.(string); ok {
id.Username = username
}
}
}
accessions := make(map[string]dbGapAccess)
type source struct {
orgID string
by string
}
affiliations := make(map[string]source)
for _, p := range claims.DbGapPassport {
for _, a := range p.Access {
if a.Study.Accession == nil {
continue
}
// TODO: Verify that the heuristic for de-duplicating access entries is correct.
ac := *a.Study.Accession
exp := a.Expires
if access, ok := accessions[ac]; ok {
// For duplicate accessions, only keep the one with the later expiry timestamp.
if access.Expires > exp {
continue
}
}
accessions[ac] = dbGapAccess{
Expires: exp,
Issued: a.Issued,
}
}
if p.Org == nil || len(*p.Org) == 0 || p.Role == nil || len(*p.Role) == 0 {
continue
}
var r string
if *p.Role == "pi" || *p.Role == "downloader" {
r = "nih.researcher"
} else {
r = "member"
}
o := removePunctuation.ReplaceAllString(*p.Org, "")
o = strings.ReplaceAll(o, " ", "-")
v := r + "@" + o + ".orgs.nih.gov"
// Does not deal with complex cases where multiple org_DUNS attest to the same
// "value" (v) for Affiliation
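The role mapping and org normalization above compress to a few lines; a standalone sketch (hypothetical Python helper mirroring the logic):

```python
import re

REMOVE_PUNCTUATION = re.compile(r"[^a-zA-Z0-9 ]+")

def affiliation_value(role: str, org: str) -> str:
    # "pi" and "downloader" map to nih.researcher; every other role to member.
    r = "nih.researcher" if role in ("pi", "downloader") else "member"
    o = REMOVE_PUNCTUATION.sub("", org).replace(" ", "-")
    return r + "@" + o + ".orgs.nih.gov"

assert affiliation_value("pi", "Example Univ.") == "nih.researcher@Example-Univ.orgs.nih.gov"
```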
|
NewDbGapTranslator
|
identifier_name
|
dbgap.go
|
_info.cgi?${TOKEN}"
dbGapPassportURL = "https://dbgap.ncbi.nlm.nih.gov/aa/jwt/user_passport.cgi?${TOKEN}"
eraCommonsAuthority = "eRA"
visaScope = "openid"
fixedKeyID = "kid"
)
// DbGapTranslator is a ga4gh.Translator that converts dbGap identities into GA4GH identities.
type DbGapTranslator struct {
publicKey *rsa.PublicKey
visaIssuer string
visaJKU string
signer kms.Signer
}
type dbGapStudy struct {
Accession *string `json:"accession"`
}
type dbGapAccess struct {
Study dbGapStudy `json:"study"`
Expires int64 `json:"expires"`
Issued int64 `json:"issued"`
}
type dbGapPassport struct {
Access []dbGapAccess `json:"access"`
Org *string `json:"org"`
OrgID *string `json:"org_DUNS"`
Role *string `json:"role"`
SO *string `json:"so"`
}
type dbGapIdentity struct {
Authority string `json:"authority"`
ID interface{} `json:"id"`
}
type vCard struct {
Email string `json:"email"`
GivenName string `json:"fname"`
FamilyName string `json:"lname"`
Orgs []string `json:"orgs"`
Roles []string `json:"roles"`
}
type dbGapClaims struct {
DbGapPassport []dbGapPassport `json:"dbgap_passport"`
Identity []dbGapIdentity `json:"identity"`
Vcard vCard `json:"vcard"`
}
// dbGapIdToken mocks the OIDC library's idToken implementation, except for minor differences in the types of
// the Audience, Expiry, and IssuedAt fields to facilitate JSON unmarshalling.
type dbGapIdToken struct {
Issuer string `json:"iss"`
Subject string `json:"sub"`
Audience string `json:"aud"`
Expiry int64 `json:"exp"`
IssuedAt int64 `json:"iat"`
Nonce string `json:"nonce"`
AtHash string `json:"at_hash"`
}
const validSec = 3600 * 24 * 60 // 60 days
var removePunctuation = regexp.MustCompile("[^a-zA-Z0-9 ]+")
func convertToOIDCIDToken(token dbGapIdToken) *oidc.IDToken {
return &oidc.IDToken{
Issuer: token.Issuer,
Subject: token.Subject,
Audience: []string{token.Audience},
Expiry: time.Unix(token.Expiry, 0),
IssuedAt: time.Unix(token.IssuedAt, 0),
Nonce: token.Nonce,
AccessTokenHash: token.AtHash,
}
}
// NewDbGapTranslator creates a new DbGapTranslator with the provided public key. If the tokens
// passed to this translator do not have an audience claim with a value equal to the
// clientID value then they will be rejected.
func NewDbGapTranslator(publicKey, selfIssuer string, signer kms.Signer) (*DbGapTranslator, error)
|
}
t.publicKey = pub
return t, nil
}
// TranslateToken implements the ga4gh.Translator interface.
func (s *DbGapTranslator) TranslateToken(ctx context.Context, auth string) (*ga4gh.Identity, error) {
if err := ga4gh.VerifyTokenWithKey(s.publicKey, auth); err != nil {
return nil, fmt.Errorf("verifying user token signature: %v", err)
}
userInfo, err := s.getURL(dbGapUserInfoURL, auth)
if err != nil {
return nil, fmt.Errorf("getting dbGaP user info: %v", err)
}
if err := ga4gh.VerifyTokenWithKey(s.publicKey, userInfo); err != nil {
return nil, fmt.Errorf("verifying user info token signature: %v", err)
}
passport, err := s.getURL(dbGapPassportURL, auth)
if err != nil {
return nil, fmt.Errorf("getting dbGaP passport: %v", err)
}
if err := ga4gh.VerifyTokenWithKey(s.publicKey, passport); err != nil {
return nil, fmt.Errorf("verifying passport token signature: %v", err)
}
var claims dbGapClaims
var id dbGapIdToken
if err := s.extractClaims(auth, &id, &claims); err != nil {
return nil, fmt.Errorf("extracting user claims: %v", err)
}
if err := s.extractClaims(userInfo, &id, &claims); err != nil {
return nil, fmt.Errorf("extracting user info claims: %v", err)
}
if err := s.extractClaims(passport, &id, &claims); err != nil {
return nil, fmt.Errorf("extracting passport claims: %v", err)
}
return s.translateToken(ctx, convertToOIDCIDToken(id), claims, time.Now())
}
func (s *DbGapTranslator) getURL(url, userTok string) (string, error) {
url = strings.Replace(url, "${TOKEN}", userTok, -1)
get, err := http.Get(url)
if err != nil {
return "", err
}
defer get.Body.Close()
buf := new(bytes.Buffer)
if _, err := buf.ReadFrom(get.Body); err != nil {
return "", fmt.Errorf("reading response body: %v", err)
}
body := buf.String()
if get.StatusCode < 200 || get.StatusCode > 299 {
return "", fmt.Errorf("http status %d: %v", get.StatusCode, body)
}
return body, nil
}
func (s *DbGapTranslator) extractClaims(tok string, id *dbGapIdToken, claims *dbGapClaims) error {
parsed, err := jwt.ParseSigned(tok)
if err != nil {
return fmt.Errorf("parsing signed token: %v", err)
}
err = parsed.UnsafeClaimsWithoutVerification(id, claims)
if err != nil {
return fmt.Errorf("extracting claims from token: %v", err)
}
return nil
}
func (s *DbGapTranslator) translateToken(ctx context.Context, token *oidc.IDToken, claims dbGapClaims, now time.Time) (*ga4gh.Identity, error) {
id := ga4gh.Identity{
Issuer: token.Issuer,
Subject: token.Subject,
Expiry: token.Expiry.Unix(),
GivenName: claims.Vcard.GivenName,
FamilyName: claims.Vcard.FamilyName,
Name: strutil.JoinNonEmpty([]string{claims.Vcard.GivenName, claims.Vcard.FamilyName}, " "),
Email: claims.Vcard.Email,
VisaJWTs: []string{},
}
for _, ident := range claims.Identity {
if ident.Authority == eraCommonsAuthority {
if username, ok := ident.ID.(string); ok {
id.Username = username
}
}
}
accessions := make(map[string]dbGapAccess)
type source struct {
orgID string
by string
}
affiliations := make(map[string]source)
for _, p := range claims.DbGapPassport {
for _, a := range p.Access {
if a.Study.Accession == nil {
continue
}
// TODO: Verify that the heuristic for de-duplicating access entries is correct.
ac := *a.Study.Accession
exp := a.Expires
if access, ok := accessions[ac]; ok {
// For duplicate accessions, only keep the one with the later expiry timestamp.
if access.Expires > exp {
continue
}
}
accessions[ac] = dbGapAccess{
Expires: exp,
Issued: a.Issued,
}
}
if p.Org == nil || len(*p.Org) == 0 || p.Role == nil || len(*p.Role) == 0 {
continue
}
var r string
if *p.Role == "pi" || *p.Role == "downloader" {
r = "nih.researcher"
} else {
r = "member"
}
o := removePunctuation.ReplaceAllString(*p.Org, "")
o = strings.ReplaceAll(o, " ", "-")
v := r + "@" + o + ".orgs.nih.gov"
// Does not deal with complex cases where multiple org_DUNS attest to the same
// "value" (v) for Affiliation
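The keep-the-later-expiry heuristic noted in the TODO above can be isolated; a minimal sketch (hypothetical dict shape for the access entries):

```python
def dedupe_accessions(accesses):
    # Keep only the entry with the latest "expires" per accession.
    best = {}
    for a in accesses:
        acc = a.get("accession")
        if acc is None:
            continue
        if acc in best and best[acc]["expires"] > a["expires"]:
            continue
        best[acc] = {"expires": a["expires"], "issued": a["issued"]}
    return best

rows = [{"accession": "phs0001", "expires": 10, "issued": 1},
        {"accession": "phs0001", "expires": 20, "issued": 2}]
assert dedupe_accessions(rows)["phs0001"]["expires"] == 20
```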
|
{
if len(selfIssuer) == 0 {
return nil, fmt.Errorf("NewDbGapTranslator failed: selfIssuer is empty")
}
jku := strings.TrimSuffix(selfIssuer, "/") + "/.well-known/jwks.json"
t := &DbGapTranslator{
visaIssuer: selfIssuer,
visaJKU: jku,
signer: signer,
}
block, _ := pem.Decode([]byte(publicKey))
if block == nil {
return t, nil
}
pub, err := x509.ParsePKCS1PublicKey(block.Bytes)
if err != nil {
return nil, fmt.Errorf("parsing public key: %v", err)
|
identifier_body
|
dbgap.go
|
/user_info.cgi?${TOKEN}"
dbGapPassportURL = "https://dbgap.ncbi.nlm.nih.gov/aa/jwt/user_passport.cgi?${TOKEN}"
eraCommonsAuthority = "eRA"
visaScope = "openid"
fixedKeyID = "kid"
)
// DbGapTranslator is a ga4gh.Translator that converts dbGap identities into GA4GH identities.
type DbGapTranslator struct {
publicKey *rsa.PublicKey
visaIssuer string
visaJKU string
signer kms.Signer
}
type dbGapStudy struct {
Accession *string `json:"accession"`
}
type dbGapAccess struct {
Study dbGapStudy `json:"study"`
Expires int64 `json:"expires"`
Issued int64 `json:"issued"`
}
type dbGapPassport struct {
Access []dbGapAccess `json:"access"`
Org *string `json:"org"`
OrgID *string `json:"org_DUNS"`
Role *string `json:"role"`
SO *string `json:"so"`
}
type dbGapIdentity struct {
Authority string `json:"authority"`
ID interface{} `json:"id"`
}
type vCard struct {
Email string `json:"email"`
GivenName string `json:"fname"`
FamilyName string `json:"lname"`
Orgs []string `json:"orgs"`
Roles []string `json:"roles"`
}
type dbGapClaims struct {
DbGapPassport []dbGapPassport `json:"dbgap_passport"`
Identity []dbGapIdentity `json:"identity"`
Vcard vCard `json:"vcard"`
}
// dbGapIdToken mocks the OIDC library's idToken implementation, except for minor differences in the types of
// the Audience, Expiry, and IssuedAt fields to facilitate JSON unmarshalling.
type dbGapIdToken struct {
Issuer string `json:"iss"`
Subject string `json:"sub"`
Audience string `json:"aud"`
Expiry int64 `json:"exp"`
IssuedAt int64 `json:"iat"`
Nonce string `json:"nonce"`
AtHash string `json:"at_hash"`
}
const validSec = 3600 * 24 * 60 // 60 days
var removePunctuation = regexp.MustCompile("[^a-zA-Z0-9 ]+")
func convertToOIDCIDToken(token dbGapIdToken) *oidc.IDToken {
return &oidc.IDToken{
Issuer: token.Issuer,
Subject: token.Subject,
Audience: []string{token.Audience},
Expiry: time.Unix(token.Expiry, 0),
IssuedAt: time.Unix(token.IssuedAt, 0),
Nonce: token.Nonce,
AccessTokenHash: token.AtHash,
}
}
// NewDbGapTranslator creates a new DbGapTranslator with the provided public key. If the tokens
// passed to this translator do not have an audience claim with a value equal to the
// clientID value then they will be rejected.
func NewDbGapTranslator(publicKey, selfIssuer string, signer kms.Signer) (*DbGapTranslator, error) {
if len(selfIssuer) == 0 {
return nil, fmt.Errorf("NewDbGapTranslator failed: selfIssuer is empty")
}
jku := strings.TrimSuffix(selfIssuer, "/") + "/.well-known/jwks.json"
t := &DbGapTranslator{
visaIssuer: selfIssuer,
visaJKU: jku,
signer: signer,
}
block, _ := pem.Decode([]byte(publicKey))
if block == nil {
return t, nil
}
pub, err := x509.ParsePKCS1PublicKey(block.Bytes)
if err != nil {
return nil, fmt.Errorf("parsing public key: %v", err)
}
|
}
// TranslateToken implements the ga4gh.Translator interface.
func (s *DbGapTranslator) TranslateToken(ctx context.Context, auth string) (*ga4gh.Identity, error) {
if err := ga4gh.VerifyTokenWithKey(s.publicKey, auth); err != nil {
return nil, fmt.Errorf("verifying user token signature: %v", err)
}
userInfo, err := s.getURL(dbGapUserInfoURL, auth)
if err != nil {
return nil, fmt.Errorf("getting dbGaP user info: %v", err)
}
if err := ga4gh.VerifyTokenWithKey(s.publicKey, userInfo); err != nil {
return nil, fmt.Errorf("verifying user info token signature: %v", err)
}
passport, err := s.getURL(dbGapPassportURL, auth)
if err != nil {
return nil, fmt.Errorf("getting dbGaP passport: %v", err)
}
if err := ga4gh.VerifyTokenWithKey(s.publicKey, passport); err != nil {
return nil, fmt.Errorf("verifying passport token signature: %v", err)
}
var claims dbGapClaims
var id dbGapIdToken
if err := s.extractClaims(auth, &id, &claims); err != nil {
return nil, fmt.Errorf("extracting user claims: %v", err)
}
if err := s.extractClaims(userInfo, &id, &claims); err != nil {
return nil, fmt.Errorf("extracting user info claims: %v", err)
}
if err := s.extractClaims(passport, &id, &claims); err != nil {
return nil, fmt.Errorf("extracting passport claims: %v", err)
}
return s.translateToken(ctx, convertToOIDCIDToken(id), claims, time.Now())
}
func (s *DbGapTranslator) getURL(url, userTok string) (string, error) {
url = strings.Replace(url, "${TOKEN}", userTok, -1)
get, err := http.Get(url)
if err != nil {
return "", err
}
defer get.Body.Close()
buf := new(bytes.Buffer)
if _, err := buf.ReadFrom(get.Body); err != nil {
return "", fmt.Errorf("reading response body: %v", err)
}
body := buf.String()
if get.StatusCode < 200 || get.StatusCode > 299 {
return "", fmt.Errorf("http status %d: %v", get.StatusCode, body)
}
return body, nil
}
func (s *DbGapTranslator) extractClaims(tok string, id *dbGapIdToken, claims *dbGapClaims) error {
parsed, err := jwt.ParseSigned(tok)
if err != nil {
return fmt.Errorf("parsing signed token: %v", err)
}
err = parsed.UnsafeClaimsWithoutVerification(id, claims)
if err != nil {
return fmt.Errorf("extracting claims from token: %v", err)
}
return nil
}
func (s *DbGapTranslator) translateToken(ctx context.Context, token *oidc.IDToken, claims dbGapClaims, now time.Time) (*ga4gh.Identity, error) {
id := ga4gh.Identity{
Issuer: token.Issuer,
Subject: token.Subject,
Expiry: token.Expiry.Unix(),
GivenName: claims.Vcard.GivenName,
FamilyName: claims.Vcard.FamilyName,
Name: strutil.JoinNonEmpty([]string{claims.Vcard.GivenName, claims.Vcard.FamilyName}, " "),
Email: claims.Vcard.Email,
VisaJWTs: []string{},
}
for _, ident := range claims.Identity {
if ident.Authority == eraCommonsAuthority {
if username, ok := ident.ID.(string); ok {
id.Username = username
}
}
}
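The identity scan above reduces to a small filter; sketched standalone in Python (hypothetical dict shape; note the original keeps the last match rather than the first):

```python
ERA_COMMONS_AUTHORITY = "eRA"

def era_username(identities):
    # Pick an eRA Commons identity; ignore ids that are not strings,
    # mirroring the type assertion in the loop above.
    for ident in identities:
        if ident.get("authority") == ERA_COMMONS_AUTHORITY and isinstance(ident.get("id"), str):
            return ident["id"]
    return None

assert era_username([{"authority": "eRA", "id": "jdoe"}]) == "jdoe"
```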
accessions := make(map[string]dbGapAccess)
type source struct {
orgID string
by string
}
affiliations := make(map[string]source)
for _, p := range claims.DbGapPassport {
for _, a := range p.Access {
if a.Study.Accession == nil {
continue
}
// TODO: Verify that the heuristic for de-duplicating access entries is correct.
ac := *a.Study.Accession
exp := a.Expires
if access, ok := accessions[ac]; ok {
// For duplicate accessions, only keep the one with the later expiry timestamp.
if access.Expires > exp {
continue
}
}
accessions[ac] = dbGapAccess{
Expires: exp,
Issued: a.Issued,
}
}
if p.Org == nil || len(*p.Org) == 0 || p.Role == nil || len(*p.Role) == 0 {
continue
}
var r string
if *p.Role == "pi" || *p.Role == "downloader" {
r = "nih.researcher"
} else {
r = "member"
}
o := removePunctuation.ReplaceAllString(*p.Org, "")
o = strings.ReplaceAll(o, " ", "-")
v := r + "@" + o + ".orgs.nih.gov"
// Does not deal with complex cases where multiple org_DUNS attest to the same
// "value" (v) for AffiliationAnd
|
t.publicKey = pub
return t, nil
|
random_line_split
|
HandView.py
|
game specific, and methods that help with that
are in HandManagement.py.
Player can arrange their own hand, and prepare to play cards during other players' turns.
"""
def __init__(self, controller, display, ruleset):
self.controller = controller
self.display = display
self.ruleset = ruleset
self.Meld_Threshold = controller._state.rules.Meld_Threshold
self.deal_size = controller._state.rules.Deal_Size
self.help_text = controller._state.rules.help_text
if ruleset == 'Liverpool':
self.buttons_per_player = self.Meld_Threshold[0][0] + self.Meld_Threshold[0][1]
self.RuleSetsButtons = RuleSetsButtons_LP
elif ruleset == 'HandAndFoot':
self.RuleSetsButtons = RuleSetsButtons_HF
self.hand_scaling = (UIC.scale, UIC.Card_Spacing)
self.current_hand = []
self.last_hand = []
self.hand_info = [] # will contain UICardWrapped elements of current_hand
self.prepared_cards = [] # will contain list of prepared cards from controller
self.discards = []
self.discard_confirm = False
# num_wilds is HandAndFoot specific; it is only non-zero if prepare_card_btn in HandAndFootButtons.py is triggered.
self.num_wilds = 0
self.wild_cards = []
self.selected_list = []
self.round_index = 0
self.round_advance = False
self.num_players = 1
# In Liverpool and other Shared_Board games: prepare cards buttons must be updated each round
self.need_updated_buttons = True
self.ready_color_idx = 2
self.not_ready_color_idx = 6
#
# if someone joins between rounds, then they won't know the correct meld requirement until the round begins.
# (self.controller._state.round = -1 until play commences).
# In HandAndFoot: Correct meld requirement will be written in lower right corner once play commences.
# In Liverpool: Will see correct buttons once round commences.
self.RuleSetsButtons.CreateButtons(self)
def update(self, player_index=0, num_players=1, visible_scards=[]):
"""This updates the view of the hand; between rounds it displays a message."""
self.visible_scards = visible_scards
self.controller._state.player_index = player_index
if self.num_players > num_players and self.controller._state.rules.Shared_Board \
and not self.need_updated_buttons:
# A player has left the game after the round has begun -- make adjustments so game can continue.
self.playerLeftGame(num_players)
self.num_players = num_players
if self.controller._state.round == -1:
self.mesgBetweenRounds(self.help_text)
if self.round_advance:
self.round_index = self.round_index + 1
if self.round_index < len(self.Meld_Threshold):
self.help_text[0] = 'This is the round of ' + str(self.Meld_Threshold[self.round_index]) + ' ! '
self.need_updated_buttons = True # used for Liverpool.
else:
self.help_text = ['Game has concluded. Scores for each round can be found in command window.']
self.round_advance = False
else:
if not self.round_index == self.controller._state.round:
# Need this to true up round_index if a player joins mid-game.
skipped_rounds = self.controller._state.round - self.round_index
for idx in range(skipped_rounds):
#todo: How to score latecomers should be moved to ruleset.
score = 0
self.controller.lateJoinScores(score)
self.round_index = self.controller._state.round
self.round_advance = True
# reset outline colors on ready buttons to what they need to be at the start of the "between rounds" state.
self.ready_color_idx = 2
self.not_ready_color_idx = 6
self.last_hand = self.current_hand
self.current_hand = self.controller.getHand()
if len(self.current_hand) == 0:
self.hand_info = []
elif not self.last_hand == self.current_hand:
self.hand_info = HandManagement.WrapHand(self, self.current_hand, self.hand_info)
HandManagement.ShowHolding(self, self.hand_info) # displays hand
self.RuleSetsButtons.ButtonDisplay(self)
def nextEventWildsOnBoard(self):
"""This runs instead of most of nextEvent when Shared_Board is True and there are ambiguous wild cards.
It is looking for key strokes to designate ambiguous wild cards in runs.
The mouse is ignored until you designate all the wilds (turn phase goes back to play)."""
if self.controller._state.rules.Shared_Board and self.num_wilds > 0:
for self.event in pygame.event.get():
if self.event.type == pygame.QUIT:
# The window crashed, we should handle this
print("pygame crash, AAAHHH")
pygame.quit()
quit()
else:
# in Shared_Board games, check if there are wilds that need to be updated.
# All other events are ignored until play is finished.
HandManagement.wildsHiLoGetInput(self)
def nextEvent(self):
"""This submits the next user input to the controller.
In games with Shared_Board = False (e.g. HandAndFoot) key strokes don't do anything
unless designating values for prepared wild cards, at which time the mouse is ignored
unless you want to clear the prepared cards.
In games with Shared_Board = True, wilds on the board might change designation when other cards are played.
If designation cannot be handled automatically (i.e. if a wild can be at the beginning or end of a run), then
it must be designated before play is completed.
This is done in nextEventWildsOnBoard. All other events are ignored until num_wilds == 0 OR play is canceled."""
if self.controller._state.rules.Shared_Board:
self.num_wilds = len(self.controller.unassigned_wilds_dict.keys())
if self.num_wilds > 0:
self.nextEventWildsOnBoard()
for self.event in pygame.event.get():
if self.event.type == pygame.QUIT:
# The window crashed, we should handle this
print("pygame crash, AAAHHH")
pygame.quit()
quit()
if not self.controller._state.rules.Shared_Board and self.num_wilds > 0:
wild_instructions = 'Use the keyboard to designate your prepared wild cards \r\n '
wild_instructions = wild_instructions + '(use 0 for 10 and J, Q, or K for facecards).'
self.controller.note = wild_instructions
pos = pygame.mouse.get_pos()
if self.event.type == pygame.MOUSEBUTTONDOWN:
self.RuleSetsButtons.ClickedButton(self, pos)
for element in self.hand_info:
# cannot select prepared cards, so not included in logic below.
if element.img_clickable.isOver(pos):
if element.status == 1:
element.status = 0
element.img_clickable.changeOutline(0)
elif element.status == 0:
element.status = 1
element.img_clickable.changeOutline(2)
elif self.event.type == pygame.MOUSEMOTION:
self.RuleSetsButtons.MouseHiLight(self, pos)
HandManagement.MouseHiLight(self.hand_info, pos)
elif self.event.type == pygame.KEYDOWN:
if self.controller._state.rules.Buy_Option:
if self.controller.buying_opportunity:
if self.event.key == pygame.K_y:
self.controller.wantTopCard(True)
self.controller.note = 'You have signaled you want to buy the card.'
elif self.event.key == pygame.K_n:
self.controller.wantTopCard(False)
self.controller.note = 'You have signaled you do not want to buy the card.'
if not self.controller._state.rules.Shared_Board and self.num_wilds > 0:
HandManagement.ManuallyAssign(self)
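The keyboard-designation note above implies a simple key-to-rank mapping; a minimal sketch (hypothetical mapping, assuming pygame key constants) might look like:

```python
import pygame

# Hypothetical key-to-rank mapping implied by the instruction text:
# digits for number cards (0 stands in for 10), J/Q/K for face cards.
KEY_TO_RANK = {pygame.K_0: "10", pygame.K_j: "J", pygame.K_q: "Q", pygame.K_k: "K"}
KEY_TO_RANK.update({getattr(pygame, "K_%d" % n): str(n) for n in range(2, 10)})

def rank_for_key(key):
    return KEY_TO_RANK.get(key)  # None if the key does not designate a rank
```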
def gatherSelected(self):
|
def discardConfirmation(self, confirmed, wrapped_discards):
""" Confirm a user is sure about a discard and then perform it once confirmed."""
discards = []
for element in wrapped_discards:
discards.append(element.card)
if self.discards != discards:
confirmed = False
self.discards = discards
if not confirmed:
self.controller.note = "Please confirm - discard " + "{0}".format(self.discards)
return True # ask for confirmation
else:
# confirmed is True, performing discard and removing discarded wrapped cards from hand_info.
if self.discard_confirm:
controller_response = self.controller.discard(self.discards)
if controller_response:
for element in wrapped_discards:
self.hand_info.remove(element)
return False # now that this is done, we don't have anything waiting on confirmation
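The confirm-then-act pattern used by discardConfirmation can be shown in isolation; a minimal runnable sketch (not the class itself, and ignoring the discard_confirm flag set elsewhere):

```python
class ConfirmTwice:
    """Minimal sketch of the confirm-then-act pattern in discardConfirmation."""
    def __init__(self):
        self.pending = None
    def request(self, items, confirmed):
        if items != self.pending:       # selection changed: restart confirmation
            self.pending, confirmed = items, False
        if not confirmed:
            return True                 # still waiting on the user
        print("discarding", self.pending)
        self.pending = None
        return False

c = ConfirmTwice()
assert c.request(["3H"], False) is True   # first call asks for confirmation
assert c.request(["3H"], True) is False   # second call performs the action
```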
def mesgBetweenRounds(self, message):
"""Print a message where cards are usually displayed, until the Ready button is clicked for the next round."""
font = UIC.Medium_Text
|
""" gathers selected cards
in order to take action on selected cards (either discarding them or preparing them)
"""
self.selected_list = []
for element in self.hand_info:
if element.status == 1:
self.selected_list.append(element)
return self.selected_list
|
identifier_body
|
HandView.py
|
game specific, and methods that help with that
are in HandManagement.py.
Player can arrange their own hand, and prepare to play cards during other players' turns.
"""
|
self.Meld_Threshold = controller._state.rules.Meld_Threshold
self.deal_size = controller._state.rules.Deal_Size
self.help_text = controller._state.rules.help_text
if ruleset == 'Liverpool':
self.buttons_per_player = self.Meld_Threshold[0][0] + self.Meld_Threshold[0][1]
self.RuleSetsButtons = RuleSetsButtons_LP
elif ruleset == 'HandAndFoot':
self.RuleSetsButtons = RuleSetsButtons_HF
self.hand_scaling = (UIC.scale, UIC.Card_Spacing)
self.current_hand = []
self.last_hand = []
self.hand_info = [] # will contain UICardWrapped elements of current_hand
self.prepared_cards = [] # will contain list of prepared cards from controller
self.discards = []
self.discard_confirm = False
# num_wilds is HandAndFoot specific; it is only non-zero if prepare_card_btn in HandAndFootButtons.py is triggered.
self.num_wilds = 0
self.wild_cards = []
self.selected_list = []
self.round_index = 0
self.round_advance = False
self.num_players = 1
# In Liverpool and other Shared_Board games: prepare cards buttons must be updated each round
self.need_updated_buttons = True
self.ready_color_idx = 2
self.not_ready_color_idx = 6
#
# if someone joins between rounds, then they won't know the correct meld requirement until the round begins.
# (self.controller._state.round = -1 until play commences).
# In HandAndFoot: Correct meld requirement will be written in lower right corner once play commences.
# In Liverpool: Will see correct buttons once round commences.
self.RuleSetsButtons.CreateButtons(self)
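For Liverpool, Meld_Threshold is assumed here to be a list of (sets, runs) pairs per round, so the first round's button count is sets + runs; a small illustration with hypothetical values:

```python
# Hypothetical Liverpool thresholds: (sets, runs) required to meld each round.
MELD_THRESHOLD = [(2, 0), (1, 1), (0, 2)]

buttons_per_player = MELD_THRESHOLD[0][0] + MELD_THRESHOLD[0][1]
assert buttons_per_player == 2  # one prepare-cards button per required group
```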
def update(self, player_index=0, num_players=1, visible_scards=[]):
"""This updates the view of the hand; between rounds it displays a message."""
self.visible_scards = visible_scards
self.controller._state.player_index = player_index
if self.num_players > num_players and self.controller._state.rules.Shared_Board \
and not self.need_updated_buttons:
# A player has left the game after the round has begun -- make adjustments so game can continue.
self.playerLeftGame(num_players)
self.num_players = num_players
if self.controller._state.round == -1:
self.mesgBetweenRounds(self.help_text)
if self.round_advance:
self.round_index = self.round_index + 1
if self.round_index < len(self.Meld_Threshold):
self.help_text[0] = 'This is the round of ' + str(self.Meld_Threshold[self.round_index]) + ' ! '
self.need_updated_buttons = True # used for Liverpool.
else:
self.help_text = ['Game has concluded. Scores for each round can be found in command window.']
self.round_advance = False
else:
if not self.round_index == self.controller._state.round:
# Need this to true up round_index if a player joins mid-game.
skipped_rounds = self.controller._state.round - self.round_index
for idx in range(skipped_rounds):
#todo: How to score latecomers should be moved to ruleset.
score = 0
self.controller.lateJoinScores(score)
self.round_index = self.controller._state.round
self.round_advance = True
# reset outline colors on ready buttons to what they need to be at the start of the "between rounds" state.
self.ready_color_idx = 2
self.not_ready_color_idx = 6
self.last_hand = self.current_hand
self.current_hand = self.controller.getHand()
if len(self.current_hand) == 0:
self.hand_info = []
elif not self.last_hand == self.current_hand:
self.hand_info = HandManagement.WrapHand(self, self.current_hand, self.hand_info)
HandManagement.ShowHolding(self, self.hand_info) # displays hand
self.RuleSetsButtons.ButtonDisplay(self)
def nextEventWildsOnBoard(self):
"""This runs instead of most of nextEvent when Shared_Board is True and there are ambiguous wild cards.
It is looking for key strokes to designate ambiguous wild cards in runs.
The mouse is ignored until you designate all the wilds (turn phase goes back to play)."""
if self.controller._state.rules.Shared_Board and self.num_wilds > 0:
for self.event in pygame.event.get():
if self.event.type == pygame.QUIT:
# The window crashed, we should handle this
print("pygame crash, AAAHHH")
pygame.quit()
quit()
else:
# in Shared_Board games, check if there are wilds that need to be updated.
# All other events are ignored until play is finished.
HandManagement.wildsHiLoGetInput(self)
def nextEvent(self):
"""This submits the next user input to the controller.
In games with Shared_Board = False (e.g. HandAndFoot) key strokes don't do anything
unless designating values for prepared wild cards, at which time the mouse is ignored
unless you want to clear the prepared cards.
In games with Shared_Board = True, wilds on the board might change designation when other cards are played.
If designation cannot be handled automatically (i.e. if a wild can be at the beginning or end of a run), then
it must be designated before play is completed.
This is done in nextEventWildsOnBoard. All other events are ignored until num_wilds == 0 OR play is canceled."""
if self.controller._state.rules.Shared_Board:
self.num_wilds = len(self.controller.unassigned_wilds_dict.keys())
if self.num_wilds > 0:
self.nextEventWildsOnBoard()
for self.event in pygame.event.get():
if self.event.type == pygame.QUIT:
# The window crashed, we should handle this
print("pygame crash, AAAHHH")
pygame.quit()
quit()
if not self.controller._state.rules.Shared_Board and self.num_wilds > 0:
wild_instructions = 'Use the keyboard to designate your prepared wild cards \r\n '
wild_instructions = wild_instructions + '(use 0 for 10 and J, Q, or K for facecards).'
self.controller.note = wild_instructions
pos = pygame.mouse.get_pos()
if self.event.type == pygame.MOUSEBUTTONDOWN:
self.RuleSetsButtons.ClickedButton(self, pos)
for element in self.hand_info:
# cannot select prepared cards, so not included in logic below.
if element.img_clickable.isOver(pos):
if element.status == 1:
element.status = 0
element.img_clickable.changeOutline(0)
elif element.status == 0:
element.status = 1
element.img_clickable.changeOutline(2)
elif self.event.type == pygame.MOUSEMOTION:
self.RuleSetsButtons.MouseHiLight(self, pos)
HandManagement.MouseHiLight(self.hand_info, pos)
elif self.event.type == pygame.KEYDOWN:
if self.controller._state.rules.Buy_Option:
if self.controller.buying_opportunity:
if self.event.key == pygame.K_y:
self.controller.wantTopCard(True)
self.controller.note = 'You have signaled you want to buy the card.'
elif self.event.key == pygame.K_n:
self.controller.wantTopCard(False)
self.controller.note = 'You have signaled you do not want to buy the card.'
if not self.controller._state.rules.Shared_Board and self.num_wilds > 0:
HandManagement.ManuallyAssign(self)
def gatherSelected(self):
""" gathers selected cards
in order to take action on selected cards (either discarding them or preparing them)
"""
self.selected_list = []
for element in self.hand_info:
if element.status == 1:
self.selected_list.append(element)
return self.selected_list
def discardConfirmation(self, confirmed, wrapped_discards):
""" Confirm a user is sure about a discard and then perform it once confirmed."""
discards = []
for element in wrapped_discards:
discards.append(element.card)
if self.discards != discards:
confirmed = False
self.discards = discards
if not confirmed:
self.controller.note = "Please confirm - discard " + "{0}".format(self.discards)
return True # ask for confirmation
else:
# confirmed is True, performing discard and removing discarded wrapped cards from hand_info.
if self.discard_confirm:
controller_response = self.controller.discard(self.discards)
if controller_response:
for element in wrapped_discards:
self.hand_info.remove(element)
return False # now that this is done, we don't have anything waiting on confirmation
def mesgBetweenRounds(self, message):
"""Print a message where cards are usually displayed, until the Ready button is clicked for the next round."""
font = UIC.Medium_Text
y
|
def __init__(self, controller, display, ruleset):
self.controller = controller
self.display = display
self.ruleset = ruleset
|
random_line_split
|
HandView.py
|
game specific, and methods that help with that
are in HandManagement.py.
Player can arrange their own hand, and prepare to play cards during other players' turns.
"""
def
|
(self, controller, display, ruleset):
self.controller = controller
self.display = display
self.ruleset = ruleset
self.Meld_Threshold = controller._state.rules.Meld_Threshold
self.deal_size = controller._state.rules.Deal_Size
self.help_text = controller._state.rules.help_text
if ruleset == 'Liverpool':
self.buttons_per_player = self.Meld_Threshold[0][0] + self.Meld_Threshold[0][1]
self.RuleSetsButtons = RuleSetsButtons_LP
elif ruleset == 'HandAndFoot':
self.RuleSetsButtons = RuleSetsButtons_HF
self.hand_scaling = (UIC.scale, UIC.Card_Spacing)
self.current_hand = []
self.last_hand = []
self.hand_info = [] # will contain UICardWrapped elements of current_hand
self.prepared_cards = [] # will contain list of prepared cards from controller
self.discards = []
self.discard_confirm = False
# num_wilds is HandAndFoot specific; it is only non-zero if prepare_card_btn in HandAndFootButtons.py is triggered.
self.num_wilds = 0
self.wild_cards = []
self.selected_list = []
self.round_index = 0
self.round_advance = False
self.num_players = 1
# In Liverpool and other Shared_Board games: prepare cards buttons must be updated each round
self.need_updated_buttons = True
self.ready_color_idx = 2
self.not_ready_color_idx = 6
#
# if someone joins between rounds, then they won't know the correct meld requirement until the round begins.
# (self.controller._state.round = -1 until play commences).
# In HandAndFoot: Correct meld requirement will be written in lower right corner once play commences.
# In Liverpool: Will see correct buttons once round commences.
self.RuleSetsButtons.CreateButtons(self)
def update(self, player_index=0, num_players=1, visible_scards=[]):
"""This updates the view of the hand; between rounds it displays a message."""
self.visible_scards = visible_scards
self.controller._state.player_index = player_index
if self.num_players > num_players and self.controller._state.rules.Shared_Board \
and not self.need_updated_buttons:
# A player has left the game after the round has begun -- make adjustments so game can continue.
self.playerLeftGame(num_players)
self.num_players = num_players
if self.controller._state.round == -1:
self.mesgBetweenRounds(self.help_text)
if self.round_advance:
self.round_index = self.round_index + 1
if self.round_index < len(self.Meld_Threshold):
self.help_text[0] = 'This is the round of ' + str(self.Meld_Threshold[self.round_index]) + ' ! '
self.need_updated_buttons = True # used for Liverpool.
else:
self.help_text = ['Game has concluded. Scores for each round can be found in command window.']
self.round_advance = False
else:
if not self.round_index == self.controller._state.round:
# Need this to true up round_index if a player joins mid-game.
skipped_rounds = self.controller._state.round - self.round_index
for idx in range(skipped_rounds):
#todo: How to score latecomers should be moved to ruleset.
score = 0
self.controller.lateJoinScores(score)
self.round_index = self.controller._state.round
self.round_advance = True
# reset outline colors on ready buttons to what they need to be at the start of the "between rounds" state.
self.ready_color_idx = 2
self.not_ready_color_idx = 6
self.last_hand = self.current_hand
self.current_hand = self.controller.getHand()
if len(self.current_hand) == 0:
self.hand_info = []
elif not self.last_hand == self.current_hand:
self.hand_info = HandManagement.WrapHand(self, self.current_hand, self.hand_info)
HandManagement.ShowHolding(self, self.hand_info) # displays hand
self.RuleSetsButtons.ButtonDisplay(self)
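The round true-up loop for late joiners, shown above in update(), can be illustrated in isolation (a runnable sketch mirroring the logic):

```python
def true_up(round_index, server_round, late_join_scores):
    # A late joiner missed (server_round - round_index) rounds; record a
    # zero score for each so the scorekeeping stays aligned.
    for _ in range(server_round - round_index):
        late_join_scores(0)
    return server_round

scores = []
assert true_up(0, 3, scores.append) == 3
assert scores == [0, 0, 0]
```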
def nextEventWildsOnBoard(self):
"""This runs instead of most of nextEvent when Shared_Board is True and there are ambiguous wild cards.
It is looking for key strokes to designate ambiguous wild cards in runs.
The mouse is ignored until you designate all the wilds (turn phase goes back to play)."""
if self.controller._state.rules.Shared_Board and self.num_wilds > 0:
for self.event in pygame.event.get():
if self.event.type == pygame.QUIT:
# The window crashed, we should handle this
print("pygame crash, AAAHHH")
pygame.quit()
quit()
else:
# in Shared_Board games, check if there are wilds that need to be updated.
# All other events are ignored until play is finished.
HandManagement.wildsHiLoGetInput(self)
def nextEvent(self):
"""This submits the next user input to the controller.
In games with Shared_Board = False (e.g. HandAndFoot) key strokes don't do anything
unless designating values for prepared wild cards, at which time the mouse is ignored
unless you want to clear the prepared cards.
In games with Shared_Board = True, wilds on the board might change designation when other cards are played.
If designation cannot be handled automatically (i.e. if a wild can be at the beginning or end of a run), then
it must be designated before play is completed.
This is done in nextEventWildsOnBoard. All other events are ignored until num_wilds == 0 OR play is canceled."""
if self.controller._state.rules.Shared_Board:
self.num_wilds = len(self.controller.unassigned_wilds_dict.keys())
if self.num_wilds > 0:
self.nextEventWildsOnBoard()
for self.event in pygame.event.get():
if self.event.type == pygame.QUIT:
# The window crashed, we should handle this
print("pygame crash, AAAHHH")
pygame.quit()
quit()
if not self.controller._state.rules.Shared_Board and self.num_wilds > 0:
wild_instructions = 'Use the keyboard to designate your prepared wild cards \r\n '
wild_instructions = wild_instructions + '(use 0 for 10 and J, Q, or K for facecards).'
self.controller.note = wild_instructions
pos = pygame.mouse.get_pos()
if self.event.type == pygame.MOUSEBUTTONDOWN:
self.RuleSetsButtons.ClickedButton(self, pos)
for element in self.hand_info:
# cannot select prepared cards, so not included in logic below.
if element.img_clickable.isOver(pos):
if element.status == 1:
element.status = 0
element.img_clickable.changeOutline(0)
elif element.status == 0:
element.status = 1
element.img_clickable.changeOutline(2)
elif self.event.type == pygame.MOUSEMOTION:
self.RuleSetsButtons.MouseHiLight(self, pos)
HandManagement.MouseHiLight(self.hand_info, pos)
elif self.event.type == pygame.KEYDOWN:
if self.controller._state.rules.Buy_Option:
if self.controller.buying_opportunity:
if self.event.key == pygame.K_y:
self.controller.wantTopCard(True)
self.controller.note = 'You have signaled you want to buy the card.'
elif self.event.key == pygame.K_n:
self.controller.wantTopCard(False)
self.controller.note = 'You have signaled you do not want to buy the card.'
if not self.controller._state.rules.Shared_Board and self.num_wilds > 0:
HandManagement.ManuallyAssign(self)
def gatherSelected(self):
""" gathers selected cards
in order to take action on selected cards (either discarding them or preparing them)
"""
self.selected_list = []
for element in self.hand_info:
if element.status == 1:
self.selected_list.append(element)
return self.selected_list
def discardConfirmation(self, confirmed, wrapped_discards):
""" Confirm a user is sure about a discard and then perform it once confirmed."""
discards = []
for element in wrapped_discards:
discards.append(element.card)
if self.discards != discards:
confirmed = False
self.discards = discards
if not confirmed:
self.controller.note = "Please confirm - discard " + "{0}".format(self.discards)
return True # ask for confirmation
else:
# confirmed is True, performing discard and removing discarded wrapped cards from hand_info.
if self.discard_confirm:
controller_response = self.controller.discard(self.discards)
if controller_response:
for element in wrapped_discards:
self.hand_info.remove(element)
return False # now that this is done, we don't have anything waiting on confirmation
def mesgBetweenRounds(self, message):
"""Print a message where cards are usually displayed, until the Ready button is clicked for the next round."""
font = UIC.Medium_Text
|
__init__
|
identifier_name
|
HandView.py
|
game specific, and methods that help with that
are in HandManagement.py.
Player can arrange their own hand, and prepare to play cards during other players' turns.
"""
def __init__(self, controller, display, ruleset):
self.controller = controller
self.display = display
self.ruleset = ruleset
self.Meld_Threshold = controller._state.rules.Meld_Threshold
self.deal_size = controller._state.rules.Deal_Size
self.help_text = controller._state.rules.help_text
if ruleset == 'Liverpool':
self.buttons_per_player = self.Meld_Threshold[0][0] + self.Meld_Threshold[0][1]
self.RuleSetsButtons = RuleSetsButtons_LP
elif ruleset == 'HandAndFoot':
self.RuleSetsButtons = RuleSetsButtons_HF
self.hand_scaling = (UIC.scale, UIC.Card_Spacing)
self.current_hand = []
self.last_hand = []
self.hand_info = [] # will contain UICardWrapped elements of current_hand
self.prepared_cards = [] # will contain list of prepared cards from controller
self.discards = []
self.discard_confirm = False
# num_wilds is HandAndFoot specific; it is only non-zero if prepare_card_btn in HandAndFootButtons.py is triggered.
self.num_wilds = 0
self.wild_cards = []
self.selected_list = []
self.round_index = 0
self.round_advance = False
self.num_players = 1
# In Liverpool and other Shared_Board games: prepare cards buttons must be updated each round
self.need_updated_buttons = True
self.ready_color_idx = 2
self.not_ready_color_idx = 6
#
# if someone joins between rounds, then they won't know the correct meld requirement until the round begins.
# (self.controller._state.round = -1 until play commences).
# In HandAndFoot: Correct meld requirement will be written in lower right corner once play commences.
# In Liverpool: Will see correct buttons once round commences.
self.RuleSetsButtons.CreateButtons(self)
def update(self, player_index=0, num_players=1, visible_scards=[]):
"""This updates the view of the hand; between rounds it displays a message."""
self.visible_scards = visible_scards
self.controller._state.player_index = player_index
if self.num_players > num_players and self.controller._state.rules.Shared_Board \
and not self.need_updated_buttons:
# A player has left the game after the round has begun -- make adjustments so game can continue.
self.playerLeftGame(num_players)
self.num_players = num_players
if self.controller._state.round == -1:
self.mesgBetweenRounds(self.help_text)
if self.round_advance:
self.round_index = self.round_index + 1
if self.round_index < len(self.Meld_Threshold):
self.help_text[0] = 'This is the round of ' + str(self.Meld_Threshold[self.round_index]) + ' ! '
self.need_updated_buttons = True # used for Liverpool.
else:
self.help_text = ['Game has concluded. Scores for each round can be found in command window.']
self.round_advance = False
else:
if not self.round_index == self.controller._state.round:
# Need this to true up round_index if a player joins mid-game.
skipped_rounds = self.controller._state.round - self.round_index
for idx in range(skipped_rounds):
#todo: How to score latecomers should be moved to ruleset.
score = 0
self.controller.lateJoinScores(score)
self.round_index = self.controller._state.round
self.round_advance = True
# reset outline colors on ready buttons to what they need to be at the start of the "between rounds" state.
self.ready_color_idx = 2
self.not_ready_color_idx = 6
self.last_hand = self.current_hand
self.current_hand = self.controller.getHand()
if len(self.current_hand) == 0:
self.hand_info = []
elif not self.last_hand == self.current_hand:
self.hand_info = HandManagement.WrapHand(self, self.current_hand, self.hand_info)
HandManagement.ShowHolding(self, self.hand_info) # displays hand
self.RuleSetsButtons.ButtonDisplay(self)
def nextEventWildsOnBoard(self):
"""This runs instead of most of nextEvent when Shared_Board is True and there are ambiguous wild cards.
It is looking for key strokes to designate ambiguous wild cards in runs.
The mouse is ignored until you designate all the wilds (turn phase goes back to play)."""
if self.controller._state.rules.Shared_Board and self.num_wilds > 0:
for self.event in pygame.event.get():
if self.event.type == pygame.QUIT:
# The window crashed, we should handle this
print("pygame crash, AAAHHH")
pygame.quit()
quit()
else:
# in Shared_Board games, check if there are wilds that need to be updated.
# All other events are ignored until play is finished.
HandManagement.wildsHiLoGetInput(self)
def nextEvent(self):
"""This submits the next user input to the controller.
In games with Shared_Board = False (e.g. HandAndFoot) key strokes don't do anything
unless designating values for prepared wild cards, at which time the mouse is ignored
unless you want to clear the prepared cards.
In games with Shared_Board = True, wilds on the board might change designation when other cards are played.
If designation cannot be handled automatically (i.e. if a wild can be at the beginning or end of a run), then
it must be designated before play is completed.
This is done in nextEventWildsOnBoard. All other events are ignored until num_wilds == 0 OR play is canceled."""
if self.controller._state.rules.Shared_Board:
self.num_wilds = len(self.controller.unassigned_wilds_dict.keys())
if self.num_wilds > 0:
self.nextEventWildsOnBoard()
for self.event in pygame.event.get():
if self.event.type == pygame.QUIT:
# The window crashed, we should handle this
print("pygame crash, AAAHHH")
pygame.quit()
quit()
if not self.controller._state.rules.Shared_Board and self.num_wilds > 0:
wild_instructions = 'Use the keyboard to designate your prepared wild cards \r\n '
wild_instructions = wild_instructions + '(use 0 for 10 and J, Q, or K for facecards).'
self.controller.note = wild_instructions
pos = pygame.mouse.get_pos()
if self.event.type == pygame.MOUSEBUTTONDOWN:
|
elif self.event.type == pygame.MOUSEMOTION:
self.RuleSetsButtons.MouseHiLight(self, pos)
HandManagement.MouseHiLight(self.hand_info, pos)
elif self.event.type == pygame.KEYDOWN:
if self.controller._state.rules.Buy_Option:
if self.controller.buying_opportunity:
if self.event.key == pygame.K_y:
self.controller.wantTopCard(True)
self.controller.note = 'You have signaled you want to buy the card.'
elif self.event.key == pygame.K_n:
self.controller.wantTopCard(False)
self.controller.note = 'You have signaled you do not want to buy the card.'
if not self.controller._state.rules.Shared_Board and self.num_wilds > 0:
HandManagement.ManuallyAssign(self)
def gatherSelected(self):
""" gathers selected cards
in order to take action on selected cards (either discarding them or preparing them)
"""
self.selected_list = []
for element in self.hand_info:
if element.status == 1:
self.selected_list.append(element)
return self.selected_list
def discardConfirmation(self, confirmed, wrapped_discards):
""" Confirm a user is sure about a discard and then perform it once confirmed."""
discards = []
for element in wrapped_discards:
discards.append(element.card)
if self.discards != discards:
confirmed = False
self.discards = discards
if not confirmed:
self.controller.note = "Please confirm - discard " + "{0}".format(self.discards)
return True # ask for confirmation
else:
# confirmed is True, performing discard and removing discarded wrapped cards from hand_info.
if self.discard_confirm:
controller_response = self.controller.discard(self.discards)
if controller_response:
for element in wrapped_discards:
self.hand_info.remove(element)
return False # now that this is done, we don't have anything waiting on confirmation
def mesgBetweenRounds(self, message):
"""Print a message where cards are usually displayed, until the Ready button is clicked for the next round."""
font = UIC.Medium_Text
|
self.RuleSetsButtons.ClickedButton(self, pos)
for element in self.hand_info:
# cannot select prepared cards, so not included in logic below.
if element.img_clickable.isOver(pos):
if element.status == 1:
element.status = 0
element.img_clickable.changeOutline(0)
elif element.status == 0:
element.status = 1
element.img_clickable.changeOutline(2)
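The click handler above toggles a card's selection state and mirrors it in the outline width; a standalone sketch of that toggle (hypothetical class):

```python
class CardSelection:
    def __init__(self):
        self.status = 0   # 0 = unselected, 1 = selected
        self.outline = 0
    def toggle(self):
        # Flip selection and mirror it in the outline width, as the click
        # handler does with changeOutline(0) / changeOutline(2).
        self.status = 1 - self.status
        self.outline = 2 if self.status else 0

c = CardSelection()
c.toggle()
assert (c.status, c.outline) == (1, 2)
```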
|
conditional_block
|
ft_cityscapes.py
|
_fwavacc_0.91834.pth', # empty string denotes no snapshot
'snapshot':'epoch_42_loss_0.00916_acc_0.95598_acc-cls_0.58651_mean-iu_0.50990.pth',
'print_freq': 30,
'val_batch_size': 16,
'val_save_to_img_file': False,
'val_img_sample_rate': 0.05 # randomly sample some validation results to display
}
def init_weights(m):
if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
|
def parse_args():
parser = argparse.ArgumentParser(description='Games Semantic Segmentation FCN8')
parser.add_argument('--gpu', type=str, default='0,1', help='gpu id')
parser.add_argument('--epochs', type=int, default=50, help='number of epochs to run')
parser.add_argument('--seed', type=int, default=47, help='seed for training')
args = parser.parse_args()
return args
def main():
# args = parse_args()
torch.backends.cudnn.benchmark = True
os.environ["CUDA_VISIBLE_DEVICES"] = '0,1'
device = torch.device('cuda:0' if torch.cuda.is_available() else "cpu")
# # if args.seed:
# random.seed(args.seed)
# np.random.seed(args.seed)
# torch.manual_seed(args.seed)
# # if args.gpu:
# torch.cuda.manual_seed_all(args.seed)
seed = 63
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
# if args.gpu:
torch.cuda.manual_seed_all(seed)
mean_std = ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
# train_transforms = transforms.Compose([
# transforms.RandomCrop(args['crop_size']),
# transforms.RandomRotation(90),
# transforms.RandomHorizontalFlip(p=0.5),
# transforms.RandomVerticalFlip(p=0.5),
# ])
short_size = int(min(args['input_size']) / 0.875)
# val_transforms = transforms.Compose([
# transforms.Scale(short_size, interpolation=Image.NEAREST),
# # joint_transforms.Scale(short_size),
# transforms.CenterCrop(args['input_size'])
# ])
train_joint_transform = joint_transforms.Compose([
# joint_transforms.Scale(short_size),
joint_transforms.RandomCrop(args['crop_size']),
joint_transforms.RandomHorizontallyFlip(),
joint_transforms.RandomRotate(90)
])
val_joint_transform = joint_transforms.Compose([
joint_transforms.Scale(short_size),
joint_transforms.CenterCrop(args['input_size'])
])
input_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(*mean_std)
])
target_transform = extended_transforms.MaskToTensor()
restore_transform = transforms.Compose([
extended_transforms.DeNormalize(*mean_std),
transforms.ToPILImage()
])
visualize = transforms.ToTensor()
train_set = cityscapes.CityScapes('train', joint_transform=train_joint_transform,
transform=input_transform, target_transform=target_transform)
# train_set = cityscapes.CityScapes('train', transform=train_transforms)
train_loader = DataLoader(train_set, batch_size=args['train_batch_size'], num_workers=8, shuffle=True)
val_set = cityscapes.CityScapes('val', joint_transform=val_joint_transform, transform=input_transform,
target_transform=target_transform)
# val_set = cityscapes.CityScapes('val', transform=val_transforms)
val_loader = DataLoader(val_set, batch_size=args['val_batch_size'], num_workers=8, shuffle=True)
print(len(train_loader), len(val_loader))
vgg_model = VGGNet(requires_grad=True, remove_fc=True)
net = FCN8s(pretrained_net=vgg_model, n_class=cityscapes.num_classes, dropout_rate=0.4)
# net.apply(init_weights)
criterion = nn.CrossEntropyLoss(ignore_index=cityscapes.ignore_label)
optimizer = optim.Adam(net.parameters(), lr=1e-4)
check_mkdir(ft_ckpt_path)
check_mkdir(os.path.join(ft_ckpt_path, ft_exp_name))
with open(os.path.join(ft_ckpt_path, ft_exp_name, str(datetime.datetime.now()) + '.txt'), 'w') as f:
    f.write(str(args) + '\n\n')
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=args['lr_patience'], min_lr=1e-10)
vgg_model = vgg_model.to(device)
net = net.to(device)
if torch.cuda.device_count()>1:
net = nn.DataParallel(net)
# if len(args['snapshot']) == 0:
# curr_epoch = 1
# args['best_record'] = {'epoch': 0, 'val_loss': 1e10, 'acc': 0, 'acc_cls': 0, 'mean_iu': 0}
# else:
# print('training resumes from ' + args['snapshot'])
net.load_state_dict(torch.load(os.path.join(ckpt_path, exp_name, args['snapshot'])))
# split_snapshot = args['snapshot'].split('_')
# curr_epoch = int(split_snapshot[1]) + 1
curr_epoch = 1
args['best_record'] = {'epoch': 0, 'val_loss': 1e10, 'acc': 0, 'acc_cls': 0, 'mean_iu': 0}
# args['best_record'] = {'epoch': int(split_snapshot[1]), 'val_loss': float(split_snapshot[3]),
# 'acc': float(split_snapshot[5]), 'acc_cls': float(split_snapshot[7]),
# 'mean_iu': float(split_snapshot[9][:-4])}
criterion.to(device)
for epoch in range(curr_epoch, args['epoch_num'] + 1):
train(train_loader, net, device, criterion, optimizer, epoch, args)
val_loss = validate(val_loader, net, device, criterion, optimizer, epoch, args, restore_transform, visualize)
scheduler.step(val_loss)
def train(train_loader, net, device, criterion, optimizer, epoch, train_args):
net.train()
train_loss = AverageMeter()
curr_iter = (epoch - 1) * len(train_loader)
targets_all, preds_all = [], []
for i, data in enumerate(train_loader):
inputs, targets = data
assert inputs.size()[2:] == targets.size()[1:]
N = inputs.size(0)
inputs, targets = inputs.to(device), targets.to(device)
optimizer.zero_grad()
outputs = net(inputs)
assert outputs.size()[2:] == targets.size()[1:]
assert outputs.size()[1] == cityscapes.num_classes
loss = criterion(outputs, targets) / N
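# NB: CrossEntropyLoss with its default 'mean' reduction already averages over pixels, so this extra
# division by N further rescales the loss -- likely why the losses in the snapshot names are so small.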
loss.backward()
optimizer.step()
train_loss.update(loss.data, N)
targets_all.append(targets.data.cpu().numpy())
preds_all.append(outputs.data.max(1)[1].squeeze_(1).cpu().numpy())
curr_iter += 1
writer.add_scalar('train_loss', train_loss.avg, curr_iter)
if (i + 1) % train_args['print_freq'] == 0:
print('[epoch %d], [iter %d / %d], [train loss %.5f]' % (
epoch, i + 1, len(train_loader), train_loss.avg))
targets_all = np.concatenate(targets_all)
preds_all = np.concatenate(preds_all)
acc, acc_cls, mean_iou, _ = evaluate(preds_all, targets_all, cityscapes.num_classes)
print('-----------------------------------------------------------------------------------------------------------')
print('[epoch %d], [acc %.5f], [acc_cls %.5f], [mean_iu %.5f]' % (
epoch, acc, acc_cls, mean_iou))
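The evaluate call above returns pixel accuracy, per-class accuracy, and mean IoU; a common way to compute these from a confusion matrix (a sketch, not necessarily the exact implementation behind evaluate) is:

```python
import numpy as np

def metrics_from_confusion(hist: np.ndarray):
    # hist[i, j] counts pixels with ground truth i predicted as j.
    acc = np.diag(hist).sum() / hist.sum()
    acc_cls = np.nanmean(np.diag(hist) / hist.sum(axis=1))
    iou = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))
    return acc, acc_cls, np.nanmean(iou)

hist = np.array([[8, 2], [1, 9]], dtype=float)
acc, acc_cls, miou = metrics_from_confusion(hist)
assert round(acc, 2) == 0.85
```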
def validate(val_loader, net, device, criterion, optimizer, epoch, train_args, restore, visualize, finetuning=True):
net.eval()
val_loss = AverageMeter()
inputs_all, gts_all, predictions_all = [], [], []
with torch.no_grad():
for vi, data in enumerate(val_loader):
inputs, gts = data
N = inputs.size(0)
inputs, gts = inputs.to(device), gts.to(device)
outputs = net(inputs)
predictions = outputs.data.max(1)[1].squeeze_(1).cpu().numpy()
val_loss.update(criterion(outputs, gts).data / N, N)
for i in inputs:
if random.random() > train_args['val_img_sample_rate']:
inputs_all.append(None)
else:
inputs_all.append(i.data.cpu())
gts_all.append(gts.data.cpu().numpy())
predictions_all.append(predictions)
gts_all = np.concatenate(gts_all)
predictions_all = np.concatenate(predictions_all)
acc, acc
|
torch.nn.init.xavier_uniform_(m.weight.data)
# torch.nn.init.xavier_uniform(m.bias.data, 0)
nn.init.constant_(m.bias, 0)
|
conditional_block
|
ft_cityscapes.py
|
_fwavacc_0.91834.pth', # empty string denotes no snapshot
'snapshot':'epoch_42_loss_0.00916_acc_0.95598_acc-cls_0.58651_mean-iu_0.50990.pth',
'print_freq': 30,
'val_batch_size': 16,
'val_save_to_img_file': False,
'val_img_sample_rate': 0.05 # randomly sample some validation results to display
}
def init_weights(m):
if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
torch.nn.init.xavier_uniform_(m.weight.data)
# torch.nn.init.xavier_uniform(m.bias.data, 0)
nn.init.constant_(m.bias, 0)
def parse_args():
parser = argparse.ArgumentParser(description='Games Semantic Segmentation FCN8')
parser.add_argument('--gpu', type=str, default='0,1', help='gpu id')
parser.add_argument('--epochs', type=int, default=50, help='number of epochs to run')
parser.add_argument('--seed', type=int, default=47, help='seed for training')
args = parser.parse_args()
return args
def main():
# args = parse_args()
|
mean_std = ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
# train_transforms = transforms.Compose([
# transforms.RandomCrop(args['crop_size']),
# transforms.RandomRotation(90),
# transforms.RandomHorizontalFlip(p=0.5),
# transforms.RandomVerticalFlip(p=0.5),
# ])
short_size = int(min(args['input_size']) / 0.875)
# val_transforms = transforms.Compose([
# transforms.Scale(short_size, interpolation=Image.NEAREST),
# # joint_transforms.Scale(short_size),
# transforms.CenterCrop(args['input_size'])
# ])
train_joint_transform = joint_transforms.Compose([
# joint_transforms.Scale(short_size),
joint_transforms.RandomCrop(args['crop_size']),
joint_transforms.RandomHorizontallyFlip(),
joint_transforms.RandomRotate(90)
])
val_joint_transform = joint_transforms.Compose([
joint_transforms.Scale(short_size),
joint_transforms.CenterCrop(args['input_size'])
])
input_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(*mean_std)
])
target_transform = extended_transforms.MaskToTensor()
restore_transform = transforms.Compose([
extended_transforms.DeNormalize(*mean_std),
transforms.ToPILImage()
])
visualize = transforms.ToTensor()
train_set = cityscapes.CityScapes('train', joint_transform=train_joint_transform,
transform=input_transform, target_transform=target_transform)
# train_set = cityscapes.CityScapes('train', transform=train_transforms)
train_loader = DataLoader(train_set, batch_size=args['train_batch_size'], num_workers=8, shuffle=True)
val_set = cityscapes.CityScapes('val', joint_transform=val_joint_transform, transform=input_transform,
target_transform=target_transform)
# val_set = cityscapes.CityScapes('val', transform=val_transforms)
val_loader = DataLoader(val_set, batch_size=args['val_batch_size'], num_workers=8, shuffle=True)
print(len(train_loader), len(val_loader))
# sdf
vgg_model = VGGNet(requires_grad=True, remove_fc=True)
net = FCN8s(pretrained_net=vgg_model, n_class=cityscapes.num_classes, dropout_rate=0.4)
# net.apply(init_weights)
criterion = nn.CrossEntropyLoss(ignore_index=cityscapes.ignore_label)
optimizer = optim.Adam(net.parameters(), lr=1e-4)
check_mkdir(ft_ckpt_path)
check_mkdir(os.path.join(ft_ckpt_path, ft_exp_name))
open(os.path.join(ft_ckpt_path, ft_exp_name, str(datetime.datetime.now()) + '.txt'), 'w').write(str(args) + '\n\n')
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=args['lr_patience'], min_lr=1e-10)
vgg_model = vgg_model.to(device)
net = net.to(device)
if torch.cuda.device_count()>1:
net = nn.DataParallel(net)
# if len(args['snapshot']) == 0:
# curr_epoch = 1
# args['best_record'] = {'epoch': 0, 'val_loss': 1e10, 'acc': 0, 'acc_cls': 0, 'mean_iu': 0}
# else:
# print('training resumes from ' + args['snapshot'])
net.load_state_dict(torch.load(os.path.join(ckpt_path, exp_name, args['snapshot'])))
# split_snapshot = args['snapshot'].split('_')
# curr_epoch = int(split_snapshot[1]) + 1
curr_epoch = 1
args['best_record'] = {'epoch': 0, 'val_loss': 1e10, 'acc': 0, 'acc_cls': 0, 'mean_iu': 0}
# args['best_record'] = {'epoch': int(split_snapshot[1]), 'val_loss': float(split_snapshot[3]),
# 'acc': float(split_snapshot[5]), 'acc_cls': float(split_snapshot[7]),
# 'mean_iu': float(split_snapshot[9][:-4])}
criterion.to(device)
for epoch in range(curr_epoch, args['epoch_num'] + 1):
train(train_loader, net, device, criterion, optimizer, epoch, args)
val_loss = validate(val_loader, net, device, criterion, optimizer, epoch, args, restore_transform, visualize)
scheduler.step(val_loss)
def train(train_loader, net, device, criterion, optimizer, epoch, train_args):
net.train()
train_loss = AverageMeter()
curr_iter = (epoch - 1) * len(train_loader)
targets_all, preds_all = [], []
for i, data in enumerate(train_loader):
inputs, targets = data
assert inputs.size()[2:] == targets.size()[1:]
N = inputs.size(0)
inputs, targets = inputs.to(device), targets.to(device)
optimizer.zero_grad()
outputs = net(inputs)
assert outputs.size()[2:] == targets.size()[1:]
assert outputs.size()[1] == cityscapes.num_classes
loss = criterion(outputs, targets) / N
loss.backward()
optimizer.step()
train_loss.update(loss.data, N)
targets_all.append(targets.data.cpu().numpy())
preds_all.append(outputs.data.max(1)[1].squeeze_(1).cpu().numpy())
curr_iter += 1
writer.add_scalar('train_loss', train_loss.avg, curr_iter)
if (i + 1) % train_args['print_freq'] == 0:
print('[epoch %d], [iter %d / %d], [train loss %.5f]' % (
epoch, i + 1, len(train_loader), train_loss.avg))
targets_all = np.concatenate(targets_all)
preds_all = np.concatenate(preds_all)
acc, acc_cls, mean_iou, _ = evaluate(preds_all, targets_all, cityscapes.num_classes)
print('-----------------------------------------------------------------------------------------------------------')
print('[epoch %d], [acc %.5f], [acc_cls %.5f], [mean_iu %.5f]' % (
epoch, acc, acc_cls, mean_iou))
def validate(val_loader, net, device, criterion, optimizer, epoch, train_args, restore, visualize, finetuning=True):
net.eval()
val_loss = AverageMeter()
inputs_all, gts_all, predictions_all = [], [], []
with torch.no_grad():
for vi, data in enumerate(val_loader):
inputs, gts = data
N = inputs.size(0)
inputs, gts = inputs.to(device), gts.to(device)
outputs = net(inputs)
predictions = outputs.data.max(1)[1].squeeze_(1).cpu().numpy()
val_loss.update(criterion(outputs, gts).data / N, N)
for i in inputs:
if random.random() > train_args['val_img_sample_rate']:
inputs_all.append(None)
else:
inputs_all.append(i.data.cpu())
gts_all.append(gts.data.cpu().numpy())
predictions_all.append(predictions)
gts_all = np.concatenate(gts_all)
predictions_all = np.concatenate(predictions_all)
acc, acc
|
torch.backends.cudnn.benchmark = True
os.environ["CUDA_VISIBLE_DEVICES"] = '0,1'
device = torch.device('cuda:0' if torch.cuda.is_available() else "cpu")
# # if args.seed:
# random.seed(args.seed)
# np.random.seed(args.seed)
# torch.manual_seed(args.seed)
# # if args.gpu:
# torch.cuda.manual_seed_all(args.seed)
seed = 63
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
# if args.gpu:
torch.cuda.manual_seed_all(seed)
|
identifier_body
|
ft_cityscapes.py
|
_fwavacc_0.91834.pth', # empty string denotes no snapshot
'snapshot':'epoch_42_loss_0.00916_acc_0.95598_acc-cls_0.58651_mean-iu_0.50990.pth',
'print_freq': 30,
'val_batch_size': 16,
'val_save_to_img_file': False,
'val_img_sample_rate': 0.05 # randomly sample some validation results to display
}
def init_weights(m):
if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
torch.nn.init.xavier_uniform_(m.weight.data)
# torch.nn.init.xavier_uniform(m.bias.data, 0)
nn.init.constant_(m.bias, 0)
def parse_args():
parser = argparse.ArgumentParser(description='Games Semantic Segmentation FCN8')
parser.add_argument('--gpu', type=str, default='0,1', help='gpu id')
parser.add_argument('--epochs', type=int, default=50, help='number of epochs to run')
parser.add_argument('--seed', type=int, default=47, help='seed for training')
args = parser.parse_args()
return args
def main():
# args = parse_args()
torch.backends.cudnn.benchmark = True
os.environ["CUDA_VISIBLE_DEVICES"] = '0,1'
device = torch.device('cuda:0' if torch.cuda.is_available() else "cpu")
# # if args.seed:
# random.seed(args.seed)
# np.random.seed(args.seed)
# torch.manual_seed(args.seed)
# # if args.gpu:
# torch.cuda.manual_seed_all(args.seed)
seed = 63
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
# if args.gpu:
torch.cuda.manual_seed_all(seed)
mean_std = ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
# train_transforms = transforms.Compose([
# transforms.RandomCrop(args['crop_size']),
# transforms.RandomRotation(90),
# transforms.RandomHorizontalFlip(p=0.5),
# transforms.RandomVerticalFlip(p=0.5),
# ])
short_size = int(min(args['input_size']) / 0.875)
# val_transforms = transforms.Compose([
# transforms.Scale(short_size, interpolation=Image.NEAREST),
# # joint_transforms.Scale(short_size),
# transforms.CenterCrop(args['input_size'])
# ])
train_joint_transform = joint_transforms.Compose([
# joint_transforms.Scale(short_size),
joint_transforms.RandomCrop(args['crop_size']),
joint_transforms.RandomHorizontallyFlip(),
joint_transforms.RandomRotate(90)
])
val_joint_transform = joint_transforms.Compose([
joint_transforms.Scale(short_size),
joint_transforms.CenterCrop(args['input_size'])
])
input_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(*mean_std)
])
target_transform = extended_transforms.MaskToTensor()
restore_transform = transforms.Compose([
extended_transforms.DeNormalize(*mean_std),
transforms.ToPILImage()
])
visualize = transforms.ToTensor()
train_set = cityscapes.CityScapes('train', joint_transform=train_joint_transform,
transform=input_transform, target_transform=target_transform)
# train_set = cityscapes.CityScapes('train', transform=train_transforms)
train_loader = DataLoader(train_set, batch_size=args['train_batch_size'], num_workers=8, shuffle=True)
val_set = cityscapes.CityScapes('val', joint_transform=val_joint_transform, transform=input_transform,
target_transform=target_transform)
# val_set = cityscapes.CityScapes('val', transform=val_transforms)
val_loader = DataLoader(val_set, batch_size=args['val_batch_size'], num_workers=8, shuffle=True)
print(len(train_loader), len(val_loader))
# sdf
vgg_model = VGGNet(requires_grad=True, remove_fc=True)
net = FCN8s(pretrained_net=vgg_model, n_class=cityscapes.num_classes, dropout_rate=0.4)
# net.apply(init_weights)
criterion = nn.CrossEntropyLoss(ignore_index=cityscapes.ignore_label)
optimizer = optim.Adam(net.parameters(), lr=1e-4)
check_mkdir(ft_ckpt_path)
check_mkdir(os.path.join(ft_ckpt_path, ft_exp_name))
open(os.path.join(ft_ckpt_path, ft_exp_name, str(datetime.datetime.now()) + '.txt'), 'w').write(str(args) + '\n\n')
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=args['lr_patience'], min_lr=1e-10)
vgg_model = vgg_model.to(device)
net = net.to(device)
if torch.cuda.device_count()>1:
net = nn.DataParallel(net)
# if len(args['snapshot']) == 0:
# curr_epoch = 1
# args['best_record'] = {'epoch': 0, 'val_loss': 1e10, 'acc': 0, 'acc_cls': 0, 'mean_iu': 0}
# else:
# print('training resumes from ' + args['snapshot'])
net.load_state_dict(torch.load(os.path.join(ckpt_path, exp_name, args['snapshot'])))
# split_snapshot = args['snapshot'].split('_')
# curr_epoch = int(split_snapshot[1]) + 1
curr_epoch = 1
args['best_record'] = {'epoch': 0, 'val_loss': 1e10, 'acc': 0, 'acc_cls': 0, 'mean_iu': 0}
# args['best_record'] = {'epoch': int(split_snapshot[1]), 'val_loss': float(split_snapshot[3]),
# 'acc': float(split_snapshot[5]), 'acc_cls': float(split_snapshot[7]),
# 'mean_iu': float(split_snapshot[9][:-4])}
criterion.to(device)
for epoch in range(curr_epoch, args['epoch_num'] + 1):
train(train_loader, net, device, criterion, optimizer, epoch, args)
val_loss = validate(val_loader, net, device, criterion, optimizer, epoch, args, restore_transform, visualize)
scheduler.step(val_loss)
def train(train_loader, net, device, criterion, optimizer, epoch, train_args):
|
curr_iter = (epoch - 1) * len(train_loader)
targets_all, preds_all = [], []
for i, data in enumerate(train_loader):
inputs, targets = data
assert inputs.size()[2:] == targets.size()[1:]
N = inputs.size(0)
inputs, targets = inputs.to(device), targets.to(device)
optimizer.zero_grad()
outputs = net(inputs)
assert outputs.size()[2:] == targets.size()[1:]
assert outputs.size()[1] == cityscapes.num_classes
loss = criterion(outputs, targets) / N
loss.backward()
optimizer.step()
train_loss.update(loss.data, N)
targets_all.append(targets.data.cpu().numpy())
preds_all.append(outputs.data.max(1)[1].squeeze_(1).cpu().numpy())
curr_iter += 1
writer.add_scalar('train_loss', train_loss.avg, curr_iter)
if (i + 1) % train_args['print_freq'] == 0:
print('[epoch %d], [iter %d / %d], [train loss %.5f]' % (
epoch, i + 1, len(train_loader), train_loss.avg))
targets_all = np.concatenate(targets_all)
preds_all = np.concatenate(preds_all)
acc, acc_cls, mean_iou, _ = evaluate(preds_all, targets_all, cityscapes.num_classes)
print('-----------------------------------------------------------------------------------------------------------')
print('[epoch %d], [acc %.5f], [acc_cls %.5f], [mean_iu %.5f]' % (
epoch, acc, acc_cls, mean_iou))
def validate(val_loader, net, device, criterion, optimizer, epoch, train_args, restore, visualize, finetuning=True):
net.eval()
val_loss = AverageMeter()
inputs_all, gts_all, predictions_all = [], [], []
with torch.no_grad():
for vi, data in enumerate(val_loader):
inputs, gts = data
N = inputs.size(0)
inputs, gts = inputs.to(device), gts.to(device)
outputs = net(inputs)
predictions = outputs.data.max(1)[1].squeeze_(1).cpu().numpy()
val_loss.update(criterion(outputs, gts).data / N, N)
for i in inputs:
if random.random() > train_args['val_img_sample_rate']:
inputs_all.append(None)
else:
inputs_all.append(i.data.cpu())
gts_all.append(gts.data.cpu().numpy())
predictions_all.append(predictions)
gts_all = np.concatenate(gts_all)
predictions_all = np.concatenate(predictions_all)
acc, acc_cls
|
net.train()
train_loss = AverageMeter()
|
random_line_split
|
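Both loops end by calling evaluate(preds_all, targets_all, num_classes) and unpacking (acc, acc_cls, mean_iou, fwavacc). The function itself is not in the dump; a plausible confusion-matrix implementation of those four metrics, offered as an illustrative stand-in rather than the repo's actual code:

import numpy as np

def evaluate(predictions, gts, num_classes):
    # Accumulate a num_classes x num_classes confusion matrix over all images.
    hist = np.zeros((num_classes, num_classes))
    for pred, gt in zip(predictions, gts):
        mask = (gt >= 0) & (gt < num_classes)  # skip ignore_label pixels
        hist += np.bincount(
            num_classes * gt[mask].astype(int) + pred[mask],
            minlength=num_classes ** 2,
        ).reshape(num_classes, num_classes)
    acc = np.diag(hist).sum() / hist.sum()                  # pixel accuracy
    acc_cls = np.nanmean(np.diag(hist) / hist.sum(axis=1))  # per-class accuracy
    iou = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))
    mean_iou = np.nanmean(iou)                              # mean IoU
    freq = hist.sum(axis=1) / hist.sum()
    fwavacc = (freq[freq > 0] * iou[freq > 0]).sum()        # frequency-weighted acc
    return acc, acc_cls, mean_iou, fwavacc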
ft_cityscapes.py
|
_fwavacc_0.91834.pth', # empty string denotes no snapshot
'snapshot':'epoch_42_loss_0.00916_acc_0.95598_acc-cls_0.58651_mean-iu_0.50990.pth',
'print_freq': 30,
'val_batch_size': 16,
'val_save_to_img_file': False,
'val_img_sample_rate': 0.05 # randomly sample some validation results to display
}
def init_weights(m):
if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
torch.nn.init.xavier_uniform_(m.weight.data)
# torch.nn.init.xavier_uniform(m.bias.data, 0)
nn.init.constant_(m.bias, 0)
def parse_args():
parser = argparse.ArgumentParser(description='Games Semantic Segmentation FCN8')
parser.add_argument('--gpu', type=str, default='0,1', help='gpu id')
parser.add_argument('--epochs', type=int, default=50, help='number of epochs to run')
parser.add_argument('--seed', type=int, default=47, help='seed for training')
args = parser.parse_args()
return args
def main():
# args = parse_args()
torch.backends.cudnn.benchmark = True
os.environ["CUDA_VISIBLE_DEVICES"] = '0,1'
device = torch.device('cuda:0' if torch.cuda.is_available() else "cpu")
# # if args.seed:
# random.seed(args.seed)
# np.random.seed(args.seed)
# torch.manual_seed(args.seed)
# # if args.gpu:
# torch.cuda.manual_seed_all(args.seed)
seed = 63
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
# if args.gpu:
torch.cuda.manual_seed_all(seed)
mean_std = ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
# train_transforms = transforms.Compose([
# transforms.RandomCrop(args['crop_size']),
# transforms.RandomRotation(90),
# transforms.RandomHorizontalFlip(p=0.5),
# transforms.RandomVerticalFlip(p=0.5),
# ])
short_size = int(min(args['input_size']) / 0.875)
# val_transforms = transforms.Compose([
# transforms.Scale(short_size, interpolation=Image.NEAREST),
# # joint_transforms.Scale(short_size),
# transforms.CenterCrop(args['input_size'])
# ])
train_joint_transform = joint_transforms.Compose([
# joint_transforms.Scale(short_size),
joint_transforms.RandomCrop(args['crop_size']),
joint_transforms.RandomHorizontallyFlip(),
joint_transforms.RandomRotate(90)
])
val_joint_transform = joint_transforms.Compose([
joint_transforms.Scale(short_size),
joint_transforms.CenterCrop(args['input_size'])
])
input_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(*mean_std)
])
target_transform = extended_transforms.MaskToTensor()
restore_transform = transforms.Compose([
extended_transforms.DeNormalize(*mean_std),
transforms.ToPILImage()
])
visualize = transforms.ToTensor()
train_set = cityscapes.CityScapes('train', joint_transform=train_joint_transform,
transform=input_transform, target_transform=target_transform)
# train_set = cityscapes.CityScapes('train', transform=train_transforms)
train_loader = DataLoader(train_set, batch_size=args['train_batch_size'], num_workers=8, shuffle=True)
val_set = cityscapes.CityScapes('val', joint_transform=val_joint_transform, transform=input_transform,
target_transform=target_transform)
# val_set = cityscapes.CityScapes('val', transform=val_transforms)
val_loader = DataLoader(val_set, batch_size=args['val_batch_size'], num_workers=8, shuffle=True)
print(len(train_loader), len(val_loader))
# sdf
vgg_model = VGGNet(requires_grad=True, remove_fc=True)
net = FCN8s(pretrained_net=vgg_model, n_class=cityscapes.num_classes, dropout_rate=0.4)
# net.apply(init_weights)
criterion = nn.CrossEntropyLoss(ignore_index=cityscapes.ignore_label)
optimizer = optim.Adam(net.parameters(), lr=1e-4)
check_mkdir(ft_ckpt_path)
check_mkdir(os.path.join(ft_ckpt_path, ft_exp_name))
open(os.path.join(ft_ckpt_path, ft_exp_name, str(datetime.datetime.now()) + '.txt'), 'w').write(str(args) + '\n\n')
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=args['lr_patience'], min_lr=1e-10)
vgg_model = vgg_model.to(device)
net = net.to(device)
if torch.cuda.device_count()>1:
net = nn.DataParallel(net)
# if len(args['snapshot']) == 0:
# curr_epoch = 1
# args['best_record'] = {'epoch': 0, 'val_loss': 1e10, 'acc': 0, 'acc_cls': 0, 'mean_iu': 0}
# else:
# print('training resumes from ' + args['snapshot'])
net.load_state_dict(torch.load(os.path.join(ckpt_path, exp_name, args['snapshot'])))
# split_snapshot = args['snapshot'].split('_')
# curr_epoch = int(split_snapshot[1]) + 1
curr_epoch = 1
args['best_record'] = {'epoch': 0, 'val_loss': 1e10, 'acc': 0, 'acc_cls': 0, 'mean_iu': 0}
# args['best_record'] = {'epoch': int(split_snapshot[1]), 'val_loss': float(split_snapshot[3]),
# 'acc': float(split_snapshot[5]), 'acc_cls': float(split_snapshot[7]),
# 'mean_iu': float(split_snapshot[9][:-4])}
criterion.to(device)
for epoch in range(curr_epoch, args['epoch_num'] + 1):
train(train_loader, net, device, criterion, optimizer, epoch, args)
val_loss = validate(val_loader, net, device, criterion, optimizer, epoch, args, restore_transform, visualize)
scheduler.step(val_loss)
def
|
(train_loader, net, device, criterion, optimizer, epoch, train_args):
net.train()
train_loss = AverageMeter()
curr_iter = (epoch - 1) * len(train_loader)
targets_all, preds_all = [], []
for i, data in enumerate(train_loader):
inputs, targets = data
assert inputs.size()[2:] == targets.size()[1:]
N = inputs.size(0)
inputs, targets = inputs.to(device), targets.to(device)
optimizer.zero_grad()
outputs = net(inputs)
assert outputs.size()[2:] == targets.size()[1:]
assert outputs.size()[1] == cityscapes.num_classes
loss = criterion(outputs, targets) / N
loss.backward()
optimizer.step()
train_loss.update(loss.data, N)
targets_all.append(targets.data.cpu().numpy())
preds_all.append(outputs.data.max(1)[1].squeeze_(1).cpu().numpy())
curr_iter += 1
writer.add_scalar('train_loss', train_loss.avg, curr_iter)
if (i + 1) % train_args['print_freq'] == 0:
print('[epoch %d], [iter %d / %d], [train loss %.5f]' % (
epoch, i + 1, len(train_loader), train_loss.avg))
targets_all = np.concatenate(targets_all)
preds_all = np.concatenate(preds_all)
acc, acc_cls, mean_iou, _ = evaluate(preds_all, targets_all, cityscapes.num_classes)
print('-----------------------------------------------------------------------------------------------------------')
print('[epoch %d], [acc %.5f], [acc_cls %.5f], [mean_iu %.5f]' % (
epoch, acc, acc_cls, mean_iou))
def validate(val_loader, net, device, criterion, optimizer, epoch, train_args, restore, visualize, finetuning=True):
net.eval()
val_loss = AverageMeter()
inputs_all, gts_all, predictions_all = [], [], []
with torch.no_grad():
for vi, data in enumerate(val_loader):
inputs, gts = data
N = inputs.size(0)
inputs, gts = inputs.to(device), gts.to(device)
outputs = net(inputs)
predictions = outputs.data.max(1)[1].squeeze_(1).cpu().numpy()
val_loss.update(criterion(outputs, gts).data / N, N)
for i in inputs:
if random.random() > train_args['val_img_sample_rate']:
inputs_all.append(None)
else:
inputs_all.append(i.data.cpu())
gts_all.append(gts.data.cpu().numpy())
predictions_all.append(predictions)
gts_all = np.concatenate(gts_all)
predictions_all = np.concatenate(predictions_all)
acc, acc
|
train
|
identifier_name
|
load_csv.py
|
iterrows return a row object to access column names for each row
import csv
import os
import datetime
def euro(number):
return f'{number:.2f} €'.replace('.',',')
def date_s(date):
# accepts datetime, returns formatted string
return str(date.strftime("%d.%m.%Y"))
def convert_to_date(date):
if type(date) == datetime.date:
return date
else:
return date.date()
class Data():
def __init__(self, data=None, columns=[]):
self.data = {}
self.columns = columns # column names
self.shape = (0, 0)
if data:
if columns:
for i in range(len(data[0])):
self.data[self.columns[i]] = []
else:
for i in range(len(data[0])):
self.columns.append(str(i))
self.data[str(i)] = []
for i, row in enumerate(data):
for j, col in enumerate(row):
self.data[self.columns[j]].append(col)
self.shape = (len(data), len(data[0]))
print(self.data)
for col in self.columns:
setattr(self, col, self.data[col])
def write_csv(self, filename, decimal=',', sep=';', head=True):
# writes self.data to a given csv file
with open(filename, 'w+', newline='') as csvfile:
writer = csv.writer(csvfile, delimiter=sep)
if head:
writer.writerow(self.columns)
for i, row in self.iterrows():
str_row = [str(r).replace('.', decimal) for r in row]
writer.writerow(str_row)
def read_csv(self, filename, head=True, column_names=[],
decimal=',', parse_dates=[], date_parser=None):
# make an array to store the csv data with shape (rows, columns)
if not os.path.isfile(filename):
print(f'Error: "{filename}" does not exist.')
return
file_data = []
try:
with open(filename, 'r') as csvfile:
reader = csv.reader(csvfile, delimiter=';')
for row in reader:
file_data.append(row)
except csv.Error:
print(f'Error: Could not read "{filename}"')
return
if len(file_data) == 0:
print(f'Error: "{filename}" does not contain any data.')
return
self.shape = (len(file_data), len(file_data[0]))
if column_names and len(column_names) != self.shape[1]:
print('Error: Mismatching length of column names ' +
f'(Got {len(column_names)} instead of {self.shape[1]}).')
return
if head and not column_names:
# set or store column names
self.columns = file_data[0]
file_data = file_data[1:]
for col in self.columns:
|
elif head and column_names:
# TODO: check if len of column names is compatible
self.columns = list(column_names)
file_data = file_data[1:]
for col in self.columns:
self.data[col] = []
elif not head and column_names:
self.columns = list(column_names)
for col in self.columns:
self.data[col] = []
else:
for i in range(len(file_data[0])):
self.columns.append(str(i))
self.data[str(i)] = []
for i, row in enumerate(file_data):
for j, col in enumerate(row):
# check if data is boolean
if col == 'True':
self.data[self.columns[j]].append(True)
continue
elif col == 'False':
self.data[self.columns[j]].append(False)
continue
# check if data is date
if parse_dates and self.columns[j] in parse_dates:
self.data[self.columns[j]].append(date_parser(col))
continue
# convert numbers to float or int
value = col.replace(decimal, '.')
try:
value = float(value)
if value.is_integer():
self.data[self.columns[j]].append(int(value))
else:
self.data[self.columns[j]].append(value)
except ValueError:
# data is not a number
self.data[self.columns[j]].append(col)
# set attributes of data object based on column names
for col in self.columns:
setattr(self, col, self.data[col])
class Row():
def __init__(self, data, columns):
self.data = data
self.columns = columns
for i, col in enumerate(self.columns):
setattr(self, col, data[i])
def __getitem__(self, key):
return self.data[self.columns.index(key)]
def __iter__(self):
return iter(self.data)
def iterrows(self):
# similar to iterrows
# but yields a row object as well as the index
# TODO: maybe replace iterrows with this
v = list(self.data.values())
if len(v) == 0:
return
i = 0
while i < len(v[0]):
data = []
for col in v:
data.append(col[i])
row = self.Row(data, self.columns)
yield i, row
i += 1
def sort(self, by=None, reverse=False):
'''
sorts the rows
"by" has to be a column name
'''
#temp_data = list(self.iterrows())
temp_data = [list(row) for i, row in self.iterrows()]
#print(temp_data)
if not by or by not in self.columns:
i = 0
else:
i = self.columns.index(by)
temp_data = sorted(temp_data, key=lambda x: x[i], reverse=reverse)
# convert back to self.data structure
for i, row in enumerate(temp_data):
for j, col in enumerate(row):
self.data[self.columns[j]][i] = col
#return temp_data
def to_html(self, filename, format_values={}, rename_columns={},
css=[], column_align={}, caption=None,
format_columns={}):
'''
construct an html table out of this object's data
filename is a valid *.html or *.htm filename
format_values is a dictionary with column names as keys
and functions as values that take a single value as an argument
and return the formatted (or otherwise processed) value
rename_columns is a dictionary with pairs of
current col name: new col name
css is a list of css elements that are inserted into the
<style> tag
column_align is a dict with column name: align (left, right, center)
caption specifies the table's caption
format_columns is a dictionary with format options for the respective
columns
'''
if len(self.data) == 0:
# return if this has no data
print('HTML building aborted: No data')
return
if filename[-4:] != 'html' and filename[-3:] != 'htm':
print(f'Error: "{filename}" is not a valid html file')
return
strTable = '<html><head><style>'
# css table style
# add classes for alignment
strTable += ('.right {text-align: right;} ' +
'.left {text-align: left;} ' +
'.center {text-align: center;}')
for style in css:
# add css elements to style tag
strTable += style
strTable += '</style></head><body><table>'
if caption:
strTable += f'<caption>{caption}</caption>'
strTable += '<tr>'
for col in self.columns:
# add column names to table header
if col in rename_columns.keys():
col = rename_columns[col]
strTable += f'<th>{col}</th>'
strTable += '</tr>'
for i, row in self.iterrows():
# add rows to table
strRW = '<tr>'
for col in self.columns:
strTD = '<td '
value = row[col]
if col in format_values.keys():
value = format_values[col](value)
if col in format_columns.keys():
strTD += format_columns[col]
if col in column_align.keys():
strTD += f' class=\"{column_align[col]}\">{value}'
else:
strTD += f'>{value}'
strTD += '</td>'
strRW += strTD
strRW += '</tr>'
strTable += strRW
strTable += '</table></body></html>'
with open(filename, 'w') as html_file:
html_file.write(strTable)
if __name__ == '__main__':
file_path = os.path.dirname(os.path.abspath(__file__))
filename = os.path.join(file_path, 'exported_csv', 'staff.csv')
data = Data()
data.read_csv(filename,
head=True,
column_names = ['A', 'B', 'C', 'D', 'E'],
parse_dates=['date'],
date_parser=lambda x: datetime.datetime.strptime(x, '%d.%m.%Y').date())
table_css = [
'table {border-collapse: collapse;}',
'table, th, td {border: 1px solid black;}',
'th, td {text-align:
|
self.data[col] = []
|
random_line_split
|
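A short usage sketch for the Data class in these load_csv.py rows: build a table in memory, sort it, iterate it, and write it out. The rows and the output file name are hypothetical:

# Hypothetical round trip with the Data class above.
rows = [[3, 'Carol', 1200.5],
        [1, 'Alice', 950.0],
        [2, 'Bob', 1100.25]]
table = Data(data=rows, columns=['id', 'name', 'salary'])
table.sort(by='id')              # in-place sort on the 'id' column
for i, row in table.iterrows():  # Row supports row['name'] style access
    print(i, row['id'], row['name'])
# decimal=',' writes 1200.5 as "1200,5", mirroring read_csv's default
table.write_csv('staff_sorted.csv', decimal=',', sep=';', head=True)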
load_csv.py
|
iterrows return a row object to access column names for each row
import csv
import os
import datetime
def euro(number):
return f'{number:.2f} €'.replace('.',',')
def date_s(date):
# accepts datetime, returns formatted string
return str(date.strftime("%d.%m.%Y"))
def convert_to_date(date):
if type(date) == datetime.date:
return date
else:
return date.date()
class Data():
def __init__(self, data=None, columns=[]):
self.data = {}
self.columns = columns # column names
self.shape = (0, 0)
if data:
if columns:
for i in range(len(data[0])):
self.data[self.columns[i]] = []
else:
for i in range(len(data[0])):
self.columns.append(str(i))
self.data[str(i)] = []
for i, row in enumerate(data):
for j, col in enumerate(row):
self.data[self.columns[j]].append(col)
self.shape = (len(data), len(data[0]))
print(self.data)
for col in self.columns:
setattr(self, col, self.data[col])
def write_csv(self, filename, decimal=',', sep=';', head=True):
# writes self.data to a given csv file
wi
|
def read_csv(self, filename, head=True, column_names=[],
decimal=',', parse_dates=[], date_parser=None):
# make an array to store the csv data with shape (rows, columns)
if not os.path.isfile(filename):
print(f'Error: "{filename}" does not exist.')
return
file_data = []
try:
with open(filename, 'r') as csvfile:
reader = csv.reader(csvfile, delimiter=';')
for row in reader:
file_data.append(row)
except csv.Error:
print(f'Error: Could not read "{filename}"')
return
if len(file_data) == 0:
print(f'Error: "{filename}" does not contain any data.')
return
self.shape = (len(file_data), len(file_data[0]))
if column_names and len(column_names) != self.shape[1]:
print('Error: Mismatching length of column names ' +
f'(Got {len(column_names)} instead of {self.shape[1]}).')
return
if head and not column_names:
# set or store column names
self.columns = file_data[0]
file_data = file_data[1:]
for col in self.columns:
self.data[col] = []
elif head and column_names:
# TODO: check if len of column names is compatible
self.columns = list(column_names)
file_data = file_data[1:]
for col in self.columns:
self.data[col] = []
elif not head and column_names:
self.columns = list(column_names)
for col in self.columns:
self.data[col] = []
else:
for i in range(len(file_data[0])):
self.columns.append(str(i))
self.data[str(i)] = []
for i, row in enumerate(file_data):
for j, col in enumerate(row):
# check if data is boolean
if col == 'True':
self.data[self.columns[j]].append(True)
continue
elif col == 'False':
self.data[self.columns[j]].append(False)
continue
# check if data is date
if parse_dates and self.columns[j] in parse_dates:
self.data[self.columns[j]].append(date_parser(col))
continue
# convert numbers to float or int
value = col.replace(decimal, '.')
try:
value = float(value)
if value.is_integer():
self.data[self.columns[j]].append(int(value))
else:
self.data[self.columns[j]].append(value)
except ValueError:
# data is not a number
self.data[self.columns[j]].append(col)
# set attributes of data object based on column names
for col in self.columns:
setattr(self, col, self.data[col])
class Row():
def __init__(self, data, columns):
self.data = data
self.columns = columns
for i, col in enumerate(self.columns):
setattr(self, col, data[i])
def __getitem__(self, key):
return self.data[self.columns.index(key)]
def __iter__(self):
return iter(self.data)
def iterrows(self):
# similar to iterrows
# but yields a row object as well as the index
# TODO: maybe replace iterrows with this
v = list(self.data.values())
if len(v) == 0:
return
i = 0
while i < len(v[0]):
data = []
for col in v:
data.append(col[i])
row = self.Row(data, self.columns)
yield i, row
i += 1
def sort(self, by=None, reverse=False):
'''
sorts the rows
"by" has to be a column name
'''
#temp_data = list(self.iterrows())
temp_data = [list(row) for i, row in self.iterrows()]
#print(temp_data)
if not by or by not in self.columns:
i = 0
else:
i = self.columns.index(by)
temp_data = sorted(temp_data, key=lambda x: x[i], reverse=reverse)
# convert back to self.data structure
for i, row in enumerate(temp_data):
for j, col in enumerate(row):
self.data[self.columns[j]][i] = col
#return temp_data
def to_html(self, filename, format_values={}, rename_columns={},
css=[], column_align={}, caption=None,
format_columns={}):
'''
construct an html table out of this object's data
filename is a valid *.html or *.htm filename
format_values is a dictionary with column names as keys
and functions as values that take a single value as an argument
and return the formatted (or otherwise processed) value
rename_columns is a dictionary with pairs of
current col name: new col name
css is a list of css elements that are inserted into the
<style> tag
column_align is a dict with column name: align (left, right, center)
caption specifies the table's caption
format_columns is a dictionary with format options for the respective
columns
'''
if len(self.data) == 0:
# return if this has no data
print('HTML building aborted: No data')
return
if filename[-4:] != 'html' and filename[-3:] != 'htm':
print(f'Error: "{filename}" is not a valid html file')
return
strTable = '<html><head><style>'
# css table style
# add classes for alignment
strTable += ('.right {text-align: right;} ' +
'.left {text-align: left;} ' +
'.center {text-align: center;}')
for style in css:
# add css elements to style tag
strTable += style
strTable += '</style></head><body><table>'
if caption:
strTable += f'<caption>{caption}</caption>'
strTable += '<tr>'
for col in self.columns:
# add column names to table header
if col in rename_columns.keys():
col = rename_columns[col]
strTable += f'<th>{col}</th>'
strTable += '</tr>'
for i, row in self.iterrows():
# add rows to table
strRW = '<tr>'
for col in self.columns:
strTD = '<td '
value = row[col]
if col in format_values.keys():
value = format_values[col](value)
if col in format_columns.keys():
strTD += format_columns[col]
if col in column_align.keys():
strTD += f' class=\"{column_align[col]}\">{value}'
else:
strTD += f'>{value}'
strTD += '</td>'
strRW += strTD
strRW += '</tr>'
strTable += strRW
strTable += '</table></body></html>'
with open(filename, 'w') as html_file:
html_file.write(strTable)
if __name__ == '__main__':
file_path = os.path.dirname(os.path.abspath(__file__))
filename = os.path.join(file_path, 'exported_csv', 'staff.csv')
data = Data()
data.read_csv(filename,
head=True,
column_names = ['A', 'B', 'C', 'D', 'E'],
parse_dates=['date'],
date_parser=lambda x: datetime.datetime.strptime(x, '%d.%m.%Y').date())
table_css = [
'table {border-collapse: collapse;}',
'table, th, td {border: 1px solid black;}',
'th, td {text-align:
|
th open(filename, 'w+', newline='') as csvfile:
writer = csv.writer(csvfile, delimiter=sep)
if head:
writer.writerow(self.columns)
for i, row in self.iterrows():
str_row = [str(r).replace('.', decimal) for r in row]
writer.writerow(str_row)
|
identifier_body
|
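to_html pairs naturally with the euro() formatter defined at the top of the file. A hedged example; the table contents and the output file name are made up for illustration:

# Hypothetical HTML export using the helpers above.
table = Data(data=[[1, 'Alice', 950.0], [2, 'Bob', 1100.25]],
             columns=['id', 'name', 'salary'])
table.to_html(
    'staff.html',
    format_values={'salary': euro},       # 950.0 -> "950,00 €"
    rename_columns={'salary': 'Salary'},  # changes the header text only
    column_align={'salary': 'right'},     # uses the built-in .right class
    css=['table, th, td {border: 1px solid black;}'],
    caption='Staff',
)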
load_csv.py
|
iterrows return a row object to access column names for each row
import csv
import os
import datetime
def euro(number):
return f'{number:.2f} €'.replace('.',',')
def date_s(date):
# accepts datetime, returns formatted string
return str(date.strftime("%d.%m.%Y"))
def convert_to_date(date):
if type(date) == datetime.date:
return date
else:
return date.date()
class Data():
def __init__(self, data=None, columns=[]):
self.data = {}
self.columns = columns # column names
self.shape = (0, 0)
if data:
if columns:
for i in range(len(data[0])):
self.data[self.columns[i]] = []
else:
for i in range(len(data[0])):
self.columns.append(str(i))
self.data[str(i)] = []
for i, row in enumerate(data):
for j, col in enumerate(row):
self.data[self.columns[j]].append(col)
self.shape = (len(data), len(data[0]))
print(self.data)
for col in self.columns:
setattr(self, col, self.data[col])
def write_csv(self, filename, decimal=',', sep=';', head=True):
# writes self.data to a given csv file
with open(filename, 'w+', newline='') as csvfile:
writer = csv.writer(csvfile, delimiter=sep)
if head:
writer.writerow(self.columns)
for i, row in self.iterrows():
str_row = [str(r).replace('.', decimal) for r in row]
writer.writerow(str_row)
def read_csv(self, filename, head=True, column_names=[],
decimal=',', parse_dates=[], date_parser=None):
# make an array to store the csv data with shape (rows, columns)
if not os.path.isfile(filename):
print(f'Error: "{filename}" does not exist.')
return
file_data = []
try:
with open(filename, 'r') as csvfile:
reader = csv.reader(csvfile, delimiter=';')
for row in reader:
file_data.append(row)
except csv.Error:
print(f'Error: Could not read "{filename}"')
return
if len(file_data) == 0:
print(f'Error: "{filename}" does not contain any data.')
return
self.shape = (len(file_data), len(file_data[0]))
if column_names and len(column_names) != self.shape[1]:
print('Error: Mismatching length of column names ' +
f'(Got {len(column_names)} instead of {self.shape[1]}).')
return
if head and not column_names:
# set or store column names
self.columns = file_data[0]
file_data = file_data[1:]
for col in self.columns:
self.data[col] = []
elif head and column_names:
# TODO: check if len of column names is compatible
self.columns = list(column_names)
file_data = file_data[1:]
for col in self.columns:
self.data[col] = []
elif not head and column_names:
se
|
else:
for i in range(len(file_data[0])):
self.columns.append(str(i))
self.data[str(i)] = []
for i, row in enumerate(file_data):
for j, col in enumerate(row):
# check if data is boolean
if col == 'True':
self.data[self.columns[j]].append(True)
continue
elif col == 'False':
self.data[self.columns[j]].append(False)
continue
# check if data is date
if parse_dates and self.columns[j] in parse_dates:
self.data[self.columns[j]].append(date_parser(col))
continue
# convert numbers to float or int
value = col.replace(decimal, '.')
try:
value = float(value)
if value.is_integer():
self.data[self.columns[j]].append(int(value))
else:
self.data[self.columns[j]].append(value)
except ValueError:
# data is not a number
self.data[self.columns[j]].append(col)
# set attributes of data object based on column names
for col in self.columns:
setattr(self, col, self.data[col])
class Row():
def __init__(self, data, columns):
self.data = data
self.columns = columns
for i, col in enumerate(self.columns):
setattr(self, col, data[i])
def __getitem__(self, key):
return self.data[self.columns.index(key)]
def __iter__(self):
return iter(self.data)
def iterrows(self):
# similar to iterrows
# but yields a row object as well as the index
# TODO: maybe replace iterrows with this
v = list(self.data.values())
if len(v) == 0:
return
i = 0
while i < len(v[0]):
data = []
for col in v:
data.append(col[i])
row = self.Row(data, self.columns)
yield i, row
i += 1
def sort(self, by=None, reverse=False):
'''
sorts the rows
"by" has to be a column name
'''
#temp_data = list(self.iterrows())
temp_data = [list(row) for i, row in self.iterrows()]
#print(temp_data)
if not by or by not in self.columns:
i = 0
else:
i = self.columns.index(by)
temp_data = sorted(temp_data, key=lambda x: x[i], reverse=reverse)
# convert back to self.data structure
for i, row in enumerate(temp_data):
for j, col in enumerate(row):
self.data[self.columns[j]][i] = col
#return temp_data
def to_html(self, filename, format_values={}, rename_columns={},
css=[], column_align={}, caption=None,
format_columns={}):
'''
construct an html table out of this object's data
filename is a valid *.html or *.htm filename
format_values is a dictionary with column names as keys
and functions as values that take a single value as an argument
and return the formatted (or otherwise processed) value
rename_columns is a dictionary with pairs of
current col name: new col name
css is a list of css elements that are inserted into the
<style> tag
column_align is a dict with column name: align (left, right, center)
caption specifies the table's caption
format_columns is a dictionary with format options for the respective
columns
'''
if len(self.data) == 0:
# return if this has no data
print('HTML building aborted: No data')
return
if filename[-4:] != 'html' and filename[-3:] != 'htm':
print(f'Error: "{filename}" is not a valid html file')
return
strTable = '<html><head><style>'
# css table style
# add classes for alignment
strTable += ('.right {text-align: right;} ' +
'.left {text-align: left;} ' +
'.center {text-align: center;}')
for style in css:
# add css elements to style tag
strTable += style
strTable += '</style></head><body><table>'
if caption:
strTable += f'<caption>{caption}</caption>'
strTable += '<tr>'
for col in self.columns:
# add column names to table header
if col in rename_columns.keys():
col = rename_columns[col]
strTable += f'<th>{col}</th>'
strTable += '</tr>'
for i, row in self.iterrows():
# add rows to table
strRW = '<tr>'
for col in self.columns:
strTD = '<td '
value = row[col]
if col in format_values.keys():
value = format_values[col](value)
if col in format_columns.keys():
strTD += format_columns[col]
if col in column_align.keys():
strTD += f' class=\"{column_align[col]}\">{value}'
else:
strTD += f'>{value}'
strTD += '</td>'
strRW += strTD
strRW += '</tr>'
strTable += strRW
strTable += '</table></body></html>'
with open(filename, 'w') as html_file:
html_file.write(strTable)
if __name__ == '__main__':
file_path = os.path.dirname(os.path.abspath(__file__))
filename = os.path.join(file_path, 'exported_csv', 'staff.csv')
data = Data()
data.read_csv(filename,
head=True,
column_names = ['A', 'B', 'C', 'D', 'E'],
parse_dates=['date'],
date_parser=lambda x: datetime.datetime.strptime(x, '%d.%m.%Y').date())
table_css = [
'table {border-collapse: collapse;}',
'table, th, td {border: 1px solid black;}',
'th, td {text-align: left
|
lf.columns = list(column_names)
for col in self.columns:
self.data[col] = []
|
conditional_block
|
load_csv.py
|
iterrows return a row object to access column names for each row
import csv
import os
import datetime
def euro(number):
return f'{number:.2f} €'.replace('.',',')
def date_s(date):
# accepts datetime, returns formatted string
return str(date.strftime("%d.%m.%Y"))
def convert_to_date(date):
if type(date) == datetime.date:
return date
else:
return date.date()
class Data():
def __init__(self, data=None, columns=[]):
self.data = {}
self.columns = columns # column names
self.shape = (0, 0)
if data:
if columns:
for i in range(len(data[0])):
self.data[self.columns[i]] = []
else:
for i in range(len(data[0])):
self.columns.append(str(i))
self.data[str(i)] = []
for i, row in enumerate(data):
for j, col in enumerate(row):
self.data[self.columns[j]].append(col)
self.shape = (len(data), len(data[0]))
print(self.data)
for col in self.columns:
setattr(self, col, self.data[col])
def write_csv(self, filename, decimal=',', sep=';', head=True):
# writes self.data to a given csv file
with open(filename, 'w+', newline='') as csvfile:
writer = csv.writer(csvfile, delimiter=sep)
if head:
writer.writerow(self.columns)
for i, row in self.iterrows():
str_row = [str(r).replace('.', decimal) for r in row]
writer.writerow(str_row)
def read_csv(self, filename, head=True, column_names=[],
decimal=',', parse_dates=[], date_parser=None):
# make an array to store the csv data with shape (rows, columns)
if not os.path.isfile(filename):
print(f'Error: "{filename}" does not exist.')
return
file_data = []
try:
with open(filename, 'r') as csvfile:
reader = csv.reader(csvfile, delimiter=';')
for row in reader:
file_data.append(row)
except csv.Error:
print(f'Error: Could not read "{filename}"')
return
if len(file_data) == 0:
print(f'Error: "{filename}" does not contain any data.')
return
self.shape = (len(file_data), len(file_data[0]))
if column_names and len(column_names) != self.shape[1]:
print('Error: Mismatching length of column names ' +
f'(Got {len(column_names)} instead of {self.shape[1]}).')
return
if head and not column_names:
# set or store column names
self.columns = file_data[0]
file_data = file_data[1:]
for col in self.columns:
self.data[col] = []
elif head and column_names:
# TODO: check if len of column names is compatible
self.columns = list(column_names)
file_data = file_data[1:]
for col in self.columns:
self.data[col] = []
elif not head and column_names:
self.columns = list(column_names)
for col in self.columns:
self.data[col] = []
else:
for i in range(len(file_data[0])):
self.columns.append(str(i))
self.data[str(i)] = []
for i, row in enumerate(file_data):
for j, col in enumerate(row):
# check if data is boolean
if col == 'True':
self.data[self.columns[j]].append(True)
continue
elif col == 'False':
self.data[self.columns[j]].append(False)
continue
# check if data is date
if parse_dates and self.columns[j] in parse_dates:
self.data[self.columns[j]].append(date_parser(col))
continue
# convert numbers to float or int
value = col.replace(decimal, '.')
try:
value = float(value)
if value.is_integer():
self.data[self.columns[j]].append(int(value))
else:
self.data[self.columns[j]].append(value)
except ValueError:
# data is not a number
self.data[self.columns[j]].append(col)
# set attributes of data object based on column names
for col in self.columns:
setattr(self, col, self.data[col])
class Row():
def __init__(self, data, columns):
self.data = data
self.columns = columns
for i, col in enumerate(self.columns):
setattr(self, col, data[i])
def __getitem__(self, key):
return self.data[self.columns.index(key)]
def __iter__(self):
return iter(self.data)
def iterrows(self):
# similar to iterrows
# but yields a row object as well as the index
# TODO: maybe replace iterrows with this
v = list(self.data.values())
if len(v) == 0:
return
i = 0
while i < len(v[0]):
data = []
for col in v:
data.append(col[i])
row = self.Row(data, self.columns)
yield i, row
i += 1
def so
|
elf, by=None, reverse=False):
'''
sorts the rows
"by" has to be a column name
'''
#temp_data = list(self.iterrows())
temp_data = [list(row) for i, row in self.iterrows()]
#print(temp_data)
if not by or by not in self.columns:
i = 0
else:
i = self.columns.index(by)
temp_data = sorted(temp_data, key=lambda x: x[i], reverse=reverse)
# convert back to self.data structure
for i, row in enumerate(temp_data):
for j, col in enumerate(row):
self.data[self.columns[j]][i] = col
#return temp_data
def to_html(self, filename, format_values={}, rename_columns={},
css=[], column_align={}, caption=None,
format_columns={}):
'''
construct an html table out of this object's data
filename is a valid *.html or *.htm filename
format_values is a dictionary with column names as keys
and functions as values that take a single value as an argument
and return the formatted (or otherwise processed) value
rename_columns is a dictionary with pairs of
current col name: new col name
css is a list of css elements that are inserted into the
<style> tag
column_align is a dict with column name: align (left, right, center)
caption specifies the table's caption
format_columns is a dictionary with format options for the respective
columns
'''
if len(self.data) == 0:
# return if this has no data
print('HTML building aborted: No data')
return
if filename[-4:] != 'html' and filename[-3:] != 'htm':
print(f'Error: "{filename}" is not a valid html file')
return
strTable = '<html><head><style>'
# css table style
# add classes for alignment
strTable += ('.right {text-align: right;} ' +
'.left {text-align: left;} ' +
'.center {text-align: center;}')
for style in css:
# add css elements to style tag
strTable += style
strTable += '</style></head><body><table>'
if caption:
strTable += f'<caption>{caption}</caption>'
strTable += '<tr>'
for col in self.columns:
# add column names to table header
if col in rename_columns.keys():
col = rename_columns[col]
strTable += f'<th>{col}</th>'
strTable += '</tr>'
for i, row in self.iterrows():
# add rows to table
strRW = '<tr>'
for col in self.columns:
strTD = '<td '
value = row[col]
if col in format_values.keys():
value = format_values[col](value)
if col in format_columns.keys():
strTD += format_columns[col]
if col in column_align.keys():
strTD += f' class=\"{column_align[col]}\">{value}'
else:
strTD += f'>{value}'
strTD += '</td>'
strRW += strTD
strRW += '</tr>'
strTable += strRW
strTable += '</table></body></html>'
with open(filename, 'w') as html_file:
html_file.write(strTable)
if __name__ == '__main__':
file_path = os.path.dirname(os.path.abspath(__file__))
filename = os.path.join(file_path, 'exported_csv', 'staff.csv')
data = Data()
data.read_csv(filename,
head=True,
column_names = ['A', 'B', 'C', 'D', 'E'],
parse_dates=['date'],
date_parser=lambda x: datetime.datetime.strptime(x, '%d.%m.%Y').date())
table_css = [
'table {border-collapse: collapse;}',
'table, th, td {border: 1px solid black;}',
'th, td {text-align:
|
rt(s
|
identifier_name
|
client.rs
|
{
std::fs::create_dir(&workSpace)?;
}
let basic_auth = BasicAuth {
user: RpcUsername.to_string(),
password: RpcPassword.to_string(),
};
let res = container
.post("https://u2.dmhy.org/getrss.php")
.form(&[
("inclbookmarked", 0),
("inclautochecked", 1),
("trackerssl", 1),
("showrows", 10),
("search_mode", 1),
])
.send()
.await?
.text()
.await?;
let res = Document::from(res.as_str())
.find(Name("a"))
.find(|x| match x.attr("class") {
Some(str) => {
if str == "faqlink" {
match x.attr("rel") {
Some(str) => str == "nofollow noopener noreferer",
_ => false,
}
} else {
false
}
}
_ => false,
})
.unwrap()
.text();
let passkey = U2client::matchRegex(&res, "passkey=([0-9a-z]*)")?;
Ok(U2client {
uid,
passkey,
container,
torrentClient: TransClient::with_auth(&RpcURL, basic_auth),
tempSpace,
workSpace,
})
} else {
Err("illegal cookie".into())
}
}
pub async fn removeTorrent(&self, id: String) -> Result<()> {
let _ = self
.torrentClient
.torrent_remove(vec![Id::Hash(id)], true)
.await?;
Ok(())
}
pub async fn addTorrent(&self, url: &str) -> Result<()> {
let s = self.container.get(url).send().await?;
let contentDisposition = s
.headers()
.get("content-disposition")
.ok_or("addTorrent:can not find content-disposition header")?
.to_str()?;
let filename = U2client::matchRegex(contentDisposition, "filename=%5BU2%5D.(.+)")?;
let to = format!("{}/{}", self.tempSpace, filename);
let toPath = Path::new(&to);
let content = s.bytes().await?;
if toPath.exists() {
std::fs::remove_file(&toPath)?;
}
let mut file = std::fs::File::create(&toPath)?;
file.write_all(&*content)?;
let add: TorrentAddArgs = TorrentAddArgs {
filename: Some(to),
download_dir: Some(self.workSpace.clone()),
..TorrentAddArgs::default()
};
let _ = self.torrentClient.torrent_add(add).await?;
Ok(())
}
pub async fn getTransmissionSession(&self) -> Result<SessionGet> {
Ok(self.torrentClient.session_get().await?.arguments)
}
pub async fn performActionOnTorrent(&self, id: String, op: TorrentAction) -> Result<()> {
let _ = self
.torrentClient
.torrent_action(op, vec![Id::Hash(id)])
.await?;
Ok(())
}
pub async fn getWorkingTorrent(&self) -> Result<Torrents<Torrent>> {
Ok(self.torrentClient.torrent_get(None, None).await?.arguments)
}
pub async fn getStats(&self) -> Result<SessionStats> {
Ok(self.torrentClient.session_stats().await?.arguments)
}
pub async fn getFreeSpace(&self, d: String) -> Result<FreeSpace> {
Ok(self.torrentClient.free_space(d).await?.arguments)
}
pub async fn getDownloadList(&self) -> Result<Vec<RssInfo>> {
Ok(self.getTorrent().await?)
}
pub async fn getRemove(&self) -> Result<Vec<Torrent>>
|
pub async fn getUserInfo(&self) -> Result<UserInfo> {
let context = self
.get(format!(
"https://u2.dmhy.org/userdetails.php?id={}",
self.uid
))
.await?;
let username = Document::from(context.as_str())
.find(Name("a"))
.find(|x| match x.attr("class") {
Some(x) => x.split_at(x.len() - 5).1 == "_Name",
_ => false,
})
.ok_or("getUserInfo:can not find username node")?
.text();
let body: HashMap<String, String> = U2client::parseHtml(&context, 2)?;
let t = U2client::reduceToText(&body, "BT时间")?;
let timeRate = U2client::matchRegex(&t, "做种/下载时间比率:[' ']*([0-9.]+)")?;
let uploadTime = U2client::matchRegex(&t, "做种时间:[' ']*([天0-9:' ']+[0-9])")?;
let downloadTime = U2client::matchRegex(&t, "下载时间:[' ']*([天0-9:' ']+[0-9])")?;
let t = U2client::reduceToText(&body, "传输[历史]")?;
let shareRate = U2client::matchRegex(&t, "分享率:[' ']*([0-9.]+)")?;
let upload = U2client::matchRegex(&t, "上传量:[' ']*([0-9.' ']+[TGMK]iB)")?;
let download = U2client::matchRegex(&t, "下载量:[' ']*([0-9.' ']+[TGMK]iB)")?;
let actualUpload = U2client::matchRegex(&t, "实际上传:[' ']*([0-9.' ']+[TGMK]iB)")?;
let actualDownload = U2client::matchRegex(&t, "实际下载:[' ']*([0-9.' ']+[TGMK]iB)")?;
let t = U2client::reduceToText(&body, "UCoin[详情]")?;
let coin = U2client::matchRegex(&t, "[(]([0-9.,]+)[)]")?;
Ok(UserInfo {
username,
download,
upload,
shareRate,
actualDownload,
actualUpload,
coin,
downloadTime,
uploadTime,
timeRate,
})
}
/// 2 => Free
/// 3 => 2x
/// 4 => 2xFree
/// 5 => 50%off
/// 6 => 2x50%off
/// 7 => 30%off
pub async fn applyMagic(&self, uid: &str, time: i32, magic: i32) -> Result<()> {
let time = time.max(24);
let url = format!(
"https://u2.dmhy.org/promotion.php?action=magic&torrent={}",
uid
);
let post = [
("action", "magic".to_string()),
("torrent", uid.to_string()),
("user", "SELF".to_string()),
("hours", time.to_string()),
("promotion", magic.to_string()),
];
let res = self.container.post(&url).form(&post).send().await?;
if res.status().as_u16() == 200 {
Ok(())
} else {
Err("apply magic failed:network failed".into())
}
}
pub async fn getTorrent(&self) -> Result<Vec<RssInfo>> {
let url = format!(
"https://u2.dmhy.org/torrentrss.php?rows=50&trackerssl=1&passkey={}",
self.passkey
);
let content = self.get(url).await?.into_bytes();
let channel = Channel::read_from(&content[..])?;
let res = channel.items.iter().map(async move |x| -> Result<RssInfo> {
let title = x.title.clone().ok_or("getTorrent:bad rss feed")?;
let url = x.enclosure.clone().ok_or("getTorrent:bad rss feed")?.url;
let cat = x.categories[0].name.clone();
let uid = U2client::matchRegex(url.as_str(), "id=([0-9]+)")?;
let U2Info = self.getTorrentInfo(&uid).await?;
Ok(RssInfo {
title,
url,
cat,
uid,
U2Info,
})
});
let res: Vec<Result<RssInfo>> = futures::future::join_all(res).await;
let mut ret = Vec::new();
for x in res.into_iter() {
ret.push(x?);
}
Ok(ret)
}
pub async fn getTorrentInfo(&self, idx: &str) -> Result<TorrentInfo> {
let toNumber = |x: &str| -> Result<f32> {
Ok(U2client::matchRegex(&x.to_string(), "([0-9.]+)")
|
{
let mut torrent = self.getWorkingTorrent().await?;
torrent.torrents.sort_by_key(|x| {
(
x.peers_getting_from_us.unwrap_or(0),
x.added_date.unwrap_or(0),
)
});
Ok(torrent.torrents.into_iter().take(5).collect())
}
|
identifier_body
|
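applyMagic() in the row above posts five form fields to promotion.php and clamps the duration to at least 24 hours via time.max(24). The same request expressed in Python with requests, purely as an illustration of the protocol; a real call needs the authenticated session cookie the Rust client carries:

import requests

def apply_magic(session: requests.Session, torrent_id: str,
                hours: int = 24, promotion: int = 2) -> bool:
    # Field names and endpoint mirror the Rust applyMagic() above.
    resp = session.post(
        'https://u2.dmhy.org/promotion.php?action=magic&torrent=' + torrent_id,
        data={
            'action': 'magic',
            'torrent': torrent_id,
            'user': 'SELF',
            'hours': max(hours, 24),  # never less than 24 hours
            'promotion': promotion,   # 2 => Free, 3 => 2x, ... (see doc comment)
        },
    )
    return resp.status_code == 200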
client.rs
|
历史]")?;
let shareRate = U2client::matchRegex(&t, "分享率:[' ']*([0-9.]+)")?;
let upload = U2client::matchRegex(&t, "上传量:[' ']*([0-9.' ']+[TGMK]iB)")?;
let download = U2client::matchRegex(&t, "下载量:[' ']*([0-9.' ']+[TGMK]iB)")?;
let actualUpload = U2client::matchRegex(&t, "实际上传:[' ']*([0-9.' ']+[TGMK]iB)")?;
let actualDownload = U2client::matchRegex(&t, "实际下载:[' ']*([0-9.' ']+[TGMK]iB)")?;
let t = U2client::reduceToText(&body, "UCoin[详情]")?;
let coin = U2client::matchRegex(&t, "[(]([0-9.,]+)[)]")?;
Ok(UserInfo {
username,
download,
upload,
shareRate,
actualDownload,
actualUpload,
coin,
downloadTime,
uploadTime,
timeRate,
})
}
/// 2 => Free
/// 3 => 2x
/// 4 => 2xFree
/// 5 => 50%off
/// 6 => 2x50%off
/// 7 => 30%off
pub async fn applyMagic(&self, uid: &str, time: i32, magic: i32) -> Result<()> {
let time = time.max(24);
let url = format!(
"https://u2.dmhy.org/promotion.php?action=magic&torrent={}",
uid
);
let post = [
("action", "magic".to_string()),
("torrent", uid.to_string()),
("user", "SELF".to_string()),
("hours", time.to_string()),
("promotion", magic.to_string()),
];
let res = self.container.post(&url).form(&post).send().await?;
if res.status().as_u16() == 200 {
Ok(())
} else {
Err("apply magic failed:network failed".into())
}
}
pub async fn getTorrent(&self) -> Result<Vec<RssInfo>> {
let url = format!(
"https://u2.dmhy.org/torrentrss.php?rows=50&trackerssl=1&passkey={}",
self.passkey
);
let content = self.get(url).await?.into_bytes();
let channel = Channel::read_from(&content[..])?;
let res = channel.items.iter().map(async move |x| -> Result<RssInfo> {
let title = x.title.clone().ok_or("getTorrent:bad rss feed")?;
let url = x.enclosure.clone().ok_or("getTorrent:bad rss feed")?.url;
let cat = x.categories[0].name.clone();
let uid = U2client::matchRegex(url.as_str(), "id=([0-9]+)")?;
let U2Info = self.getTorrentInfo(&uid).await?;
Ok(RssInfo {
title,
url,
cat,
uid,
U2Info,
})
});
let res: Vec<Result<RssInfo>> = futures::future::join_all(res).await;
let mut ret = Vec::new();
for x in res.into_iter() {
ret.push(x?);
}
Ok(ret)
}
pub async fn getTorrentInfo(&self, idx: &str) -> Result<TorrentInfo> {
let toNumber = |x: &str| -> Result<f32> {
Ok(U2client::matchRegex(&x.to_string(), "([0-9.]+)")?.parse::<f32>()?)
};
let context = self
.get(format!("https://u2.dmhy.org/details.php?id={}", idx))
.await?;
let body: HashMap<String, String> = U2client::parseHtml(&context, 1)?;
let doc = Document::from(
body.get("流量优惠")
.ok_or("getTorrentInfo:bad html")?
.as_str(),
);
let sink = doc
.find(select::predicate::Any)
.next()
.ok_or("getTorrentInfo:can find main table")?;
let typeNode = sink.find(Name("img")).next();
let (uploadFX, downloadFX) = if let Some(typeNode) = typeNode {
let typeNode = typeNode
.attr("alt")
.ok_or("getTorrentInfo:can find alt for fx")?;
match typeNode {
"FREE" => (1.0, 0.0),
"2X Free" => (2.0, 0.0),
"30%" => (1.0, 0.3),
"2X 50%" => (2.0, 0.5),
"50%" => (1.0, 0.5),
"2X" => (2.0, 1.0),
"Promotion" => {
let mut iters = sink.find(Name("b"));
let f = toNumber(
&*iters
.next()
.ok_or("getTorrentInfo:can find promotion")?
.text(),
)?;
let s = toNumber(
&*iters
.next()
.ok_or("getTorrentInfo:can find promotion")?
.text(),
)?;
(f, s)
}
_ => (1.0, 1.0),
}
} else {
(1.0, 1.0)
};
let s = U2client::reduceToText(&body, "基本信息")?;
let size = U2client::matchRegex(&s, "大小:[' ']*([0-9.' ']+[TGMK]iB)")?;
let number = toNumber(&*size)?;
let GbSize = match size
.chars()
.nth(size.len() - 3)
.ok_or("getTorrentInfo:bad torrent size")?
{
'T' => number * 1024.0,
'G' => number,
'M' => number / 1024.0,
_ => number / 1024.0 / 1024.0,
};
let s = U2client::reduceToText(&body, "同伴[查看列表][隐藏列表]")?;
let seeder = U2client::matchRegex(&s, "([0-9]+)[' ']*个做种者")?.parse::<i32>()?;
let leecher = U2client::matchRegex(&s, "([0-9]+)[' ']*个下载者")?.parse::<i32>()?;
let s = U2client::reduceToText(&body, "活力度")?;
let avgProgress = U2client::matchRegex(&s, "平均进度:[' ']*[(]([0-9]+%)[)]")
.unwrap_or_else(|_| String::from("100%"));
let avgProgress = toNumber(&avgProgress)? / 100.0;
let s = U2client::reduceToText(&body, "种子信息")?;
let Hash = U2client::matchRegex(&s, "种子散列值:[' ']*([0-9a-z]*)[' ']*")?;
Ok(TorrentInfo {
GbSize,
uploadFX,
downloadFX,
seeder,
leecher,
avgProgress,
Hash,
})
}
async fn get<T>(&self, url: T) -> Result<String>
where
T: IntoUrl,
{
let ret = self.container.get(url).send().await?;
if ret.status().as_u16() == 200 {
Ok(ret.text().await?)
} else {
Err(ret.text().await?.into())
}
}
fn matchRegex(src: &str, reg: &str) -> Result<String> {
Ok(Regex::new(reg)?
.captures_iter(src)
.next()
.ok_or("matchRegex:regex match failed")?
.get(1)
.ok_or("matchRegex:regex match failed")?
.as_str()
.to_string())
}
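// matchRegex returns capture group 1 of the first match. A usage sketch
// against the id pattern getTorrent relies on (assumes the crate's Result
// alias is in scope):
fn demo() -> Result<()> {
    let url = "https://u2.dmhy.org/download.php?id=12345&passkey=abc";
    let uid = U2client::matchRegex(url, "id=([0-9]+)")?;
    assert_eq!(uid, "12345");
    Ok(())
}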
fn reduceToText(mp: &HashMap<String, String>, idx: &str) -> Result<String> {
let str = mp.get(idx).ok_or("reduceToText:broken html")?.as_str();
let ret = Document::from(str)
.find(select::predicate::Any)
.next()
.ok_or("reduceToText:can not find Any Node")?
.text();
Ok(Regex::new("([\u{00ad}\u{00a0}])")?
.replace_all(&*ret, "")
.to_string())
}
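// The character class above strips U+00AD (soft hyphen) and U+00A0
// (no-break space), which the scraped pages use purely for layout.
// A quick check of that behaviour:
fn main() -> Result<(), regex::Error> {
    let re = regex::Regex::new("([\u{00ad}\u{00a0}])")?;
    assert_eq!(re.replace_all("1\u{00a0}024", ""), "1024");
    Ok(())
}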
fn parseHtml(context: &str, timesOfReduce: i32) -> Result<HashMap<String, String>> {
let doc = Document::from(context);
let mut outer = doc
.find(Nam
|
e("td"))
|
identifier_name
|
|
client.rs
|
.get("https://u2.dmhy.org/index.php")
.send()
.await?;
if x.url().path() == "/index.php" {
let context = x.text().await?;
let uid = Document::from(context.as_str())
.find(Name("a"))
.filter(|x| match x.attr("class") {
Some(x) => x.split_at(x.len() - 5).1 == "_Name",
_ => false,
})
.filter_map(|n| n.attr("href"))
.map(|x| x.to_string())
.next()
.ok_or("get uid failed")?
.split('=')
.last()
.ok_or("get uid failed")?
.to_string();
let tempSpace = format!("{}/temp", workRoot);
if !Path::new(&tempSpace).exists() {
std::fs::create_dir(&tempSpace)?;
}
let workSpace = format!("{}/work", workRoot);
if !Path::new(&workSpace).exists() {
std::fs::create_dir(&workSpace)?;
}
let basic_auth = BasicAuth {
user: RpcUsername.to_string(),
password: RpcPassword.to_string(),
};
let res = container
.post("https://u2.dmhy.org/getrss.php")
.form(&[
("inclbookmarked", 0),
("inclautochecked", 1),
("trackerssl", 1),
("showrows", 10),
("search_mode", 1),
])
.send()
.await?
.text()
.await?;
let res = Document::from(res.as_str())
.find(Name("a"))
.find(|x| {
x.attr("class") == Some("faqlink")
&& x.attr("rel") == Some("nofollow noopener noreferer")
})
.ok_or("getrss:can not find rss link node")?
.text();
let passkey = U2client::matchRegex(&res, "passkey=([0-9a-z]*)")?;
Ok(U2client {
uid,
passkey,
container,
torrentClient: TransClient::with_auth(&RpcURL, basic_auth),
tempSpace,
workSpace,
})
} else {
Err("illegal cookie".into())
}
}
pub async fn removeTorrent(&self, id: String) -> Result<()> {
let _ = self
.torrentClient
.torrent_remove(vec![Id::Hash(id)], true)
.await?;
Ok(())
}
pub async fn addTorrent(&self, url: &str) -> Result<()> {
let s = self.container.get(url).send().await?;
let contentDisposition = s
.headers()
.get("content-disposition")
.ok_or("addTorrent:can not find content-disposition header")?
.to_str()?;
let filename = U2client::matchRegex(contentDisposition, "filename=%5BU2%5D.(.+)")?;
let to = format!("{}/{}", self.tempSpace, filename);
let toPath = Path::new(&to);
let content = s.bytes().await?;
if toPath.exists() {
std::fs::remove_file(&toPath)?;
}
let mut file = std::fs::File::create(&toPath)?;
file.write_all(&*content)?;
let add: TorrentAddArgs = TorrentAddArgs {
filename: Some(to),
download_dir: Some(self.workSpace.clone()),
..TorrentAddArgs::default()
};
let _ = self.torrentClient.torrent_add(add).await?;
Ok(())
}
pub async fn getTransmissionSession(&self) -> Result<SessionGet> {
Ok(self.torrentClient.session_get().await?.arguments)
}
pub async fn performActionOnTorrent(&self, id: String, op: TorrentAction) -> Result<()> {
let _ = self
.torrentClient
.torrent_action(op, vec![Id::Hash(id)])
.await?;
Ok(())
}
pub async fn getWorkingTorrent(&self) -> Result<Torrents<Torrent>> {
Ok(self.torrentClient.torrent_get(None, None).await?.arguments)
}
pub async fn getStats(&self) -> Result<SessionStats> {
Ok(self.torrentClient.session_stats().await?.arguments)
}
pub async fn getFreeSpace(&self, d: String) -> Result<FreeSpace> {
Ok(self.torrentClient.free_space(d).await?.arguments)
}
pub async fn getDownloadList(&self) -> Result<Vec<RssInfo>> {
self.getTorrent().await
}
pub async fn getRemove(&self) -> Result<Vec<Torrent>> {
let mut torrent = self.getWorkingTorrent().await?;
torrent.torrents.sort_by_key(|x| {
(
x.peers_getting_from_us.unwrap_or(0),
x.added_date.unwrap_or(0),
)
});
Ok(torrent.torrents.into_iter().take(5).collect())
}
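// getRemove leans on lexicographic tuple ordering: the fewest peers
// currently downloading from us sort first, with ties broken by the
// oldest added_date, so take(5) yields the five least useful torrents.
// A small illustration of that key:
fn main() {
    let mut v = vec![(3, 100), (0, 50), (0, 20)]; // (peers, added_date)
    v.sort_by_key(|&pair| pair); // tuples compare element by element
    assert_eq!(v, vec![(0, 20), (0, 50), (3, 100)]);
}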
pub async fn getUserInfo(&self) -> Result<UserInfo> {
let context = self
.get(format!(
"https://u2.dmhy.org/userdetails.php?id={}",
self.uid
))
.await?;
let username = Document::from(context.as_str())
.find(Name("a"))
.find(|x| match x.attr("class") {
Some(x) => x.split_at(x.len() - 5).1 == "_Name",
_ => false,
})
.ok_or("getUserInfo:can not find username node")?
.text();
let body: HashMap<String, String> = U2client::parseHtml(&context, 2)?;
let t = U2client::reduceToText(&body, "BTๆถ้ด")?;
let timeRate = U2client::matchRegex(&t, "ๅ็ง/ไธ่ฝฝๆถ้ดๆฏ็:[' ']*([0-9.]+)")?;
let uploadTime = U2client::matchRegex(&t, "ๅ็งๆถ้ด:[' ']*([ๅคฉ0-9:' ']+[0-9])")?;
let downloadTime = U2client::matchRegex(&t, "ไธ่ฝฝๆถ้ด:[' ']*([ๅคฉ0-9:' ']+[0-9])")?;
let t = U2client::reduceToText(&body, "ไผ ่พ[ๅๅฒ]")?;
let shareRate = U2client::matchRegex(&t, "ๅไบซ็:[' ']*([0-9.]+)")?;
let upload = U2client::matchRegex(&t, "ไธไผ ้:[' ']*([0-9.' ']+[TGMK]iB)")?;
let download = U2client::matchRegex(&t, "ไธ่ฝฝ้:[' ']*([0-9.' ']+[TGMK]iB)")?;
let actualUpload = U2client::matchRegex(&t, "ๅฎ้
ไธไผ :[' ']*([0-9.' ']+[TGMK]iB)")?;
let actualDownload = U2client::matchRegex(&t, "ๅฎ้
ไธ่ฝฝ:[' ']*([0-9.' ']+[TGMK]iB)")?;
let t = U2client::reduceToText(&body, "UCoin[่ฏฆๆ
]")?;
let coin = U2client::matchRegex(&t, "[(]([0-9.,]+)[)]")?;
Ok(UserInfo {
username,
download,
upload,
shareRate,
actualDownload,
actualUpload,
coin,
downloadTime,
uploadTime,
timeRate,
})
}
/// 2 => Free
/// 3 => 2x
/// 4 => 2xFree
/// 5 => 50%off
/// 6 => 2x50%off
/// 7 => 30%off
pub async fn applyMagic(&self, uid: &str, time: i32, magic: i32) -> Result<()> {
let time = time.max(24); // clamp to the 24-hour minimum duration
let url = format!(
"https://u2.dmhy.org/promotion.php?action=magic&torrent={}",
uid
);
let post = [
("action", "magic".to_string()),
("torrent", uid.to_string()),
("user", "SELF".to_string()),
("hours", time.to_string()),
("promotion", magic.to_string()),
];
let res = self.container.post(&url).form(&post).send().await?;
if res.status().as_u16() == 200 {
Ok(())
} else {
Err("apply magic failed:network failed".into())
}
}
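// A hypothetical call tying the table above to the arguments: promotion
// code 6 requests 2x upload and 50% download, and the hours argument is
// floored to 24 by the max() above (Result alias assumed in scope):
async fn demo(client: &U2client) -> Result<()> {
    client.applyMagic("12345", 24, 6).await
}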
pub async fn getTorrent(&self) -> Result<Vec<RssInfo>> {
let url = format!(
"https://u2.dmhy.org/torrentrss.php?rows=50&trackerssl=1&passkey={}",
self.passkey
);
let content = self.get(url).await?.into_bytes();
let channel = Channel::read_from(&content[..])?;
// stable Rust: a plain closure returning an async block (async closures are unstable)
let res = channel.items.iter().map(|x| async move {
let title =
|
let x = container
|
random_line_split
|
|
main.py
|
.cached_property
def auth(self):
"""Shortcut to access the auth instance as a property."""
return auth.get_auth()
@webapp2.cached_property
def user_info(self):
"""Shortcut to access a subset of the user attributes that are stored
in the session.
The list of attributes to store in the session is specified in
config['webapp2_extras.auth']['user_attributes'].
:returns
A dictionary with most user information
"""
return self.auth.get_user_by_session()
@webapp2.cached_property
def user(self):
"""Shortcut to access the current logged in user.
Unlike user_info, it fetches information from the persistence layer and
returns an instance of the underlying model.
:returns
The instance of the user model associated to the logged in user.
"""
u = self.user_info
return self.user_model.get_by_id(u['user_id']) if u else None
@webapp2.cached_property
def user_model(self):
"""Returns the implementation of the user model.
It is consistent with config['webapp2_extras.auth']['user_model'], if set.
"""
return self.auth.store.user_model
@webapp2.cached_property
def session(self):
"""Shortcut to access the current session."""
return self.session_store.get_session(backend="datastore")
def render_template(self, view_filename, params=None):
if not params:
params = {}
user = self.user_info
params['user'] = user
path = os.path.join(os.path.dirname(__file__), 'views', view_filename)
self.response.out.write(template.render(path, params))
def send_json(self, message):
self.response.headers['Content-Type'] = 'application/json'
self.response.out.write(json.dumps(message))
def display_message(self, message):
"""Utility function to display a template with a simple message."""
params = {
'message': message
}
self.render_template('message.html', params)
# this is needed for webapp2 sessions to work
def dispatch(self):
# Get a session store for this request.
self.session_store = sessions.get_store(request=self.request)
try:
# Dispatch the request.
webapp2.RequestHandler.dispatch(self)
finally:
# Save all sessions.
self.session_store.save_sessions(self.response)
class MainHandler(BaseHandler):
def get(self):
user = self.user
if not user:
self.render_template('about.html')
else:
params = {
'balance': user.balance,
}
self.render_template('home.html', params)
class AboutHandler(BaseHandler):
def get(self):
self.render_template('about.html')
class TrendingHandler(BaseHandler):
def get(self):
self.render_template('trending.html')
class TipHandler(BaseHandler):
@user_required
def get(self):
self._serve_page()
@user_required
def post(self):
failed = False
user = self.user
tipReceiver = self.request.get('tipReceiver')
tipReceiver = self.user_model.get_by_auth_id(tipReceiver)
amount = self.request.get('tip')
amount = float(amount)
|
self._serve_page(failed)
def _serve_page(self, failed=False):
params = {
'failed': failed
}
self.render_template('tip.html', params)
def serve_profile_page(self):
user = self.user
params = {
'auth_id': user.auth_ids[0],
'first_name': user.name,
'last_name': user.last_name,
'email_address': user.email_address,
'balance': user.balance,
}
self.render_template('profile.html', params)
class AddCreditsHandler(BaseHandler):
@user_required
def get(self):
self._serve_page()
@user_required
def post(self):
user = self.user
credits = self.request.get('credits')
credits = float(credits)
user.balance += credits
user.put()
# Use a redirect here instead
serve_profile_page(self)
def _serve_page(self):
user = self.user
params = {
}
self.render_template('add_credits.html', params)
class LogHandler(BaseHandler):
@user_required
def get(self):
user = self.user
keys = tip.TipTransactionLogShardConfig.all_keys(user)
logs = keys[0].get()
if logs:
message = { 'logs': logs.logs }
else:
message = None
self.send_json(message)
class ProfileHandler(BaseHandler):
@user_required
def get(self):
serve_profile_page(self)
class SignupHandler(BaseHandler):
def get(self):
self.render_template('signup.html')
def post(self):
user_name = self.request.get('username')
email = self.request.get('email')
name = self.request.get('name')
password = self.request.get('password')
last_name = self.request.get('lastname')
unique_properties = ['email_address']
user_data = self.user_model.create_user(user_name,
unique_properties,
email_address=email, name=name, password_raw=password,
last_name=last_name, balance=float(0), tip_log_count=0, verified=False)
if not user_data[0]: #user_data is a tuple
self.display_message('Unable to create user for email %s because of \
duplicate keys %s' % (user_name, user_data[1]))
return
user = user_data[1]
user_id = user.get_id()
token = self.user_model.create_signup_token(user_id)
verification_url = self.uri_for('verification', type='v', user_id=user_id,
signup_token=token, _full=True)
msg = 'Send an email to user in order to verify their address. \
They will be able to do so by visiting <a href="{url}">{url}</a>'
self.display_message(msg.format(url=verification_url))
class ForgotPasswordHandler(BaseHandler):
def get(self):
self._serve_page()
def post(self):
username = self.request.get('username')
user = self.user_model.get_by_auth_id(username)
if not user:
logging.info('Could not find any user entry for username %s', username)
self._serve_page(not_found=True)
return
user_id = user.get_id()
token = self.user_model.create_signup_token(user_id)
verification_url = self.uri_for('verification', type='p', user_id=user_id,
signup_token=token, _full=True)
msg = 'Send an email to user in order to reset their password. \
They will be able to do so by visiting <a href="{url}">{url}</a>'
self.display_message(msg.format(url=verification_url))
def _serve_page(self, not_found=False):
username = self.request.get('username')
params = {
'username': username,
'not_found': not_found
}
self.render_template('forgot.html', params)
class VerificationHandler(BaseHandler):
def get(self, *args, **kwargs):
user = None
user_id = kwargs['user_id']
signup_token = kwargs['signup_token']
verification_type = kwargs['type']
# it should be something more concise like
# self.auth.get_user_by_token(user_id, signup_token)
# unfortunately the auth interface does not (yet) allow to manipulate
# signup tokens concisely
user, ts = self.user_model.get_by_auth_token(int(user_id), signup_token,
'signup')
if not user:
logging.info('Could not find any user with id "%s" signup token "%s"',
user_id, signup_token)
self.abort(404)
# store user data in the session
self.auth.set_session(self.auth.store.user_to_dict(user), remember=True)
if verification_type == 'v':
# remove signup token, we don't want users to come back with an old link
self.user_model.delete_signup_token(user.get_id(), signup_token)
if not user.verified:
user.verified = True
user.put()
self.display_message('User email address has been verified.')
return
elif verification_type == 'p':
# supply user to the page
params = {
'user': user,
'token': signup_token
}
self.render_template('resetpassword.html', params)
else:
logging.info('verification type not supported')
self.abort(404)
class SetPasswordHandler(BaseHandler):
@user_required
def post(self):
password = self.request.get('password')
old_token = self.request.get('t')
if not password or password != self.request.get('confirm_password'):
self.display_message('passwords do not match')
return
user = self.user
user.set_password(password)
user.put()
# remove signup token, we don't want users to come back with an old link
self.user_model.delete_signup_token(user.get_id(), old_token)
self.display_message('Password updated')
class LoginHandler(BaseHandler):
def get(self):
self._serve_page()
def post(self):
username = self.request.get('username')
password = self.request.get('password')
try:
u = self.auth.get_user_by_password(username, password, remember=True,
|
try:
tip.tip(user, tipReceiver, amount)
except Exception:
failed = True
|
random_line_split
|
main.py
|
.cached_property
def auth(self):
"""Shortcut to access the auth instance as a property."""
return auth.get_auth()
@webapp2.cached_property
def user_info(self):
"""Shortcut to access a subset of the user attributes that are stored
in the session.
The list of attributes to store in the session is specified in
config['webapp2_extras.auth']['user_attributes'].
:returns
A dictionary with most user information
"""
return self.auth.get_user_by_session()
@webapp2.cached_property
def user(self):
"""Shortcut to access the current logged in user.
Unlike user_info, it fetches information from the persistence layer and
returns an instance of the underlying model.
:returns
The instance of the user model associated to the logged in user.
"""
u = self.user_info
return self.user_model.get_by_id(u['user_id']) if u else None
@webapp2.cached_property
def user_model(self):
"""Returns the implementation of the user model.
It is consistent with config['webapp2_extras.auth']['user_model'], if set.
"""
return self.auth.store.user_model
@webapp2.cached_property
def session(self):
"""Shortcut to access the current session."""
return self.session_store.get_session(backend="datastore")
def render_template(self, view_filename, params=None):
if not params:
params = {}
user = self.user_info
params['user'] = user
path = os.path.join(os.path.dirname(__file__), 'views', view_filename)
self.response.out.write(template.render(path, params))
def send_json(self, message):
self.response.headers['Content-Type'] = 'application/json'
self.response.out.write(json.dumps(message))
def display_message(self, message):
"""Utility function to display a template with a simple message."""
params = {
'message': message
}
self.render_template('message.html', params)
# this is needed for webapp2 sessions to work
def dispatch(self):
# Get a session store for this request.
self.session_store = sessions.get_store(request=self.request)
try:
# Dispatch the request.
webapp2.RequestHandler.dispatch(self)
finally:
# Save all sessions.
self.session_store.save_sessions(self.response)
class MainHandler(BaseHandler):
def get(self):
user = self.user
if not user:
self.render_template('about.html')
else:
params = {
'balance': user.balance,
}
self.render_template('home.html', params)
class AboutHandler(BaseHandler):
def get(self):
self.render_template('about.html')
class TrendingHandler(BaseHandler):
def get(self):
self.render_template('trending.html')
class TipHandler(BaseHandler):
@user_required
def get(self):
self._serve_page()
@user_required
def post(self):
failed = False
user = self.user
tipReceiver = self.request.get('tipReceiver')
tipReceiver = self.user_model.get_by_auth_id(tipReceiver)
amount = self.request.get('tip')
amount = float(amount)
try:
tip.tip(user, tipReceiver, amount)
except Exception:
failed = True
self._serve_page(failed)
def _serve_page(self, failed=False):
params = {
'failed': failed
}
self.render_template('tip.html', params)
def serve_profile_page(self):
user = self.user
params = {
'auth_id': user.auth_ids[0],
'first_name': user.name,
'last_name': user.last_name,
'email_address': user.email_address,
'balance': user.balance,
}
self.render_template('profile.html', params)
class AddCreditsHandler(BaseHandler):
@user_required
def get(self):
self._serve_page()
@user_required
def post(self):
user = self.user
credits = self.request.get('credits')
credits = float(credits)
user.balance += credits
user.put()
# Use a redirect here instead
serve_profile_page(self)
def _serve_page(self):
user = self.user
params = {
}
self.render_template('add_credits.html', params)
class LogHandler(BaseHandler):
@user_required
def get(self):
user = self.user
keys = tip.TipTransactionLogShardConfig.all_keys(user)
logs = keys[0].get()
if logs:
message = { 'logs': logs.logs }
else:
message = None
self.send_json(message)
class ProfileHandler(BaseHandler):
@user_required
def get(self):
serve_profile_page(self)
class SignupHandler(BaseHandler):
def get(self):
self.render_template('signup.html')
def post(self):
user_name = self.request.get('username')
email = self.request.get('email')
name = self.request.get('name')
password = self.request.get('password')
last_name = self.request.get('lastname')
unique_properties = ['email_address']
user_data = self.user_model.create_user(user_name,
unique_properties,
email_address=email, name=name, password_raw=password,
last_name=last_name, balance=float(0), tip_log_count=0, verified=False)
if not user_data[0]: #user_data is a tuple
self.display_message('Unable to create user for email %s because of \
duplicate keys %s' % (user_name, user_data[1]))
return
user = user_data[1]
user_id = user.get_id()
token = self.user_model.create_signup_token(user_id)
verification_url = self.uri_for('verification', type='v', user_id=user_id,
signup_token=token, _full=True)
msg = 'Send an email to user in order to verify their address. \
They will be able to do so by visiting <a href="{url}">{url}</a>'
self.display_message(msg.format(url=verification_url))
class ForgotPasswordHandler(BaseHandler):
def get(self):
self._serve_page()
def post(self):
username = self.request.get('username')
user = self.user_model.get_by_auth_id(username)
if not user:
|
user_id = user.get_id()
token = self.user_model.create_signup_token(user_id)
verification_url = self.uri_for('verification', type='p', user_id=user_id,
signup_token=token, _full=True)
msg = 'Send an email to user in order to reset their password. \
They will be able to do so by visiting <a href="{url}">{url}</a>'
self.display_message(msg.format(url=verification_url))
def _serve_page(self, not_found=False):
username = self.request.get('username')
params = {
'username': username,
'not_found': not_found
}
self.render_template('forgot.html', params)
class VerificationHandler(BaseHandler):
def get(self, *args, **kwargs):
user = None
user_id = kwargs['user_id']
signup_token = kwargs['signup_token']
verification_type = kwargs['type']
# it should be something more concise like
# self.auth.get_user_by_token(user_id, signup_token)
# unfortunately the auth interface does not (yet) allow to manipulate
# signup tokens concisely
user, ts = self.user_model.get_by_auth_token(int(user_id), signup_token,
'signup')
if not user:
logging.info('Could not find any user with id "%s" signup token "%s"',
user_id, signup_token)
self.abort(404)
# store user data in the session
self.auth.set_session(self.auth.store.user_to_dict(user), remember=True)
if verification_type == 'v':
# remove signup token, we don't want users to come back with an old link
self.user_model.delete_signup_token(user.get_id(), signup_token)
if not user.verified:
user.verified = True
user.put()
self.display_message('User email address has been verified.')
return
elif verification_type == 'p':
# supply user to the page
params = {
'user': user,
'token': signup_token
}
self.render_template('resetpassword.html', params)
else:
logging.info('verification type not supported')
self.abort(404)
class SetPasswordHandler(BaseHandler):
@user_required
def post(self):
password = self.request.get('password')
old_token = self.request.get('t')
if not password or password != self.request.get('confirm_password'):
self.display_message('passwords do not match')
return
user = self.user
user.set_password(password)
user.put()
# remove signup token, we don't want users to come back with an old link
self.user_model.delete_signup_token(user.get_id(), old_token)
self.display_message('Password updated')
class LoginHandler(BaseHandler):
def get(self):
self._serve_page()
def post(self):
username = self.request.get('username')
password = self.request.get('password')
try:
u = self.auth.get_user_by_password(username, password, remember=True,
|
logging.info('Could not find any user entry for username %s', username)
self._serve_page(not_found=True)
return
|
conditional_block
|
main.py
|
.cached_property
def auth(self):
"""Shortcut to access the auth instance as a property."""
return auth.get_auth()
@webapp2.cached_property
def user_info(self):
"""Shortcut to access a subset of the user attributes that are stored
in the session.
The list of attributes to store in the session is specified in
config['webapp2_extras.auth']['user_attributes'].
:returns
A dictionary with most user information
"""
return self.auth.get_user_by_session()
@webapp2.cached_property
def user(self):
"""Shortcut to access the current logged in user.
Unlike user_info, it fetches information from the persistence layer and
returns an instance of the underlying model.
:returns
The instance of the user model associated to the logged in user.
"""
u = self.user_info
return self.user_model.get_by_id(u['user_id']) if u else None
@webapp2.cached_property
def user_model(self):
"""Returns the implementation of the user model.
It is consistent with config['webapp2_extras.auth']['user_model'], if set.
"""
return self.auth.store.user_model
@webapp2.cached_property
def session(self):
"""Shortcut to access the current session."""
return self.session_store.get_session(backend="datastore")
def render_template(self, view_filename, params=None):
if not params:
params = {}
user = self.user_info
params['user'] = user
path = os.path.join(os.path.dirname(__file__), 'views', view_filename)
self.response.out.write(template.render(path, params))
def send_json(self, message):
self.response.headers['Content-Type'] = 'application/json'
self.response.out.write(json.dumps(message))
def display_message(self, message):
"""Utility function to display a template with a simple message."""
params = {
'message': message
}
self.render_template('message.html', params)
# this is needed for webapp2 sessions to work
def dispatch(self):
# Get a session store for this request.
self.session_store = sessions.get_store(request=self.request)
try:
# Dispatch the request.
webapp2.RequestHandler.dispatch(self)
finally:
# Save all sessions.
self.session_store.save_sessions(self.response)
class MainHandler(BaseHandler):
def get(self):
user = self.user
if not user:
self.render_template('about.html')
else:
params = {
'balance': user.balance,
}
self.render_template('home.html', params)
class AboutHandler(BaseHandler):
def get(self):
self.render_template('about.html')
class TrendingHandler(BaseHandler):
def get(self):
self.render_template('trending.html')
class TipHandler(BaseHandler):
@user_required
def get(self):
self._serve_page()
@user_required
def post(self):
failed = False
user = self.user
tipReceiver = self.request.get('tipReceiver')
tipReceiver = self.user_model.get_by_auth_id(tipReceiver)
amount = self.request.get('tip')
amount = float(amount)
try:
tip.tip(user, tipReceiver, amount)
except Exception:
failed = True
self._serve_page(failed)
def _serve_page(self, failed=False):
params = {
'failed': failed
}
self.render_template('tip.html', params)
def serve_profile_page(self):
user = self.user
params = {
'auth_id': user.auth_ids[0],
'first_name': user.name,
'last_name': user.last_name,
'email_address': user.email_address,
'balance': user.balance,
}
self.render_template('profile.html', params)
class AddCreditsHandler(BaseHandler):
@user_required
def get(self):
self._serve_page()
@user_required
def post(self):
user = self.user
credits = self.request.get('credits')
credits = float(credits)
user.balance += credits
user.put()
# Use a redirect here instead
serve_profile_page(self)
def _serve_page(self):
user = self.user
params = {
}
self.render_template('add_credits.html', params)
class LogHandler(BaseHandler):
@user_required
def get(self):
user = self.user
keys = tip.TipTransactionLogShardConfig.all_keys(user)
logs = keys[0].get()
if logs:
message = { 'logs': logs.logs }
else:
message = None
self.send_json(message)
class ProfileHandler(BaseHandler):
@user_required
def get(self):
serve_profile_page(self)
class SignupHandler(BaseHandler):
def get(self):
self.render_template('signup.html')
def post(self):
user_name = self.request.get('username')
email = self.request.get('email')
name = self.request.get('name')
password = self.request.get('password')
last_name = self.request.get('lastname')
unique_properties = ['email_address']
user_data = self.user_model.create_user(user_name,
unique_properties,
email_address=email, name=name, password_raw=password,
last_name=last_name, balance=float(0), tip_log_count=0, verified=False)
if not user_data[0]: #user_data is a tuple
self.display_message('Unable to create user for email %s because of \
duplicate keys %s' % (user_name, user_data[1]))
return
user = user_data[1]
user_id = user.get_id()
token = self.user_model.create_signup_token(user_id)
verification_url = self.uri_for('verification', type='v', user_id=user_id,
signup_token=token, _full=True)
msg = 'Send an email to user in order to verify their address. \
They will be able to do so by visiting <a href="{url}">{url}</a>'
self.display_message(msg.format(url=verification_url))
class ForgotPasswordHandler(BaseHandler):
def get(self):
self._serve_page()
def post(self):
username = self.request.get('username')
user = self.user_model.get_by_auth_id(username)
if not user:
logging.info('Could not find any user entry for username %s', username)
self._serve_page(not_found=True)
return
user_id = user.get_id()
token = self.user_model.create_signup_token(user_id)
verification_url = self.uri_for('verification', type='p', user_id=user_id,
signup_token=token, _full=True)
msg = 'Send an email to user in order to reset their password. \
They will be able to do so by visiting <a href="{url}">{url}</a>'
self.display_message(msg.format(url=verification_url))
def _serve_page(self, not_found=False):
username = self.request.get('username')
params = {
'username': username,
'not_found': not_found
}
self.render_template('forgot.html', params)
class VerificationHandler(BaseHandler):
def get(self, *args, **kwargs):
user = None
user_id = kwargs['user_id']
signup_token = kwargs['signup_token']
verification_type = kwargs['type']
# it should be something more concise like
# self.auth.get_user_by_token(user_id, signup_token)
# unfortunately the auth interface does not (yet) allow to manipulate
# signup tokens concisely
user, ts = self.user_model.get_by_auth_token(int(user_id), signup_token,
'signup')
if not user:
logging.info('Could not find any user with id "%s" signup token "%s"',
user_id, signup_token)
self.abort(404)
# store user data in the session
self.auth.set_session(self.auth.store.user_to_dict(user), remember=True)
if verification_type == 'v':
# remove signup token, we don't want users to come back with an old link
self.user_model.delete_signup_token(user.get_id(), signup_token)
if not user.verified:
user.verified = True
user.put()
self.display_message('User email address has been verified.')
return
elif verification_type == 'p':
# supply user to the page
params = {
'user': user,
'token': signup_token
}
self.render_template('resetpassword.html', params)
else:
logging.info('verification type not supported')
self.abort(404)
class SetPasswordHandler(BaseHandler):
@user_required
def
|
(self):
password = self.request.get('password')
old_token = self.request.get('t')
if not password or password != self.request.get('confirm_password'):
self.display_message('passwords do not match')
return
user = self.user
user.set_password(password)
user.put()
# remove signup token, we don't want users to come back with an old link
self.user_model.delete_signup_token(user.get_id(), old_token)
self.display_message('Password updated')
class LoginHandler(BaseHandler):
def get(self):
self._serve_page()
def post(self):
username = self.request.get('username')
password = self.request.get('password')
try:
u = self.auth.get_user_by_password(username, password, remember=True,
|
post
|
identifier_name
|
main.py
|
.cached_property
def auth(self):
"""Shortcut to access the auth instance as a property."""
return auth.get_auth()
@webapp2.cached_property
def user_info(self):
"""Shortcut to access a subset of the user attributes that are stored
in the session.
The list of attributes to store in the session is specified in
config['webapp2_extras.auth']['user_attributes'].
:returns
A dictionary with most user information
"""
return self.auth.get_user_by_session()
@webapp2.cached_property
def user(self):
"""Shortcut to access the current logged in user.
Unlike user_info, it fetches information from the persistence layer and
returns an instance of the underlying model.
:returns
The instance of the user model associated to the logged in user.
"""
u = self.user_info
return self.user_model.get_by_id(u['user_id']) if u else None
@webapp2.cached_property
def user_model(self):
"""Returns the implementation of the user model.
It is consistent with config['webapp2_extras.auth']['user_model'], if set.
"""
return self.auth.store.user_model
@webapp2.cached_property
def session(self):
"""Shortcut to access the current session."""
return self.session_store.get_session(backend="datastore")
def render_template(self, view_filename, params=None):
if not params:
params = {}
user = self.user_info
params['user'] = user
path = os.path.join(os.path.dirname(__file__), 'views', view_filename)
self.response.out.write(template.render(path, params))
def send_json(self, message):
self.response.headers['Content-Type'] = 'application/json'
self.response.out.write(json.dumps(message))
def display_message(self, message):
"""Utility function to display a template with a simple message."""
params = {
'message': message
}
self.render_template('message.html', params)
# this is needed for webapp2 sessions to work
def dispatch(self):
# Get a session store for this request.
self.session_store = sessions.get_store(request=self.request)
try:
# Dispatch the request.
webapp2.RequestHandler.dispatch(self)
finally:
# Save all sessions.
self.session_store.save_sessions(self.response)
class MainHandler(BaseHandler):
def get(self):
user = self.user
if not user:
self.render_template('about.html')
else:
params = {
'balance': user.balance,
}
self.render_template('home.html', params)
class AboutHandler(BaseHandler):
def get(self):
self.render_template('about.html')
class TrendingHandler(BaseHandler):
def get(self):
self.render_template('trending.html')
class TipHandler(BaseHandler):
@user_required
def get(self):
self._serve_page()
@user_required
def post(self):
failed = False
user = self.user
tipReceiver = self.request.get('tipReceiver')
tipReceiver = self.user_model.get_by_auth_id(tipReceiver)
amount = self.request.get('tip')
amount = float(amount)
try:
tip.tip(user, tipReceiver, amount)
except Exception:
failed = True
self._serve_page(failed)
def _serve_page(self, failed=False):
params = {
'failed': failed
}
self.render_template('tip.html', params)
def serve_profile_page(self):
user = self.user
params = {
'auth_id': user.auth_ids[0],
'first_name': user.name,
'last_name': user.last_name,
'email_address': user.email_address,
'balance': user.balance,
}
self.render_template('profile.html', params)
class AddCreditsHandler(BaseHandler):
@user_required
def get(self):
self._serve_page()
@user_required
def post(self):
user = self.user
credits = self.request.get('credits')
credits = float(credits)
user.balance += credits
user.put()
# Use a redirect here instead
serve_profile_page(self)
def _serve_page(self):
user = self.user
params = {
}
self.render_template('add_credits.html', params)
class LogHandler(BaseHandler):
@user_required
def get(self):
user = self.user
keys = tip.TipTransactionLogShardConfig.all_keys(user)
logs = keys[0].get()
if logs:
message = { 'logs': logs.logs }
else:
message = None
self.send_json(message)
class ProfileHandler(BaseHandler):
@user_required
def get(self):
serve_profile_page(self)
class SignupHandler(BaseHandler):
def get(self):
self.render_template('signup.html')
def post(self):
user_name = self.request.get('username')
email = self.request.get('email')
name = self.request.get('name')
password = self.request.get('password')
last_name = self.request.get('lastname')
unique_properties = ['email_address']
user_data = self.user_model.create_user(user_name,
unique_properties,
email_address=email, name=name, password_raw=password,
last_name=last_name, balance=float(0), tip_log_count=0, verified=False)
if not user_data[0]: #user_data is a tuple
self.display_message('Unable to create user for email %s because of \
duplicate keys %s' % (user_name, user_data[1]))
return
user = user_data[1]
user_id = user.get_id()
token = self.user_model.create_signup_token(user_id)
verification_url = self.uri_for('verification', type='v', user_id=user_id,
signup_token=token, _full=True)
msg = 'Send an email to user in order to verify their address. \
They will be able to do so by visiting <a href="{url}">{url}</a>'
self.display_message(msg.format(url=verification_url))
class ForgotPasswordHandler(BaseHandler):
|
self.display_message(msg.format(url=verification_url))
def _serve_page(self, not_found=False):
username = self.request.get('username')
params = {
'username': username,
'not_found': not_found
}
self.render_template('forgot.html', params)
class VerificationHandler(BaseHandler):
def get(self, *args, **kwargs):
user = None
user_id = kwargs['user_id']
signup_token = kwargs['signup_token']
verification_type = kwargs['type']
# it should be something more concise like
# self.auth.get_user_by_token(user_id, signup_token)
# unfortunately the auth interface does not (yet) allow to manipulate
# signup tokens concisely
user, ts = self.user_model.get_by_auth_token(int(user_id), signup_token,
'signup')
if not user:
logging.info('Could not find any user with id "%s" signup token "%s"',
user_id, signup_token)
self.abort(404)
# store user data in the session
self.auth.set_session(self.auth.store.user_to_dict(user), remember=True)
if verification_type == 'v':
# remove signup token, we don't want users to come back with an old link
self.user_model.delete_signup_token(user.get_id(), signup_token)
if not user.verified:
user.verified = True
user.put()
self.display_message('User email address has been verified.')
return
elif verification_type == 'p':
# supply user to the page
params = {
'user': user,
'token': signup_token
}
self.render_template('resetpassword.html', params)
else:
logging.info('verification type not supported')
self.abort(404)
class SetPasswordHandler(BaseHandler):
@user_required
def post(self):
password = self.request.get('password')
old_token = self.request.get('t')
if not password or password != self.request.get('confirm_password'):
self.display_message('passwords do not match')
return
user = self.user
user.set_password(password)
user.put()
# remove signup token, we don't want users to come back with an old link
self.user_model.delete_signup_token(user.get_id(), old_token)
self.display_message('Password updated')
class LoginHandler(BaseHandler):
def get(self):
self._serve_page()
def post(self):
username = self.request.get('username')
password = self.request.get('password')
try:
u = self.auth.get_user_by_password(username, password, remember=True,
|
def get(self):
self._serve_page()
def post(self):
username = self.request.get('username')
user = self.user_model.get_by_auth_id(username)
if not user:
logging.info('Could not find any user entry for username %s', username)
self._serve_page(not_found=True)
return
user_id = user.get_id()
token = self.user_model.create_signup_token(user_id)
verification_url = self.uri_for('verification', type='p', user_id=user_id,
signup_token=token, _full=True)
msg = 'Send an email to user in order to reset their password. \
They will be able to do so by visiting <a href="{url}">{url}</a>'
|
identifier_body
|
tiles.rs
|
ylen: PosUnit,
}
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
pub struct MapChunk {
pub tiles: Tiles,
pub pos: Pos,
pub xlen: PosUnit,
pub ylen: PosUnit,
pub zlen: PosUnit,
}
pub fn init_map(root: &Path) -> Map {
info!("Initializing map");
let test_path = root.join("static/inc/maps/smol_map_excel.sfm.csv");
let path_str = test_path
.to_str()
.expect("Unicode decode error");
// Load materials properties file
let materials = init_materials(root);
load_map(path_str, materials).expect("Could not load map")
}
impl Tile {
fn new(material: MaterialID, mode: Mode) -> Tile {
Tile {
material: material,
mode: mode,
marked: false,
}
}
}
impl Map {
#[allow(dead_code)]
pub fn print(&self) {
//Debug print method
//[debug] func
for z in 0..self.zlen {
for y in 0..self.ylen {
for x in 0..self.xlen {
match self.get_tile((x, y, z)) {
Some(tile) => print!("{0}", tile.material % 10),
None => print!(" "),
}
}
println!();
}
println!();
}
}
pub fn size(&self) -> Pos {
(self.xlen, self.ylen, self.zlen)
}
// Resize the map to the given dimensions, filling it with blank tiles
pub fn resize(&mut self, pos: Pos)
|
pub fn get_chunk(&self, pos: Pos, size: Pos) -> MapChunk {
let (x0, y0, z0) = pos;
let (xlen, ylen, zlen) = size;
let mut tiles = Tiles::new();
for x in x0..(x0 + xlen) {
for y in y0..(y0 + ylen) {
for z in z0..(z0 + zlen) {
let index = self.coords_to_index((x, y, z));
tiles.push(self.tiles[index]);
}
}
}
MapChunk {
tiles: tiles,
pos: pos,
xlen: xlen,
ylen: ylen,
zlen: zlen,
}
}
// TODO Add duplication factor
pub fn to_chunks(&self) -> Vec<MapChunk> {
let mut chunks = Vec::<MapChunk>::new();
let x_chunks = Map::get_num_chunks(self.xlen, CHUNK_TILES_X);
let y_chunks = Map::get_num_chunks(self.ylen, CHUNK_TILES_Y);
let z_chunks = Map::get_num_chunks(self.zlen, CHUNK_TILES_Z);
for dx in 0..x_chunks {
for dy in 0..y_chunks {
for dz in 0..z_chunks {
let x = dx * CHUNK_TILES_X;
let y = dy * CHUNK_TILES_Y;
let z = dz * CHUNK_TILES_Z;
let pos = (x, y, z);
let xlen = min(CHUNK_TILES_X, self.xlen - dx * CHUNK_TILES_X);
let ylen = min(CHUNK_TILES_Y, self.ylen - dy * CHUNK_TILES_Y);
let zlen = min(CHUNK_TILES_Z, self.zlen - dz * CHUNK_TILES_Z);
let size = (xlen, ylen, zlen);
chunks.push(self.get_chunk(pos, size))
}
}
}
chunks
}
fn get_num_chunks(map_len: PosUnit, chunk_len: PosUnit) -> PosUnit {
if map_len % chunk_len == 0 {
map_len / chunk_len
} else {
map_len / chunk_len + 1
}
}
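// get_num_chunks is a ceiling division; an equivalent one-expression
// form, assuming PosUnit is a primitive integer type:
fn num_chunks(map_len: i32, chunk_len: i32) -> i32 {
    (map_len + chunk_len - 1) / chunk_len
}

fn main() {
    assert_eq!(num_chunks(10, 4), 3); // chunks of 4, 4 and 2 tiles
    assert_eq!(num_chunks(8, 4), 2);
}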
pub fn apply_chunk(&mut self, chunk: &MapChunk) {
let (x0, y0, z0) = chunk.pos;
let mut chunk_i = 0;
for x in 0..chunk.xlen {
for y in 0..chunk.ylen {
for z in 0..chunk.zlen {
let mx = x + x0;
let my = (y + y0) * self.xlen;
let mz = (z + z0) * self.xlen * self.ylen;
let map_i = (mx + my + mz) as usize;
self.tiles[map_i] = chunk.tiles[chunk_i];
chunk_i += 1;
}
}
}
}
/// Tile accessor method
pub fn get_tile(&self, pos: Pos) -> Option<Tile> {
if self.in_bounds(pos) {
let index = self.coords_to_index(pos);
Some(self.tiles[index])
} else {
None
}
}
/// Perform some mutable operation on a tile
fn apply_tile_func<F>(&mut self, pos: Pos, func: F)
where F: Fn(&mut Tile) {
if self.in_bounds(pos) {
let index = self.coords_to_index(pos);
func(&mut self.tiles[index]);
}
}
fn in_bounds(&self, pos: Pos) -> bool {
let (x, y, z) = pos;
!(0 > x || 0 > y || 0 > z || x >= self.xlen || y >= self.ylen || z >= self.zlen)
}
fn coords_to_index(&self, pos: Pos) -> usize {
let (x, y, z) = pos;
(x + y * self.xlen + z * self.xlen * self.ylen) as usize
}
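// coords_to_index lays tiles out row-major: x varies fastest, then y,
// then z. A quick sanity check of the formula for a 4x3 slice:
fn main() {
    let (xlen, ylen) = (4, 3);
    let (x, y, z) = (1, 2, 1);
    assert_eq!(x + y * xlen + z * xlen * ylen, 21); // 1 + 8 + 12
}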
pub fn update_tile(&mut self, new_tile: Tile, pos: Pos) {
self.apply_tile_func(pos, |tile| {
tile.material = new_tile.material;
tile.marked = false;
});
}
pub fn dig(&mut self, pos: Pos) {
let alt = self.get_alt(pos);
self.apply_tile_func(pos, |tile| tile.material = alt);
}
pub fn mark(&mut self, pos: Pos) {
self.apply_tile_func(pos, |tile| tile.marked = true);
}
#[allow(dead_code)]
pub fn unmark(&mut self, pos: Pos) {
self.apply_tile_func(pos, |tile| tile.marked = false);
}
fn grab_material(&self, pos: Pos) -> Option<Material> {
if let Some(tile) = self.get_tile(pos) {
if let Some(material) = self.materials.get(&tile.material) {
Some(material.clone())
} else {
None
}
} else {
None
}
}
pub fn get_alt(&self, pos: Pos) -> MaterialID {
if let Some(material) = self.grab_material(pos) {
material.alt
} else {
0
}
}
pub fn diggable(&self, pos: Pos) -> bool {
if let Some(tile) = self.get_tile(pos) {
if let Some(material) = self.materials.get(&tile.material) {
material.diggable && !tile.marked
} else {
false
}
} else {
false
}
}
pub fn passable(&self, pos: Pos) -> bool {
if let Some(tile) = self.get_tile(pos) {
match tile.mode {
Mode::Block => false,
_ => true,
}
} else {
false
}
}
#[allow(dead_code)]
pub fn save(&self, path: &str) -> Result<(), Error> {
//Saves map as file. Currently unversioned, so take heed.
let f = File::create(&path)?;
let mut writer = BufWriter::new(&f);
writeln!(&mut writer, "{} {} {}", self.xlen, self.ylen, self.zlen)?;
for z in 0..self.zlen {
for y in 0..self.ylen {
for x in 0..self.xlen {
write!(&mut writer, "{} ",
self.get_tile((x, y, z)).expect("Malformed map").material)?;
}
writeln!(&mut writer)?;
}
writeln!(&mut writer)?;
}
Ok(())
}
}
impl MapSnapshot {
#[allow(dead_code)]
pub fn print(&self) {
//MapSnapshot debug
//[debug] func
for y in 0..self.ylen {
for x in 0..self.xlen {
let index = self.coords_to_index((x, y, 0));
print!("{0}", self.tiles[index].material % 10);
}
println!();
}
println!();
}
fn coords_to_index(&self, pos: Pos) -> usize {
let (x, y, _) = pos;
(x + y * self.xlen
|
{
let (x, y, z) = pos;
self.tiles = vec![AIR_TILE; (x * y * z) as usize];
self.xlen = x;
self.ylen = y;
self.zlen = z;
}
|
identifier_body
|
tiles.rs
|
ylen: PosUnit,
}
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
pub struct MapChunk {
pub tiles: Tiles,
pub pos: Pos,
pub xlen: PosUnit,
pub ylen: PosUnit,
pub zlen: PosUnit,
}
pub fn init_map(root: &Path) -> Map {
info!("Initializing map");
let test_path = root.join("static/inc/maps/smol_map_excel.sfm.csv");
let path_str = test_path
.to_str()
.expect("Unicode decode error");
// Load materials properties file
let materials = init_materials(root);
load_map(path_str, materials).expect("Could not load map")
}
impl Tile {
fn new(material: MaterialID, mode: Mode) -> Tile {
Tile {
material: material,
mode: mode,
marked: false,
}
}
}
impl Map {
#[allow(dead_code)]
pub fn print(&self) {
//Debug print method
//[debug] func
for z in 0..self.zlen {
for y in 0..self.ylen {
for x in 0..self.xlen {
match self.get_tile((x, y, z)) {
Some(tile) => print!("{0}", tile.material % 10),
None => print!(" "),
}
}
println!();
}
println!();
}
}
pub fn size(&self) -> Pos {
(self.xlen, self.ylen, self.zlen)
}
// Resize the map to the given dimensions, filling it with blank tiles
pub fn resize(&mut self, pos: Pos) {
let (x, y, z) = pos;
self.tiles = vec![AIR_TILE; (x * y * z) as usize];
self.xlen = x;
self.ylen = y;
self.zlen = z;
}
pub fn get_chunk(&self, pos: Pos, size: Pos) -> MapChunk {
let (x0, y0, z0) = pos;
let (xlen, ylen, zlen) = size;
let mut tiles = Tiles::new();
for x in x0..(x0 + xlen) {
for y in y0..(y0 + ylen) {
for z in z0..(z0 + zlen) {
let index = self.coords_to_index((x, y, z));
tiles.push(self.tiles[index]);
}
}
}
MapChunk {
tiles: tiles,
pos: pos,
xlen: xlen,
ylen: ylen,
zlen: zlen,
}
}
// TODO Add duplication factor
pub fn to_chunks(&self) -> Vec<MapChunk> {
let mut chunks = Vec::<MapChunk>::new();
let x_chunks = Map::get_num_chunks(self.xlen, CHUNK_TILES_X);
let y_chunks = Map::get_num_chunks(self.ylen, CHUNK_TILES_Y);
let z_chunks = Map::get_num_chunks(self.zlen, CHUNK_TILES_Z);
for dx in 0..x_chunks {
for dy in 0..y_chunks {
for dz in 0..z_chunks {
let x = dx * CHUNK_TILES_X;
let y = dy * CHUNK_TILES_Y;
|
let zlen = min(CHUNK_TILES_Z, self.zlen - dz * CHUNK_TILES_Z);
let size = (xlen, ylen, zlen);
chunks.push(self.get_chunk(pos, size))
}
}
}
chunks
}
fn get_num_chunks(map_len: PosUnit, chunk_len: PosUnit) -> PosUnit {
if map_len % chunk_len == 0 {
map_len / chunk_len
} else {
map_len / chunk_len + 1
}
}
pub fn apply_chunk(&mut self, chunk: &MapChunk) {
let (x0, y0, z0) = chunk.pos;
let mut chunk_i = 0;
for x in 0..chunk.xlen {
for y in 0..chunk.ylen {
for z in 0..chunk.zlen {
let mx = x + x0;
let my = (y + y0) * self.xlen;
let mz = (z + z0) * self.xlen * self.ylen;
let map_i = (mx + my + mz) as usize;
self.tiles[map_i] = chunk.tiles[chunk_i];
chunk_i += 1;
}
}
}
}
/// Tile accessor method
pub fn get_tile(&self, pos: Pos) -> Option<Tile> {
if self.in_bounds(pos) {
let index = self.coords_to_index(pos);
Some(self.tiles[index])
} else {
None
}
}
/// Perform some mutable operation on a tile
fn apply_tile_func<F>(&mut self, pos: Pos, func: F)
where F: Fn(&mut Tile) {
if self.in_bounds(pos) {
let index = self.coords_to_index(pos);
func(&mut self.tiles[index]);
}
}
fn in_bounds(&self, pos: Pos) -> bool {
let (x, y, z) = pos;
!(0 > x || 0 > y || 0 > z || x >= self.xlen || y >= self.ylen || z >= self.zlen)
}
fn coords_to_index(&self, pos: Pos) -> usize {
let (x, y, z) = pos;
(x + y * self.xlen + z * self.xlen * self.ylen) as usize
}
pub fn update_tile(&mut self, new_tile: Tile, pos: Pos) {
self.apply_tile_func(pos, |tile| {
tile.material = new_tile.material;
tile.marked = false;
});
}
pub fn dig(&mut self, pos: Pos) {
let alt = self.get_alt(pos);
self.apply_tile_func(pos, |tile| tile.material = alt);
}
pub fn mark(&mut self, pos: Pos) {
self.apply_tile_func(pos, |tile| tile.marked = true);
}
#[allow(dead_code)]
pub fn unmark(&mut self, pos: Pos) {
self.apply_tile_func(pos, |tile| tile.marked = false);
}
fn grab_material(&self, pos: Pos) -> Option<Material> {
if let Some(tile) = self.get_tile(pos) {
if let Some(material) = self.materials.get(&tile.material) {
Some(material.clone())
} else {
None
}
} else {
None
}
}
pub fn get_alt(&self, pos: Pos) -> MaterialID {
if let Some(material) = self.grab_material(pos) {
material.alt
} else {
0
}
}
pub fn diggable(&self, pos: Pos) -> bool {
if let Some(tile) = self.get_tile(pos) {
if let Some(material) = self.materials.get(&tile.material) {
material.diggable && !tile.marked
} else {
false
}
} else {
false
}
}
pub fn passable(&self, pos: Pos) -> bool {
if let Some(tile) = self.get_tile(pos) {
match tile.mode {
Mode::Block => false,
_ => true,
}
} else {
false
}
}
#[allow(dead_code)]
pub fn save(&self, path: &str) -> Result<(), Error> {
//Saves map as file. Currently unversioned, so take heed.
let f = File::create(&path)?;
let mut writer = BufWriter::new(&f);
writeln!(&mut writer, "{} {} {}", self.xlen, self.ylen, self.zlen)?;
for z in 0..self.zlen {
for y in 0..self.ylen {
for x in 0..self.xlen {
write!(&mut writer, "{} ",
self.get_tile((x, y, z)).expect("Malformed map").material)?;
}
writeln!(&mut writer)?;
}
writeln!(&mut writer)?;
}
Ok(())
}
}
impl MapSnapshot {
#[allow(dead_code)]
pub fn print(&self) {
//MapSnapshot debug
//[debug] func
for y in 0..self.ylen {
for x in 0..self.xlen {
let index = self.coords_to_index((x, y, 0));
print!("{0}", self.tiles[index].material % 10);
}
println!();
}
println!();
}
fn coords_to_index(&self, pos: Pos) -> usize {
let (x, y, _) = pos;
(x + y * self.xlen) as
|
let z = dz * CHUNK_TILES_Z;
let pos = (x, y, z);
let xlen = min(CHUNK_TILES_X, self.xlen - dx * CHUNK_TILES_X);
let ylen = min(CHUNK_TILES_Y, self.ylen - dy * CHUNK_TILES_Y);
|
random_line_split
|
tiles.rs
|
ylen: PosUnit,
}
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
pub struct
|
{
pub tiles: Tiles,
pub pos: Pos,
pub xlen: PosUnit,
pub ylen: PosUnit,
pub zlen: PosUnit,
}
pub fn init_map(root: &Path) -> Map {
info!("Initializing map");
let test_path = root.join("static/inc/maps/smol_map_excel.sfm.csv");
let path_str = test_path
.to_str()
.expect("Unicode decode error");
// Load materials properties file
let materials = init_materials(root);
load_map(path_str, materials).expect("Could not load map")
}
impl Tile {
fn new(material: MaterialID, mode: Mode) -> Tile {
Tile {
material: material,
mode: mode,
marked: false,
}
}
}
impl Map {
#[allow(dead_code)]
pub fn print(&self) {
//Debug print method
//[debug] func
for z in 0..self.zlen {
for y in 0..self.ylen {
for x in 0..self.xlen {
match self.get_tile((x, y, z)) {
Some(tile) => print!("{0}", tile.material % 10),
None => print!(" "),
}
}
println!();
}
println!();
}
}
pub fn size(&self) -> Pos {
(self.xlen, self.ylen, self.zlen)
}
// Resize the map to the given dimensions, filling it with blank tiles
pub fn resize(&mut self, pos: Pos) {
let (x, y, z) = pos;
self.tiles = vec![AIR_TILE; (x * y * z) as usize];
self.xlen = x;
self.ylen = y;
self.zlen = z;
}
pub fn get_chunk(&self, pos: Pos, size: Pos) -> MapChunk {
let (x0, y0, z0) = pos;
let (xlen, ylen, zlen) = size;
let mut tiles = Tiles::new();
for x in x0..(x0 + xlen) {
for y in y0..(y0 + ylen) {
for z in z0..(z0 + zlen) {
let index = self.coords_to_index((x, y, z));
tiles.push(self.tiles[index]);
}
}
}
MapChunk {
tiles: tiles,
pos: pos,
xlen: xlen,
ylen: ylen,
zlen: zlen,
}
}
// TODO Add duplication factor
pub fn to_chunks(&self) -> Vec<MapChunk> {
let mut chunks = Vec::<MapChunk>::new();
let x_chunks = Map::get_num_chunks(self.xlen, CHUNK_TILES_X);
let y_chunks = Map::get_num_chunks(self.ylen, CHUNK_TILES_Y);
let z_chunks = Map::get_num_chunks(self.zlen, CHUNK_TILES_Z);
for dx in 0..x_chunks {
for dy in 0..y_chunks {
for dz in 0..z_chunks {
let x = dx * CHUNK_TILES_X;
let y = dy * CHUNK_TILES_Y;
let z = dz * CHUNK_TILES_Z;
let pos = (x, y, z);
let xlen = min(CHUNK_TILES_X, self.xlen - dx * CHUNK_TILES_X);
let ylen = min(CHUNK_TILES_Y, self.ylen - dy * CHUNK_TILES_Y);
let zlen = min(CHUNK_TILES_Z, self.zlen - dz * CHUNK_TILES_Z);
let size = (xlen, ylen, zlen);
chunks.push(self.get_chunk(pos, size))
}
}
}
chunks
}
fn get_num_chunks(map_len: PosUnit, chunk_len: PosUnit) -> PosUnit {
if map_len % chunk_len == 0 {
map_len / chunk_len
} else {
map_len / chunk_len + 1
}
}
pub fn apply_chunk(&mut self, chunk: &MapChunk) {
let (x0, y0, z0) = chunk.pos;
let mut chunk_i = 0;
for x in 0..chunk.xlen {
for y in 0..chunk.ylen {
for z in 0..chunk.zlen {
let mx = x + x0;
let my = (y + y0) * self.xlen;
let mz = (z + z0) * self.xlen * self.ylen;
let map_i = (mx + my + mz) as usize;
self.tiles[map_i] = chunk.tiles[chunk_i];
chunk_i += 1;
}
}
}
}
/// Tile accessor method
pub fn get_tile(&self, pos: Pos) -> Option<Tile> {
if self.in_bounds(pos) {
let index = self.coords_to_index(pos);
Some(self.tiles[index])
} else {
None
}
}
/// Perform some mutable operation on a tile
fn apply_tile_func<F>(&mut self, pos: Pos, func: F)
where F: Fn(&mut Tile) {
if self.in_bounds(pos) {
let index = self.coords_to_index(pos);
func(&mut self.tiles[index]);
}
}
fn in_bounds(&self, pos: Pos) -> bool {
let (x, y, z) = pos;
!(0 > x || 0 > y || 0 > z || x >= self.xlen || y >= self.ylen || z >= self.zlen)
}
fn coords_to_index(&self, pos: Pos) -> usize {
let (x, y, z) = pos;
(x + y * self.xlen + z * self.xlen * self.ylen) as usize
}
pub fn update_tile(&mut self, new_tile: Tile, pos: Pos) {
self.apply_tile_func(pos, |tile| {
tile.material = new_tile.material;
tile.marked = false;
});
}
pub fn dig(&mut self, pos: Pos) {
let alt = self.get_alt(pos);
self.apply_tile_func(pos, |tile| tile.material = alt);
}
pub fn mark(&mut self, pos: Pos) {
self.apply_tile_func(pos, |tile| tile.marked = true);
}
#[allow(dead_code)]
pub fn unmark(&mut self, pos: Pos) {
self.apply_tile_func(pos, |tile| tile.marked = false);
}
fn grab_material(&self, pos: Pos) -> Option<Material> {
if let Some(tile) = self.get_tile(pos) {
if let Some(material) = self.materials.get(&tile.material) {
Some(material.clone())
} else {
None
}
} else {
None
}
}
pub fn get_alt(&self, pos: Pos) -> MaterialID {
if let Some(material) = self.grab_material(pos) {
material.alt
} else {
0
}
}
pub fn diggable(&self, pos: Pos) -> bool {
if let Some(tile) = self.get_tile(pos) {
if let Some(material) = self.materials.get(&tile.material) {
material.diggable && !tile.marked
} else {
false
}
} else {
false
}
}
pub fn passable(&self, pos: Pos) -> bool {
if let Some(tile) = self.get_tile(pos) {
match tile.mode {
Mode::Block => false,
_ => true,
}
} else {
false
}
}
#[allow(dead_code)]
pub fn save(&self, path: &str) -> Result<(), Error> {
//Saves map as file. Currently unversioned, so take heed.
let f = File::create(&path)?;
let mut writer = BufWriter::new(&f);
writeln!(&mut writer, "{} {} {}", self.xlen, self.ylen, self.zlen)?;
for z in 0..self.zlen {
for y in 0..self.ylen {
for x in 0..self.xlen {
write!(&mut writer, "{} ",
self.get_tile((x, y, z)).expect("Malformed map").material)?;
}
writeln!(&mut writer)?;
}
writeln!(&mut writer)?;
}
Ok(())
}
}
impl MapSnapshot {
#[allow(dead_code)]
pub fn print(&self) {
//MapSnapshot debug
//[debug] func
for y in 0..self.ylen {
for x in 0..self.xlen {
let index = self.coords_to_index((x, y, 0));
print!("{0}", self.tiles[index].material % 10);
}
println!();
}
println!();
}
fn coords_to_index(&self, pos: Pos) -> usize {
let (x, y, _) = pos;
(x + y * self.xlen)
|
MapChunk
|
identifier_name
|
rotas.component.ts
|
title: c.rota.nome+' - '+c.nome
});
if (c.rota.img != 'semRota') {
var image = {
url: '../../assets/img/' + c.rota.img + '.png',
// The anchor for this image is at (0, 16).
anchor: new google.maps.Point(0, 16),
// The full marker image is 52 pixels wide by 52 pixels high.
size: new google.maps.Size(52, 52),
//The size of the entire image after scaling, if any. Use this property to stretch/shrink an image or a sprite.
scaledSize: new google.maps.Size(42, 42),
// The origin for this image is (0, 0).
//origin: new google.maps.Point(1, 1),
};
//anchor: [16,16],
//size: [52,52],
//scaledSize: [52,52]
//var image1 = '../../assets/img/'+c.rota.img+'.png';
marker.setIcon(image);
//marker.setDraggable(true);
//marker.nguiMapComponent.center = ''+this.marker.lat+','+this.marker.lng+'';
//marker.nguiMapComponent.zoom = '16';
//marker.nguiMapComponent.openInfoWindow('iw', marker);
}
marker.addListener('click', function () {
var contentString = '<div id="content">' +
'<div id="siteNotice">' +
'</div>' +
'<h1 id="firstHeading" class="firstHeading">' + c.nome + '</h1>' +
'<div id="bodyContent">' +
'<p><img style="max-height: 100px; max-width: 100px;" class="resize" src="../../assets/img/cliente.jpg" align="left" />O cliente <b>' +
c.nome + '</b>, reside na rua <b>' + c.rua.nome + '</b>, Nยบ <b>' + c.numero + '</b> <br />' +
'colocar aqui mais informaรงรตes importรขntes .... ' +
' Uluru is listed as a World ' +
'Heritage Site.</p>' +
'<p>Link do Facebook: , <a href="https://en.wikipedia.org/w/index.php?title=Uluru&oldid=297882194">' +
'https://en.wikipedia.org/w/index.php?title=Uluru</a> ' +
'(last visited June 22, 2009).</p>' +
'</div>' +
'</div>';
var infowindow = new google.maps.InfoWindow({
content: contentString,
position : cliLatLng
});
infowindow.open(map, this.marker);
});
marker.addListener('dblclick', function () {
console.log('double click ' + c.nome);
this.marker.setDraggable(true);
this.ativarAutalizacaoCliente = true;
this.larguraMapa = '8';
this.ativarCriacaoRotaClientes = false;
});
marker.setMap(this.map);
}
});
}*/
permiterMoverPonto1({ target: marker }) {
marker.setDraggable(true);
this.ativarAutalizacaoCliente = true;
this.larguraMapa = '8';
this.ativarCriacaoRotaClientes = false;
}
public ativarTabelaIncluirClientesNovosRota: boolean = false;
public ativarTabela_Alterar_ClientesRota: boolean = false;
/**
* Compares every point on the map against the lat/lng of the polygon drawn
* by the user: it takes each client's lat/lng and asks individually whether
* that client lies inside the polygon.
*/
clientesSelecionadosRota() {
console.log('chamou clientesSelecionadosRota ....');
//var sydneyLoc = new google.maps.LatLng(-30.342013, -54.339090);
//console.log(google.maps.geometry.poly.containsLocation(sydneyLoc, this.selectedOverlay));
for (let cli of this.clientes) {
if (cli.rota.img == 'semRota') {
var cliLatLng = new google.maps.LatLng(parseFloat(cli.latitude), parseFloat(cli.longitude));
if (google.maps.geometry.poly.containsLocation(cliLatLng, this.selectedOverlay) === true) {
cli.checked = true;
//console.log('qtd clientesSelec ****'+this.clientesSelec.length);
var encontrado = false;
//for-loop kept from testing multiple polygons on the map:
//it ensures a client is not added to the list more than once
for (let client of this.clientesSelec) {
if (client.id === cli.id) {
encontrado = true
}
}
if (encontrado) {
console.log('cliente já existe dentro do poligono')
} else {
this.ativarTabelaIncluirClientesNovosRota = true;
this.clientesSelec.push(cli);
}
}
} else {
|
var cliLatLng = new google.maps.LatLng(parseFloat(cli.latitude), parseFloat(cli.longitude));
if (google.maps.geometry.poly.containsLocation(cliLatLng, this.selectedOverlay) === true) {
cli.checked = true;
//console.log('qtd clientesSelec ****'+this.clientesSelec.length);
var encontrado = false;
//for-loop kept from testing multiple polygons on the map:
//it ensures a client is not added to the list more than once
for (let client of this.clientesSelecAlterar) {
if (client.id === cli.id) {
encontrado = true
}
}
if (encontrado) {
console.log('cliente já existe dentro do poligono')
} else {
this.ativarTabela_Alterar_ClientesRota = true;
this.clientesSelecAlterar.push(cli);
}
}
console.log('cliente já tem rota... ' + cli.rota.nome);
}
}
//this.achaClientesVindosBackend_e_que_nao_marcados_mapa();
console.log('********************************');
console.log('**** clientes selecionados ****' + this.clientesSelec.length);
for (let cli of this.clientesSelec) {
console.log('Nome: ' + cli.nome + ' checked? ' + cli.checked);
}
this.larguraMapa = '8';
this.ativarAutalizacaoCliente = false;
this.ativarCriacaoRotaClientes = true;
}
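// A minimal standalone sketch (the names ClienteLike and addIfNew are
// hypothetical, not from this component): the duplicate check above rescans
// the selection array for every candidate, O(n^2) overall; keeping the
// selected ids in a Set makes each membership test O(1).
interface ClienteLike { id: number; }

function addIfNew(selecionados: ClienteLike[], ids: Set<number>, cli: ClienteLike): boolean {
  if (ids.has(cli.id)) {
    return false; // already selected by a previously drawn polygon
  }
  ids.add(cli.id);
  selecionados.push(cli);
  return true;
}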
/**
Repetition loop used to identify clients that came from
the backend but were not marked on the map
*/
achaClientesVindosBackend_e_que_nao_marcados_mapa(): void {
for (let cliT of this.clientes) {
let cod = false;
for (let cli of this.clientesSelec) {
if (cliT.nome == cli.nome) {
cod = true;
}
}
if (!cod) {
console.log('&&&&');
console.log('&&&& clientes q não apareceram no mapa' + cliT.nome);
}
}
}
/**
* activates the Street View map
*/
ativaStreetViewMap(): void {
console.log('ativaStreetView() ... ');
this.ativaStreetView = true;
}
/**
* Deletes the polygon drawn by the user, clears the selected-client sets,
* hides the column used to insert clients into the route,
* and widens the map back to 12 columns.
*/
deleteSelectedOverlay() {
if (this.selectedOverlay) {
this.selectedOverlay.setMap(null);
delete this.selectedOverlay;
}
this.clientesSelec = [];
this.clientesSelecAlterar = [];
this.ativarTabela_Alterar_ClientesRota = false;
this.ativarCriacaoRotaClientes = false;
this.larguraMapa = '12';
// console.log('vai navegar para /mapa ...');
// this.router.navigate(['/mapa']);
}
// the code below relates to the main map
moveuPonto({ target: panorama }) {
// let lati = parseFloat(panorama.getPosition().lat());
// this.clienteSelecionado.latitude = String(lati) ;
//var myLatlng = new google.maps.LatLng(parseFloat(panorama.getPosition().lat()),parseFloat(panorama.getPosition().lng()));
console.log('novaLat --- >' + panorama.getPosition().lat());
console.log('novaLng --- >' + panorama.getPosition().lng());
this.clienteSelecionado.latv = parseFloat(panorama.getPosition().lat());
this.clienteSelecionado.lngv = parseFloat(panorama.getPosition().lng());
}
posicao({ target: panorama }) {
console.log('posicao ...');
console.log('posicao --- >' + panorama.getPosition());
panorama = new google.maps.StreetViewPanorama(document.getElementById('sv'));
}
dbClikPermiterMoverPonto({ target: marker }) {
if(this.clienteSelecionado.rota.img == 'semRota'){
swal("AVISO!", "Cliente sem rota! Adicione o cliente em alguma ROTA para ter permissรฃo de alterar seus dados", "info");
}else{
marker.setDraggable(true);
this.ativarAutalizacaoCliente = true;
this.larguraMapa = '8';
this.ativarCriacaoRotaClientes = false;
}
}
// marker object with custom information
private marker = {
display: true,
lat: null,
lng: null,
title: null,
nome: null,
nomeRua: null,
numero: null,
nomeRota: null,
foto : null
};
clicked({ target: marker }, cli: Cliente) {
this.markerInstanciaSelecionadoMapa = marker;
this.marker.lat = marker.getPosition().lat();
this.marker.lng = marker.getPosition().lng();
this.marker.title = 'Nome: ' + cli.nome + ' - ' + cli.rua.nome + ' Nยบ ' + cli.numero + ' Rota: ' + cli.rota.nome;
this.marker.nome = cli.nome;
this.marker.nomeRua = cli.rua.nome;
this.marker.numero = cli.numero;
this.marker.foto = cli.foto;
this.marker.nomeRota = cli.rota.nome;
this.clienteSelecionado = cli;
this.clienteSelecionado.latv = parseFloat(this.clienteSelecionado.latitude);
this.clienteSelecionado.lngv = parseFloat(this.clienteSelecionado.longitude);
console.log('--->' + this.clienteSelecionado.nome);
console.log('--->' + this.clienteSelecionado.latitude);
console.log('---> cpf' + this.clienteSelecionado.cpf);
console.log('--->' + this.clienteSelecionado.rota.nome);
//console.log('getTitle() .... '+marker.getTitle());
//console.log(marker.cli);
marker.setTitle(cli.rota.nome);
//marker.setDraggable(true);
//marker.nguiMapComponent.center = ''+this.marker.lat+','+this.marker.lng+'';
//marker.nguiMapComponent.zoom = '16';
marker.nguiMapComponent.openInfoWindow('iw', marker);
}
/* private customMarker = {
display: true,
lat: null,
lng: null,
title: null,
};*/
onCustomMarkerInit(customMarker, c: Cliente) {
if (c.rota.img != 'semRota') {
var image = {
url: '../../assets/img/' + c.rota.img + '.png',
// The anchor for this image is the base of the flagpole at (0, 32).
anchor: new google.maps.Point(0, 16),
// This marker is 20 pixels wide by 32 pixels high.
size: new google.maps.Size(52, 52),
//The size of the entire image after scaling, if any. Use this property to stretch/shrink an image or a sprite.
scaledSize: new google.maps.Size(42, 42),
// The origin for this image is (0, 0).
//origin: new google.maps.Point(1, 1),
};
//anchor: [16,16],
//size: [52,52],
//scaledSize: [52,52]
//var image1 = '../../assets/img/'+c.rota.img+'.png';
//marker.setIcon(image);
customMarker.setIcon(image);
//marker.setDraggable(true);
//marker.nguiMapComponent.center = ''+this.marker.lat+','+this.marker.lng+'';
//marker.nguiMapComponent.zoom = '16';
//marker.nguiMapComponent.openInfoWindow('iw', marker);
}
}
/*hideMarkerInfo() {
this.marker.display = !this.marker.display;
}*/
//https://rawgit.com/ng2-ui/map/master/app/index.html#/custom-marker
// the code below relates to the client-update form
/**
* Updates the client on the backend
*
*/
atualizar(): void {
// NOTE: there is an error converting lat/lng to string here (LatLngLiteral: in property lat: not a number); handle it later!
console.log('... Dados q serão atualizados ....');
this.clienteSelecionado.latitude = String(this.clienteSelecionado.latv);
console.log('latitude final ..' + this.clienteSelecionado.latitude);
this.clienteSelecionado.longitude = String(this.clienteSelecionado.lngv);
console.log('longitude final ...' + this.clienteSelecionado.longitude);
console.log(' rota id ...' + this.clienteSelecionado.rota.id);
console.log(' rota ...' + this.clienteSelecionado.rota.nome);
var rotaImg: Rota;
for (let rot of this.rotas) {
//console.log('testando rotas... '+rot.id+ ' - '+this.clienteSelecionado.rota.id);
if (rot.id == this.clienteSelecionado.rota.id) {
//console.log(' rota img selecionada ...'+rot.img);
rotaImg = rot;
}
}
var image = {
url: '../../assets/img/' + rotaImg.img + '.png',
anchor: new google.maps.Point(0, 16),
size: new google.maps.Size(52, 52),
scaledSize: new google.maps.Size(42, 42),
};
this.markerInstanciaSelecionadoMapa.setIcon(image);
this.busy = this.clienteService.atualiza_lat_lng(this.clienteSelecionado).subscribe(res => {
if (res === 'e') {
swal("ERRO!", "Problemas na atulizaรงรฃo das coordenadas", "error");
} else {
swal('Atualizado!', 'Coordenadas atualizadas com sucesso', 'success');
this.fecharFromAtualizacao();
}
});
}
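// A minimal sketch for the conversion error flagged at the top of atualizar()
// (toFiniteNumber is a hypothetical helper, not part of this component):
// validate the value before building a LatLngLiteral instead of passing a
// possibly-NaN parseFloat result along.
function toFiniteNumber(value: string | number): number | null {
  const n = typeof value === 'number' ? value : parseFloat(value);
  return Number.isFinite(n) ? n : null;
}
// Usage idea: const lat = toFiniteNumber(this.clienteSelecionado.latitude);
// if (lat === null) skip the update and warn, rather than sending NaN.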
fecharFromAtualizacao(): void {
this.ativaStreetView = false;
this.markerInstanciaSelecionadoMapa.setDraggable(false);
this.ativarAutalizacaoCliente = false;
this.larguraMapa = '12';
}
incluirSelectedCli() { // right
main.js
// fallback for browsers that do not fully support the CSS solution
else {
if (EdgeCheck) console.log("Smooth scrolling enabled on Edge, but might not work as smooth as possible due to some limitations, for ex. links with # will probably not scroll smoothly!");
else console.log("Smooth scrolling enabled but might not work properly on this browser!");
$("a").on('click', function(event) {
if (this.hash !== "") {
var hash = this.hash;
$('html, body').animate({
scrollTop: $(hash).offset().top
}, 200, function(){
window.location.hash = hash;
});
}
});
}
/* Mobile menu toggle functions */
let mobileMenuToggle = $('#header .mobile-menu-toggle');
let mobileMenuOverlay = $('#mobile-menu-overlay');
let mobileMenuCloseBtn = $('#mobile-menu-overlay .mobile-menu .mobile-menu-header .mobile-menu-close');
let mobileMenuList = $('#mobile-menu-overlay .mobile-menu .mobile-menu-body .menu-items');
// Open mobile menu
mobileMenuToggle.on('click', function() {
mobileMenuOverlay.fadeIn(300);
});
// Close mobile menu
mobileMenuCloseBtn.on('click', function() {
mobileMenuOverlay.fadeOut(300);
});
// Close mobile menu when clicking on menu items
mobileMenuList.find('li').on('click', function() {
mobileMenuOverlay.fadeOut(300);
});
// Close mobile menu when clicked outside of modal
$(document).on('click', function (e) {
if ( $(e.target).is(mobileMenuOverlay) ) {
mobileMenuOverlay.fadeOut(300);
}
});
/* Work experience details toggle */
$('#about .work-experience-container .work-place i').on('click', function() {
if ( $(this).parent().find('.work-place-desc').is(':hidden') ) {
$('#about .work-experience-container .work-place .work-place-desc').fadeOut(500);
$(this).parent().find('.work-place-desc').fadeIn(500);
$('#about .work-experience-container .work-place i').removeClass('rotated');
$(this).addClass('rotated');
} else {
$(this).parent().find('.work-place-desc').fadeOut(500);
$(this).removeClass('rotated');
}
});
/* Services section functions */
let servicesMainContainer = $('#services .services-content-container');
let backToServiceBtn = $('#services .services-content-container .services-content-info-container .back-to-service-intro-btn');
let initialServicesContent = $('#services .services-content-container .services-content-info-container .initial-services-content');
let moreServiceContent = $('#services .services-content-container .services-content-info-container .services-more-content-container');
let moreServiceContentTitle = $('#services .services-content-container .services-content-info-container .services-more-content-container .service-title');
let moreServiceContentDesc = $('#services .services-content-container .services-content-info-container .services-more-content-container .service-desc');
let servicesRightContainer = $('#services .services-content-container .services-content-list-container');
// Service click
servicesRightContainer.find('.service').on('click', function() {
let serviceTitle = $(this).find('h4').text();
let serviceLongDesc = $(this).find('.long-desc').html();
// If it's a mobile or tablet, scroll up to the service more content section
if ( mobile.matches ) {
$('html, body').animate({
scrollTop: $("#services-content-info-container").offset().top - 100
}, 100);
}
if ( tablet.matches ) {
$('html, body').animate({
scrollTop: $("#services-content-info-container").offset().top - 150
}, 100);
}
// Add class to main services content container
servicesMainContainer.addClass('a-service-clicked');
// Remove class from all services and add only to the clicked service
servicesRightContainer.find('.service').removeClass('service-clicked');
$(this).addClass('service-clicked');
// Hide initial service content, show back to initial service content button and service content section
initialServicesContent.slideUp(300);
backToServiceBtn.fadeIn(300);
moreServiceContent.slideDown(300);
// Add service title and description to the service more content container
moreServiceContentTitle.text(serviceTitle);
moreServiceContentDesc.html(serviceLongDesc);
});
// Back to service button click
backToServiceBtn.on('click', function() {
// Remove class from main services content container
servicesMainContainer.removeClass('a-service-clicked');
// Remove class from all services
servicesRightContainer.find('.service').removeClass('service-clicked');
// Show initial service content, hide back to initial service content button and service content section
initialServicesContent.slideDown(300);
backToServiceBtn.fadeOut(300);
moreServiceContent.fadeOut(300);
});
/* Load Typeform after user scrolls */
let startedForm = 0;
function startForm() {
if(startedForm == 0) {
// $('.contact-content-container').html('<div data-tf-widget="zdz53C" data-tf-opacity="100" data-tf-iframe-props="title=Contact Form Submission" data-tf-transitive-search-params data-tf-medium="snippet" style="width:100%;height:600px;"></div>');
// $('.contact-content-container').append('<script src="//embed.typeform.com/next/embed.js">');
$('.contact-content-container').html('<button data-tf-popup="zdz53C" data-tf-opacity="100" data-tf-size="70" data-tf-iframe-props="title=Contact Form Submission" data-tf-transitive-search-params data-tf-medium="snippet">Get in Touch</button>');
$('.contact-content-container').append('<script src="//embed.typeform.com/next/embed.js"></script>');
}
startedForm = 1;
}
document.addEventListener('scroll', () => {
startForm();
});
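// A minimal alternative (a sketch, not the page's code): registering with
// { once: true } lets the browser drop the handler after the first scroll,
// which would make the startedForm flag redundant:
// document.addEventListener('scroll', startForm, { once: true });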
/* Toggle contact methods dropdown */
let connectBtn = $('#contact-methods .contact-methods-container h3');
let connectList = $('#contact-methods .contact-methods-container .contact-methods-list');
connectBtn.on('click', function() {
if ( $(connectList).css('display') === 'none' ) {
$(connectBtn).addClass('connectListToggled');
$(connectList).css('display', 'inline-block');
} else {
$(connectBtn).removeClass('connectListToggled');
$(connectList).css('display', 'none');
}
});
/* Easter egg that nobody asked for */
// Counters
let clickCounter = 0;
let countdownCounter = 100;
// Checks for opacity countdown timer
function countdownCounterCheck(n){ return (n < 10 ? "0" : "") + n; } // left-pads to two digits; the old `n == 10` branch concatenated "10" + 10 into "1010"
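// A quick sketch of what the padding yields: two-digit strings, so that
// '0.' + value below is always a valid CSS opacity string.
// countdownCounterCheck(9)  -> "09"  ('0.09')
// countdownCounterCheck(10) -> "10"  ('0.10')
// countdownCounterCheck(42) -> "42"  ('0.42'); only 0..99 occur here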
// Image variables
let clickableImg = $('#banner .banner-image-inner img');
let clickableImgParent = $('#banner .banner-image');
let clickableImgInnerParent = $('#banner .banner-image .banner-image-inner');
// Easter egg magic
$(clickableImg).on('click', function() {
clickCounter++;
countdownCounter--;
$(clickableImg).css('opacity', '0.'+countdownCounterCheck(countdownCounter));
if ( clickCounter <= 1 ) $(clickableImgParent).prepend('<div class="easter-egg-container"><p></p></div>');
if ( clickCounter >= 5 ) {
$(clickableImgParent).find('.easter-egg-container').fadeIn(300);
$('.easter-egg-container p').text('So you decided to click on my image');
}
if ( clickCounter >= 10 ) { $('.easter-egg-container p').text('You really wanna do this?'); }
if ( clickCounter >= 25 ) { $('.easter-egg-container p').text('Does this look fun to you?'); }
if ( clickCounter >= 35 ) { $('.easter-egg-container p').text('I would get bored at this point but go on champ \u{1f64c}'); }
if ( clickCounter >= 50 ) { $('.easter-egg-container p').text('Meh, continue I guess'); }
if ( clickCounter >= 65 ) { $('.easter-egg-container p').text('The person in front of the screen still clicking like crazy :D'); }
if ( clickCounter >= 75 ) { $('.easter-egg-container p').text('Ok, not much left'); }
if ( clickCounter >= 90 ) { $('.easter-egg-container p').text('Just a few more clicks'); }
if ( clickCounter == 97 ) { $('.easter-egg-container p').text('Just 3 more clicks'); }
if ( clickCounter == 98 ) { $('.easter-egg-container p').text('Just 2 more clicks'); }
if ( clickCounter == 99 ) { $('.easter-egg-container p').text('Just 1 more click dude!!'); }
if ( clickCounter
finetuning.py
url = "https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz"
filename = "cifar-100-python.tar.gz"
tgz_md5 = 'eb9058c3a382ffc7106e4002c42a8d85'
train_list = [
['train', '16019d7e3df5f24257cddd939b257f8d'],
]
test_list = [
['test', 'f0ef6b0ae62326f3e7ffdfab6717acfc'],
]
meta = {
'filename': 'meta',
'key': 'fine_label_names',
'md5': '7973b15100ade9c7d40fb424638fde48',
}
"""**MODEL**"""
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=0): # should be num_classes=10
self.inplanes = 16
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1,bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.relu = nn.ReLU(inplace=True)
self.layer1 = self._make_layer(block, 16, layers[0])
self.layer2 = self._make_layer(block, 32, layers[1], stride=2)
self.layer3 = self._make_layer(block, 64, layers[2], stride=2)
self.avgpool = nn.AvgPool2d(8, stride=1)
self.fc = nn.Linear(64 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x, toExtract):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
if toExtract:
return x
else:
x = self.fc(x)
return x
def resnet32(pretrained=False, **kwargs):
n = 5
model = ResNet(BasicBlock, [n, n, n], **kwargs)
return model
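# A quick sanity-check sketch (assumes torch is importable and CIFAR-sized
# 32x32 inputs): the depth is 6n + 2 (three stages of n BasicBlocks with two
# convs each, plus the stem conv and the fc head), so n = 5 matches the "32"
# in the name, and the feature extractor returns 64-dimensional vectors.
import torch
assert 6 * 5 + 2 == 32
_model = resnet32(num_classes=10)
_feats = _model(torch.randn(2, 3, 32, 32), toExtract=True)
assert _feats.shape == (2, 64)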
"""**Set Argument**"""
DEVICE = 'cuda'
BATCH_SIZE = 128
LR = 2
NUM_EPOCHS = 70
MILESTONE=[49,63]
WEIGHT_DECAY = 0.00001
GAMMA = 0.2
MOMENTUM=0.9
class iCaRLNet(nn.Module):
def __init__(self):
super(iCaRLNet, self).__init__()
self.net = resnet32()
self.n_classes = 0
self.n_known = 0
self.classes_known=[]
self.new_classes=[]
self.dic={}
self.count_per_dic=0
self.loss=BCEWithLogitsLoss()
def forward(self, x, toExtract=False):
x = self.net(x, toExtract)
return x
@torch.no_grad()
def classify(self,images):
self.net.train(False)
_, preds=torch.max(self.forward(images,False), dim=1)
mapped=[]
for pred in preds:
mapped.append(list(self.dic.keys())[list(self.dic.values()).index(pred.item())])
tensore=torch.tensor(mapped)
return tensore
def increment_classes(self, new_classes):
self.new_classes=[]
for classe in new_classes:
if classe not in self.classes_known:
self.classes_known.append(classe)
self.n_classes += 1
self.new_classes.append(classe)
in_features = self.net.fc.in_features
out_features = self.net.fc.out_features
weight = self.net.fc.weight.data
bias = self.net.fc.bias.data
self.net.fc = nn.Linear(in_features, out_features+len(self.new_classes), bias=True)
self.net.fc.weight.data[:out_features] = weight
self.net.fc.bias.data[:out_features] = bias
def update_representation(self, dataset):
classes = list(set(dataset.targets))
self.increment_classes(classes)
self.cuda()
print ("Now there are %d classes" % (self.n_classes))
optimizer = optim.SGD(self.net.parameters(), lr=LR, weight_decay=WEIGHT_DECAY,momentum=MOMENTUM)
scheduler=optim.lr_scheduler.MultiStepLR(optimizer, milestones=MILESTONE, gamma=GAMMA)
loader = torch.utils.data.DataLoader(dataset, batch_size=BATCH_SIZE,shuffle=True, num_workers=2)
order_label=[]
for i, (indices, images, labels) in enumerate(loader):
for label in labels:
if label not in order_label:
order_label.append(label)
self.dic[label.item()]=self.count_per_dic
self.count_per_dic +=1
for epoch in range(NUM_EPOCHS):
for i, (indices, images, labels) in enumerate(loader):
indices = indices.cuda()
images = images.cuda()
labels = labels.cuda()
mapped_labels=[]
for label in labels:
mapped_labels.append(self.dic[label.item()])
oneHot=torch.nn.functional.one_hot(torch.tensor(mapped_labels),self.n_classes)
oneHot=oneHot.type(torch.FloatTensor)
oneHot=oneHot.cuda()
self.net.train()
optimizer.zero_grad()
g = self.forward(images)
lista_map=[]
for classe in self.new_classes:
lista_map.append(self.dic[classe])
loss=self.loss(g[:,lista_map],oneHot[:,lista_map])
loss.backward()
optimizer.step()
if (i+1) % 10 == 0:
print(f"Epoch: {epoch+1}/{NUM_EPOCHS}, Iter: {i+1}/{math.ceil(len(dataset)/BATCH_SIZE)}, Loss: {loss.item():.4f}, lr={scheduler.get_last_lr()[0]} ")
scheduler.step()
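# A usage sketch for increment_classes (label ids 3 and 7 are hypothetical;
# assumes a torch build that accepts the zero-width Linear head created in
# __init__): each call widens the fc layer while copying over the rows
# already learned, and duplicate labels are ignored.
_net = iCaRLNet()
_net.increment_classes([3, 7, 7])
assert _net.n_classes == 2
assert _net.net.fc.out_features == 2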
"""**MAIN**"""
def give_split():
x=np.arange(0,100)
x=x.tolist()
random.seed(34)
random.shuffle(x)
total_classes=[]
for i in range(0,100,10):
lista=x[i:i+10]
total_classes.append(lista)
return total_classes
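# A determinism sketch: random.seed(34) fixes the shuffle, so give_split()
# always returns the same 10 disjoint groups of 10 class ids, together
# covering all 100 CIFAR-100 classes.
_splits = give_split()
assert len(_splits) == 10 and all(len(s) == 10 for s in _splits)
assert sorted(c for s in _splits for c in s) == list(range(100))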
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761)),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761)),
])
|
super(iCaRLNet, self).__init__()
self.net = resnet32()
self.n_classes = 0
self.n_known = 0
self.classes_known=[]
self.new_classes=[]
self.dic={}
self.count_per_dic=0
self.loss=BCEWithLogitsLoss()
|
identifier_body
|
finetuning.py
|
_names',
'md5': '7973b15100ade9c7d40fb424638fde48',
}
"""**MODEL**"""
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=0): #SAREBBE num_classes=10
self.inplanes = 16
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1,bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.relu = nn.ReLU(inplace=True)
self.layer1 = self._make_layer(block, 16, layers[0])
self.layer2 = self._make_layer(block, 32, layers[1], stride=2)
self.layer3 = self._make_layer(block, 64, layers[2], stride=2)
self.avgpool = nn.AvgPool2d(8, stride=1)
self.fc = nn.Linear(64 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x, toExtract):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
if toExtract:  # skip the classifier head and return pooled features
    return x
x = self.fc(x)
return x
def resnet32(pretrained=False, **kwargs):
n = 5
model = ResNet(BasicBlock, [n, n, n], **kwargs)
return model
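# Minimal shape sanity check for the backbone above (a sketch; this helper is
# never called by the training script itself).
def _check_resnet32_shapes():
    r = resnet32(num_classes=10)
    x = torch.randn(2, 3, 32, 32)
    assert r(x, toExtract=False).shape == (2, 10)  # classifier logits
    assert r(x, toExtract=True).shape == (2, 64)   # pooled 64-d features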
"""**Set Argument**"""
DEVICE = 'cuda'
BATCH_SIZE = 128
LR = 2
NUM_EPOCHS = 70
MILESTONE=[49,63]
WEIGHT_DECAY = 0.00001
GAMMA = 0.2
MOMENTUM=0.9
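# Note: LR = 2 looks unusually large, but it matches the iCaRL recipe of plain
# SGD on BCE-with-logits outputs; MultiStepLR below scales it by GAMMA at
# epochs 49 and 63.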
class iCaRLNet(nn.Module):
def __init__(self):
super(iCaRLNet, self).__init__()
self.net = resnet32()
self.n_classes = 0
self.n_known = 0
self.classes_known=[]
self.new_classes=[]
self.dic={}
self.count_per_dic=0
self.loss=BCEWithLogitsLoss()
def forward(self, x, toExtract=False):
x = self.net(x, toExtract)
return x
@torch.no_grad()
def classify(self,images):
self.net.train(False)
_, preds=torch.max(self.forward(images,False), dim=1)
mapped=[]
for pred in preds:
mapped.append(list(self.dic.keys())[list(self.dic.values()).index(pred.item())])
tensore=torch.tensor(mapped)
return tensore
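    # Example of the mapping round-trip above (hypothetical values): if self.dic
    # is {37: 0, 4: 1}, an argmax of 1 from the network is reported as label 4.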
def increment_classes(self, new_classes):
self.new_classes=[]
for classe in new_classes:
if classe not in self.classes_known:
self.classes_known.append(classe)
self.n_classes += 1
self.new_classes.append(classe)
in_features = self.net.fc.in_features
out_features = self.net.fc.out_features
weight = self.net.fc.weight.data
bias = self.net.fc.bias.data
self.net.fc = nn.Linear(in_features, out_features+len(self.new_classes), bias=True)
self.net.fc.weight.data[:out_features] = weight
self.net.fc.bias.data[:out_features] = bias
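    # e.g. starting from fc = Linear(64, 10), ten unseen classes grow it to
    # Linear(64, 20), with the first ten rows of weight/bias copied over.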
def update_representation(self, dataset):
classes = list(set(dataset.targets))
self.increment_classes(classes)
self.cuda()
print("Now there are %d classes" % self.n_classes)
optimizer = optim.SGD(self.net.parameters(), lr=LR, weight_decay=WEIGHT_DECAY,momentum=MOMENTUM)
scheduler=optim.lr_scheduler.MultiStepLR(optimizer, milestones=MILESTONE, gamma=GAMMA)
loader = torch.utils.data.DataLoader(dataset, batch_size=BATCH_SIZE,shuffle=True, num_workers=2)
# Assign each new raw CIFAR label a contiguous output index, in the order the
# labels are first seen; store plain ints so membership tests are cheap.
order_label = []
for i, (indices, images, labels) in enumerate(loader):
    for label in labels:
        if label.item() not in order_label:
            order_label.append(label.item())
            self.dic[label.item()] = self.count_per_dic
            self.count_per_dic += 1
for epoch in range(NUM_EPOCHS):
for i, (indices, images, labels) in enumerate(loader):
indices = indices.cuda()
images = images.cuda()
labels = labels.cuda()
mapped_labels=[]
for label in labels:
mapped_labels.append(self.dic[label.item()])
oneHot=torch.nn.functional.one_hot(torch.tensor(mapped_labels),self.n_classes)
oneHot=oneHot.type(torch.FloatTensor)
oneHot=oneHot.cuda()
self.net.train()
optimizer.zero_grad()
g = self.forward(images)
lista_map=[]
for classe in self.new_classes:
lista_map.append(self.dic[classe])
loss=self.loss(g[:,lista_map],oneHot[:,lista_map])
loss.backward()
optimizer.step()
if (i+1) % 10 == 0:
print(f"Epoch: {epoch+1}/{NUM_EPOCHS}, Iter: {i+1}/{math.ceil(len(dataset)/BATCH_SIZE)}, Loss: {loss.item():.4f}, lr={scheduler.get_last_lr()[0]} ")
scheduler.step()
"""**MAIN**"""
def give_split():
x=np.arange(0,100)
x=x.tolist()
random.seed(34)
random.shuffle(x)
total_classes=[]
for i in range(0,100,10):
lista=x[i:i+10]
total_classes.append(lista)
return total_classes
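# Sketch: give_split() is deterministic (seed 34), returning ten disjoint
# batches of ten class ids that together cover 0..99. Not called by the script.
def _check_split():
    splits = give_split()
    assert len(splits) == 10 and all(len(s) == 10 for s in splits)
    assert sorted(c for s in splits for c in s) == list(range(100))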
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761)),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761)),
])
icarl = iCaRLNet()
icarl.cuda()
list_classes=give_split()
lista_tot=[]
list_train_acc=[]
list_test_acc=[]
for s in range(0,len(list_classes)):
for elem in list_classes[s]:
lista_tot.append(elem)
print("Loading training examples for classes", list_classes[s])
print(f"In train {list_classes[s]}")
print(f"In test {lista_tot}")
train_set = iCIFAR100(root='./data',train=True,classes=list_classes[s],download=True,transform=transform_train)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=128,shuffle=True, num_workers=2)
produce_evaluation.py
# Imports reconstructed so the module is self-contained (the file header is
# truncated in this dump).
import datetime
import pprint
from pathlib import Path
from string import Template

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

# Display names per OPF method; the original mapping is not recoverable here,
# and PRETTY_NAMES.get(m, m) falls back to the raw method name anyway.
PRETTY_NAMES = {}

def plot_histogram(results, filter_by_solved, filter_by_methods=None):
    # Signature inferred from the plot_histogram call commented out in main().
    fig, ax = plt.subplots()
ax.set_xscale('log')
methods = results.opf_method.unique()
min_val = np.min(results.time_taken)
max_val = np.max(results.time_taken)
n_bins = 100
# bin_lims = np.linspace(min_val, max_val, n_bins + 1)
bin_lims = np.logspace(np.log10(min_val), np.log10(max_val), n_bins)
bin_centers = 0.5 * (bin_lims[:-1] + bin_lims[1:])
bin_widths = bin_lims[1:] - bin_lims[:-1]
for m in sorted(methods):
mask = results.opf_method == m
vals = results.time_taken[mask]
hist, _ = np.histogram(vals, bins=bin_lims)
hist = hist / np.max(hist)
ax.bar(bin_centers, hist, width=bin_widths, align='center', label=m,
alpha=0.5)
ax.legend()
plt.show()
return fig
def strfdelta(tdelta, fmt):
class DeltaTemplate(Template):
delimiter = "%"
d = {"D": tdelta.days}
hours, rem = divmod(tdelta.seconds, 3600)
minutes, seconds = divmod(rem, 60)
d["H"] = '{:02d}'.format(hours)
d["M"] = '{:02d}'.format(minutes)
d["S"] = '{:02d}'.format(seconds)
t = DeltaTemplate(fmt)
return t.substitute(**d)
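# Sketch: strfdelta(datetime.timedelta(seconds=3723), "%H:%M:%S h") -> "01:02:03 h"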
def format_time(seconds):
time = datetime.timedelta(seconds=seconds)
if seconds == 0:
return "< 1s"
if seconds < 60:
return str(seconds) + "s"
elif seconds < 3600:
time_format = "%M:%S min"
else:
time_format = "%H:%M:%S h"
timestring = strfdelta(time, time_format)
return timestring
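# e.g. format_time(0) -> "< 1s", format_time(42) -> "42s",
# format_time(125) -> "02:05 min", format_time(3723) -> "01:02:03 h"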
def plot_relative_time(results, filter_by_solved, filter_by_methods=None):
filter_by_methods = filter_by_methods or []  # normalize None to an empty list
if filter_by_methods:
results = results[~results.opf_method.isin(filter_by_methods)]
if filter_by_solved:
results = results[results["solved"] == 1]
fig, ax = plt.subplots()
methods = sorted(results.opf_method.unique())
runtimes = []
for m in methods:
mask = results.opf_method == m
vals = results.time_taken[mask]
mean_val = np.mean(vals)
runtimes.append(mean_val)
runtimes, methods = zip(*sorted(zip(runtimes, methods), reverse=True))
pos = np.arange(len(methods))
rects = ax.barh(pos, runtimes,
align='center',
height=0.5,
tick_label=[PRETTY_NAMES.get(m, m) for m in methods])
rect_labels = []
for rect in rects:
# Rectangle widths are already integer-valued but are floating
# type, so it helps to remove the trailing decimal point and 0 by
# converting width to int type
width = int(rect.get_width())
rankStr = format_time(width)
# The bars aren't wide enough to print the ranking inside
if width < 40:
# Shift the text to the right side of the right edge
xloc = 5
# Black against white background
clr = 'black'
align = 'left'
else:
# Shift the text to the left side of the right edge
xloc = -5
# White against the colored bar background
clr = 'white'
align = 'right'
# Center the text vertically in the bar
yloc = rect.get_y() + rect.get_height() / 2
label = ax.annotate(rankStr, xy=(width, yloc), xytext=(xloc, 0),
textcoords="offset points",
ha=align, va='center',
color=clr, weight='bold', clip_on=True)
rect_labels.append(label)
ax.set_xlabel('Mean Runtime (s)')
for tick in ax.get_xticklabels():
tick.set_rotation(45)
#for tick in ax.get_yticklabels():
# tick.set_rotation(45)
plt.show()
fig.set_size_inches(23, 10.5)
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] + rect_labels +
ax.get_xticklabels() + ax.get_yticklabels()):
        item.set_fontsize(26)
fig.savefig("./relative_runtimes.svg", bbox_inches='tight')
fig.savefig("./relative_runtimes.pdf", bbox_inches='tight')
fig.savefig("./relative_runtimes.png", dpi=200, bbox_inches='tight')
return fig
def solved_in_n_seconds(n_seconds, results, filter_by_methods=None):
filter_by_methods = filter_by_methods or []  # normalize None to an empty list
if filter_by_methods:
results = results[~results.opf_method.isin(filter_by_methods)]
results = results[results["solved"] == 1]
methods = results.opf_method.unique()
for m in sorted(methods):
mask = results.opf_method == m
vals = results.time_taken[mask]
mask_time = vals <= n_seconds
print(f"{m} solved {np.sum(mask_time)} in {n_seconds}")
def percentile(n):
def percentile_(x):
return np.percentile(x, n)
percentile_.__name__ = 'percentile_%s' % n
return percentile_
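# The closure's __name__ is what pandas uses to label the aggregated column,
# so percentile(95) shows up as "percentile_95" in statistical_summary's output.
# A small uncalled sketch:
def _example_percentile():
    p95 = percentile(95)
    assert p95.__name__ == "percentile_95"
    assert p95(np.arange(101)) == 95.0  # 95th percentile of 0..100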
def statistical_summary(results, filter_by_solved):
group_by = ["opf_method"]
if filter_by_solved:
results = results[results["solved"] == 1]
grouped_results = results.groupby(group_by)
agg_dict = {c: ["mean", "std", "median", "max", percentile(95)] for c in list(results.columns.values)
if c not in group_by + ["scenario_id", "solved"]}
agg_dict["solved"] = ["mean", "sum"]
statistics_df = grouped_results.agg(agg_dict)
# statistics_df = statistics_df.unstack(level=[1]).reorder_levels([2, 0, 1], axis=1)
# sort whole group according to test acc
statistics_df = statistics_df.sort_values(by=[("time_taken", "mean")], ascending=True)
return statistics_df
def pretty_statistics(results, filter_by_solved):
with pd.option_context('display.max_rows', None, 'display.max_columns', None,
"display.width", 400):
pretty_statistic_string = str(statistical_summary(results, filter_by_solved))
return pretty_statistic_string
def compute_time_improvement(results, filter_by_solved, base_method="ac_opf"):
improvements = {}
if filter_by_solved:
results = results[results["solved"] == 1]
base_df = results[results.opf_method == base_method]
methods = results.opf_method.unique()
for m in sorted(methods):
if m == base_method:
continue
method_df = results[results.opf_method == m]
merged_df = method_df.merge(base_df, left_on="scenario_id",
right_on="scenario_id")
diff = merged_df.time_taken_y - merged_df.time_taken_x
improvements[m] = {
"mean improvement": np.mean(diff),
"most improvement": np.max(diff),
"least improvement": np.min(diff),
"median improvement": np.median(diff),
"relative improvement": np.mean(merged_df.time_taken_y) / np.mean(merged_df.time_taken_x)
}
return improvements
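# Reading the improvement dict: diff = baseline time - method time on shared
# scenarios, so positive values mean the method beat the ac_opf baseline, and
# "relative improvement" > 1.0 means it was faster on average.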
def create_best_ac_and_model(results):
# creates a hypothetical case in which we can choose the best of warm-starting acopf and
# not warmstarting it.
acopf_results = results[results.opf_method == "ac_opf"]
model_ac_opf_results = results[results.opf_method == "model_ac_opf"]
dcacopf_results = results[results.opf_method == "dcac_opf"]
for scen in acopf_results.scenario_id.unique():
acopf = acopf_results[acopf_results.scenario_id == scen]
modelacopf = model_ac_opf_results[model_ac_opf_results.scenario_id == scen]
dcacopf = dcacopf_results[dcacopf_results.scenario_id == scen]
if acopf.time_taken.iloc[0] < modelacopf.time_taken.iloc[0]:  # compare the per-scenario row, not the whole frame
chosen = acopf
else:
chosen = modelacopf
if dcacopf.time_taken.iloc[0] < chosen.time_taken.iloc[0]:
chosen = dcacopf
chosen = chosen.iloc[0].to_dict()
chosen = pd.DataFrame.from_dict({k: [v] for k, v in chosen.items()})
chosen["opf_method"] = ["best"]
results = pd.concat([results, chosen], ignore_index=True, axis=0, sort=False)
print(results)
return results
def main(results_file, base_csv=None, take_from_base=None):
pp = pprint.PrettyPrinter(indent=4)
results = pd.read_csv(results_file)
    if base_csv is not None:
        added_results = pd.read_csv(base_csv)
        for method in take_from_base:
            method_results = added_results[added_results["opf_method"] == method]
            results = pd.concat([results, method_results], ignore_index=True, axis=0, sort=False)
    # results = create_best_ac_and_model(results)
    print("All")
    print(pretty_statistics(results, filter_by_solved=False))
    pp.pprint(compute_time_improvement(results, filter_by_solved=False))
    print("\nSolved")
    print(pretty_statistics(results, filter_by_solved=True))
    solved_in_n_seconds(300, results)
    solved_in_n_seconds(60, results)
    pp.pprint(compute_time_improvement(results, filter_by_solved=True))
    #fig = plot_histogram(results, filter_by_solved=False, filter_by_methods=["model", "dc_opf"])
    #plt.show()
    fig = plot_relative_time(results, filter_by_solved=False)
    # print(results)
def eval_2k(exp_path):
    subpath = Path("results/results.csv")
    results_file = exp_path / subpath
    main(results_file)
def eval_200(exp_path):
    subpath = Path("results/results.csv")
    results_file = exp_path / subpath
    main(results_file)
def eval_on_params():
    from argparse import ArgumentParser
    parser = ArgumentParser()
main.rs
// Module header (use statements for std::mem, std::ptr, file I/O, Arc/Mutex,
// thread, the gl/glm/glutin crates and the local shader/util modules, plus the
// SCREEN_W/SCREEN_H constants) is truncated in this dump.
// Get the size of the given type in bytes
fn size_of<T>() -> i32 {
mem::size_of::<T>() as i32
}
// Get an offset in bytes for n units of type T
fn offset<T>(n: u32) -> *const c_void {
(n * mem::size_of::<T>() as u32) as *const T as *const c_void
}
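// Helpers assumed from the byte_size_of_array / pointer_to_array calls below;
// a minimal sketch matching the usual gloom-rs utility signatures (the body of
// pointer_to_array also appears as a stray fragment elsewhere in this dump).
// Get the size of the given array in bytes
fn byte_size_of_array<T>(val: &Vec<T>) -> isize {
    (val.len() * mem::size_of::<T>()) as isize
}
// Get a raw pointer to the start of the array's backing storage
fn pointer_to_array<T>(val: &Vec<T>) -> *const c_void {
    &val[0] as *const T as *const c_void
}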
fn read_triangles_from_file() -> Result<Vec<f32>, ()> {
// Reads an arbitrary number of triangle vertices from a file
let mut vertices: Vec<f32>;
match File::open(".\\src\\triangles.txt") {
Ok(mut file) => {
let mut content = String::new();
// Read all the file content into a variable
file.read_to_string(&mut content).unwrap();
            vertices = content
                .split_whitespace() // tolerate newlines/tabs between numbers, not just single spaces
                .map(|x| x.parse::<f32>().unwrap())
                .collect();
println!("{}", content);
Ok(vertices)
}
// Error handling
Err(error) => {
println!("Error message: {}", error);
std::process::exit(1);
}
}
}
// Get a null pointer (equivalent to an offset of 0)
// ptr::null()
// let p = 0 as *const c_void
// == // Modify and complete the function below for the first task
unsafe fn init_vao(vertices: &Vec<f32>, indices: &Vec<u32>, colors: &Vec<f32>) -> u32 {
// Returns the ID of the newly instantiated vertex array object upon its creation
// VAO - binds the VBOs together with their attribute specification
let mut vao: u32 = 0; // Create
gl::GenVertexArrays(1, &mut vao); // Generate
gl::BindVertexArray(vao); // Bind
// VBO - buffer for the vertices/positions
let mut vbo: u32 = 0;
gl::GenBuffers(1, &mut vbo); // creates buffer, generates an id for the vertex buffer - stored on vram
gl::BindBuffer(gl::ARRAY_BUFFER, vbo); // Binding is sort of like creating layers in photoshop
gl::BufferData(
gl::ARRAY_BUFFER,
byte_size_of_array(&vertices),
pointer_to_array(&vertices),
gl::STATIC_DRAW,
);
// Vaa = Vertex attrib array
gl::VertexAttribPointer(0, 3, gl::FLOAT, gl::FALSE, 0, 0 as *const c_void);
gl::EnableVertexAttribArray(0);
// CBO - vbo for the color buffer, RGBA
let mut cbo: u32 = 0; // overwritten by GenBuffers below
gl::GenBuffers(1, &mut cbo);
gl::BindBuffer(gl::ARRAY_BUFFER, cbo);
gl::BufferData(
gl::ARRAY_BUFFER,
byte_size_of_array(&colors),
pointer_to_array(&colors),
gl::STATIC_DRAW,
);
// 2nd attribute buffer is for colors
gl::VertexAttribPointer(1, 4, gl::FLOAT, gl::FALSE, size_of::<f32>() * 4, 0 as *const c_void);
gl::EnableVertexAttribArray(1);
// Index buffer object = connect the dots, multiple usecases for same vertices.
let mut ibo: u32 = 0;
gl::GenBuffers(1, &mut ibo);
gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, ibo);
gl::BufferData(
gl::ELEMENT_ARRAY_BUFFER,
byte_size_of_array(&indices),
pointer_to_array(&indices),
gl::STATIC_DRAW,
);
vao
}
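// Sketch of the resulting attribute layout (the shader locations are
// assumptions): location 0 <- vec3 position from the position VBO, location 1
// <- vec4 RGBA from the color VBO; with the IBO bound, drawing this VAO would
// look like gl::DrawElements(gl::TRIANGLES, index_count, gl::UNSIGNED_INT, ptr::null()).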
fn main() {
// Set up the necessary objects to deal with windows and event handling
let el = glutin::event_loop::EventLoop::new();
let wb = glutin::window::WindowBuilder::new()
.with_title("Gloom-rs")
.with_resizable(false)
.with_inner_size(glutin::dpi::LogicalSize::new(SCREEN_W, SCREEN_H));
let cb = glutin::ContextBuilder::new().with_vsync(true);
let windowed_context = cb.build_windowed(wb, &el).unwrap();
// Uncomment these if you want to use the mouse for controls, but want it to be confined to the screen and/or invisible.
// windowed_context.window().set_cursor_grab(true).expect("failed to grab cursor");
// windowed_context.window().set_cursor_visible(false);
// Set up a shared vector for keeping track of currently pressed keys
let arc_pressed_keys = Arc::new(Mutex::new(Vec::<VirtualKeyCode>::with_capacity(10)));
// Make a reference of this vector to send to the render thread
let pressed_keys = Arc::clone(&arc_pressed_keys);
// Set up shared tuple for tracking mouse movement between frames
let arc_mouse_delta = Arc::new(Mutex::new((0f32, 0f32)));
// Make a reference of this tuple to send to the render thread
let mouse_delta = Arc::clone(&arc_mouse_delta);
// Spawn a separate thread for rendering, so event handling doesn't block rendering
let render_thread = thread::spawn(move || {
// Acquire the OpenGL Context and load the function pointers. This has to be done inside of the rendering thread, because
// an active OpenGL context cannot safely traverse a thread boundary
let context = unsafe {
let c = windowed_context.make_current().unwrap();
gl::load_with(|symbol| c.get_proc_address(symbol) as *const _);
c
};
// Set up openGL
unsafe {
gl::Enable(gl::DEPTH_TEST);
gl::DepthFunc(gl::LESS);
gl::Enable(gl::CULL_FACE);
gl::Disable(gl::MULTISAMPLE);
gl::Enable(gl::BLEND);
gl::BlendFunc(gl::SRC_ALPHA, gl::ONE_MINUS_SRC_ALPHA);
gl::Enable(gl::DEBUG_OUTPUT_SYNCHRONOUS);
gl::DebugMessageCallback(Some(util::debug_callback), ptr::null());
// Print some diagnostics
println!(
"{}: {}",
util::get_gl_string(gl::VENDOR),
util::get_gl_string(gl::RENDERER)
);
println!("OpenGL\t: {}", util::get_gl_string(gl::VERSION));
println!(
"GLSL\t: {}",
util::get_gl_string(gl::SHADING_LANGUAGE_VERSION)
);
}
let c: Vec<f32> = vec![
-0.8, -0.6, 0.0,
-0.5, -0.6, 0.0,
-0.65, -0.2, 0.0,
0.5, -0.6, 0.0,
0.8, -0.6, 0.0,
0.65, -0.2, 0.0,
-0.2, 0.3, 0.0,
0.2, 0.6, 0.0,
0.0, 0.6, 0.0,
];
let i: Vec<u32> = vec![0, 1, 2, 3, 4, 5, 6, 7, 8];
let col: Vec<f32> = vec![
1.0, 0.0, 0.0, 0.9,
1.0, 0.0, 0.0, 0.9,
1.0, 0.0, 0.0, 0.9,
0.0, 1.0, 0.0, 0.8,
0.0, 1.0, 0.0, 0.8,
0.0, 1.0, 0.0, 0.8,
0.0, 0.0, 1.0, 0.7,
0.0, 0.0, 1.0, 0.7,
0.0, 0.0, 1.0, 0.7,
];
let overLappingCoordinates: Vec<f32> = vec![
-0.3, 0.0, 0.7,
0.3, 0.0, 0.7,
0.0, 0.5, 0.7,
-0.1, 0.3, 0.8,
0.3, 0.0, 0.8,
0.3, 0.6, 0.8,
-0.4, 0.6, 0.6,
-0.4, 0.0, 0.6,
0.2, 0.3, 0.6
];
let overLappingColors: Vec<f32> = vec![
        1.0, 0.0, 0.0, 0.6,
        1.0, 0.0, 0.0, 0.6,
        1.0, 0.0, 0.0, 0.6,
        0.0, 1.0, 0.0, 0.8,
        0.0, 1.0, 0.0, 0.8,
        0.0, 1.0, 0.0, 0.8,
        0.0, 0.0, 1.0, 0.9,
        0.0, 0.0, 1.0, 0.9,
        0.0, 0.0, 1.0, 0.9,
    ];
    let coordinates: Vec<f32> = vec![
        -0.6, -0.6, 0.0,
        0.6, -0.6, 0.0,
        0.0, 0.6, 0.0
    ];
let triangle_indices: Vec<u32> = vec![0, 1, 2];
let colors: Vec<f32> = vec![
0.0, 0.0, 1.0, 1.0,
0.0, 0.0, 1.0, 1.0,
0.0, 0.0, 1.0, 1.0
];
// == // Set up your VAO here
        // Keep the VAO id in scope after the unsafe block so later draw calls can use it.
        let vao = unsafe { init_vao(&overLappingCoordinates, &i, &overLappingColors) };
// Setup uniform locations
let trans_loc: i32;
let time_loc: i32;
let opacity_loc: i32;
unsafe {
// Create the shader: chain multiple attach_file calls (they return self) and link them all at the end
let shdr = shader::ShaderBuilder::new()
.attach_file(".\\shaders\\simple.vert")
.attach_file(".\\shaders\\simple.frag")
.link();
// Get uniform locations
trans_loc = shdr.get_uniform_location("transformation");
time_loc = shdr.get_uniform_location("time");
opacity_loc = shdr.get_uniform_location("opacity");
shdr.activate();
}
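// A minimal sketch (an assumption, not code from the original file) of how these
// uniform locations could be refreshed once per frame before drawing:
unsafe fn upload_uniforms(trans_loc: i32, time_loc: i32, opacity_loc: i32,
transform: &glm::Mat4, time: f32, opacity: f32) {
gl::UniformMatrix4fv(trans_loc, 1, gl::FALSE, transform.as_ptr());
gl::Uniform1f(time_loc, time);
gl::Uniform1f(opacity_loc, opacity);
}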
// Used to demonstrate keyboard handling -- feel free to remove
let mut _arbitrary_number = 0.0;
let first_frame_time = std::time::Instant::now();
let mut last_frame_time = first_frame_time;
// The main rendering loop
let persp_mat: glm::Mat4 = glm::perspective(
(SCREEN_W as f32) / (SCREEN_H as f32), // nalgebra-glm takes aspect = width / height as the first argument
90.0_f32.to_radians(), // fovy is expected in radians
1.0,
100.0
);
let persp_trans: glm::Mat4 = glm::translation(
&glm::vec3(0.0, 0.0, -2.0)
);
let mut proj: glm::Mat4 = persp_mat * persp_trans;
let model: glm::Mat4 = glm::identity();
let mut trans_matrix: glm::Mat4 = glm::identity();
let mut rot_x = 0.0;
let mut rot_y = 0.0;
let rot_step: f32 = 2.0;
let mut opacity: f32 = 0.0;
let mut v_time:f32 = 0.0;
let mut trans_x = 0.0;
let mut trans_y = 0.0;
let mut trans_z = -4.0;
let trans_step: f32 = 0.1;
let mut view: glm::Mat4 = glm::identity();
loop {
let now = std::time::Instant::now();
let elapsed = now.duration_since(first_frame_time).as_secs_f32();
let delta_time = now.duration_since(last_frame_time).as_secs_f32();
last_frame_time = now;
// Handle keyboard input
if let Ok(keys) = pressed_keys.lock() {
for key in keys.iter() {
// I'm using WASDEQ to handle inputs
// Also use arrow keys for rotation
match key {
VirtualKeyCode::W => {
trans_z += trans_step;
},
VirtualKeyCode::A => {
trans_x += trans_step;
},
VirtualKeyCode::S => {
trans_z -= trans_step;
},
VirtualKeyCode::D => {
trans_x -= trans_step;
},
VirtualKeyCode::E => {
trans_y -= trans_step;
},
VirtualKeyCode::Q => {
trans_y += trans_step;
},
VirtualKeyCode::R => {
// Reset camera
view = glm::identity();
},
VirtualKeyCode::Up => {
rot_x -= rot_step;
},
VirtualKeyCode::Down => {
rot_x += rot_step;
},
|
VirtualKeyCode::Left => {
rot_y -= rot_step;
},
|
random_line_split
|
|
main.rs
|
<T>(n: u32) -> *const c_void {
(n * mem::size_of::<T>() as u32) as *const T as *const c_void
}
fn read_triangles_from_file() -> Result<Vec<f32>, ()> {
// Reads an arbitrary number of triangles from a file
let mut vertices: Vec<f32>;
match File::open(".\\src\\triangles.txt") {
Ok(mut file) => {
let mut content = String::new();
// Read all the file content into a variable
file.read_to_string(&mut content).unwrap();
vertices = content
.split_whitespace() // robust against newlines and repeated spaces, unlike split(" ")
.map(|x| x.parse::<f32>().unwrap())
.collect();
println!("{}", content);
Ok(vertices)
}
// Error handling
Err(error) => {
println!("Error message: {}", error);
std::process::exit(1);
}
}
}
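// Assumed input format (illustrative, not taken from the original repository):
// whitespace-separated floats, three per vertex, e.g. a triangles.txt containing
// "-0.6 -0.6 0.0 0.6 -0.6 0.0 0.0 0.6 0.0"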
// Get a null pointer (equivalent to an offset of 0)
// ptr::null()
// let p = 0 as *const c_void
// == // Modify and complete the function below for the first task
unsafe fn init_vao(vertices: &Vec<f32>, indices: &Vec<u32>, colors: &Vec<f32>) -> u32 {
// Returns the ID of the newly instantiated vertex array object upon its creation
// VAO - binds the VBO together with its vertex attribute specification
let mut vao: u32 = 0; // Create
gl::GenVertexArrays(1, &mut vao); // Generate
gl::BindVertexArray(vao); // Bind
// VBO - buffer for the vertices/positions
let mut vbo: u32 = 0;
gl::GenBuffers(1, &mut vbo); // creates buffer, generates an id for the vertex buffer - stored on vram
gl::BindBuffer(gl::ARRAY_BUFFER, vbo); // Binding is sort of like creating layers in photoshop
gl::BufferData(
gl::ARRAY_BUFFER,
byte_size_of_array(&vertices),
pointer_to_array(&vertices),
gl::STATIC_DRAW,
);
// VAA = vertex attribute array
gl::VertexAttribPointer(0, 3, gl::FLOAT, gl::FALSE, 0, 0 as *const c_void);
gl::EnableVertexAttribArray(0);
// CBO - vbo for the color buffer, RGBA
let mut cbo: u32 = 1;
gl::GenBuffers(1, &mut cbo);
gl::BindBuffer(gl::ARRAY_BUFFER, cbo);
gl::BufferData(
gl::ARRAY_BUFFER,
byte_size_of_array(&colors),
pointer_to_array(&colors),
gl::STATIC_DRAW,
);
// 2nd attribute buffer is for colors
gl::VertexAttribPointer(1, 4, gl::FLOAT, gl::FALSE, (size_of::<f32>() * 4) as i32, 0 as *const c_void);
gl::EnableVertexAttribArray(1);
// Index buffer object = connects the dots; lets multiple draws reuse the same vertices.
let mut ibo: u32 = 0;
gl::GenBuffers(1, &mut ibo);
gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, ibo);
gl::BufferData(
gl::ELEMENT_ARRAY_BUFFER,
byte_size_of_array(&indices),
pointer_to_array(&indices),
gl::STATIC_DRAW,
);
vao
}
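// A minimal sketch (an assumption, not code from the original file) of how the
// VAO built above could be drawn each frame: bind it, then issue an indexed draw
// over the same index count that was uploaded in init_vao.
unsafe fn draw_vao(vao: u32, index_count: i32) {
gl::BindVertexArray(vao);
gl::DrawElements(gl::TRIANGLES, index_count, gl::UNSIGNED_INT, ptr::null());
}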
fn main() {
// Set up the necessary objects to deal with windows and event handling
let el = glutin::event_loop::EventLoop::new();
let wb = glutin::window::WindowBuilder::new()
.with_title("Gloom-rs")
.with_resizable(false)
.with_inner_size(glutin::dpi::LogicalSize::new(SCREEN_W, SCREEN_H));
let cb = glutin::ContextBuilder::new().with_vsync(true);
let windowed_context = cb.build_windowed(wb, &el).unwrap();
// Uncomment these if you want to use the mouse for controls, but want it to be confined to the screen and/or invisible.
// windowed_context.window().set_cursor_grab(true).expect("failed to grab cursor");
// windowed_context.window().set_cursor_visible(false);
// Set up a shared vector for keeping track of currently pressed keys
let arc_pressed_keys = Arc::new(Mutex::new(Vec::<VirtualKeyCode>::with_capacity(10)));
// Make a reference of this vector to send to the render thread
let pressed_keys = Arc::clone(&arc_pressed_keys);
// Set up shared tuple for tracking mouse movement between frames
let arc_mouse_delta = Arc::new(Mutex::new((0f32, 0f32)));
// Make a reference of this tuple to send to the render thread
let mouse_delta = Arc::clone(&arc_mouse_delta);
// Spawn a separate thread for rendering, so event handling doesn't block rendering
let render_thread = thread::spawn(move || {
// Acquire the OpenGL Context and load the function pointers. This has to be done inside of the rendering thread, because
// an active OpenGL context cannot safely traverse a thread boundary
let context = unsafe {
let c = windowed_context.make_current().unwrap();
gl::load_with(|symbol| c.get_proc_address(symbol) as *const _);
c
};
// Set up OpenGL
unsafe {
gl::Enable(gl::DEPTH_TEST);
gl::DepthFunc(gl::LESS);
gl::Enable(gl::CULL_FACE);
gl::Disable(gl::MULTISAMPLE);
gl::Enable(gl::BLEND);
gl::BlendFunc(gl::SRC_ALPHA, gl::ONE_MINUS_SRC_ALPHA);
gl::Enable(gl::DEBUG_OUTPUT_SYNCHRONOUS);
gl::DebugMessageCallback(Some(util::debug_callback), ptr::null());
// Print some diagnostics
println!(
"{}: {}",
util::get_gl_string(gl::VENDOR),
util::get_gl_string(gl::RENDERER)
);
println!("OpenGL\t: {}", util::get_gl_string(gl::VERSION));
println!(
"GLSL\t: {}",
util::get_gl_string(gl::SHADING_LANGUAGE_VERSION)
);
}
let c: Vec<f32> = vec![
-0.8, -0.6, 0.0,
-0.5, -0.6, 0.0,
-0.65, -0.2, 0.0,
0.5, -0.6, 0.0,
0.8, -0.6, 0.0,
0.65, -0.2, 0.0,
-0.2, 0.3, 0.0,
0.2, 0.6, 0.0,
0.0, 0.6, 0.0,
];
let i: Vec<u32> = vec![0, 1, 2, 3, 4, 5, 6, 7, 8];
let col: Vec<f32> = vec![
1.0, 0.0, 0.0, 0.9,
1.0, 0.0, 0.0, 0.9,
1.0, 0.0, 0.0, 0.9,
0.0, 1.0, 0.0, 0.8,
0.0, 1.0, 0.0, 0.8,
0.0, 1.0, 0.0, 0.8,
0.0, 0.0, 1.0, 0.7,
0.0, 0.0, 1.0, 0.7,
0.0, 0.0, 1.0, 0.7,
];
let overLappingCoordinates: Vec<f32> = vec![
-0.3, 0.0, 0.7,
0.3, 0.0, 0.7,
0.0, 0.5, 0.7,
-0.1, 0.3, 0.8,
0.3, 0.0, 0.8,
0.3, 0.6, 0.8,
-0.4, 0.6, 0.6,
-0.4, 0.0, 0.6,
0.2, 0.3, 0.6
];
let overLappingColors: Vec<f32> = vec![
1.0, 0.0, 0.0, 0.6,
1.0, 0.0, 0.0, 0.6,
1.0, 0.0, 0.0, 0.6,
0.0, 1.0
|
offset
|
identifier_name
|
|
index.js
|
-post-btn").hide();
$("#save-post-btn").show();
$("#write-modal").addClass("is-active");
});
//Delete a post by selecting its checkbox
$("#delete-btn").click(function() {
let checked_count = $("input:checkbox[name=check]:checked").length;
if(checked_count > 1) {
alert("Please select only one post");
$("input:checkbox[name=check]").prop("checked", false);
return;
}
if(checked_count == 0) {
alert("Please select a post to delete");
return;
}
if(confirm("Delete the selected post?")) {
let checked_value = $("input:checkbox[name=check]:checked").val();
$.ajax({
type:"DELETE",
url:`/api/posts/${checked_value}`,
success: function() {
window.location.reload();
}
});
}
});
//Save the post (publish it)
$("#save-post-btn").click(function() {
writePost();
});
//Close the write modal when the X button is clicked
$("#close-write-modal").click(function() {
$("#write-modal").removeClass("is-active");
});
//Close the write modal when the background is clicked
$("#write-modal-background").click(function() {
$("#write-modal").removeClass("is-active");
});
//Close the detail modal when the X button is clicked
$("#close-detail-modal").click(function() {
$("#detail-modal").removeClass("is-active");
});
//Close the detail modal when the background is clicked
$("#detail-modal-background").click(function() {
$("#detail-modal").removeClass("is-active");
});
//Detail modal: handle the edit button click
$("#modify-btn").click(function() {
$("#detail-modal").removeClass("is-active");
getModifyData();
$("#save-post-btn").hide();
$("#write-name").attr("disabled", true); //์์ฑ์ ์ด๋ฆ ๋ชป ๋ฐ๊พธ๊ฒ ๋ง๊ธฐ
$("#modal-name").text("๊ธ์์ ");
$("#modify-post-btn").show();
$("#write-modal").addClass("is-active");
});
//Complete the edit
$("#modify-post-btn").click(function() {
modifyPost();
});
//Cancel button on the write/edit modal: reset the inputs / close the modal
$("#cancel-btn").click(function () {
if(confirm("Do you want to cancel?")) {
$("#write-title").val("");
$("#write-name").val("");
$("#write-content").val("");
$("#write-modal").removeClass("is-active");
}
});
//Move forward one page block
//The value on the previous button is the tens digit of the page number used for paging
$("#pagination-next").click(function() {
let current_previous_val = parseInt($("#pagination-previous").attr("value"));
getPagingAndPostsData(url, current_previous_val + 1, (current_previous_val + 1)*10);
});
//Move back one page block
$("#pagination-previous").click(function() {
let current_previous_val = parseInt($("#pagination-previous").attr("value"));
getPagingAndPostsData(url, current_previous_val - 1, (current_previous_val - 1)*10);
});
});
//Build the pagination and fetch the data
function getPagingAndPostsData(url, current_page, select_page) {
let url_include_page = "";
if(url.includes("?")) {
url_include_page = url + "&page=" + current_page;
} else {
url_include_page = url + "?page=" + current_page;
}
$.ajax({
type:"GET",
async: false, //synchronous, to store the values in global variables
url:url_include_page,
success: function(response) {
console.log(response);
let result_length = response["content"].length;
let result_count = response["totalElements"].toString(); //total number of posts
let page_number = response["pageable"]["pageNumber"].toString(); //current page (0-based)
let data_per_page = response["pageable"]["pageSize"].toString(); //posts per page
|
let total_pages = response["totalPages"].toString(); //total number of pages
paging(current_page, result_count, result_length, page_number, total_pages);
selectPage(select_page);
}
});
}
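// Example (illustrative): url = "/api/posts?title=foo" with current_page = 1
// requests "/api/posts?title=foo&page=1"; a url without a query string gets
// "?page=1" appended instead.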
//Fetch all the post data and build the list
function getPosts(url, current_page) {
let url_include_page = "";
if(url.includes("?")) {
url_include_page = url + "&page=" + current_page;
} else {
url_include_page = url + "?page=" + current_page;
}
$.ajax({
type:"GET",
async: false, //synchronous, to store the values in global variables
url:url_include_page,
success: function(response) {
console.log(response);
let result = response["content"]; //post data
console.log(result);
if (result.length > 0) {
$("#table-tbody").empty();
for (let i = 0; i < result.length; i++) {
let id = result[i]["id"];
let title = result[i]["title"];
let name = result[i]["name"];
let modified_at_date = result[i]["modifiedAt"].substr(0,10);
let temp_html =
`<tr onclick="showDetail('${id}')">
<td onclick="event.cancelBubble=true">
<input id="${id}-checkbox" name="check" type="checkbox" value="${id}">
</td>
<th id="${id}">${id}</th>
<td id="${id}-title">${title}</td>
<td id="${id}-name">${name}</td>
<td id="${id}-modifieddate">${modified_at_date}</td>
</tr>`;
$("#table-tbody").append(temp_html);
}
} else {
$("#table-tbody").empty();
let temp_html =`<tr><td id="table-empty" colspan="5">There are no posts.</td></tr>`;
$("#table-tbody").append(temp_html);
}
}
});
}
//Pagination
function paging(previous, result_count, result_length, page_number, total_pages) {
let next = previous + 1; //value for the Next page button
let remainder = total_pages % 10;
let end_page = Math.floor(total_pages / 10);
let for_start = (previous * 10) + 1;
let for_end;
if(previous == end_page) {
for_end = (previous * 10) + remainder;
} else {
for_end = next * 10;
}
$("#pagination-list").empty();
for(let i=for_start; i<=for_end; i++) {
let page_tag = `<li><a id="page-${i-1}" class="pagination-link"
aria-label="Goto page ${i}" onclick="selectPage('${i-1}')">${i}</a></li>`;
$("#pagination-list").append(page_tag);
}
$("#pagination-previous").attr("value", previous);
$("#pagination-next").attr("value", next);
//Hide the previous button on the first page block
if($("#pagination-previous").attr("value") == "0" || result_length == 0) {
$("#pagination-previous").hide();
} else {
$("#pagination-previous").show();
}
//Hide the Next page button on the last page block
if($("#pagination-next").attr("value") == Math.ceil(total_pages / 10) || result_length == 0) {
$("#pagination-next").hide();
} else {
$("#pagination-next").show();
}
}
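// Worked example (illustrative): with total_pages = 25 and previous = 2,
// remainder = 5 and end_page = 2, so the loop renders page links 21..25;
// with previous = 0 or 1 it renders a full window of 10 links (1..10, 11..20).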
//Go to the selected page
function selectPage(page_number) {
$(".pagination-link").removeClass("is-current");
$(`#page-${page_number}`).addClass("is-current");
getPosts(url, page_number);
}
//Search
function search() {
if(window.event.keyCode == 13) {
let search_select = $("#search-select").val().trim();
let search_input = $("#search-input").val().trim();
url = "";
if($("#search-input").val() == "") {
url = "/api/posts";
} else {
url = `/api/posts?${search_select}=${search_input}`;
}
getPagingAndPostsData(url, 0, 0);
}
}
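// Example (illustrative): choosing "title" in the select box and typing "hello"
// yields url = "/api/posts?title=hello"; an empty input resets url to "/api/posts".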
//Show the details of the clicked post
function showDetail(id) {
let post_id = id;
$.ajax({
type:"GET",
url:"/api/posts/" + post_id,
success: function(response) {
console.log(response);
let id = response["id"];
let title = response["title"];
let name = response["name"];
let created_at_date = response["createdAt"].substr(0,10);
let modified_at_date = response["modifiedAt"].substr(0,10);
let content = response["content"];
if(created_at
|
random_line_split
|
|
index.js
|
-post-btn").hide();
$("#save-post-btn").show();
$("#write-modal").addClass("is-active");
});
//Delete a post by selecting its checkbox
$("#delete-btn").click(function() {
let checked_count = $("input:checkbox[name=check]:checked").length;
if(checked_count > 1) {
alert("Please select only one post");
$("input:checkbox[name=check]").prop("checked", false);
return;
}
if(checked_count == 0) {
alert("Please select a post to delete");
return;
}
if(confirm("Delete the selected post?")) {
let checked_value = $("input:checkbox[name=check]:checked").val();
$.ajax({
type:"DELETE",
url:`/api/posts/${checked_value}`,
success: function() {
window.location.reload();
}
});
}
});
//Save the post (publish it)
$("#save-post-btn").click(function() {
writePost();
});
//Close the write modal when the X button is clicked
$("#close-write-modal").click(function() {
$("#write-modal").removeClass("is-active");
});
//Close the write modal when the background is clicked
$("#write-modal-background").click(function() {
$("#write-modal").removeClass("is-active");
});
//Close the detail modal when the X button is clicked
$("#close-detail-modal").click(function() {
$("#detail-modal").removeClass("is-active");
});
//Close the detail modal when the background is clicked
$("#detail-modal-background").click(function() {
$("#detail-modal").removeClass("is-active");
});
//Detail modal: handle the edit button click
$("#modify-btn").click(function() {
$("#detail-modal").removeClass("is-active");
getModifyData();
$("#save-post-btn").hide();
$("#write-name").attr("disabled", true); //์์ฑ์ ์ด๋ฆ ๋ชป ๋ฐ๊พธ๊ฒ ๋ง๊ธฐ
$("#modal-name").text("๊ธ์์ ");
$("#modify-post-btn").show();
$("#write-modal").addClass("is-active");
});
//Complete the edit
$("#modify-post-btn").click(function() {
modifyPost();
});
//Cancel button on the write/edit modal: reset the inputs / close the modal
$("#cancel-btn").click(function () {
if(confirm("Do you want to cancel?")) {
$("#write-title").val("");
$("#write-name").val("");
$("#write-content").val("");
$("#write-modal").removeClass("is-active");
}
});
//Move forward one page block
//The value on the previous button is the tens digit of the page number used for paging
$("#pagination-next").click(function() {
let current_previous_val = parseInt($("#pagination-previous").attr("value"));
getPagingAndPostsData(url, current_previous_val + 1, (current_previous_val + 1)*10);
});
//Move back one page block
$("#pagination-previous").click(function() {
let current_previous_val = parseInt($("#pagination-previous").attr("value"));
getPagingAndPostsData(url, current_previous_val - 1, (current_previous_val - 1)*10);
});
});
//Build the pagination and fetch the data
function getPagingAndPostsData(url, current_page, select_page) {
let url_include_page = "";
if(url.includes("?")) {
url_include_page = url + "&page=" + current_page;
} else {
url_include_page = url + "?page=" + current_page;
}
$.ajax({
type:"GET",
async: false, //synchronous, to store the values in global variables
url:url_include_page,
success: function(response) {
console.log(response);
let result_length = response["content"].length;
let result_count = response["totalElements"].toString(); //total number of posts
let page_number = response["pageable"]["pageNumber"].toString(); //current page (0-based)
let data_per_page = response["pageable"]["pageSize"].toString(); //posts per page
let total_pages = response["totalPages"].toString(); //total number of pages
paging(current_page, result_count, result_length, page_number, total_pages);
selectPage(select_page);
}
});
}
//๊ฒ์๋ฌผ ๋ฐ์ดํฐ ๋ชจ๋ ๊ฐ์ ธ์ ๋ชฉ๋ก ๋ง๋ค๊ธฐ
function getPosts(url, current_page) {
let url_include_page = "";
if(url.includes("?")) {
url_include_page = url + "&page=" + current_page;
} else {
url_include_page = url + "?page=" + current_page;
}
$.ajax({
type:"GET",
async: false, //synchronous, to store the values in global variables
url:url_include_page,
success: function(response) {
console.log(response);
let result = response["content"]; //post data
console.log(result);
if (result.length > 0) {
$("#table-tbody").empty();
for (let i = 0; i < result.length; i++) {
let id = result[i]["id"];
let title = result[i]["title"];
let name = result[i]["name"];
let modified_at_date = result[i]["modifiedAt"].substr(0,10);
let temp_html =
`<tr onclick="showDetail('${id}')">
<td onclick="event.cancelBubble=true">
<input id="${id}-checkbox" name="check" type="checkbox" value="${id}">
</td>
<th id="${id}">${id}</th>
<td id="${id}-title">${title}</td>
<td id="${id}-name">${name}</td>
<td id="${id}-modifieddate">${modified_at_date}</td>
</tr>`;
$("#table-tbody").append(temp_html);
}
} else {
$("#table-tbody").empty();
let temp_html =`<tr><td id="table-empty" colspan="5">There are no posts.</td></tr>`;
$("#table-tbody").append(temp_html);
}
}
});
}
//Pagination
function paging(previous, result_count, result_length, page_number, total_pages) {
let next = previous + 1; //value for the Next page button
let remainder = total_pages % 10;
let end_page = Math.floor(total_pages / 10);
let for_start = (previous * 10) + 1;
let for_end;
if(previous == end_page) {
for_end = (previous * 10) + remainder;
} else {
for_end = next * 10;
}
$("#pagination-list").empty();
for(let i=for_start; i<=for_end; i++) {
let page_tag = `<li><a id="page-${i-1}" class="pagination-link"
aria-label="Goto page ${i}" onclick="selectPage('${i-1}')">${i}</a></li>`;
$("#pagination-list").append(page_tag);
}
$("#pagination-previous").attr("value", previous);
$("#pagination-next").attr("value", next);
//Hide the previous button on the first page block
if($("#pagination-previous").attr("value") == "0" || result_length == 0) {
$("#pagination-previous").hide();
} else {
$("#pagination-previous").show();
}
//Hide the Next page button on the last page block
if($("#pagination-next").attr("value") == Math.ceil(total_pages / 10) || result_length == 0) {
$("#pagination-next").hide();
} else {
$("#pagination-next").show();
}
}
//Go to the selected page
function selectPage(page_number) {
$(".pagination-link").removeClass("is-current");
$(`#page-${page_number}`).addClass("is-current");
getPosts(url, page_number);
}
//Search
function search() {
if(window.event.keyCode == 13) {
let search_select = $("#search-select").val().trim();
let search_input = $("#search-input").val().trim();
url = "";
if($("#search-input").val() == "") {
url = "/api/posts";
} else {
url = `/api/posts?${search_select}=${search_input}`;
}
getPagingAndPostsData(url, 0, 0);
}
}
//Show the details of the clicked post
function showDetail(id) {
let post_id = id;
$.ajax({
type:"GET",
url:"/api/posts/" + post_id,
success: function(response) {
console.log(response);
let id = response["id"];
let title = response["title"];
let name = response[
|
(0,10);
let content = response["content"];
if(created
|
"name"];
let created_at_date = response["createdAt"].substr(0,10);
let modified_at_date = response["modifiedAt"].substr
|
identifier_body
|
index.js
|
-btn").hide();
$("#save-post-btn").show();
$("#write-modal").addClass("is-active");
});
//Delete a post by selecting its checkbox
$("#delete-btn").click(function() {
let checked_count = $("input:checkbox[name=check]:checked").length;
if(checked_count > 1) {
alert("Please select only one post");
$("input:checkbox[name=check]").prop("checked", false);
return;
}
if(checked_count == 0) {
alert("Please select a post to delete");
return;
}
if(confirm("Delete the selected post?")) {
let checked_value = $("input:checkbox[name=check]:checked").val();
$.ajax({
type:"DELETE",
url:`/api/posts/${checked_value}`,
success: function() {
window.location.reload();
}
});
}
});
//Save the post (publish it)
$("#save-post-btn").click(function() {
writePost();
});
//Close the write modal when the X button is clicked
$("#close-write-modal").click(function() {
$("#write-modal").removeClass("is-active");
});
//Close the write modal when the background is clicked
$("#write-modal-background").click(function() {
$("#write-modal").removeClass("is-active");
});
//Close the detail modal when the X button is clicked
$("#close-detail-modal").click(function() {
$("#detail-modal").removeClass("is-active");
});
//Close the detail modal when the background is clicked
$("#detail-modal-background").click(function() {
$("#detail-modal").removeClass("is-active");
});
//Detail modal: handle the edit button click
$("#modify-btn").click(function() {
$("#detail-modal").removeClass("is-active");
getModifyData();
$("#save-post-btn").hide();
$("#write-name").attr("disabled", true); //์์ฑ์ ์ด๋ฆ ๋ชป ๋ฐ๊พธ๊ฒ ๋ง๊ธฐ
$("#modal-name").text("๊ธ์์ ");
$("#modify-post-btn").show();
$("#write-modal").addClass("is-active");
});
//Complete the edit
$("#modify-post-btn").click(function() {
modifyPost();
});
//Cancel button on the write/edit modal: reset the inputs / close the modal
$("#cancel-btn").click(function () {
if(confirm("Do you want to cancel?")) {
$("#write-title").val("");
$("#write-name").val("");
$("#write-content").val("");
$("#write-modal").removeClass("is-active");
}
});
//Move forward one page block
//The value on the previous button is the tens digit of the page number used for paging
$("#pagination-next").click(function() {
let current_previous_val = parseInt($("#pagination-previous").attr("value"));
getPagingAndPostsData(url, current_previous_val + 1, (current_previous_val + 1)*10);
});
//Move back one page block
$("#pagination-previous").click(function() {
let current_previous_val = parseInt($("#pagination-previous").attr("value"));
getPagingAndPostsData(url, current_previous_val - 1, (current_previous_val - 1)*10);
});
});
//Build the pagination and fetch the data
function getPagingAndPostsData(url, current_page, select_page) {
let url_include_page = "";
if(url.includes("?")) {
url_include_page = url + "&page=" + current_page;
} else {
url_include_page = url + "?page=" + current_page;
}
$.ajax({
type:"GET",
async: false, //synchronous, to store the values in global variables
url:url_include_page,
success: function(response) {
console.log(response);
let result_length = response["content"].length;
let result_count = response["totalElements"].toString(); //total number of posts
let page_number = response["pageable"]["pageNumber"].toString(); //current page (0-based)
let data_per_page = response["pageable"]["pageSize"].toString(); //posts per page
let total_pages = response["totalPages"].toString(); //total number of pages
paging(current_page, result_count, result_length, page_number, total_pages);
selectPage(select_page);
}
});
}
//Fetch all the post data and build the list
function getPosts(url, current_page) {
let url_include_page = "";
if(url.includes("?")) {
url_include_page = url + "&page=" + current_page;
} else {
url_include_page = url + "?page=" + current_page;
}
$.ajax({
type:"GET",
async: false, //synchronous, to store the values in global variables
url:url_include_page,
success: function(response) {
console.log(response);
let result = response["content"]; //post data
console.log(result);
if (result.length > 0) {
$("#table-tbody").empty();
for (let i = 0; i < result.length; i++) {
let id = result[i]["id"];
let title = result[i]["title"];
let name = result[i]["name"];
let modified_at_date = result[i]["modifiedAt"].substr(0,10);
let temp_html =
|
<td onclick="event.cancelBubble=true">
<input id="${id}-checkbox" name="check" type="checkbox" value="${id}">
</td>
<th id="${id}">${id}</th>
<td id="${id}-title">${title}</td>
<td id="${id}-name">${name}</td>
<td id="${id}-modifieddate">${modified_at_date}</td>
</tr>`;
$("#table-tbody").append(temp_html);
}
} else {
$("#table-tbody").empty();
let temp_html =`<tr><td id="table-empty" colspan="5">There are no posts.</td></tr>`;
$("#table-tbody").append(temp_html);
}
}
});
}
//Pagination
function paging(previous, result_count, result_length, page_number, total_pages) {
let next = previous + 1; //value for the Next page button
let remainder = total_pages % 10;
let end_page = Math.floor(total_pages / 10);
let for_start = (previous * 10) + 1;
let for_end;
if(previous == end_page) {
for_end = (previous * 10) + remainder;
} else {
for_end = next * 10;
}
$("#pagination-list").empty();
for(let i=for_start; i<=for_end; i++) {
let page_tag = `<li><a id="page-${i-1}" class="pagination-link"
aria-label="Goto page ${i}" onclick="selectPage('${i-1}')">${i}</a></li>`;
$("#pagination-list").append(page_tag);
}
$("#pagination-previous").attr("value", previous);
$("#pagination-next").attr("value", next);
//Hide the previous button on the first page block
if($("#pagination-previous").attr("value") == "0" || result_length == 0) {
$("#pagination-previous").hide();
} else {
$("#pagination-previous").show();
}
//Hide the Next page button on the last page block
if($("#pagination-next").attr("value") == Math.ceil(total_pages / 10) || result_length == 0) {
$("#pagination-next").hide();
} else {
$("#pagination-next").show();
}
}
//Go to the selected page
function selectPage(page_number) {
$(".pagination-link").removeClass("is-current");
$(`#page-${page_number}`).addClass("is-current");
getPosts(url, page_number);
}
//Search
function search() {
if(window.event.keyCode == 13) {
let search_select = $("#search-select").val().trim();
let search_input = $("#search-input").val().trim();
url = "";
if($("#search-input").val() == "") {
url = "/api/posts";
} else {
url = `/api/posts?${search_select}=${search_input}`;
}
getPagingAndPostsData(url, 0, 0);
}
}
//Show the details of the clicked post
function showDetail(id) {
let post_id = id;
$.ajax({
type:"GET",
url:"/api/posts/" + post_id,
success: function(response) {
console.log(response);
let id = response["id"];
let title = response["title"];
let name = response["name"];
let created_at_date = response["createdAt"].substr(0,10);
let modified_at_date = response["modifiedAt"].substr(0,10);
let content = response["content"];
if
|
`<tr onclick="showDetail('${id}')">
|
conditional_block
|
index.js
|
").val("");
$("#write-content").val("");
$("#write-modal").removeClass("is-active");
}
});
//Move forward one page block
//The value on the previous button is the tens digit of the page number used for paging
$("#pagination-next").click(function() {
let current_previous_val = parseInt($("#pagination-previous").attr("value"));
getPagingAndPostsData(url, current_previous_val + 1, (current_previous_val + 1)*10);
});
//Move back one page block
$("#pagination-previous").click(function() {
let current_previous_val = parseInt($("#pagination-previous").attr("value"));
getPagingAndPostsData(url, current_previous_val - 1, (current_previous_val - 1)*10);
});
});
//Build the pagination and fetch the data
function getPagingAndPostsData(url, current_page, select_page) {
let url_include_page = "";
if(url.includes("?")) {
url_include_page = url + "&page=" + current_page;
} else {
url_include_page = url + "?page=" + current_page;
}
$.ajax({
type:"GET",
async: false, //synchronous, to store the values in global variables
url:url_include_page,
success: function(response) {
console.log(response);
let result_length = response["content"].length;
let result_count = response["totalElements"].toString(); //total number of posts
let page_number = response["pageable"]["pageNumber"].toString(); //current page (0-based)
let data_per_page = response["pageable"]["pageSize"].toString(); //posts per page
let total_pages = response["totalPages"].toString(); //total number of pages
paging(current_page, result_count, result_length, page_number, total_pages);
selectPage(select_page);
}
});
}
//Fetch all the post data and build the list
function getPosts(url, current_page) {
let url_include_page = "";
if(url.includes("?")) {
url_include_page = url + "&page=" + current_page;
} else {
url_include_page = url + "?page=" + current_page;
}
$.ajax({
type:"GET",
async: false, //synchronous, to store the values in global variables
url:url_include_page,
success: function(response) {
console.log(response);
let result = response["content"]; //post data
console.log(result);
if (result.length > 0) {
$("#table-tbody").empty();
for (let i = 0; i < result.length; i++) {
let id = result[i]["id"];
let title = result[i]["title"];
let name = result[i]["name"];
let modified_at_date = result[i]["modifiedAt"].substr(0,10);
let temp_html =
`<tr onclick="showDetail('${id}')">
<td onclick="event.cancelBubble=true">
<input id="${id}-checkbox" name="check" type="checkbox" value="${id}">
</td>
<th id="${id}">${id}</th>
<td id="${id}-title">${title}</td>
<td id="${id}-name">${name}</td>
<td id="${id}-modifieddate">${modified_at_date}</td>
</tr>`;
$("#table-tbody").append(temp_html);
}
} else {
$("#table-tbody").empty();
let temp_html =`<tr><td id="table-empty" colspan="5">There are no posts.</td></tr>`;
$("#table-tbody").append(temp_html);
}
}
});
}
//Pagination
function paging(previous, result_count, result_length, page_number, total_pages) {
let next = previous + 1; //value for the Next page button
let remainder = total_pages % 10;
let end_page = Math.floor(total_pages / 10);
let for_start = (previous * 10) + 1;
let for_end;
if(previous == end_page) {
for_end = (previous * 10) + remainder;
} else {
for_end = next * 10;
}
$("#pagination-list").empty();
for(let i=for_start; i<=for_end; i++) {
let page_tag = `<li><a id="page-${i-1}" class="pagination-link"
aria-label="Goto page ${i}" onclick="selectPage('${i-1}')">${i}</a></li>`;
$("#pagination-list").append(page_tag);
}
$("#pagination-previous").attr("value", previous);
$("#pagination-next").attr("value", next);
//Hide the previous button on the first page block
if($("#pagination-previous").attr("value") == "0" || result_length == 0) {
$("#pagination-previous").hide();
} else {
$("#pagination-previous").show();
}
//Hide the Next page button on the last page block
if($("#pagination-next").attr("value") == Math.ceil(total_pages / 10) || result_length == 0) {
$("#pagination-next").hide();
} else {
$("#pagination-next").show();
}
}
//Go to the selected page
function selectPage(page_number) {
$(".pagination-link").removeClass("is-current");
$(`#page-${page_number}`).addClass("is-current");
getPosts(url, page_number);
}
//Search
function search() {
if(window.event.keyCode == 13) {
let search_select = $("#search-select").val().trim();
let search_input = $("#search-input").val().trim();
url = "";
if($("#search-input").val() == "") {
url = "/api/posts";
} else {
url = `/api/posts?${search_select}=${search_input}`;
}
getPagingAndPostsData(url, 0, 0);
}
}
//Show the details of the clicked post
function showDetail(id) {
let post_id = id;
$.ajax({
type:"GET",
url:"/api/posts/" + post_id,
success: function(response) {
console.log(response);
let id = response["id"];
let title = response["title"];
let name = response["name"];
let created_at_date = response["createdAt"].substr(0,10);
let modified_at_date = response["modifiedAt"].substr(0,10);
let content = response["content"];
if(created_at_date != modified_at_date) {
$("#date").text("์์ ์ผ");
}
$("#detail-title").text(title);
$("#detail-name").text(name);
$("#detail-createdate").text(modified_at_date);
$("#detail-content").text(content);
$("#detail-id").val(id);
$("#detail-content").css("white-space", "pre");
$("#detail-modal").addClass("is-active");
},
error: function(error) {
alert("๊ฒ์๋ฌผ์ ์กฐํํ ์ ์์ต๋๋ค");
console.log("error" + error);
}
});
}
//Write a post
function writePost() {
let title = $("#write-title").val().trim();
let name = $("#write-name").val().trim();
let content = $("#write-content").val().trim();
if(title == "") {
alert("์ ๋ชฉ์ ์
๋ ฅํ์ธ์");
$("#write-title").focus();
return;
}
if(name == "") {
alert("์์ฑ์ ์ด๋ฆ์ ์
๋ ฅํ์ธ์");
$("#write-name").focus();
return;
}
if(content == "") {
alert("๋ด์ฉ์ ์
๋ ฅํ์ธ์");
$("#write-content").focus();
return;
}
//Prevent pressing the button multiple times
$("#save-post-btn").attr("disabled", true);
let data = {title:title, name:name, content:content};
$.ajax({
type:"POST",
url:"/api/posts",
contentType:"application/json",
data: JSON.stringify(data),
success: function(response) {
window.location.reload();
}
});
}
//Load the values into the edit modal
function getModifyData() {
let title = $("#detail-title").text();
let name = $("#detail-name").text();
let content = $("#detail-content").text();
let id = $("#detail-id").val();
$("#write-title").val(title);
$("#write-name").val(name);
$("#write-content").val(content);
$("#write-id").val(id);
}
//Complete the post edit
function modifyPost() {
let post_id = $("#write-id").val();
let title = $("#write-title").val();
let name = $("#write-name").val();
let content = $("#write-content").val();
//Prevent pressing the button multiple times
$("#modify-post-btn").attr("disabled", true);
let data = {title:title, name:name, content:content}
$.ajax({
type:"PUT",
url:"/api/posts/" + post_id,
contentType:"application/json",
data: JSON.stringify(data),
success: function(response) {
alert("์์ ๋์์ต๋๋ค");
window.location.reload();
}
});
}
|
identifier_name
|
|
|
tgsrv.go
|
+ strings.Replace(v, "delete ", "", -1) + "\n"
}
msgList.Text = txt + "\n"
}
bot.Send(msgList)
}
}
default:
bot.Send(msgCancel)
}
} else {
//unknown cmd
bot.Send(msgCancel)
}
}
}
} else {
if update.Message == nil {
continue
}
switch update.Message.Text {
case "/start":
user := update.Message.From
if userNew(user) {
m := tgbotapi.NewMessage(update.Message.Chat.ID, params.Hello)
m.DisableWebPagePreview = true
bot.Send(m)
} else {
bot.Send(tgbotapi.NewMessage(update.Message.Chat.ID, params.SomeErr))
}
case "/top":
m := tgbotapi.NewMessage(update.Message.Chat.ID, params.TopLinks)
m.DisableWebPagePreview = true
bot.Send(m)
case "/rateme":
m := tgbotapi.NewMessage(update.Message.Chat.ID, params.Rate)
m.DisableWebPagePreview = true
bot.Send(m)
case "/help":
bot.Send(tgbotapi.NewMessage(update.Message.Chat.ID, params.Help))
case "/donate":
bot.Send(tgbotapi.NewMessage(update.Message.Chat.ID, params.Donate))
case "/channels":
var cmds = make(map[string]string)
cmds["channel_!_new"] = "new channel"
url := params.Channels + strconv.FormatInt(update.Message.Chat.ID, 10)
body := httputils.HttpGet(url, nil)
channels := make(map[int64]*tgbotapi.Chat)
json.Unmarshal(body, &channels)
for _, channel := range channels {
cmds["channel_!_delete_!_"+channel.UserName] = "delete @" + channel.UserName
cmds["channel_!_list_!_"+channel.UserName] = "list of urls of @" + channel.UserName
}
msg := tgbotapi.NewMessage(update.Message.Chat.ID, "Instruction: http://telegra.ph/telefeedbot-05-12\n\nYour channels:\n")
msg.DisableWebPagePreview = true
msg.ReplyMarkup = createButtons(cmds)
msg.ReplyToMessageID = update.Message.MessageID
bot.Send(msg)
case "/list":
//botYa.Track(update.Message.From.ID, nil, "list")
//var cmds = make(map[string]string)
//fmt.Printf("fromid:%d: %d\n", update.Message.From.ID, update.Message.Chat.ID)
subs := usersub("", int64(update.Message.From.ID), true)
//var s = "Subscriptions (send 'delete http://..' - for unsubscribe):\n"
cmds := subs2cmds(subs)
if len(cmds) == 0 {
m := tgbotapi.NewMessage(update.Message.Chat.ID, "No feeds..\n\n"+params.Hello)
m.DisableWebPagePreview = true
bot.Send(m)
} else {
msg := tgbotapi.NewMessage(update.Message.Chat.ID, "Subscriptions (press a button below to unsubscribe):\n")
msg.ReplyMarkup = createButtons(cmds)
msg.ReplyToMessageID = update.Message.MessageID
bot.Send(msg)
}
case "/subs":
subs := usersub("", int64(update.Message.From.ID), true)
cmds := subs2cmds(subs)
msgList := tgbotapi.NewMessage(update.Message.Chat.ID, "")
var txt = "List of urls:\nSend 'delete url(s)' to unsubscribe\n\n"
for _, v := range cmds {
txt = txt + strings.Replace(v, "delete ", "", -1) + "\n"
}
msgList.Text = txt + "\n"
bot.Send(msgList)
default:
msg := update.Message.Text
pubFind(update.Message, msg, int64(update.Message.From.ID))
}
}
}
}
func subs2cmds(subs map[string]bool) map[string]string {
var cmds = make(map[string]string)
for k := range subs {
log.Println(k)
if strings.Contains(k, params.PubNames) {
cmd := "delete https://vk.com/" + strings.Replace(k, params.PubNames, "", -1)
key := "delete" + k
cmds[key] = cmd
}
if strings.Contains(k, params.Feed) {
b := httputils.HttpGet(params.Api+k, nil)
if b != nil {
cmd := "delete " + string(b)
key := "delete" + k
cmds[key] = cmd
}
}
}
log.Println("cmds:", cmds)
return cmds
}
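// Example (illustrative, assuming params.PubNames is a key prefix such as "/pubnames/"):
// a subscription key "/pubnames/durov" becomes the button command
// "delete https://vk.com/durov", stored under the map key "delete/pubnames/durov".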
func createButtons(buttonsCmds map[string]string) tgbotapi.InlineKeyboardMarkup {
var buttons [][]tgbotapi.InlineKeyboardButton
var keys []string
for k := range buttonsCmds {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
choice := buttonsCmds[k]
cleanedChoice := strings.TrimSpace(choice)
cleanedChoice = strings.Replace(cleanedChoice, "\n", "", -1)
button := tgbotapi.NewInlineKeyboardButtonData(cleanedChoice, k)
buttons = append(buttons, tgbotapi.NewInlineKeyboardRow(button))
}
buttonCancel := tgbotapi.NewInlineKeyboardButtonData("Cancel", "cancel")
buttons = append(buttons, tgbotapi.NewInlineKeyboardRow(buttonCancel))
buttonsRow := tgbotapi.NewInlineKeyboardMarkup(buttons...)
return buttonsRow
}
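// Note: Telegram limits callback data to 64 bytes, so very long map keys built
// from feed keys or usernames may be rejected when these buttons are sent.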
func userNew(user *tgbotapi.User) bool {
urlUsr := params.Users + strconv.Itoa(user.ID)
log.Println("userNew", urlUsr)
b, _ := json.Marshal(user)
httputils.HttpPut(params.UserName+user.UserName, nil, b)
res := httputils.HttpPut(urlUsr, nil, b)
//telefeedbot
if user.ID > 0 {
pubSubTgAdd(146445941, "telefeedbot", nil, false, int64(user.ID))
}
return res
}
func channelNew(chat *tgbotapi.Chat) bool {
url := params.Users + strconv.FormatInt(chat.ID, 10)
log.Println("channelNew", url)
b, _ := json.Marshal(chat)
httputils.HttpPut(params.UserName+chat.UserName, nil, b)
return httputils.HttpPut(url, nil, b)
}
func pubFind(msg *tgbotapi.Message, txt string, userid int64) {
log.Println("pubFind")
var delete = false
var tmp = strings.Replace(txt, "\n", " ", -1)
tmp = strings.Replace(tmp, "\r", "", -1)
tmp = strings.TrimSpace(tmp)
words := strings.Split(tmp, " ")
for i := range words {
var word = strings.TrimSpace(words[i])
if word == "delete" || word == "Delete" {
delete = true
continue
}
if strings.HasPrefix(word, "@") {
chanName := strings.Replace(word, "@", "", -1)
url := params.UserName + chanName
b := httputils.HttpGet(url, nil)
var chat *tgbotapi.Chat
json.Unmarshal(b, &chat)
if chat != nil {
userChannelsUrl := params.Channels + strconv.FormatInt(userid, 10)
userChannelsbody := httputils.HttpGet(userChannelsUrl, nil)
userChannels := make(map[int64]*tgbotapi.Chat)
json.Unmarshal(userChannelsbody, &userChannels)
if userChannels[chat.ID] != nil {
userid = chat.ID
} else {
bot.Send(tgbotapi.NewMessage(userid, chanName+" not yours"))
}
}
continue
}
if strings.HasPrefix(word, "http") == false {
//default scheme is https
word = "https://" + word
}
urls, err := url.Parse(word)
if err != nil {
bot.Send(tgbotapi.NewMessage(msg.Chat.ID, "Rss feed on domain:'"+word+"'\n"+params.NotFound+params.Example))
return
}
mainDomain, _ := publicsuffix.EffectiveTLDPlusOne(urls.Host)
switch mainDomain {
case "t.me":
parts := strings.Split(urls.Path, "/")
if len(parts) > 1
|
{
channelName := "@" + parts[len(parts)-1]
m := tgbotapi.NewMessageToChannel(channelName, "Ok")
m.DisableWebPagePreview = true
reply, err := bot.Send(m)
if err != nil {
s := err.Error()
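// substring match catches both "Forbidden" and "forbidden" in the error text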
if strings.Contains(s, "orbidden") {
m := tgbotapi.NewMessage(msg.Chat.ID, "Add @telefeedbot as admin 2 channel: "+channelName)
bot.Send(m)
} else {
m := tgbotapi.NewMessage(msg.Chat.ID, s)
bot.Send(m)
}
} else {
channel := reply.Chat
addChannel(msg.Chat.ID, channel, false)
}
}
|
conditional_block
|
|
tgsrv.go
|
msgCancel := tgbotapi.NewEditMessageText(update.CallbackQuery.Message.Chat.ID,
update.CallbackQuery.Message.MessageID,
"ZzZzZzzz ...")
if strings.HasPrefix(data, "delete"+params.Feed) {
feed := strings.Replace(data, "delete"+params.Feed, "", -1)
b := httputils.HttpGet(params.Feeds+feed, nil)
if b != nil {
url := string(b)
log.Println("delete " + url)
pubFind(update.CallbackQuery.Message, "delete "+url, int64(update.CallbackQuery.From.ID))
bot.Send(msgCancel)
}
} else {
if strings.HasPrefix(data, "delete"+params.PubNames) {
screenname := strings.Replace(data, "delete"+params.PubNames, "", -1)
pubFind(update.CallbackQuery.Message, "delete https://vk.com/"+screenname, int64(update.CallbackQuery.From.ID))
log.Println("update.CallbackQuery.From.ID", update.CallbackQuery.From.ID)
bot.Send(msgCancel)
} else {
if strings.Contains(data, "_!_") {
parts := strings.Split(data, "_!_")
cmd := parts[0]
cmdval := parts[1]
switch cmd {
case "channel":
switch cmdval {
case "new":
msgNewCh := tgbotapi.NewEditMessageText(update.CallbackQuery.Message.Chat.ID,
update.CallbackQuery.Message.MessageID,
params.NewChannel)
bot.Send(msgNewCh)
case "delete":
msgDel := tgbotapi.NewEditMessageText(update.CallbackQuery.Message.Chat.ID,
update.CallbackQuery.Message.MessageID,
"delete")
if len(parts) > 2 {
chanName := parts[2]
url := params.UserName + chanName
b := httputils.HttpGet(url, nil)
var chat *tgbotapi.Chat
json.Unmarshal(b, &chat)
if chat != nil {
subs := usersub("", chat.ID, true)
if len(subs) > 0 {
msgDel.Text = "Channel @" + chanName + " have subscriptions\nDelete urls before delete channel!"
} else {
addChannel(update.CallbackQuery.Message.Chat.ID, chat, true)
msgDel.Text = "deleted @" + chanName
}
} else {
msgDel.Text = "@" + chanName + " not found("
}
}
bot.Send(msgDel)
case "list":
msgList := tgbotapi.NewEditMessageText(update.CallbackQuery.Message.Chat.ID,
update.CallbackQuery.Message.MessageID,
"list\n")
msgList.DisableWebPagePreview = true
if len(parts) > 2 {
chanName := parts[2]
url := params.UserName + chanName
b := httputils.HttpGet(url, nil)
var chat *tgbotapi.Chat
json.Unmarshal(b, &chat)
if chat != nil {
subs := usersub("", chat.ID, true)
cmds := subs2cmds(subs)
var txt = strings.Replace(params.SubsHelp, "channelname", chanName, -1) + "\n\nList of urls of @" + chanName + ":\n\n"
for _, v := range cmds {
txt = txt + strings.Replace(v, "delete ", "", -1) + "\n"
}
msgList.Text = txt + "\n"
}
bot.Send(msgList)
}
}
default:
bot.Send(msgCancel)
}
} else {
//unknown cmd
bot.Send(msgCancel)
}
}
}
} else {
if update.Message == nil {
continue
}
switch update.Message.Text {
case "/start":
user := update.Message.From
if userNew(user) {
m := tgbotapi.NewMessage(update.Message.Chat.ID, params.Hello)
m.DisableWebPagePreview = true
bot.Send(m)
} else {
bot.Send(tgbotapi.NewMessage(update.Message.Chat.ID, params.SomeErr))
}
case "/top":
m := tgbotapi.NewMessage(update.Message.Chat.ID, params.TopLinks)
m.DisableWebPagePreview = true
bot.Send(m)
case "/rateme":
m := tgbotapi.NewMessage(update.Message.Chat.ID, params.Rate)
m.DisableWebPagePreview = true
bot.Send(m)
case "/help":
bot.Send(tgbotapi.NewMessage(update.Message.Chat.ID, params.Help))
case "/donate":
bot.Send(tgbotapi.NewMessage(update.Message.Chat.ID, params.Donate))
case "/channels":
var cmds = make(map[string]string)
cmds["channel_!_new"] = "new channel"
url := params.Channels + strconv.FormatInt(update.Message.Chat.ID, 10)
body := httputils.HttpGet(url, nil)
channels := make(map[int64]*tgbotapi.Chat)
json.Unmarshal(body, &channels)
for _, channel := range channels {
cmds["channel_!_delete_!_"+channel.UserName] = "delete @" + channel.UserName
cmds["channel_!_list_!_"+channel.UserName] = "list of urls of @" + channel.UserName
}
msg := tgbotapi.NewMessage(update.Message.Chat.ID, "Instruction: http://telegra.ph/telefeedbot-05-12\n\nYour channels:\n")
msg.DisableWebPagePreview = true
msg.ReplyMarkup = createButtons(cmds)
msg.ReplyToMessageID = update.Message.MessageID
bot.Send(msg)
case "/list":
//botYa.Track(update.Message.From.ID, nil, "list")
//var cmds = make(map[string]string)
//fmt.Printf("fromid:%d: %d\n", update.Message.From.ID, update.Message.Chat.ID)
subs := usersub("", int64(update.Message.From.ID), true)
//var s = "Subscriptions (send 'delete http://..' - for unsubscribe):\n"
cmds := subs2cmds(subs)
if len(cmds) == 0 {
m := tgbotapi.NewMessage(update.Message.Chat.ID, "No feeds..\n\n"+params.Hello)
m.DisableWebPagePreview = true
bot.Send(m)
} else {
msg := tgbotapi.NewMessage(update.Message.Chat.ID, "Subscriptions (press a button below to unsubscribe):\n")
msg.ReplyMarkup = createButtons(cmds)
msg.ReplyToMessageID = update.Message.MessageID
bot.Send(msg)
}
case "/subs":
subs := usersub("", int64(update.Message.From.ID), true)
cmds := subs2cmds(subs)
msgList := tgbotapi.NewMessage(update.Message.Chat.ID, "")
var txt = "List of urls:\nSend 'delete url(s)' to unsubscribe\n\n"
for _, v := range cmds {
txt = txt + strings.Replace(v, "delete ", "", -1) + "\n"
}
msgList.Text = txt + "\n"
bot.Send(msgList)
default:
msg := update.Message.Text
pubFind(update.Message, msg, int64(update.Message.From.ID))
}
}
}
}
func subs2cmds(subs map[string]bool) map[string]string {
var cmds = make(map[string]string)
for k := range subs {
log.Println(k)
if strings.Contains(k, params.PubNames) {
cmd := "delete https://vk.com/" + strings.Replace(k, params.PubNames, "", -1)
key := "delete" + k
cmds[key] = cmd
}
if strings.Contains(k, params.Feed) {
b := httputils.HttpGet(params.Api+k, nil)
if b != nil {
cmd := "delete " + string(b)
key := "delete" + k
cmds[key] = cmd
}
}
}
log.Println("cmds:", cmds)
return cmds
}
func createButtons(buttonsCmds map[string]string) tgbotapi.InlineKeyboardMarkup {
var buttons [][]tgbotapi.InlineKeyboardButton
var keys []string
for k := range buttonsCmds {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
choice := buttonsCmds[k]
cleanedChoice := strings.TrimSpace(choice)
cleanedChoice = strings.Replace(cleanedChoice, "\n", "", -1)
button := tgbotapi
|
{
var err error
tlgrmtoken, err := ioutil.ReadFile(params.Telefeedfile)
catch(err)
tgtoken := strings.Replace(strings.Replace(string(tlgrmtoken), "\n", "", -1), "\r", "", -1)
bot, err = tgbotapi.NewBotAPI(tgtoken)
catch(err)
bot.Debug = false
log.Printf("Authorized on account %s", bot.Self.UserName)
u := tgbotapi.NewUpdate(0)
u.Timeout = 60
updates, err := bot.GetUpdatesChan(u)
for update := range updates {
if update.CallbackQuery != nil && update.CallbackQuery.Message != nil {
data := update.CallbackQuery.Data
|
identifier_body
|