/*
 * The Alluxio Open Foundation licenses this work under the Apache License, version 2.0
 * (the "License"). You may not use this work except in compliance with the License, which is
 * available at www.apache.org/licenses/LICENSE-2.0
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
 * either express or implied, as more fully set forth in the License.
 *
 * See the NOTICE file distributed with this work for information regarding copyright ownership.
 */

package alluxio.master.file;

import static alluxio.metrics.MetricInfo.UFS_OP_SAVED_PREFIX;

import alluxio.AlluxioURI;
import alluxio.ClientContext;
import alluxio.Constants;
import alluxio.Server;
import alluxio.client.job.JobMasterClient;
import alluxio.client.job.JobMasterClientPool;
import alluxio.clock.SystemClock;
import alluxio.collections.Pair;
import alluxio.collections.PrefixList;
import alluxio.conf.PropertyKey;
import alluxio.conf.ServerConfiguration;
import alluxio.exception.AccessControlException;
import alluxio.exception.AlluxioException;
import alluxio.exception.BlockInfoException;
import alluxio.exception.ConnectionFailedException;
import alluxio.exception.DirectoryNotEmptyException;
import alluxio.exception.ExceptionMessage;
import alluxio.exception.FileAlreadyCompletedException;
import alluxio.exception.FileAlreadyExistsException;
import alluxio.exception.FileDoesNotExistException;
import alluxio.exception.InvalidFileSizeException;
import alluxio.exception.InvalidPathException;
import alluxio.exception.PreconditionMessage;
import alluxio.exception.UnexpectedAlluxioException;
import alluxio.exception.status.FailedPreconditionException;
import alluxio.exception.status.InvalidArgumentException;
import alluxio.exception.status.NotFoundException;
import alluxio.exception.status.PermissionDeniedException;
import alluxio.exception.status.ResourceExhaustedException;
import alluxio.exception.status.UnavailableException;
import alluxio.file.options.DescendantType;
import alluxio.grpc.DeletePOptions;
import alluxio.grpc.FileSystemMasterCommonPOptions;
import alluxio.grpc.GrpcService;
import alluxio.grpc.GrpcUtils;
import alluxio.grpc.LoadDescendantPType;
import alluxio.grpc.LoadMetadataPOptions;
import alluxio.grpc.LoadMetadataPType;
import alluxio.grpc.MountPOptions;
import alluxio.grpc.ServiceType;
import alluxio.grpc.SetAclAction;
import alluxio.grpc.SetAttributePOptions;
import alluxio.grpc.TtlAction;
import alluxio.heartbeat.HeartbeatContext;
import alluxio.heartbeat.HeartbeatThread;
import alluxio.job.plan.persist.PersistConfig;
import alluxio.job.wire.JobInfo;
import alluxio.master.file.contexts.CallTracker;
import alluxio.master.CoreMaster;
import alluxio.master.CoreMasterContext;
import alluxio.master.ProtobufUtils;
import alluxio.master.audit.AsyncUserAccessAuditLogWriter;
import alluxio.master.audit.AuditContext;
import alluxio.master.block.BlockId;
import alluxio.master.block.BlockMaster;
import alluxio.master.file.activesync.ActiveSyncManager;
import alluxio.master.file.contexts.CheckAccessContext;
import alluxio.master.file.contexts.CheckConsistencyContext;
import alluxio.master.file.contexts.CompleteFileContext;
import alluxio.master.file.contexts.CreateDirectoryContext;
import alluxio.master.file.contexts.CreateFileContext;
import alluxio.master.file.contexts.DeleteContext;
import alluxio.master.file.contexts.FreeContext;
import alluxio.master.file.contexts.GetStatusContext;
import alluxio.master.file.contexts.InternalOperationContext;
import alluxio.master.file.contexts.ListStatusContext;
import alluxio.master.file.contexts.LoadMetadataContext;
import alluxio.master.file.contexts.MountContext;
import alluxio.master.file.contexts.OperationContext;
import alluxio.master.file.contexts.RenameContext;
import alluxio.master.file.contexts.ScheduleAsyncPersistenceContext;
import alluxio.master.file.contexts.SetAclContext;
import alluxio.master.file.contexts.SetAttributeContext;
import alluxio.master.file.contexts.WorkerHeartbeatContext;
import alluxio.master.file.meta.FileSystemMasterView;
import alluxio.master.file.meta.Inode;
import alluxio.master.file.meta.InodeDirectory;
import alluxio.master.file.meta.InodeDirectoryIdGenerator;
import alluxio.master.file.meta.InodeDirectoryView;
import alluxio.master.file.meta.InodeFile;
import alluxio.master.file.meta.InodeLockManager;
import alluxio.master.file.meta.InodePathPair;
import alluxio.master.file.meta.InodeTree;
import alluxio.master.file.meta.InodeTree.LockPattern;
import alluxio.master.file.meta.LockedInodePath;
import alluxio.master.file.meta.LockedInodePathList;
import alluxio.master.file.meta.LockingScheme;
import alluxio.master.file.meta.MountTable;
import alluxio.master.file.meta.PersistenceState;
import alluxio.master.file.meta.UfsAbsentPathCache;
import alluxio.master.file.meta.UfsBlockLocationCache;
import alluxio.master.file.meta.UfsSyncPathCache;
import alluxio.master.file.meta.options.MountInfo;
import alluxio.master.journal.DelegatingJournaled;
import alluxio.master.journal.JournalContext;
import alluxio.master.journal.Journaled;
import alluxio.master.journal.JournaledGroup;
import alluxio.master.journal.checkpoint.CheckpointName;
import alluxio.master.metastore.DelegatingReadOnlyInodeStore;
import alluxio.master.metastore.InodeStore;
import alluxio.master.metastore.ReadOnlyInodeStore;
import alluxio.master.metrics.TimeSeriesStore;
import alluxio.metrics.Metric;
import alluxio.metrics.MetricInfo;
import alluxio.metrics.MetricKey;
import alluxio.metrics.MetricsSystem;
import alluxio.metrics.TimeSeries;
import alluxio.proto.journal.File;
import alluxio.proto.journal.File.NewBlockEntry;
import alluxio.proto.journal.File.RenameEntry;
import alluxio.proto.journal.File.SetAclEntry;
import alluxio.proto.journal.File.UpdateInodeEntry;
import alluxio.proto.journal.File.UpdateInodeFileEntry;
import alluxio.proto.journal.File.UpdateInodeFileEntry.Builder;
import alluxio.proto.journal.Journal.JournalEntry;
import alluxio.resource.CloseableResource;
import alluxio.resource.LockResource;
import alluxio.retry.CountingRetry;
import alluxio.retry.RetryPolicy;
import alluxio.security.authentication.AuthType;
import alluxio.security.authentication.AuthenticatedClientUser;
import alluxio.security.authentication.ClientIpAddressInjector;
import alluxio.security.authorization.AclEntry;
import alluxio.security.authorization.AclEntryType;
import alluxio.security.authorization.Mode;
import alluxio.underfs.Fingerprint;
import alluxio.underfs.MasterUfsManager;
import alluxio.underfs.UfsManager;
import alluxio.underfs.UfsMode;
import alluxio.underfs.UfsStatus;
import alluxio.underfs.UnderFileSystem;
import alluxio.underfs.UnderFileSystemConfiguration;
import alluxio.util.CommonUtils;
import alluxio.util.IdUtils;
import alluxio.util.LogUtils;
import alluxio.util.ModeUtils;
import alluxio.util.SecurityUtils;
import alluxio.util.ThreadFactoryUtils;
import alluxio.util.UnderFileSystemUtils;
import alluxio.util.executor.ExecutorServiceFactories;
import alluxio.util.executor.ExecutorServiceFactory;
import alluxio.util.io.PathUtils;
import alluxio.util.proto.ProtoUtils;
import alluxio.wire.BlockInfo;
import alluxio.wire.BlockLocation;
import alluxio.wire.CommandType;
import alluxio.wire.FileBlockInfo;
import alluxio.wire.FileInfo;
import alluxio.wire.FileSystemCommand;
import alluxio.wire.FileSystemCommandOptions;
import alluxio.wire.MountPointInfo;
import alluxio.wire.PersistCommandOptions;
import alluxio.wire.PersistFile;
import alluxio.wire.SyncPointInfo;
import alluxio.wire.UfsInfo;
import alluxio.wire.WorkerInfo;
import alluxio.worker.job.JobMasterClientContext;

import com.codahale.metrics.Counter;
import com.codahale.metrics.Gauge;
import com.codahale.metrics.MetricRegistry;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterables;
import com.google.common.collect.Sets;
import io.grpc.ServerInterceptors;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.SortedMap;
import java.util.Stack;
import java.util.TreeMap;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import javax.annotation.Nullable;
import javax.annotation.concurrent.NotThreadSafe;

/**
 * The master that handles all file system metadata management.
 */
@NotThreadSafe // TODO(jiri): make thread-safe (c.f. ALLUXIO-1664)
public final class DefaultFileSystemMaster extends CoreMaster
    implements FileSystemMaster, DelegatingJournaled {
  private static final Logger LOG = LoggerFactory.getLogger(DefaultFileSystemMaster.class);
  private static final Set<Class<? extends Server>> DEPS = ImmutableSet.of(BlockMaster.class);

  /** The number of threads to use in the {@link #mPersistCheckerPool}. */
  private static final int PERSIST_CHECKER_POOL_THREADS = 128;

  /**
   * Locking in DefaultFileSystemMaster
   *
   * Individual paths are locked in the inode tree. In order to read or write any inode, the path
   * must be locked. A path is locked via one of the lock methods in {@link InodeTree}, such as
   * {@link InodeTree#lockInodePath(AlluxioURI, LockPattern)} or
   * {@link InodeTree#lockFullInodePath(AlluxioURI, LockPattern)}. These lock methods return
   * a {@link LockedInodePath}, which represents a locked path of inodes. These locked paths
   * ({@link LockedInodePath}) must be unlocked. In order to ensure a locked
   * {@link LockedInodePath} is always unlocked, the following paradigm is recommended:
   *
   * <p><blockquote><pre>
   * try (LockedInodePath inodePath = mInodeTree.lockInodePath(path, LockPattern.READ)) {
   *   ...
   * }
   * </pre></blockquote>
   *
   * When locking a path in the inode tree, it is possible that other concurrent operations have
   * modified the inode tree while a thread is waiting to acquire a lock on the inode. Lock
   * acquisitions throw {@link InvalidPathException} to indicate that the inode structure is no
   * longer consistent with what the caller originally expected, for example if the inode
   * previously obtained at /pathA has been renamed to /pathB during the wait for the inode lock.
   * Methods which specifically act on a path will propagate this exception to the caller, while
   * methods which iterate over child nodes can safely ignore the exception and treat the inode
   * as no longer a child.
   *
   * JournalContext, BlockDeletionContext, and RpcContext
   *
   * RpcContext is an aggregator for various contexts which get passed around through file system
   * master methods.
   *
   * Currently there are two types of contexts that get passed around: {@link JournalContext} and
   * {@link BlockDeletionContext}. These contexts are used to register work that should be done
   * when the context closes. The journal context tracks journal entries which need to be flushed,
   * while the block deletion context tracks which blocks need to be deleted in the
   * {@link BlockMaster}.
   *
   * File system master journal entries should be written before blocks are deleted in the block
   * master, so the journal context should always be closed before the block deletion context. In
   * order to ensure that contexts are closed in the right order, the following paradigm is
   * recommended:
   *
   * <p><blockquote><pre>
   * try (RpcContext rpcContext = createRpcContext()) {
   *   // access journal context with rpcContext.getJournalContext()
   *   // access block deletion context with rpcContext.getBlockDeletionContext()
   *   ...
   * }
   * </pre></blockquote>
   *
   * When used in conjunction with {@link LockedInodePath} and {@link AuditContext}, the usage
   * should look like
   *
   * <p><blockquote><pre>
   * try (RpcContext rpcContext = createRpcContext();
   *      LockedInodePath inodePath = mInodeTree.lockInodePath(...);
   *      FileSystemMasterAuditContext auditContext = createAuditContext(...)) {
   *   ...
   * }
   * </pre></blockquote>
   *
   * NOTE: Because resources are released in the opposite order they are acquired, the
   * {@link JournalContext}, {@link BlockDeletionContext}, or {@link RpcContext} resources should
   * always be created before any {@link LockedInodePath} resources to avoid holding an inode path
   * lock while waiting for journal IO.
   *
   * User access audit logging in the FileSystemMaster
   *
   * User accesses to file system metadata should be audited. The intent to write an audit log
   * entry and the actual audit log IO are decoupled, so that operations do not hold metadata
   * locks while waiting on the audit log IO. In particular, {@link AsyncUserAccessAuditLogWriter}
   * uses a separate thread to perform the actual audit log IO. In order for audit log entries to
   * preserve the order of file system operations, the intention of auditing should be submitted
   * to {@link AsyncUserAccessAuditLogWriter} while holding locks on the inode path. That is, the
   * {@link AuditContext} resources should always live within the scope of {@link LockedInodePath},
   * i.e. be created after the {@link LockedInodePath}. Otherwise, the order of audit log entries
   * may not reflect the actual order of the user accesses. Because resources are released in the
   * opposite order they are acquired, the {@link AuditContext#close()} method is called before
   * {@link LockedInodePath#close()}, thus guaranteeing the order.
   *
   * Method Conventions in the FileSystemMaster
   *
   * All of the flow of the FileSystemMaster follows a convention. There are essentially two main
   * types of methods:
   * (A) public API methods
   * (B) private (or package private) internal methods
   *
   * (A) public API methods:
   * These methods are public and are accessed by the RPC and REST APIs. These methods lock all
   * the required paths, and also perform all permission checking.
   * (A) cannot call (A)
   * (A) can call (B)
   *
   * (B) private (or package private) internal methods:
   * These methods perform the rest of the work. The names of these methods are suffixed by
   * "Internal". These are typically called by the (A) methods.
   * (B) cannot call (A)
   * (B) can call (B)
   */

  /** Handle to the block master. */
  private final BlockMaster mBlockMaster;

  /** This manages the file system inode structure. This must be journaled. */
  private final InodeTree mInodeTree;

  /** Store for holding inodes. */
  private final ReadOnlyInodeStore mInodeStore;

  /** This manages inode locking. */
  private final InodeLockManager mInodeLockManager;

  /** This manages the file system mount points. */
  private final MountTable mMountTable;

  /** This generates unique directory ids. This must be journaled. */
  private final InodeDirectoryIdGenerator mDirectoryIdGenerator;

  /** This checks user permissions on different operations. */
  private final PermissionChecker mPermissionChecker;

  /** List of paths to always keep in memory. */
  private final PrefixList mWhitelist;

  /** A pool of job master clients. */
  private final JobMasterClientPool mJobMasterClientPool;

  /** Set of file IDs to persist. */
  private final Map<Long, alluxio.time.ExponentialTimer> mPersistRequests;

  /** Map from file IDs to persist jobs. */
  private final Map<Long, PersistJob> mPersistJobs;

  /** The manager of all ufs. */
  private final MasterUfsManager mUfsManager;

  /** This caches absent paths in the UFS. */
  private final UfsAbsentPathCache mUfsAbsentPathCache;

  /** This caches block locations in the UFS. */
  private final UfsBlockLocationCache mUfsBlockLocationCache;

  /** This caches paths which have been synced with UFS. */
  private final UfsSyncPathCache mUfsSyncPathCache;

  /** The {@link JournaledGroup} representing all the subcomponents which require journaling. */
  private final JournaledGroup mJournaledGroup;

  /** List of strings which are blacklisted from async persist. */
  private final List<String> mPersistBlacklist;

  /** Thread pool which asynchronously handles the completion of persist jobs. */
  private java.util.concurrent.ThreadPoolExecutor mPersistCheckerPool;

  private ActiveSyncManager mSyncManager;

  /** Log writer for user access audit log. */
  private AsyncUserAccessAuditLogWriter mAsyncAuditLogWriter;

  /** Stores the time series for various metrics which are exposed in the UI. */
  private TimeSeriesStore mTimeSeriesStore;

  private AccessTimeUpdater mAccessTimeUpdater;

  /** Used to check pending/running backup from RPCs. */
  private CallTracker mStateLockCallTracker;

  final ThreadPoolExecutor mSyncPrefetchExecutor = new ThreadPoolExecutor(
      ServerConfiguration.getInt(PropertyKey.MASTER_METADATA_SYNC_UFS_PREFETCH_POOL_SIZE),
      ServerConfiguration.getInt(PropertyKey.MASTER_METADATA_SYNC_UFS_PREFETCH_POOL_SIZE),
      1, TimeUnit.MINUTES, new LinkedBlockingQueue<>(),
      ThreadFactoryUtils.build("alluxio-ufs-sync-prefetch-%d", false));

  final ThreadPoolExecutor mSyncMetadataExecutor = new ThreadPoolExecutor(
      ServerConfiguration.getInt(PropertyKey.MASTER_METADATA_SYNC_EXECUTOR_POOL_SIZE),
      ServerConfiguration.getInt(PropertyKey.MASTER_METADATA_SYNC_EXECUTOR_POOL_SIZE),
      1, TimeUnit.MINUTES, new LinkedBlockingQueue<>(),
      ThreadFactoryUtils.build("alluxio-ufs-sync-%d", false));

  final ThreadPoolExecutor mActiveSyncMetadataExecutor = new ThreadPoolExecutor(
      ServerConfiguration.getInt(PropertyKey.MASTER_METADATA_SYNC_EXECUTOR_POOL_SIZE),
      ServerConfiguration.getInt(PropertyKey.MASTER_METADATA_SYNC_EXECUTOR_POOL_SIZE),
      1, TimeUnit.MINUTES, new LinkedBlockingQueue<>(),
      ThreadFactoryUtils.build("alluxio-ufs-active-sync-%d", false));

  /**
   * Creates a new instance of {@link DefaultFileSystemMaster}.
   *
   * @param blockMaster a block master handle
   * @param masterContext the context for Alluxio master
   */
  public DefaultFileSystemMaster(BlockMaster blockMaster, CoreMasterContext masterContext) {
    this(blockMaster, masterContext,
        ExecutorServiceFactories.cachedThreadPool(Constants.FILE_SYSTEM_MASTER_NAME));
  }

  /**
   * Creates a new instance of {@link DefaultFileSystemMaster}.
   *
   * @param blockMaster a block master handle
   * @param masterContext the context for Alluxio master
   * @param executorServiceFactory a factory for creating the executor service to use for running
   *        maintenance threads
   */
  public DefaultFileSystemMaster(BlockMaster blockMaster, CoreMasterContext masterContext,
      ExecutorServiceFactory executorServiceFactory) {
    super(masterContext, new SystemClock(), executorServiceFactory);
    mBlockMaster = blockMaster;
    mDirectoryIdGenerator = new InodeDirectoryIdGenerator(mBlockMaster);
    mUfsManager = masterContext.getUfsManager();
    mMountTable = new MountTable(mUfsManager, getRootMountInfo(mUfsManager));
    mInodeLockManager = new InodeLockManager();
    InodeStore inodeStore = masterContext.getInodeStoreFactory().apply(mInodeLockManager);
    mInodeStore = new DelegatingReadOnlyInodeStore(inodeStore);
    mInodeTree = new InodeTree(inodeStore, mBlockMaster, mDirectoryIdGenerator, mMountTable,
        mInodeLockManager);

    // TODO(gene): Handle default config value for whitelist.
    mWhitelist = new PrefixList(ServerConfiguration.getList(PropertyKey.MASTER_WHITELIST, ","));
    mPersistBlacklist = ServerConfiguration.isSet(PropertyKey.MASTER_PERSISTENCE_BLACKLIST)
        ? ServerConfiguration.getList(PropertyKey.MASTER_PERSISTENCE_BLACKLIST, ",")
        : Collections.emptyList();

    mStateLockCallTracker = new CallTracker() {
      @Override
      public boolean isCancelled() {
        return masterContext.getStateLockManager().interruptCycleTicking();
      }

      @Override
      public Type getType() {
        return Type.STATE_LOCK_TRACKER;
      }
    };
    mPermissionChecker = new DefaultPermissionChecker(mInodeTree);
    mJobMasterClientPool = new JobMasterClientPool(JobMasterClientContext
        .newBuilder(ClientContext.create(ServerConfiguration.global())).build());
    mPersistRequests = new java.util.concurrent.ConcurrentHashMap<>();
    mPersistJobs = new java.util.concurrent.ConcurrentHashMap<>();
    mUfsAbsentPathCache = UfsAbsentPathCache.Factory.create(mMountTable);
    mUfsBlockLocationCache = UfsBlockLocationCache.Factory.create(mMountTable);
    mUfsSyncPathCache = new UfsSyncPathCache();
    mSyncManager = new ActiveSyncManager(mMountTable, this);
    mTimeSeriesStore = new TimeSeriesStore();
    mAccessTimeUpdater = new AccessTimeUpdater(this, mInodeTree, masterContext.getJournalSystem());
    // Sync executors should allow core threads to time out
    mSyncPrefetchExecutor.allowCoreThreadTimeOut(true);
    mSyncMetadataExecutor.allowCoreThreadTimeOut(true);
    mActiveSyncMetadataExecutor.allowCoreThreadTimeOut(true);

    // The mount table should come after the inode tree because restoring the mount table requires
    // that the inode tree is already restored.
    ArrayList<Journaled> journaledComponents = new ArrayList<Journaled>() {
      {
        add(mInodeTree);
        add(mDirectoryIdGenerator);
        add(mMountTable);
        add(mUfsManager);
        add(mSyncManager);
      }
    };
    mJournaledGroup = new JournaledGroup(journaledComponents, CheckpointName.FILE_SYSTEM_MASTER);

    resetState();
    Metrics.registerGauges(this, mUfsManager);
  }

  private static MountInfo getRootMountInfo(MasterUfsManager ufsManager) {
    try (CloseableResource<UnderFileSystem> resource = ufsManager.getRoot().acquireUfsResource()) {
      boolean shared = resource.get().isObjectStorage()
          && ServerConfiguration.getBoolean(PropertyKey.UNDERFS_OBJECT_STORE_MOUNT_SHARED_PUBLICLY);
      boolean readonly = ServerConfiguration.getBoolean(
          PropertyKey.MASTER_MOUNT_TABLE_ROOT_READONLY);
      String rootUfsUri = PathUtils.normalizePath(
          ServerConfiguration.get(PropertyKey.MASTER_MOUNT_TABLE_ROOT_UFS), AlluxioURI.SEPARATOR);
      Map<String, String> rootUfsConf =
          ServerConfiguration.getNestedProperties(PropertyKey.MASTER_MOUNT_TABLE_ROOT_OPTION);
      MountPOptions mountOptions = MountContext
          .mergeFrom(MountPOptions.newBuilder().setShared(shared).setReadOnly(readonly)
              .putAllProperties(rootUfsConf))
          .getOptions().build();
      return new MountInfo(new AlluxioURI(MountTable.ROOT), new AlluxioURI(rootUfsUri),
          IdUtils.ROOT_MOUNT_ID, mountOptions);
    }
  }

  @Override
  public Map<ServiceType, GrpcService> getServices() {
    Map<ServiceType, GrpcService> services = new HashMap<>();
    services.put(ServiceType.FILE_SYSTEM_MASTER_CLIENT_SERVICE,
        new GrpcService(ServerInterceptors
            .intercept(new FileSystemMasterClientServiceHandler(this),
                new ClientIpAddressInjector())));
    services.put(ServiceType.FILE_SYSTEM_MASTER_JOB_SERVICE,
        new GrpcService(new FileSystemMasterJobServiceHandler(this)));
    services.put(ServiceType.FILE_SYSTEM_MASTER_WORKER_SERVICE,
        new GrpcService(new FileSystemMasterWorkerServiceHandler(this)));
    return services;
  }

  @Override
  public String getName() {
    return Constants.FILE_SYSTEM_MASTER_NAME;
  }

  @Override
  public Set<Class<? extends Server>> getDependencies() {
    return DEPS;
  }

  @Override
  public Journaled getDelegate() {
    return mJournaledGroup;
  }

  @Override
  public void start(Boolean isPrimary) throws IOException {
    super.start(isPrimary);
    if (isPrimary) {
      LOG.info("Starting fs master as primary");

      InodeDirectory root = mInodeTree.getRoot();
      if (root == null) {
        try (JournalContext context = createJournalContext()) {
          mInodeTree.initializeRoot(
              SecurityUtils.getOwner(mMasterContext.getUserState()),
              SecurityUtils.getGroup(mMasterContext.getUserState(), ServerConfiguration.global()),
              ModeUtils.applyDirectoryUMask(Mode.createFullAccess(),
                  ServerConfiguration.get(PropertyKey.SECURITY_AUTHORIZATION_PERMISSION_UMASK)),
              context);
        }
      } else if (!ServerConfiguration.getBoolean(PropertyKey.MASTER_SKIP_ROOT_ACL_CHECK)) {
        // For backwards-compatibility:
        // Empty root owner indicates that previously the master had no security. In this case, the
        // master is allowed to be started with security turned on.
        String serverOwner = SecurityUtils.getOwner(mMasterContext.getUserState());
        if (SecurityUtils.isSecurityEnabled(ServerConfiguration.global())
            && !root.getOwner().isEmpty() && !root.getOwner().equals(serverOwner)) {
          // user is not the previous owner
          throw new PermissionDeniedException(ExceptionMessage.PERMISSION_DENIED.getMessage(String
              .format("Unauthorized user on root. inode owner: %s current user: %s",
                  root.getOwner(), serverOwner)));
        }
      }

      // Initialize the ufs manager from the mount table.
      for (String key : mMountTable.getMountTable().keySet()) {
        if (key.equals(MountTable.ROOT)) {
          continue;
        }
        MountInfo mountInfo = mMountTable.getMountTable().get(key);
        UnderFileSystemConfiguration ufsConf =
            UnderFileSystemConfiguration.defaults(ServerConfiguration.global())
                .createMountSpecificConf(mountInfo.getOptions().getPropertiesMap())
                .setReadOnly(mountInfo.getOptions().getReadOnly())
                .setShared(mountInfo.getOptions().getShared());
        mUfsManager.addMount(mountInfo.getMountId(), mountInfo.getUfsUri(), ufsConf);
      }

      // Startup Checks and Periodic Threads.
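      // Note: everything below runs only on the primary master, since this whole branch is
      // guarded by the isPrimary check above; standby masters skip these checks and threads.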
      // Rebuild the list of persist jobs (mPersistJobs) and the map of pending persist requests
      // (mPersistRequests).
      long persistInitialIntervalMs =
          ServerConfiguration.getMs(PropertyKey.MASTER_PERSISTENCE_INITIAL_INTERVAL_MS);
      long persistMaxIntervalMs =
          ServerConfiguration.getMs(PropertyKey.MASTER_PERSISTENCE_MAX_INTERVAL_MS);
      long persistMaxWaitMs =
          ServerConfiguration.getMs(PropertyKey.MASTER_PERSISTENCE_MAX_TOTAL_WAIT_TIME_MS);
      for (Long id : mInodeTree.getToBePersistedIds()) {
        Inode inode = mInodeStore.get(id).get();
        if (inode.isDirectory()
            || !inode.asFile().isCompleted() // when a file is completed, it is added to persist reqs
            || inode.getPersistenceState() != PersistenceState.TO_BE_PERSISTED
            || inode.asFile().getShouldPersistTime() == Constants.NO_AUTO_PERSIST) {
          continue;
        }
        InodeFile inodeFile = inode.asFile();
        if (inodeFile.getPersistJobId() == Constants.PERSISTENCE_INVALID_JOB_ID) {
          mPersistRequests.put(inodeFile.getId(), new alluxio.time.ExponentialTimer(
              persistInitialIntervalMs,
              persistMaxIntervalMs,
              getPersistenceWaitTime(inodeFile.getShouldPersistTime()),
              persistMaxWaitMs));
        } else {
          AlluxioURI path;
          try {
            path = mInodeTree.getPath(inodeFile);
          } catch (FileDoesNotExistException e) {
            LOG.error("Failed to determine path for inode with id {}", id, e);
            continue;
          }
          addPersistJob(id, inodeFile.getPersistJobId(),
              getPersistenceWaitTime(inodeFile.getShouldPersistTime()),
              path, inodeFile.getTempUfsPath());
        }
      }
      if (ServerConfiguration
          .getBoolean(PropertyKey.MASTER_STARTUP_BLOCK_INTEGRITY_CHECK_ENABLED)) {
        validateInodeBlocks(true);
      }

      int blockIntegrityCheckInterval = (int) ServerConfiguration
          .getMs(PropertyKey.MASTER_PERIODIC_BLOCK_INTEGRITY_CHECK_INTERVAL);
      if (blockIntegrityCheckInterval > 0) { // negative or zero interval implies disabled
        getExecutorService().submit(
            new HeartbeatThread(HeartbeatContext.MASTER_BLOCK_INTEGRITY_CHECK,
                new BlockIntegrityChecker(this), blockIntegrityCheckInterval,
                ServerConfiguration.global(), mMasterContext.getUserState()));
      }
      getExecutorService().submit(
          new HeartbeatThread(HeartbeatContext.MASTER_TTL_CHECK,
              new InodeTtlChecker(this, mInodeTree),
              (int) ServerConfiguration.getMs(PropertyKey.MASTER_TTL_CHECKER_INTERVAL_MS),
              ServerConfiguration.global(), mMasterContext.getUserState()));
      getExecutorService().submit(
          new HeartbeatThread(HeartbeatContext.MASTER_LOST_FILES_DETECTION,
              new LostFileDetector(this, mInodeTree),
              (int) ServerConfiguration.getMs(PropertyKey
                  .MASTER_LOST_WORKER_FILE_DETECTION_INTERVAL),
              ServerConfiguration.global(), mMasterContext.getUserState()));
      getExecutorService().submit(new HeartbeatThread(
          HeartbeatContext.MASTER_REPLICATION_CHECK,
          new alluxio.master.file.replication.ReplicationChecker(mInodeTree, mBlockMaster,
              mSafeModeManager, mJobMasterClientPool),
          (int) ServerConfiguration.getMs(PropertyKey.MASTER_REPLICATION_CHECK_INTERVAL_MS),
          ServerConfiguration.global(), mMasterContext.getUserState()));
      getExecutorService().submit(
          new HeartbeatThread(HeartbeatContext.MASTER_PERSISTENCE_SCHEDULER,
              new PersistenceScheduler(),
              (int) ServerConfiguration.getMs(PropertyKey.MASTER_PERSISTENCE_SCHEDULER_INTERVAL_MS),
              ServerConfiguration.global(), mMasterContext.getUserState()));
      mPersistCheckerPool =
          new java.util.concurrent.ThreadPoolExecutor(PERSIST_CHECKER_POOL_THREADS,
              PERSIST_CHECKER_POOL_THREADS, 1, java.util.concurrent.TimeUnit.MINUTES,
              new LinkedBlockingQueue<Runnable>(),
              alluxio.util.ThreadFactoryUtils.build("Persist-Checker-%d", true));
      mPersistCheckerPool.allowCoreThreadTimeOut(true);
      getExecutorService().submit(
          new HeartbeatThread(HeartbeatContext.MASTER_PERSISTENCE_CHECKER,
              new PersistenceChecker(),
              (int) ServerConfiguration.getMs(PropertyKey.MASTER_PERSISTENCE_CHECKER_INTERVAL_MS),
              ServerConfiguration.global(), mMasterContext.getUserState()));
      getExecutorService().submit(
          new HeartbeatThread(HeartbeatContext.MASTER_METRICS_TIME_SERIES,
              new TimeSeriesRecorder(),
              (int) ServerConfiguration.getMs(PropertyKey.MASTER_METRICS_TIME_SERIES_INTERVAL),
              ServerConfiguration.global(), mMasterContext.getUserState()));
      if (ServerConfiguration.getBoolean(PropertyKey.MASTER_AUDIT_LOGGING_ENABLED)) {
        mAsyncAuditLogWriter = new AsyncUserAccessAuditLogWriter();
        mAsyncAuditLogWriter.start();
      }
      if (ServerConfiguration.getBoolean(PropertyKey.UNDERFS_CLEANUP_ENABLED)) {
        getExecutorService().submit(
            new HeartbeatThread(HeartbeatContext.MASTER_UFS_CLEANUP, new UfsCleaner(this),
                (int) ServerConfiguration.getMs(PropertyKey.UNDERFS_CLEANUP_INTERVAL),
                ServerConfiguration.global(), mMasterContext.getUserState()));
      }
      mAccessTimeUpdater.start();
      mSyncManager.start();
    }
  }

  @Override
  public void stop() throws IOException {
    if (mAsyncAuditLogWriter != null) {
      mAsyncAuditLogWriter.stop();
      mAsyncAuditLogWriter = null;
    }
    mSyncManager.stop();
    mAccessTimeUpdater.stop();
    super.stop();
  }

  @Override
  public void close() throws IOException {
    super.close();
    mInodeTree.close();
    mInodeLockManager.close();
    try {
      mSyncMetadataExecutor.shutdownNow();
      mSyncMetadataExecutor.awaitTermination(5, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      LOG.warn("Failed to wait for metadata sync executor to shut down.");
    }
    try {
      mSyncPrefetchExecutor.shutdownNow();
      mSyncPrefetchExecutor.awaitTermination(5, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      LOG.warn("Failed to wait for ufs prefetch executor to shut down.");
    }
    try {
      mActiveSyncMetadataExecutor.shutdownNow();
      mActiveSyncMetadataExecutor.awaitTermination(5, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      LOG.warn("Failed to wait for active sync executor to shut down.");
    }
  }

  @Override
  public void validateInodeBlocks(boolean repair) throws UnavailableException {
    mBlockMaster.validateBlocks((blockId) -> {
      long fileId = IdUtils.fileIdFromBlockId(blockId);
      return mInodeTree.inodeIdExists(fileId);
    }, repair);
  }

  @Override
  public void cleanupUfs() {
    for (Map.Entry<String, MountInfo> mountPoint : mMountTable.getMountTable().entrySet()) {
      MountInfo info = mountPoint.getValue();
      if (info.getOptions().getReadOnly()) {
        continue;
      }
      try (CloseableResource<UnderFileSystem> ufsResource =
          mUfsManager.get(info.getMountId()).acquireUfsResource()) {
        ufsResource.get().cleanup();
      } catch (UnavailableException | NotFoundException e) {
        LOG.error("No UFS cached for {}", info, e);
      } catch (IOException e) {
        LOG.error("Failed to clean up UFS {}.", info, e);
      }
    }
  }

  @Override
  public long getFileId(AlluxioURI path) throws AccessControlException, UnavailableException {
    return getFileIdInternal(path, true);
  }

  private long getFileIdInternal(AlluxioURI path, boolean checkPermission)
      throws AccessControlException, UnavailableException {
    try (RpcContext rpcContext = createRpcContext()) {
      /*
       * In order to prevent locking twice on RPCs where metadata does _not_ need to be loaded, we
       * use a two-step scheme as an optimization to prevent the extra lock.
       * loadMetadataIfNotExists requires a lock on the tree to determine if the path should be
       * loaded before executing. To prevent the extra lock, we execute the RPC as normal and use
       * a conditional check in the main body of the function to determine whether control flow
       * should be shifted out of the RPC logic and back to the loadMetadataIfNotExists function.
       * If loadMetadataIfNotExists runs, then the next pass into the main logic body should
       * continue as normal. This may present a slight decrease in performance for newly-loaded
       * metadata, but it is better than affecting the most common case where metadata is not
       * being loaded.
       */
      LoadMetadataContext lmCtx = LoadMetadataContext.mergeFrom(
          LoadMetadataPOptions.newBuilder().setCreateAncestors(true));
      boolean run = true;
      boolean loadMetadata = false;
      while (run) {
        run = false;
        if (loadMetadata) {
          loadMetadataIfNotExist(rpcContext, path, lmCtx, false);
        }
        try (LockedInodePath inodePath = mInodeTree.lockInodePath(path, LockPattern.READ)) {
          if (checkPermission) {
            mPermissionChecker.checkPermission(Mode.Bits.READ, inodePath);
          }
          if (!loadMetadata && shouldLoadMetadataIfNotExists(inodePath, lmCtx)) {
            loadMetadata = true;
            run = true;
            continue;
          }
          mInodeTree.ensureFullInodePath(inodePath);
          return inodePath.getInode().getId();
        } catch (InvalidPathException | FileDoesNotExistException e) {
          return IdUtils.INVALID_FILE_ID;
        }
      }
    } catch (InvalidPathException e) {
      return IdUtils.INVALID_FILE_ID;
    }
    return IdUtils.INVALID_FILE_ID;
  }

  @Override
  public FileInfo getFileInfo(long fileId)
      throws FileDoesNotExistException, AccessControlException, UnavailableException {
    Metrics.GET_FILE_INFO_OPS.inc();
    try (LockedInodePath inodePath = mInodeTree.lockFullInodePath(fileId, LockPattern.READ)) {
      return getFileInfoInternal(inodePath);
    }
  }

  @Override
  public FileInfo getFileInfo(AlluxioURI path, GetStatusContext context)
      throws FileDoesNotExistException, InvalidPathException, AccessControlException, IOException {
    Metrics.GET_FILE_INFO_OPS.inc();
    boolean ufsAccessed = false;
    long opTimeMs = System.currentTimeMillis();
    try (RpcContext rpcContext = createRpcContext(context);
         FileSystemMasterAuditContext auditContext =
             createAuditContext("getFileInfo", path, null, null)) {
      if (syncMetadata(rpcContext, path, context.getOptions().getCommonOptions(),
          DescendantType.ONE, auditContext, LockedInodePath::getInodeOrNull,
          (inodePath, permChecker) -> permChecker.checkPermission(Mode.Bits.READ, inodePath),
          true)) {
        // If synced, do not load metadata.
        context.getOptions().setLoadMetadataType(LoadMetadataPType.NEVER);
        ufsAccessed = true;
      }
      LoadMetadataContext lmCtx = LoadMetadataContext.mergeFrom(
          LoadMetadataPOptions.newBuilder().setCreateAncestors(true).setCommonOptions(
              FileSystemMasterCommonPOptions.newBuilder()
                  .setTtl(context.getOptions().getCommonOptions().getTtl())
                  .setTtlAction(context.getOptions().getCommonOptions().getTtlAction())));
      /*
       * See the comments in #getFileIdInternal for an explanation on why the loop here is
       * required.
       */
      boolean run = true;
      boolean loadMetadata = false;
      FileInfo ret = null;
      while (run) {
        run = false;
        if (loadMetadata) {
          checkLoadMetadataOptions(context.getOptions().getLoadMetadataType(), path);
          loadMetadataIfNotExist(rpcContext, path, lmCtx, true);
          ufsAccessed = true;
        }
        LockingScheme lockingScheme = new LockingScheme(path, LockPattern.READ, false);
        try (LockedInodePath inodePath = mInodeTree.lockInodePath(lockingScheme)) {
          auditContext.setSrcInode(inodePath.getInodeOrNull());
          try {
            mPermissionChecker.checkPermission(Mode.Bits.READ, inodePath);
          } catch (AccessControlException e) {
            auditContext.setAllowed(false);
            throw e;
          }
          if (!loadMetadata && shouldLoadMetadataIfNotExists(inodePath, lmCtx)) {
            loadMetadata = true;
            run = true;
            continue;
          }
          ensureFullPathAndUpdateCache(inodePath);
          FileInfo fileInfo = getFileInfoInternal(inodePath);
          if (ufsAccessed) {
            MountTable.Resolution resolution = mMountTable.resolve(inodePath.getUri());
            Metrics.getUfsCounter(mMountTable.getMountInfo(
                resolution.getMountId()).getUfsUri().toString(),
                Metrics.UFSOps.GET_FILE_INFO).dec();
          }
          Mode.Bits accessMode = Mode.Bits.fromProto(context.getOptions().getAccessMode());
          if (context.getOptions().getUpdateTimestamps() && context.getOptions().hasAccessMode()
              && (accessMode.imply(Mode.Bits.READ) || accessMode.imply(Mode.Bits.WRITE))) {
            mAccessTimeUpdater.updateAccessTime(rpcContext.getJournalContext(),
                inodePath.getInode(), opTimeMs);
          }
          auditContext.setSrcInode(inodePath.getInode()).setSucceeded(true);
          ret = fileInfo;
        }
      }
      return ret;
    }
  }

  /**
   * @param inodePath the {@link LockedInodePath} to get the {@link FileInfo} for
   * @return the {@link FileInfo} for the given inode
   */
  private FileInfo getFileInfoInternal(LockedInodePath inodePath)
      throws FileDoesNotExistException, UnavailableException {
    Inode inode = inodePath.getInode();
    AlluxioURI uri = inodePath.getUri();
    FileInfo fileInfo = inode.generateClientFileInfo(uri.toString());
    if (fileInfo.isFolder()) {
      fileInfo.setLength(inode.asDirectory().getChildCount());
    }
    fileInfo.setInMemoryPercentage(getInMemoryPercentage(inode));
    fileInfo.setInAlluxioPercentage(getInAlluxioPercentage(inode));
    if (inode.isFile()) {
      try {
        fileInfo.setFileBlockInfos(getFileBlockInfoListInternal(inodePath));
      } catch (InvalidPathException e) {
        throw new FileDoesNotExistException(e.getMessage(), e);
      }
    }
    // Rehydrate missing block-infos for persisted files.
    if (fileInfo.getBlockIds().size() > fileInfo.getFileBlockInfos().size()
        && inode.isPersisted()) {
      // Collect the block ids for which no block-info exists yet.
      List<Long> missingBlockIds = fileInfo.getBlockIds().stream()
          .filter((bId) -> fileInfo.getFileBlockInfo(bId) == null).collect(Collectors.toList());
      LOG.warn("BlockInfo missing for file: {}. BlockIdsWithMissingInfos: {}", inodePath.getUri(),
          missingBlockIds.stream().map(Object::toString).collect(Collectors.joining(",")));
      // Remove old block metadata from block-master before re-committing.
      mBlockMaster.removeBlocks(fileInfo.getBlockIds(), true);
      // Commit all the file blocks (without locations) so the metadata for the block exists.
      commitBlockInfosForFile(
          fileInfo.getBlockIds(), fileInfo.getLength(), fileInfo.getBlockSizeBytes());
      // Reset file-block-info list with the new list.
      try {
        fileInfo.setFileBlockInfos(getFileBlockInfoListInternal(inodePath));
      } catch (InvalidPathException e) {
        throw new FileDoesNotExistException(
            String.format("Hydration failed for file: %s", inodePath.getUri()), e);
      }
    }
    fileInfo.setXAttr(inode.getXAttr());
    MountTable.Resolution resolution;
    try {
      resolution = mMountTable.resolve(uri);
    } catch (InvalidPathException e) {
      throw new FileDoesNotExistException(e.getMessage(), e);
    }
    AlluxioURI resolvedUri = resolution.getUri();
    fileInfo.setUfsPath(resolvedUri.toString());
    fileInfo.setMountId(resolution.getMountId());
    Metrics.getUfsCounter(mMountTable.getMountInfo(resolution.getMountId()).getUfsUri().toString(),
        Metrics.UFSOps.GET_FILE_INFO).inc();
    Metrics.FILE_INFOS_GOT.inc();
    return fileInfo;
  }

  @Override
  public PersistenceState getPersistenceState(long fileId) throws FileDoesNotExistException {
    try (LockedInodePath inodePath = mInodeTree.lockFullInodePath(fileId, LockPattern.READ)) {
      return inodePath.getInode().getPersistenceState();
    }
  }

  @Override
  public void listStatus(AlluxioURI path, ListStatusContext context,
      ResultStream<FileInfo> resultStream)
      throws AccessControlException, FileDoesNotExistException, InvalidPathException, IOException {
    Metrics.GET_FILE_INFO_OPS.inc();
    LockingScheme lockingScheme = new LockingScheme(path, LockPattern.READ, false);
    boolean ufsAccessed = false;
    try (RpcContext rpcContext = createRpcContext(context);
         FileSystemMasterAuditContext auditContext =
             createAuditContext("listStatus", path, null, null)) {
      DescendantType descendantType =
          context.getOptions().getRecursive() ? DescendantType.ALL : DescendantType.ONE;
      if (syncMetadata(rpcContext, path, context.getOptions().getCommonOptions(), descendantType,
          auditContext, LockedInodePath::getInodeOrNull,
          (inodePath, permChecker) -> permChecker.checkPermission(Mode.Bits.READ, inodePath))) {
        // If synced, do not load metadata.
        context.getOptions().setLoadMetadataType(LoadMetadataPType.NEVER);
        ufsAccessed = true;
      }
      /*
       * See the comments in #getFileIdInternal for an explanation on why the loop here is
       * required.
       */
      DescendantType loadDescendantType;
      if (context.getOptions().getLoadMetadataType() == LoadMetadataPType.NEVER) {
        loadDescendantType = DescendantType.NONE;
      } else if (context.getOptions().getRecursive()) {
        loadDescendantType = DescendantType.ALL;
      } else {
        loadDescendantType = DescendantType.ONE;
      }
      // load metadata for 1 level of descendants, or all descendants if recursive
      LoadMetadataContext loadMetadataContext = LoadMetadataContext.mergeFrom(
          LoadMetadataPOptions.newBuilder().setCreateAncestors(true)
              .setLoadDescendantType(GrpcUtils.toProto(loadDescendantType)).setCommonOptions(
                  FileSystemMasterCommonPOptions.newBuilder()
                      .setTtl(context.getOptions().getCommonOptions().getTtl())
                      .setTtlAction(context.getOptions().getCommonOptions().getTtlAction())));
      boolean loadMetadata = false;
      boolean run = true;
      while (run) {
        run = false;
        if (loadMetadata) {
          loadMetadataIfNotExist(rpcContext, path, loadMetadataContext, false);
          ufsAccessed = true;
        }
        // We just synced; the new lock pattern should not sync.
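        // (lockingScheme was created at the top of this method with its sync flag set to
        // false, so taking the lock again here will not trigger another metadata sync.)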
        try (LockedInodePath inodePath = mInodeTree.lockInodePath(lockingScheme)) {
          auditContext.setSrcInode(inodePath.getInodeOrNull());
          try {
            mPermissionChecker.checkPermission(Mode.Bits.READ, inodePath);
          } catch (AccessControlException e) {
            auditContext.setAllowed(false);
            throw e;
          }
          if (!loadMetadata) {
            Inode inode;
            boolean isLoaded = true;
            if (inodePath.fullPathExists()) {
              inode = inodePath.getInode();
              if (inode.isDirectory()
                  && context.getOptions().getLoadMetadataType() != LoadMetadataPType.ALWAYS) {
                InodeDirectory inodeDirectory = inode.asDirectory();
                isLoaded = inodeDirectory.isDirectChildrenLoaded();
                if (context.getOptions().getRecursive()) {
                  isLoaded = areDescendantsLoaded(inodeDirectory);
                }
                if (isLoaded) {
                  // no need to load again.
                  loadMetadataContext.getOptions().setLoadDescendantType(LoadDescendantPType.NONE);
                }
              }
            } else {
              checkLoadMetadataOptions(context.getOptions().getLoadMetadataType(),
                  inodePath.getUri());
            }
            if (shouldLoadMetadataIfNotExists(inodePath, loadMetadataContext)) {
              loadMetadata = true;
              run = true;
              continue;
            }
          }
          ensureFullPathAndUpdateCache(inodePath);
          auditContext.setSrcInode(inodePath.getInode());
          if (context.getOptions().getResultsRequired()) {
            DescendantType descendantTypeForListStatus =
                (context.getOptions().getRecursive()) ? DescendantType.ALL : DescendantType.ONE;
            listStatusInternal(context, rpcContext, inodePath, auditContext,
                descendantTypeForListStatus, resultStream, 0);
          }
          auditContext.setSucceeded(true);
          Metrics.FILE_INFOS_GOT.inc();
          if (!ufsAccessed) {
            MountTable.Resolution resolution = mMountTable.resolve(inodePath.getUri());
            Metrics.getUfsCounter(mMountTable.getMountInfo(resolution.getMountId())
                .getUfsUri().toString(), Metrics.UFSOps.LIST_STATUS).inc();
          }
        }
      }
    }
  }

  @Override
  public List<FileInfo> listStatus(AlluxioURI path, ListStatusContext context)
      throws AccessControlException, FileDoesNotExistException, InvalidPathException, IOException {
    final List<FileInfo> fileInfos = new ArrayList<>();
    listStatus(path, context, (item) -> fileInfos.add(item));
    return fileInfos;
  }

  /**
   * Lists the status of the path in {@link LockedInodePath}, possibly recursively depending on
   * the descendantType. The result is returned via a list specified by statusList, in postorder
   * traversal order.
   *
   * @param context call context
   * @param rpcContext the context for the RPC call
   * @param currInodePath the inode path to find the status
   * @param auditContext the audit context to return any access exceptions
   * @param descendantType if the currInodePath is a directory, how many levels of its descendants
   *        should be returned
   * @param resultStream the stream to receive individual results
   * @param depth internal use field for tracking depth relative to the root item
   */
  private void listStatusInternal(ListStatusContext context, RpcContext rpcContext,
      LockedInodePath currInodePath, AuditContext auditContext, DescendantType descendantType,
      ResultStream<FileInfo> resultStream, int depth)
      throws FileDoesNotExistException, UnavailableException, AccessControlException,
      InvalidPathException {
    rpcContext.throwIfCancelled();
    Inode inode = currInodePath.getInode();
    if (inode.isDirectory() && descendantType != DescendantType.NONE) {
      try {
        // TODO(david): Return the error message when we do not have permission
        mPermissionChecker.checkPermission(Mode.Bits.EXECUTE, currInodePath);
      } catch (AccessControlException e) {
        auditContext.setAllowed(false);
        if (descendantType == DescendantType.ALL) {
          return;
        } else {
          throw e;
        }
      }
      mAccessTimeUpdater.updateAccessTime(rpcContext.getJournalContext(), inode,
          CommonUtils.getCurrentMs());
      DescendantType nextDescendantType = (descendantType == DescendantType.ALL)
          ? DescendantType.ALL : DescendantType.NONE;
      // This is to generate the parsed child path components to be passed to lockChildPath
      String[] childComponentsHint = null;
      for (Inode child : mInodeStore.getChildren(inode.asDirectory())) {
        if (childComponentsHint == null) {
          String[] parentComponents = PathUtils.getPathComponents(currInodePath.getUri().getPath());
          childComponentsHint = new String[parentComponents.length + 1];
          System.arraycopy(parentComponents, 0, childComponentsHint, 0, parentComponents.length);
        }
        // TODO(david): Make extending InodePath more efficient
        childComponentsHint[childComponentsHint.length - 1] = child.getName();
        try (LockedInodePath childInodePath =
            currInodePath.lockChild(child, LockPattern.READ, childComponentsHint)) {
          listStatusInternal(context, rpcContext, childInodePath, auditContext,
              nextDescendantType, resultStream, depth + 1);
        } catch (InvalidPathException | FileDoesNotExistException e) {
          LOG.debug("Path \"{}\" is invalid, has been ignored.",
              PathUtils.concatPath("/", childComponentsHint));
        }
      }
    }
    // Listing a directory should not emit an item for the directory itself.
    if (depth != 0 || inode.isFile()) {
      resultStream.submit(getFileInfoInternal(currInodePath));
    }
  }

  /**
   * Checks the {@link LoadMetadataPType} to determine whether or not to proceed in loading
   * metadata. This method assumes that the path does not exist in the Alluxio namespace, and
   * will throw an exception if metadata should not be loaded.
   *
   * @param loadMetadataType the {@link LoadMetadataPType} to check
   * @param path the path that does not exist in the Alluxio namespace (used for exception message)
   */
  private void checkLoadMetadataOptions(LoadMetadataPType loadMetadataType, AlluxioURI path)
      throws FileDoesNotExistException {
    if (loadMetadataType == LoadMetadataPType.NEVER || (loadMetadataType == LoadMetadataPType.ONCE
        && mUfsAbsentPathCache.isAbsent(path))) {
      throw new FileDoesNotExistException(ExceptionMessage.PATH_DOES_NOT_EXIST.getMessage(path));
    }
  }

  private boolean areDescendantsLoaded(InodeDirectoryView inode) {
    if (!inode.isDirectChildrenLoaded()) {
      return false;
    }
    for (Inode child : mInodeStore.getChildren(inode)) {
      if (child.isDirectory()) {
        if (!areDescendantsLoaded(child.asDirectory())) {
          return false;
        }
      }
    }
    return true;
  }

  /**
   * Checks to see if the entire path exists in Alluxio. Updates the absent cache if it does not
   * exist.
   *
   * @param inodePath the path to ensure
   */
  private void ensureFullPathAndUpdateCache(LockedInodePath inodePath)
      throws InvalidPathException, FileDoesNotExistException {
    boolean exists = false;
    try {
      mInodeTree.ensureFullInodePath(inodePath);
      exists = true;
    } finally {
      if (!exists) {
        mUfsAbsentPathCache.process(inodePath.getUri(), inodePath.getInodeList());
      }
    }
  }

  @Override
  public FileSystemMasterView getFileSystemMasterView() {
    return new FileSystemMasterView(this);
  }

  @Override
  public void checkAccess(AlluxioURI path, CheckAccessContext context)
      throws FileDoesNotExistException, InvalidPathException, AccessControlException, IOException {
    try (RpcContext rpcContext = createRpcContext(context);
         FileSystemMasterAuditContext auditContext =
             createAuditContext("checkAccess", path, null, null)) {
      Mode.Bits bits = Mode.Bits.fromProto(context.getOptions().getBits());
      syncMetadata(rpcContext, path, context.getOptions().getCommonOptions(),
          DescendantType.NONE, auditContext, LockedInodePath::getInodeOrNull,
          (inodePath, permChecker) -> permChecker.checkPermission(bits, inodePath));
      LockingScheme lockingScheme =
          createLockingScheme(path, context.getOptions().getCommonOptions(), LockPattern.READ);
      try (LockedInodePath inodePath = mInodeTree.lockInodePath(lockingScheme)) {
        mPermissionChecker.checkPermission(bits, inodePath);
        if (!inodePath.fullPathExists()) {
          throw new FileDoesNotExistException(ExceptionMessage
              .PATH_DOES_NOT_EXIST.getMessage(path));
        }
        auditContext.setSucceeded(true);
      }
    }
  }

  @Override
  public List<AlluxioURI> checkConsistency(AlluxioURI path, CheckConsistencyContext context)
      throws AccessControlException, FileDoesNotExistException, InvalidPathException, IOException {
    List<AlluxioURI> inconsistentUris = new ArrayList<>();
    try (RpcContext rpcContext = createRpcContext(context);
         FileSystemMasterAuditContext auditContext =
             createAuditContext("checkConsistency", path, null, null)) {
      syncMetadata(rpcContext, path, context.getOptions().getCommonOptions(), DescendantType.ALL,
          auditContext, LockedInodePath::getInodeOrNull,
          (inodePath, permChecker) -> permChecker.checkPermission(Mode.Bits.READ, inodePath));
      LockingScheme lockingScheme =
          createLockingScheme(path, context.getOptions().getCommonOptions(), LockPattern.READ);
      try (LockedInodePath parent = mInodeTree.lockInodePath(
          lockingScheme.getPath(), lockingScheme.getPattern())) {
        auditContext.setSrcInode(parent.getInodeOrNull());
        try {
          mPermissionChecker.checkPermission(Mode.Bits.READ, parent);
        } catch (AccessControlException e) {
          auditContext.setAllowed(false);
          throw e;
        }
        checkConsistencyRecursive(parent, inconsistentUris);
        auditContext.setSucceeded(true);
      }
    }
    return inconsistentUris;
  }

  private void checkConsistencyRecursive(LockedInodePath inodePath,
      List<AlluxioURI> inconsistentUris) throws IOException, FileDoesNotExistException {
    Inode inode = inodePath.getInode();
    try {
      if (!checkConsistencyInternal(inodePath)) {
        inconsistentUris.add(inodePath.getUri());
      }
      if (inode.isDirectory()) {
        InodeDirectory inodeDir = inode.asDirectory();
        for (Inode child : mInodeStore.getChildren(inodeDir)) {
          try (LockedInodePath childPath = inodePath.lockChild(child, LockPattern.READ)) {
            checkConsistencyRecursive(childPath, inconsistentUris);
          }
        }
      }
    } catch (InvalidPathException e) {
      LOG.debug("Path \"{}\" is invalid, has been ignored.",
          PathUtils.concatPath(inodePath.getUri().getPath()));
    }
  }

  /**
   * Checks if a path is consistent between Alluxio and the underlying storage.
   * <p>
   * A path without a backing under storage is always consistent.
   * <p>
   * A not persisted path is considered consistent if:
   * 1. It does not shadow an object in the underlying storage.
   * <p>
   * A persisted path is considered consistent if:
   * 1. An equivalent object exists for its under storage path.
   * 2. The metadata of the Alluxio and under storage object are equal.
   *
   * @param inodePath the path to check. This must exist and be read-locked
   * @return true if the path is consistent, false otherwise
   */
  private boolean checkConsistencyInternal(LockedInodePath inodePath)
      throws InvalidPathException, IOException {
    Inode inode;
    try {
      inode = inodePath.getInode();
    } catch (FileDoesNotExistException e) {
      // already checked existence when creating the inodePath
      throw new RuntimeException(e);
    }
    MountTable.Resolution resolution = mMountTable.resolve(inodePath.getUri());
    try (CloseableResource<UnderFileSystem> ufsResource = resolution.acquireUfsResource()) {
      UnderFileSystem ufs = ufsResource.get();
      String ufsPath = resolution.getUri().getPath();
      if (ufs == null) {
        return true;
      }
      if (!inode.isPersisted()) {
        return !ufs.exists(ufsPath);
      }
      UfsStatus ufsStatus;
      try {
        ufsStatus = ufs.getStatus(ufsPath);
      } catch (FileNotFoundException e) {
        return !inode.isPersisted();
      }
      // TODO(calvin): Evaluate which other metadata fields should be validated.
      if (inode.isDirectory()) {
        return ufsStatus.isDirectory();
      } else {
        String ufsFingerprint = Fingerprint.create(ufs.getUnderFSType(), ufsStatus).serialize();
        return ufsStatus.isFile()
            && (ufsFingerprint.equals(inode.asFile().getUfsFingerprint()));
      }
    }
  }

  @Override
  public void completeFile(AlluxioURI path, CompleteFileContext context)
      throws BlockInfoException, FileDoesNotExistException, InvalidPathException,
      InvalidFileSizeException, FileAlreadyCompletedException, AccessControlException,
      UnavailableException {
    Metrics.COMPLETE_FILE_OPS.inc();
    // No need to syncMetadata before complete.
    try (RpcContext rpcContext = createRpcContext(context);
         LockedInodePath inodePath = mInodeTree.lockFullInodePath(path, LockPattern.WRITE_INODE);
         FileSystemMasterAuditContext auditContext =
             createAuditContext("completeFile", path, null, inodePath.getInodeOrNull())) {
      try {
        mPermissionChecker.checkPermission(Mode.Bits.WRITE, inodePath);
      } catch (AccessControlException e) {
        auditContext.setAllowed(false);
        throw e;
      }
      // Even readonly mount points should be able to complete a file, for UFS reads in CACHE mode.
      completeFileInternal(rpcContext, inodePath, context);
      // Schedule async persistence if requested.
      if (context.getOptions().hasAsyncPersistOptions()) {
        scheduleAsyncPersistenceInternal(inodePath, ScheduleAsyncPersistenceContext
            .create(context.getOptions().getAsyncPersistOptionsBuilder()), rpcContext);
      }
      auditContext.setSucceeded(true);
    }
  }

  /**
   * Completes a file. After a file is completed, it cannot be written to.
   *
   * @param rpcContext the rpc context
   * @param inodePath the {@link LockedInodePath} to complete
   * @param context the method context
   */
  void completeFileInternal(RpcContext rpcContext, LockedInodePath inodePath,
      CompleteFileContext context)
      throws InvalidPathException, FileDoesNotExistException, BlockInfoException,
      FileAlreadyCompletedException, InvalidFileSizeException, UnavailableException {
    Inode inode = inodePath.getInode();
    if (!inode.isFile()) {
      throw new FileDoesNotExistException(
          ExceptionMessage.PATH_MUST_BE_FILE.getMessage(inodePath.getUri()));
    }

    InodeFile fileInode = inode.asFile();
    List<Long> blockIdList = fileInode.getBlockIds();
    List<BlockInfo> blockInfoList = mBlockMaster.getBlockInfoList(blockIdList);
    if (!fileInode.isPersisted() && blockInfoList.size() != blockIdList.size()) {
      throw new BlockInfoException("Cannot complete a file without all the blocks committed");
    }

    // Iterate over all file blocks committed to Alluxio, computing the length and verifying that
    // all the blocks (except the last one) are the same size as the file block size.
    long inAlluxioLength = 0;
    long fileBlockSize = fileInode.getBlockSizeBytes();
    for (int i = 0; i < blockInfoList.size(); i++) {
      BlockInfo blockInfo = blockInfoList.get(i);
      inAlluxioLength += blockInfo.getLength();
      if (i < blockInfoList.size() - 1 && blockInfo.getLength() != fileBlockSize) {
        throw new BlockInfoException(
            "Block index " + i + " has a block size smaller than the file block size ("
                + fileInode.getBlockSizeBytes() + ")");
      }
    }

    // If the file is persisted, its length is determined by UFS. Otherwise, its length is
    // determined by its size in Alluxio.
    long length = fileInode.isPersisted() ? context.getOptions().getUfsLength() : inAlluxioLength;

    String ufsFingerprint = Constants.INVALID_UFS_FINGERPRINT;
    if (fileInode.isPersisted()) {
      UfsStatus ufsStatus = context.getUfsStatus();
      // Retrieve the UFS fingerprint for this file.
      MountTable.Resolution resolution = mMountTable.resolve(inodePath.getUri());
      AlluxioURI resolvedUri = resolution.getUri();
      try (CloseableResource<UnderFileSystem> ufsResource = resolution.acquireUfsResource()) {
        UnderFileSystem ufs = ufsResource.get();
        if (ufsStatus == null) {
          ufsFingerprint = ufs.getFingerprint(resolvedUri.toString());
        } else {
          ufsFingerprint = Fingerprint.create(ufs.getUnderFSType(), ufsStatus).serialize();
        }
      }
    }

    completeFileInternal(rpcContext, inodePath, length, context.getOperationTimeMs(),
        ufsFingerprint);
  }

  /**
   * @param rpcContext the rpc context
   * @param inodePath the {@link LockedInodePath} to complete
   * @param length the length to use
   * @param opTimeMs the operation time (in milliseconds)
   * @param ufsFingerprint the ufs fingerprint
   */
  private void completeFileInternal(RpcContext rpcContext, LockedInodePath inodePath, long length,
      long opTimeMs, String ufsFingerprint)
      throws FileDoesNotExistException, InvalidPathException, InvalidFileSizeException,
      FileAlreadyCompletedException, UnavailableException {
    Preconditions.checkState(inodePath.getLockPattern().isWrite());

    InodeFile inode = inodePath.getInodeFile();
    if (inode.isCompleted() && inode.getLength() != Constants.UNKNOWN_SIZE) {
      throw new FileAlreadyCompletedException(
          "File " + inode.getName() + " has already been completed.");
    }
    if (length < 0 && length != Constants.UNKNOWN_SIZE) {
      throw new InvalidFileSizeException(
          "File " + inode.getName() + " cannot have negative length: " + length);
    }
    Builder entry = UpdateInodeFileEntry.newBuilder()
        .setId(inode.getId())
        .setPath(inodePath.getUri().getPath())
        .setCompleted(true)
        .setLength(length);

    if (length == Constants.UNKNOWN_SIZE) {
      // TODO(gpang): allow unknown files to be multiple blocks.
      // If the length of the file is unknown, only allow 1 block to the file.
      length = inode.getBlockSizeBytes();
    }
    int sequenceNumber = 0;
    long remainingBytes = length;
    while (remainingBytes > 0) {
      entry.addSetBlocks(BlockId.createBlockId(inode.getBlockContainerId(), sequenceNumber));
      remainingBytes -= Math.min(remainingBytes, inode.getBlockSizeBytes());
      sequenceNumber++;
    }

    if (inode.isPersisted()) {
      // Commit all the file blocks (without locations) so the metadata for the block exists.
      commitBlockInfosForFile(entry.getSetBlocksList(), length, inode.getBlockSizeBytes());
      // The path exists in UFS, so it is no longer absent
      mUfsAbsentPathCache.processExisting(inodePath.getUri());
    }

    // We could introduce a concept of composite entries, so that these two entries could
    // be applied in a single call to applyAndJournal.
    mInodeTree.updateInode(rpcContext, UpdateInodeEntry.newBuilder()
        .setId(inode.getId())
        .setUfsFingerprint(ufsFingerprint)
        .setLastModificationTimeMs(opTimeMs)
        .setLastAccessTimeMs(opTimeMs)
        .setOverwriteModificationTime(true)
        .build());
    mInodeTree.updateInodeFile(rpcContext, entry.build());

    Metrics.FILES_COMPLETED.inc();
  }

  /**
   * Commits blocks to BlockMaster for the given block list.
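   * Block lengths are derived from the total file length: each block is assigned
   * min(remaining bytes, block size) bytes, so only the last block may be shorter than the
   * file block size.
   *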
* * @param blockIds the list of block ids * @param fileLength length of the file in bytes * @param blockSize the block size in bytes */ private void commitBlockInfosForFile(List<Long> blockIds, long fileLength, long blockSize) throws UnavailableException { long currLength = fileLength; for (long blockId : blockIds) { long currentBlockSize = Math.min(currLength, blockSize); mBlockMaster.commitBlockInUFS(blockId, currentBlockSize); currLength -= currentBlockSize; } } @Override public FileInfo createFile(AlluxioURI path, CreateFileContext context) throws AccessControlException, InvalidPathException, FileAlreadyExistsException, BlockInfoException, IOException, FileDoesNotExistException { Metrics.CREATE_FILES_OPS.inc(); try (RpcContext rpcContext = createRpcContext(context); FileSystemMasterAuditContext auditContext = createAuditContext("createFile", path, null, null)) { syncMetadata(rpcContext, path, context.getOptions().getCommonOptions(), DescendantType.ONE, auditContext, (inodePath) -> context.getOptions().getRecursive() ? inodePath.getLastExistingInode() : inodePath.getParentInodeOrNull(), (inodePath, permChecker) -> permChecker .checkParentPermission(Mode.Bits.WRITE, inodePath)); LockingScheme lockingScheme = createLockingScheme(path, context.getOptions().getCommonOptions(), LockPattern.WRITE_EDGE); try (LockedInodePath inodePath = mInodeTree.lockInodePath(lockingScheme)) { auditContext.setSrcInode(inodePath.getParentInodeOrNull()); if (context.getOptions().getRecursive()) { auditContext.setSrcInode(inodePath.getLastExistingInode()); } try { mPermissionChecker.checkParentPermission(Mode.Bits.WRITE, inodePath); } catch (AccessControlException e) { auditContext.setAllowed(false); throw e; } mMountTable.checkUnderWritableMountPoint(path); if (context.isPersisted()) { // Check if ufs is writable checkUfsMode(path, OperationType.WRITE); } createFileInternal(rpcContext, inodePath, context); auditContext.setSrcInode(inodePath.getInode()).setSucceeded(true); return getFileInfoInternal(inodePath); } } } /** * @param rpcContext the rpc context * @param inodePath the path to be created * @param context the method context * @return the list of created inodes */ List<Inode> createFileInternal(RpcContext rpcContext, LockedInodePath inodePath, CreateFileContext context) throws InvalidPathException, FileAlreadyExistsException, BlockInfoException, IOException, FileDoesNotExistException { if (mWhitelist.inList(inodePath.getUri().toString())) { context.setCacheable(true); } // If the create succeeded, the list of created inodes will not be empty. List<Inode> created = mInodeTree.createPath(rpcContext, inodePath, context); if (context.isPersisted()) { // The path exists in UFS, so it is no longer absent. The ancestors exist in UFS, but the // actual file does not exist in UFS yet. 
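// Note that only the parent path is marked as existing here; the file's own UFS object is
// only created once its data is persisted.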
mUfsAbsentPathCache.processExisting(inodePath.getUri().getParent()); } else { MountTable.Resolution resolution = mMountTable.resolve(inodePath.getUri()); Metrics.getUfsCounter(mMountTable.getMountInfo(resolution.getMountId()) .getUfsUri().toString(), Metrics.UFSOps.CREATE_FILE).inc(); } Metrics.FILES_CREATED.inc(); return created; } @Override public long getNewBlockIdForFile(AlluxioURI path) throws FileDoesNotExistException, InvalidPathException, AccessControlException, UnavailableException { Metrics.GET_NEW_BLOCK_OPS.inc(); try (RpcContext rpcContext = createRpcContext(); LockedInodePath inodePath = mInodeTree.lockFullInodePath(path, LockPattern.WRITE_INODE); FileSystemMasterAuditContext auditContext = createAuditContext("getNewBlockIdForFile", path, null, inodePath.getInodeOrNull())) { try { mPermissionChecker.checkPermission(Mode.Bits.WRITE, inodePath); } catch (AccessControlException e) { auditContext.setAllowed(false); throw e; } Metrics.NEW_BLOCKS_GOT.inc(); long blockId = mInodeTree.newBlock(rpcContext, NewBlockEntry.newBuilder() .setId(inodePath.getInode().getId()) .build()); auditContext.setSucceeded(true); return blockId; } } @Override public Map<String, MountPointInfo> getMountPointInfoSummary() { SortedMap<String, MountPointInfo> mountPoints = new TreeMap<>(); for (Map.Entry<String, MountInfo> mountPoint : mMountTable.getMountTable().entrySet()) { mountPoints.put(mountPoint.getKey(), getDisplayMountPointInfo(mountPoint.getValue())); } return mountPoints; } @Override public MountPointInfo getDisplayMountPointInfo(AlluxioURI path) throws InvalidPathException { if (!mMountTable.isMountPoint(path)) { throw new InvalidPathException( ExceptionMessage.PATH_MUST_BE_MOUNT_POINT.getMessage(path)); } return getDisplayMountPointInfo(mMountTable.getMountTable().get(path.toString())); } /** * Gets the mount point information for display from the given mount information. * * @param mountInfo the mount information to transform * @return the mount point information */ private MountPointInfo getDisplayMountPointInfo(MountInfo mountInfo) { MountPointInfo info = mountInfo.toDisplayMountPointInfo(); try (CloseableResource<UnderFileSystem> ufsResource = mUfsManager.get(mountInfo.getMountId()).acquireUfsResource()) { UnderFileSystem ufs = ufsResource.get(); info.setUfsType(ufs.getUnderFSType()); try { info.setUfsCapacityBytes( ufs.getSpace(info.getUfsUri(), UnderFileSystem.SpaceType.SPACE_TOTAL)); } catch (IOException e) { LOG.warn("Cannot get total capacity of {}", info.getUfsUri(), e); } try { info.setUfsUsedBytes( ufs.getSpace(info.getUfsUri(), UnderFileSystem.SpaceType.SPACE_USED)); } catch (IOException e) { LOG.warn("Cannot get used capacity of {}", info.getUfsUri(), e); } } catch (UnavailableException | NotFoundException e) { // We should never reach here LOG.error("No UFS cached for {}", info, e); } return info; } @Override public long getInodeCount() { return mInodeTree.getInodeCount(); } @Override public int getNumberOfPinnedFiles() { return mInodeTree.getPinnedSize(); } @Override public void delete(AlluxioURI path, DeleteContext context) throws IOException, FileDoesNotExistException, DirectoryNotEmptyException, InvalidPathException, AccessControlException { Metrics.DELETE_PATHS_OPS.inc(); try (RpcContext rpcContext = createRpcContext(context); FileSystemMasterAuditContext auditContext = createAuditContext("delete", path, null, null)) { syncMetadata(rpcContext, path, context.getOptions().getCommonOptions(), context.getOptions().getRecursive() ?
DescendantType.ALL : DescendantType.ONE, auditContext, LockedInodePath::getInodeOrNull, (inodePath, permChecker) -> permChecker.checkParentPermission(Mode.Bits.WRITE, inodePath) ); LockingScheme lockingScheme = createLockingScheme(path, context.getOptions().getCommonOptions(), LockPattern.WRITE_EDGE); try (LockedInodePath inodePath = mInodeTree .lockInodePath(lockingScheme)) { mPermissionChecker.checkParentPermission(Mode.Bits.WRITE, inodePath); if (context.getOptions().getRecursive()) { List<String> failedChildren = new ArrayList<>(); try (LockedInodePathList descendants = mInodeTree.getDescendants(inodePath)) { for (LockedInodePath childPath : descendants) { try { mPermissionChecker.checkPermission(Mode.Bits.WRITE, childPath); if (mMountTable.isMountPoint(childPath.getUri())) { mMountTable.checkUnderWritableMountPoint(childPath.getUri()); } } catch (AccessControlException e) { failedChildren.add(e.getMessage()); } } if (failedChildren.size() > 0) { throw new AccessControlException(ExceptionMessage.DELETE_FAILED_DIR_CHILDREN .getMessage(path, StringUtils.join(failedChildren, ","))); } } catch (AccessControlException e) { auditContext.setAllowed(false); throw e; } } mMountTable.checkUnderWritableMountPoint(path); if (!inodePath.fullPathExists()) { throw new FileDoesNotExistException(ExceptionMessage.PATH_DOES_NOT_EXIST .getMessage(path)); } deleteInternal(rpcContext, inodePath, context); auditContext.setSucceeded(true); } } } /** * Implements file deletion. * <p> * This method does not delete blocks; the blocks of deleted inodes are removed only after the * inode deletion journal entry has been written. We cannot delete blocks earlier because the * inode deletion may fail, leaving us with an inode containing deleted blocks. * * @param rpcContext the rpc context * @param inodePath the file {@link LockedInodePath} * @param deleteContext the method options */ @VisibleForTesting public void deleteInternal(RpcContext rpcContext, LockedInodePath inodePath, DeleteContext deleteContext) throws FileDoesNotExistException, IOException, DirectoryNotEmptyException, InvalidPathException { Preconditions.checkState(inodePath.getLockPattern() == LockPattern.WRITE_EDGE); // TODO(jiri): A crash after any UFS object is deleted and before the delete operation is // journaled will result in an inconsistency between Alluxio and UFS. if (!inodePath.fullPathExists()) { return; } long opTimeMs = System.currentTimeMillis(); Inode inode = inodePath.getInode(); if (inode == null) { return; } boolean recursive = deleteContext.getOptions().getRecursive(); if (inode.isDirectory() && !recursive && mInodeStore.hasChildren(inode.asDirectory())) { // inode is nonempty, and we don't want to delete a nonempty directory unless recursive is // true throw new DirectoryNotEmptyException(ExceptionMessage.DELETE_NONEMPTY_DIRECTORY_NONRECURSIVE, inode.getName()); } if (mInodeTree.isRootId(inode.getId())) { // The root cannot be deleted.
throw new InvalidPathException(ExceptionMessage.DELETE_ROOT_DIRECTORY.getMessage()); } // Inodes for which deletion will be attempted List<Pair<AlluxioURI, LockedInodePath>> inodesToDelete = new ArrayList<>(); // Add root of sub-tree to delete inodesToDelete.add(new Pair<>(inodePath.getUri(), inodePath)); try (LockedInodePathList descendants = mInodeTree.getDescendants(inodePath)) { for (LockedInodePath childPath : descendants) { inodesToDelete.add(new Pair<>(mInodeTree.getPath(childPath.getInode()), childPath)); } // Prepare to delete persisted inodes UfsDeleter ufsDeleter = NoopUfsDeleter.INSTANCE; if (!deleteContext.getOptions().getAlluxioOnly()) { ufsDeleter = new SafeUfsDeleter(mMountTable, mInodeStore, inodesToDelete, deleteContext.getOptions().build()); } // Inodes to delete from tree after attempting to delete from UFS List<Pair<AlluxioURI, LockedInodePath>> revisedInodesToDelete = new ArrayList<>(); // Inodes that are not safe for recursive deletes Set<Long> unsafeInodes = new HashSet<>(); // Alluxio URIs (and the reason for failure) which could not be deleted List<Pair<String, String>> failedUris = new ArrayList<>(); // We go through the inodes in reverse order (children before parents), attempting to delete // each from the UFS. For files, we also clean up pending persist requests and jobs. for (int i = inodesToDelete.size() - 1; i >= 0; i--) { rpcContext.throwIfCancelled(); Pair<AlluxioURI, LockedInodePath> inodePairToDelete = inodesToDelete.get(i); AlluxioURI alluxioUriToDelete = inodePairToDelete.getFirst(); Inode inodeToDelete = inodePairToDelete.getSecond().getInode(); String failureReason = null; if (unsafeInodes.contains(inodeToDelete.getId())) { failureReason = ExceptionMessage.DELETE_FAILED_DIR_NONEMPTY.getMessage(); } else if (inodeToDelete.isPersisted()) { // If this is a mount point, we have deleted all the children and can unmount it // TODO(calvin): Add tests (ALLUXIO-1831) if (mMountTable.isMountPoint(alluxioUriToDelete)) { mMountTable.delete(rpcContext, alluxioUriToDelete, true); } else { if (!deleteContext.getOptions().getAlluxioOnly()) { try { checkUfsMode(alluxioUriToDelete, OperationType.WRITE); // Attempt to delete node if all children were deleted successfully ufsDeleter.delete(alluxioUriToDelete, inodeToDelete); } catch (AccessControlException e) { // In case the UFS is not writable, we will still attempt to delete other entries, // if any, as they may be from a different mount point LOG.warn(e.getMessage()); failureReason = e.getMessage(); } catch (IOException e) { LOG.warn(e.getMessage()); failureReason = e.getMessage(); } } } } if (failureReason == null) { if (inodeToDelete.isFile()) { long fileId = inodeToDelete.getId(); // Remove the file from the set of files to persist. mPersistRequests.remove(fileId); // Cancel any ongoing jobs.
PersistJob job = mPersistJobs.get(fileId); if (job != null) { job.setCancelState(PersistJob.CancelState.TO_BE_CANCELED); } } revisedInodesToDelete.add(new Pair<>(alluxioUriToDelete, inodePairToDelete.getSecond())); } else { unsafeInodes.add(inodeToDelete.getId()); // Propagate 'unsafe-ness' to parent as one of its descendants can't be deleted unsafeInodes.add(inodeToDelete.getParentId()); failedUris.add(new Pair<>(alluxioUriToDelete.toString(), failureReason)); } } if (mSyncManager.isSyncPoint(inodePath.getUri())) { mSyncManager.stopSyncAndJournal(RpcContext.NOOP, inodePath.getUri()); } // Delete Inodes for (Pair<AlluxioURI, LockedInodePath> delInodePair : revisedInodesToDelete) { LockedInodePath tempInodePath = delInodePair.getSecond(); MountTable.Resolution resolution = mMountTable.resolve(tempInodePath.getUri()); mInodeTree.deleteInode(rpcContext, tempInodePath, opTimeMs); if (deleteContext.getOptions().getAlluxioOnly()) { Metrics.getUfsCounter(mMountTable.getMountInfo(resolution.getMountId()) .getUfsUri().toString(), Metrics.UFSOps.DELETE_FILE).inc(); } } if (!failedUris.isEmpty()) { Collection<String> messages = failedUris.stream() .map(pair -> String.format("%s (%s)", pair.getFirst(), pair.getSecond())) .collect(Collectors.toList()); throw new FailedPreconditionException( ExceptionMessage.DELETE_FAILED_UFS.getMessage(StringUtils.join(messages, ", "))); } } Metrics.PATHS_DELETED.inc(inodesToDelete.size()); } @Override public List<FileBlockInfo> getFileBlockInfoList(AlluxioURI path) throws FileDoesNotExistException, InvalidPathException, AccessControlException, UnavailableException { Metrics.GET_FILE_BLOCK_INFO_OPS.inc(); try (LockedInodePath inodePath = mInodeTree.lockFullInodePath(path, LockPattern.READ); FileSystemMasterAuditContext auditContext = createAuditContext("getFileBlockInfoList", path, null, inodePath.getInodeOrNull())) { try { mPermissionChecker.checkPermission(Mode.Bits.READ, inodePath); } catch (AccessControlException e) { auditContext.setAllowed(false); throw e; } List<FileBlockInfo> ret = getFileBlockInfoListInternal(inodePath); Metrics.FILE_BLOCK_INFOS_GOT.inc(); auditContext.setSucceeded(true); return ret; } } /** * @param inodePath the {@link LockedInodePath} to get the info for * @return a list of {@link FileBlockInfo} for all the blocks of the given inode */ private List<FileBlockInfo> getFileBlockInfoListInternal(LockedInodePath inodePath) throws InvalidPathException, FileDoesNotExistException, UnavailableException { InodeFile file = inodePath.getInodeFile(); List<BlockInfo> blockInfoList = mBlockMaster.getBlockInfoList(file.getBlockIds()); List<FileBlockInfo> ret = new ArrayList<>(); for (BlockInfo blockInfo : blockInfoList) { ret.add(generateFileBlockInfo(inodePath, blockInfo)); } return ret; } /** * Generates a {@link FileBlockInfo} object from internal metadata. This adds file information to * the block, such as the file offset, and additional UFS locations for the block. * * @param inodePath the file the block is a part of * @param blockInfo the {@link BlockInfo} to generate the {@link FileBlockInfo} from * @return a new {@link FileBlockInfo} for the block */ private FileBlockInfo generateFileBlockInfo(LockedInodePath inodePath, BlockInfo blockInfo) throws FileDoesNotExistException { InodeFile file = inodePath.getInodeFile(); FileBlockInfo fileBlockInfo = new FileBlockInfo(); fileBlockInfo.setBlockInfo(blockInfo); fileBlockInfo.setUfsLocations(new ArrayList<>()); // The sequence number part of the block id is the block index. 
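// For example, with a 128 MB block size, the block whose id carries sequence number 2 starts
// at offset 256 MB and (if it is a full block) covers bytes [256 MB, 384 MB) of the file.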
long offset = file.getBlockSizeBytes() * BlockId.getSequenceNumber(blockInfo.getBlockId()); fileBlockInfo.setOffset(offset); if (fileBlockInfo.getBlockInfo().getLocations().isEmpty() && file.isPersisted()) { // No alluxio locations, but there is a checkpoint in the under storage system. Add the // locations from the under storage system. long blockId = fileBlockInfo.getBlockInfo().getBlockId(); List<String> locations = mUfsBlockLocationCache.get(blockId, inodePath.getUri(), fileBlockInfo.getOffset()); if (locations != null) { fileBlockInfo.setUfsLocations(locations); } } return fileBlockInfo; } /** * Returns whether the inodeFile is fully in Alluxio or not. The file is fully in Alluxio only if * all the blocks of the file are in Alluxio, in other words, the in-Alluxio percentage is 100. * * @return true if the file is fully in Alluxio, false otherwise */ private boolean isFullyInAlluxio(InodeFile inode) throws UnavailableException { return getInAlluxioPercentage(inode) == 100; } /** * Returns whether the inodeFile is fully in memory or not. The file is fully in memory only if * all the blocks of the file are in memory, in other words, the in-memory percentage is 100. * * @return true if the file is fully in memory, false otherwise */ private boolean isFullyInMemory(InodeFile inode) throws UnavailableException { return getInMemoryPercentage(inode) == 100; } @Override public List<AlluxioURI> getInAlluxioFiles() throws UnavailableException { List<AlluxioURI> files = new ArrayList<>(); LockedInodePath rootPath; try { rootPath = mInodeTree.lockFullInodePath(new AlluxioURI(AlluxioURI.SEPARATOR), LockPattern.READ); } catch (FileDoesNotExistException | InvalidPathException e) { // Root should always exist. throw new RuntimeException(e); } try (LockedInodePath inodePath = rootPath) { getInAlluxioFilesInternal(inodePath, files); } return files; } @Override public List<AlluxioURI> getInMemoryFiles() throws UnavailableException { List<AlluxioURI> files = new ArrayList<>(); LockedInodePath rootPath; try { rootPath = mInodeTree.lockFullInodePath(new AlluxioURI(AlluxioURI.SEPARATOR), LockPattern.READ); } catch (FileDoesNotExistException | InvalidPathException e) { // Root should always exist. throw new RuntimeException(e); } try (LockedInodePath inodePath = rootPath) { getInMemoryFilesInternal(inodePath, files); } return files; } /** * Adds in-Alluxio files to the array list passed in. This method assumes the inode passed in is * already read locked. * * @param inodePath the inode path to search * @param files the list to accumulate the results in */ private void getInAlluxioFilesInternal(LockedInodePath inodePath, List<AlluxioURI> files) throws UnavailableException { Inode inode = inodePath.getInodeOrNull(); if (inode == null) { return; } if (inode.isFile()) { if (isFullyInAlluxio(inode.asFile())) { files.add(inodePath.getUri()); } } else { // This inode is a directory. for (Inode child : mInodeStore.getChildren(inode.asDirectory())) { try (LockedInodePath childPath = inodePath.lockChild(child, LockPattern.READ)) { getInAlluxioFilesInternal(childPath, files); } catch (InvalidPathException e) { // Inode is no longer a child, continue. continue; } } } } /** * Adds in-memory files to the array list passed in. This method assumes the inode passed in is * already read locked.
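 * <p>
 * Note: the recursive traversal below locks each child with {@link LockPattern#READ} and
 * simply skips a child that is concurrently moved or removed (see the
 * {@code InvalidPathException} handler).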
* * @param inodePath the inode path to search * @param files the list to accumulate the results in */ private void getInMemoryFilesInternal(LockedInodePath inodePath, List<AlluxioURI> files) throws UnavailableException { Inode inode = inodePath.getInodeOrNull(); if (inode == null) { return; } if (inode.isFile()) { if (isFullyInMemory(inode.asFile())) { files.add(inodePath.getUri()); } } else { // This inode is a directory. for (Inode child : mInodeStore.getChildren(inode.asDirectory())) { try (LockedInodePath childPath = inodePath.lockChild(child, LockPattern.READ)) { getInMemoryFilesInternal(childPath, files); } catch (InvalidPathException e) { // Inode is no longer a child, continue. continue; } } } } /** * Gets the in-memory percentage of an Inode. For a file that has all blocks in memory, it * returns 100; for a file that has no block in memory, it returns 0. Returns 0 for a directory. * * @param inode the inode * @return the in-memory percentage */ private int getInMemoryPercentage(Inode inode) throws UnavailableException { if (!inode.isFile()) { return 0; } InodeFile inodeFile = inode.asFile(); long length = inodeFile.getLength(); if (length == 0) { return 100; } long inMemoryLength = 0; for (BlockInfo info : mBlockMaster.getBlockInfoList(inodeFile.getBlockIds())) { if (isInTopStorageTier(info)) { inMemoryLength += info.getLength(); } } return (int) (inMemoryLength * 100 / length); } /** * Gets the in-Alluxio percentage of an Inode. For a file that has all blocks in Alluxio, it * returns 100; for a file that has no block in Alluxio, it returns 0. Returns 0 for a directory. * * @param inode the inode * @return the in-Alluxio percentage */ private int getInAlluxioPercentage(Inode inode) throws UnavailableException { if (!inode.isFile()) { return 0; } InodeFile inodeFile = inode.asFile(); long length = inodeFile.getLength(); if (length == 0) { return 100; } long inAlluxioLength = 0; for (BlockInfo info : mBlockMaster.getBlockInfoList(inodeFile.getBlockIds())) { if (!info.getLocations().isEmpty()) { inAlluxioLength += info.getLength(); } } return (int) (inAlluxioLength * 100 / length); } /** * @return true if the given block is in the top storage level in some worker, false otherwise */ private boolean isInTopStorageTier(BlockInfo blockInfo) { for (BlockLocation location : blockInfo.getLocations()) { if (mBlockMaster.getGlobalStorageTierAssoc().getOrdinal(location.getTierAlias()) == 0) { return true; } } return false; } @Override public long createDirectory(AlluxioURI path, CreateDirectoryContext context) throws InvalidPathException, FileAlreadyExistsException, IOException, AccessControlException, FileDoesNotExistException { Metrics.CREATE_DIRECTORIES_OPS.inc(); try (RpcContext rpcContext = createRpcContext(context); FileSystemMasterAuditContext auditContext = createAuditContext("mkdir", path, null, null)) { syncMetadata(rpcContext, path, context.getOptions().getCommonOptions(), DescendantType.ONE, auditContext, inodePath -> context.getOptions().getRecursive() ?
inodePath.getLastExistingInode() : inodePath.getParentInodeOrNull(), (inodePath, permChecker) -> permChecker.checkParentPermission(Mode.Bits.WRITE, inodePath) ); LockingScheme lockingScheme = createLockingScheme(path, context.getOptions().getCommonOptions(), LockPattern.WRITE_EDGE); try (LockedInodePath inodePath = mInodeTree.lockInodePath(lockingScheme)) { auditContext.setSrcInode(inodePath.getParentInodeOrNull()); if (context.getOptions().getRecursive()) { auditContext.setSrcInode(inodePath.getLastExistingInode()); } try { mPermissionChecker.checkParentPermission(Mode.Bits.WRITE, inodePath); } catch (AccessControlException e) { auditContext.setAllowed(false); throw e; } mMountTable.checkUnderWritableMountPoint(path); if (context.isPersisted()) { checkUfsMode(path, OperationType.WRITE); } createDirectoryInternal(rpcContext, inodePath, context); auditContext.setSrcInode(inodePath.getInode()).setSucceeded(true); return inodePath.getInode().getId(); } } } /** * Implementation of directory creation for a given path. * * @param rpcContext the rpc context * @param inodePath the path of the directory * @param context the method context * @return a list of created inodes */ List<Inode> createDirectoryInternal(RpcContext rpcContext, LockedInodePath inodePath, CreateDirectoryContext context) throws InvalidPathException, FileAlreadyExistsException, IOException, FileDoesNotExistException { Preconditions.checkState(inodePath.getLockPattern() == LockPattern.WRITE_EDGE); try { List<Inode> createResult = mInodeTree.createPath(rpcContext, inodePath, context); InodeDirectory inodeDirectory = inodePath.getInode().asDirectory(); String ufsFingerprint = Constants.INVALID_UFS_FINGERPRINT; if (inodeDirectory.isPersisted()) { UfsStatus ufsStatus = context.getUfsStatus(); // Retrieve the UFS fingerprint for this directory. MountTable.Resolution resolution = mMountTable.resolve(inodePath.getUri()); AlluxioURI resolvedUri = resolution.getUri(); try (CloseableResource<UnderFileSystem> ufsResource = resolution.acquireUfsResource()) { UnderFileSystem ufs = ufsResource.get(); if (ufsStatus == null) { ufsFingerprint = ufs.getFingerprint(resolvedUri.toString()); } else { ufsFingerprint = Fingerprint.create(ufs.getUnderFSType(), ufsStatus).serialize(); } } } mInodeTree.updateInode(rpcContext, UpdateInodeEntry.newBuilder() .setId(inodeDirectory.getId()) .setUfsFingerprint(ufsFingerprint) .build()); if (context.isPersisted()) { // The path exists in UFS, so it is no longer absent. mUfsAbsentPathCache.processExisting(inodePath.getUri()); } Metrics.DIRECTORIES_CREATED.inc(); return createResult; } catch (BlockInfoException e) { // Since we are creating a directory, the block size is ignored, so no such exception should // happen.
throw new RuntimeException(e); } } @Override public void rename(AlluxioURI srcPath, AlluxioURI dstPath, RenameContext context) throws FileAlreadyExistsException, FileDoesNotExistException, InvalidPathException, IOException, AccessControlException { Metrics.RENAME_PATH_OPS.inc(); try (RpcContext rpcContext = createRpcContext(context); FileSystemMasterAuditContext auditContext = createAuditContext("rename", srcPath, dstPath, null)) { syncMetadata(rpcContext, srcPath, context.getOptions().getCommonOptions(), DescendantType.ONE, auditContext, LockedInodePath::getParentInodeOrNull, (inodePath, permChecker) -> permChecker.checkParentPermission(Mode.Bits.WRITE, inodePath) ); syncMetadata(rpcContext, dstPath, context.getOptions().getCommonOptions(), DescendantType.ONE, auditContext, LockedInodePath::getParentInodeOrNull, (inodePath, permChecker) -> permChecker.checkParentPermission(Mode.Bits.WRITE, inodePath) ); LockingScheme srcLockingScheme = createLockingScheme(srcPath, context.getOptions().getCommonOptions(), LockPattern.WRITE_EDGE); LockingScheme dstLockingScheme = createLockingScheme(dstPath, context.getOptions().getCommonOptions(), LockPattern.WRITE_EDGE); try (InodePathPair inodePathPair = mInodeTree .lockInodePathPair(srcLockingScheme.getPath(), srcLockingScheme.getPattern(), dstLockingScheme.getPath(), dstLockingScheme.getPattern())) { LockedInodePath srcInodePath = inodePathPair.getFirst(); LockedInodePath dstInodePath = inodePathPair.getSecond(); auditContext.setSrcInode(srcInodePath.getParentInodeOrNull()); try { mPermissionChecker.checkParentPermission(Mode.Bits.WRITE, srcInodePath); mPermissionChecker.checkParentPermission(Mode.Bits.WRITE, dstInodePath); } catch (AccessControlException e) { auditContext.setAllowed(false); throw e; } mMountTable.checkUnderWritableMountPoint(srcPath); mMountTable.checkUnderWritableMountPoint(dstPath); renameInternal(rpcContext, srcInodePath, dstInodePath, context); auditContext.setSrcInode(srcInodePath.getInode()).setSucceeded(true); LOG.debug("Renamed {} to {}", srcPath, dstPath); } } } private boolean shouldPersistPath(String path) { for (String pattern : mPersistBlacklist) { if (path.contains(pattern)) { LOG.debug("Not persisting path {} because it is in {}: {}", path, PropertyKey.Name.MASTER_PERSISTENCE_BLACKLIST, mPersistBlacklist); return false; } } return true; } /** * Renames a file to a destination. * * @param rpcContext the rpc context * @param srcInodePath the source path to rename * @param dstInodePath the destination path to rename the file to * @param context method options */ private void renameInternal(RpcContext rpcContext, LockedInodePath srcInodePath, LockedInodePath dstInodePath, RenameContext context) throws InvalidPathException, FileDoesNotExistException, FileAlreadyExistsException, IOException, AccessControlException { if (!srcInodePath.fullPathExists()) { throw new FileDoesNotExistException( ExceptionMessage.PATH_DOES_NOT_EXIST.getMessage(srcInodePath.getUri())); } Inode srcInode = srcInodePath.getInode(); // Renaming path to itself is a no-op. if (srcInodePath.getUri().equals(dstInodePath.getUri())) { return; } // Renaming the root is not allowed. if (srcInodePath.getUri().isRoot()) { throw new InvalidPathException(ExceptionMessage.ROOT_CANNOT_BE_RENAMED.getMessage()); } if (dstInodePath.getUri().isRoot()) { throw new InvalidPathException(ExceptionMessage.RENAME_CANNOT_BE_TO_ROOT.getMessage()); } // Renaming across mount points is not allowed. 
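// For example (hypothetical paths), renaming /mnt/s3/a onto /mnt/hdfs/b would have to move
// data between two different UFSes, which cannot be expressed as a single atomic rename.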
String srcMount = mMountTable.getMountPoint(srcInodePath.getUri()); String dstMount = mMountTable.getMountPoint(dstInodePath.getUri()); if ((srcMount == null && dstMount != null) || (srcMount != null && dstMount == null) || ( srcMount != null && dstMount != null && !srcMount.equals(dstMount))) { throw new InvalidPathException(ExceptionMessage.RENAME_CANNOT_BE_ACROSS_MOUNTS .getMessage(srcInodePath.getUri(), dstInodePath.getUri())); } // Renaming onto a mount point is not allowed. if (mMountTable.isMountPoint(dstInodePath.getUri())) { throw new InvalidPathException( ExceptionMessage.RENAME_CANNOT_BE_ONTO_MOUNT_POINT.getMessage(dstInodePath.getUri())); } // Renaming a path to one of its subpaths is not allowed. Check for that by making sure the // source path is not a prefix of the destination path. if (PathUtils.hasPrefix(dstInodePath.getUri().getPath(), srcInodePath.getUri().getPath())) { throw new InvalidPathException(ExceptionMessage.RENAME_CANNOT_BE_TO_SUBDIRECTORY .getMessage(srcInodePath.getUri(), dstInodePath.getUri())); } // Get the inodes of the src and dst parents. Inode srcParentInode = srcInodePath.getParentInodeDirectory(); if (!srcParentInode.isDirectory()) { throw new InvalidPathException( ExceptionMessage.PATH_MUST_HAVE_VALID_PARENT.getMessage(srcInodePath.getUri())); } Inode dstParentInode = dstInodePath.getParentInodeDirectory(); if (!dstParentInode.isDirectory()) { throw new InvalidPathException( ExceptionMessage.PATH_MUST_HAVE_VALID_PARENT.getMessage(dstInodePath.getUri())); } // Make sure the destination path does not exist if (dstInodePath.fullPathExists()) { throw new FileAlreadyExistsException(String .format("Cannot rename because destination already exists. src: %s dst: %s", srcInodePath.getUri(), dstInodePath.getUri())); } // Now we remove srcInode from its parent and insert it into dstPath's parent renameInternal(rpcContext, srcInodePath, dstInodePath, false, context); // Check options and determine if we should schedule async persist. This is helpful for compute // frameworks that use rename as a commit operation. if (context.getPersist() && srcInode.isFile() && !srcInode.isPersisted() && shouldPersistPath(dstInodePath.toString())) { LOG.debug("Schedule Async Persist on rename for File {}", srcInodePath); mInodeTree.updateInode(rpcContext, UpdateInodeEntry.newBuilder() .setId(srcInode.getId()) .setPersistenceState(PersistenceState.TO_BE_PERSISTED.name()) .build()); long shouldPersistTime = srcInode.asFile().getShouldPersistTime(); long persistenceWaitTime = shouldPersistTime == Constants.NO_AUTO_PERSIST ?
0 : getPersistenceWaitTime(shouldPersistTime); mPersistRequests.put(srcInode.getId(), new alluxio.time.ExponentialTimer( ServerConfiguration.getMs(PropertyKey.MASTER_PERSISTENCE_INITIAL_INTERVAL_MS), ServerConfiguration.getMs(PropertyKey.MASTER_PERSISTENCE_MAX_INTERVAL_MS), persistenceWaitTime, ServerConfiguration.getMs(PropertyKey.MASTER_PERSISTENCE_MAX_TOTAL_WAIT_TIME_MS))); } // If a directory is being renamed with persist on rename, attempt to persist children if (srcInode.isDirectory() && context.getPersist() && shouldPersistPath(dstInodePath.toString())) { LOG.debug("Schedule Async Persist on rename for Dir: {}", dstInodePath); try (LockedInodePathList descendants = mInodeTree.getDescendants(srcInodePath)) { for (LockedInodePath childPath : descendants) { Inode childInode = childPath.getInode(); // TODO(apc999): Resolve the child path legitimately if (childInode.isFile() && !childInode.isPersisted() && shouldPersistPath( childPath.toString().substring(srcInodePath.toString().length()))) { LOG.debug("Schedule Async Persist on rename for Child File: {}", childPath); mInodeTree.updateInode(rpcContext, UpdateInodeEntry.newBuilder() .setId(childInode.getId()) .setPersistenceState(PersistenceState.TO_BE_PERSISTED.name()) .build()); long shouldPersistTime = childInode.asFile().getShouldPersistTime(); long persistenceWaitTime = shouldPersistTime == Constants.NO_AUTO_PERSIST ? 0 : getPersistenceWaitTime(shouldPersistTime); mPersistRequests.put(childInode.getId(), new alluxio.time.ExponentialTimer( ServerConfiguration.getMs(PropertyKey.MASTER_PERSISTENCE_INITIAL_INTERVAL_MS), ServerConfiguration.getMs(PropertyKey.MASTER_PERSISTENCE_MAX_INTERVAL_MS), persistenceWaitTime, ServerConfiguration.getMs(PropertyKey.MASTER_PERSISTENCE_MAX_TOTAL_WAIT_TIME_MS))); } } } } } /** * Implements renaming. * * @param rpcContext the rpc context * @param srcInodePath the path of the rename source * @param dstInodePath the path to the rename destination * @param replayed whether the operation is a result of replaying the journal * @param context method options */ private void renameInternal(RpcContext rpcContext, LockedInodePath srcInodePath, LockedInodePath dstInodePath, boolean replayed, RenameContext context) throws FileDoesNotExistException, InvalidPathException, IOException, AccessControlException { // Rename logic: // 1. Change the source inode name to the destination name. // 2. Insert the source inode into the destination parent. // 3. Do UFS operations if necessary. // 4. Remove the source inode (reverting the name) from the source parent. // 5. Set the last modification times for both source and destination parent inodes. Inode srcInode = srcInodePath.getInode(); AlluxioURI srcPath = srcInodePath.getUri(); AlluxioURI dstPath = dstInodePath.getUri(); InodeDirectory srcParentInode = srcInodePath.getParentInodeDirectory(); InodeDirectory dstParentInode = dstInodePath.getParentInodeDirectory(); String srcName = srcPath.getName(); String dstName = dstPath.getName(); LOG.debug("Renaming {} to {}", srcPath, dstPath); if (dstInodePath.fullPathExists()) { throw new InvalidPathException("Destination path: " + dstPath + " already exists."); } mInodeTree.rename(rpcContext, RenameEntry.newBuilder() .setId(srcInode.getId()) .setOpTimeMs(context.getOperationTimeMs()) .setNewParentId(dstParentInode.getId()) .setNewName(dstName) .setPath(srcPath.getPath()) .setNewPath(dstPath.getPath()) .build()); // 3. Do UFS operations if necessary. // If the source file is persisted, rename it in the UFS. 
try { if (!replayed && srcInode.isPersisted()) { // Check if ufs is writable checkUfsMode(srcPath, OperationType.WRITE); checkUfsMode(dstPath, OperationType.WRITE); MountTable.Resolution resolution = mMountTable.resolve(srcPath); // Persist ancestor directories from top to the bottom. We cannot use recursive create // parents here because the permission for the ancestors can be different. // inodes from the same mount point as the dst Stack<InodeDirectory> sameMountDirs = new Stack<>(); List<Inode> dstInodeList = dstInodePath.getInodeList(); for (int i = dstInodeList.size() - 1; i >= 0; i--) { // Since dstInodePath is guaranteed not to be a full path, all inodes in the incomplete // path are guaranteed to be a directory. InodeDirectory dir = dstInodeList.get(i).asDirectory(); sameMountDirs.push(dir); if (dir.isMountPoint()) { break; } } while (!sameMountDirs.empty()) { InodeDirectory dir = sameMountDirs.pop(); if (!dir.isPersisted()) { mInodeTree.syncPersistExistingDirectory(rpcContext, dir); } } String ufsSrcPath = resolution.getUri().toString(); try (CloseableResource<UnderFileSystem> ufsResource = resolution.acquireUfsResource()) { UnderFileSystem ufs = ufsResource.get(); String ufsDstUri = mMountTable.resolve(dstPath).getUri().toString(); boolean success; if (srcInode.isFile()) { success = ufs.renameRenamableFile(ufsSrcPath, ufsDstUri); } else { success = ufs.renameRenamableDirectory(ufsSrcPath, ufsDstUri); } if (!success) { throw new IOException( ExceptionMessage.FAILED_UFS_RENAME.getMessage(ufsSrcPath, ufsDstUri)); } } // The destination was persisted in UFS. mUfsAbsentPathCache.processExisting(dstPath); } } catch (Throwable t) { // On failure, revert changes and throw exception. mInodeTree.rename(rpcContext, RenameEntry.newBuilder() .setId(srcInode.getId()) .setOpTimeMs(context.getOperationTimeMs()) .setNewName(srcName) .setNewParentId(srcParentInode.getId()) .setPath(dstPath.getPath()) .setNewPath(srcPath.getPath()) .build()); throw t; } Metrics.PATHS_RENAMED.inc(); } /** * Propagates the persisted status to all parents of the given inode in the same mount partition. * * @param journalContext the journal context * @param inodePath the inode to start the propagation at */ private void propagatePersistedInternal(Supplier<JournalContext> journalContext, LockedInodePath inodePath) throws FileDoesNotExistException { Inode inode = inodePath.getInode(); List<Inode> inodes = inodePath.getInodeList(); // Traverse the inodes from target inode to the root. Collections.reverse(inodes); // Skip the first, to not examine the target inode itself. inodes = inodes.subList(1, inodes.size()); for (Inode ancestor : inodes) { // the path is already locked. AlluxioURI path = mInodeTree.getPath(ancestor); if (mMountTable.isMountPoint(path)) { // Stop propagating the persisted status at mount points. break; } if (ancestor.isPersisted()) { // Stop if a persisted directory is encountered. break; } mInodeTree.updateInode(journalContext, UpdateInodeEntry.newBuilder() .setId(ancestor.getId()) .setPersistenceState(PersistenceState.PERSISTED.name()) .build()); } } @Override public void free(AlluxioURI path, FreeContext context) throws FileDoesNotExistException, InvalidPathException, AccessControlException, UnexpectedAlluxioException, IOException { Metrics.FREE_FILE_OPS.inc(); // No need to syncMetadata before free.
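// Free only evicts cached blocks from Alluxio workers and never touches the UFS, so there is
// no UFS state that a metadata sync would need to reconcile first.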
try (RpcContext rpcContext = createRpcContext(context); LockedInodePath inodePath = mInodeTree.lockFullInodePath(path, LockPattern.WRITE_INODE); FileSystemMasterAuditContext auditContext = createAuditContext("free", path, null, inodePath.getInodeOrNull())) { try { mPermissionChecker.checkPermission(Mode.Bits.READ, inodePath); } catch (AccessControlException e) { auditContext.setAllowed(false); throw e; } freeInternal(rpcContext, inodePath, context); auditContext.setSucceeded(true); } } /** * Implements free operation. * * @param rpcContext the rpc context * @param inodePath inode of the path to free * @param context the context of the free method */ private void freeInternal(RpcContext rpcContext, LockedInodePath inodePath, FreeContext context) throws FileDoesNotExistException, UnexpectedAlluxioException, IOException, InvalidPathException, AccessControlException { Inode inode = inodePath.getInode(); if (inode.isDirectory() && !context.getOptions().getRecursive() && mInodeStore.hasChildren(inode.asDirectory())) { // inode is nonempty, and we don't free a nonempty directory unless recursive is true throw new UnexpectedAlluxioException( ExceptionMessage.CANNOT_FREE_NON_EMPTY_DIR.getMessage(mInodeTree.getPath(inode))); } long opTimeMs = System.currentTimeMillis(); List<Inode> freeInodes = new ArrayList<>(); freeInodes.add(inode); try (LockedInodePathList descendants = mInodeTree.getDescendants(inodePath)) { for (LockedInodePath descendant : Iterables.concat(descendants, Collections.singleton(inodePath))) { Inode freeInode = descendant.getInodeOrNull(); if (freeInode != null && freeInode.isFile()) { if (freeInode.getPersistenceState() != PersistenceState.PERSISTED) { throw new UnexpectedAlluxioException(ExceptionMessage.CANNOT_FREE_NON_PERSISTED_FILE .getMessage(mInodeTree.getPath(freeInode))); } if (freeInode.isPinned()) { if (!context.getOptions().getForced()) { throw new UnexpectedAlluxioException(ExceptionMessage.CANNOT_FREE_PINNED_FILE .getMessage(mInodeTree.getPath(freeInode))); } SetAttributeContext setAttributeContext = SetAttributeContext .mergeFrom(SetAttributePOptions.newBuilder().setRecursive(false).setPinned(false)); setAttributeSingleFile(rpcContext, descendant, true, opTimeMs, setAttributeContext); } // Remove corresponding blocks from workers. mBlockMaster.removeBlocks(freeInode.asFile().getBlockIds(), false /* delete */); } } } Metrics.FILES_FREED.inc(freeInodes.size()); } @Override public AlluxioURI getPath(long fileId) throws FileDoesNotExistException { try (LockedInodePath inodePath = mInodeTree.lockFullInodePath(fileId, LockPattern.READ)) { // the path is already locked.
return mInodeTree.getPath(inodePath.getInode()); } } @Override public Set<Long> getPinIdList() { // Return both the explicitly pinned inodes and the to-be-persisted inodes, which should not // be evicted return Sets.union(mInodeTree.getPinIdSet(), mInodeTree.getToBePersistedIds()); } @Override public String getUfsAddress() { return ServerConfiguration.get(PropertyKey.MASTER_MOUNT_TABLE_ROOT_UFS); } @Override public UfsInfo getUfsInfo(long mountId) { MountInfo info = mMountTable.getMountInfo(mountId); if (info == null) { return new UfsInfo(); } MountPOptions options = info.getOptions(); return new UfsInfo().setUri(info.getUfsUri()) .setMountOptions(MountContext .mergeFrom(MountPOptions.newBuilder().putAllProperties(options.getPropertiesMap()) .setReadOnly(options.getReadOnly()).setShared(options.getShared())) .getOptions().build()); } @Override public List<String> getWhiteList() { return mWhitelist.getList(); } @Override public List<Long> getLostFiles() { Set<Long> lostFiles = new HashSet<>(); for (long blockId : mBlockMaster.getLostBlocks()) { // the file id is the container id of the block id long containerId = BlockId.getContainerId(blockId); long fileId = IdUtils.createFileId(containerId); lostFiles.add(fileId); } return new ArrayList<>(lostFiles); } /** * Loads metadata for the path if it does not exist, or if loading of its direct children is * requested. * * See {@link #shouldLoadMetadataIfNotExists(LockedInodePath, LoadMetadataContext)}. * * @param rpcContext the rpc context * @param path the path to load metadata for * @param context the {@link LoadMetadataContext} * @param isGetFileInfo whether this is loading for a {@link #getFileInfo} call */ private void loadMetadataIfNotExist(RpcContext rpcContext, AlluxioURI path, LoadMetadataContext context, boolean isGetFileInfo) throws InvalidPathException, AccessControlException { DescendantType syncDescendantType = GrpcUtils.fromProto(context.getOptions().getLoadDescendantType()); FileSystemMasterCommonPOptions commonOptions = context.getOptions().getCommonOptions(); // load metadata only and force sync InodeSyncStream sync = new InodeSyncStream(new LockingScheme(path, LockPattern.READ, false), this, rpcContext, syncDescendantType, commonOptions, isGetFileInfo, true, true); if (!sync.sync()) { LOG.debug("Failed to load metadata for path from UFS: {}", path); } } boolean shouldLoadMetadataIfNotExists(LockedInodePath inodePath, LoadMetadataContext context) { boolean inodeExists = inodePath.fullPathExists(); boolean loadDirectChildren = false; if (inodeExists) { try { Inode inode = inodePath.getInode(); loadDirectChildren = inode.isDirectory() && (context.getOptions().getLoadDescendantType() != LoadDescendantPType.NONE); } catch (FileDoesNotExistException e) { // This should never happen.
throw new RuntimeException(e); } } return !inodeExists || loadDirectChildren; } private void prepareForMount(AlluxioURI ufsPath, long mountId, MountContext context) throws IOException { MountPOptions.Builder mountOption = context.getOptions(); try (CloseableResource<UnderFileSystem> ufsResource = mUfsManager.get(mountId).acquireUfsResource()) { UnderFileSystem ufs = ufsResource.get(); // Check that the ufsPath exists and is a directory if (!ufs.isDirectory(ufsPath.toString())) { throw new IOException( ExceptionMessage.UFS_PATH_DOES_NOT_EXIST.getMessage(ufsPath.toString())); } if (UnderFileSystemUtils.isWeb(ufs)) { mountOption.setReadOnly(true); } } } private void updateMountInternal(Supplier<JournalContext> journalContext, LockedInodePath inodePath, AlluxioURI ufsPath, MountInfo mountInfo, MountContext context) throws FileAlreadyExistsException, InvalidPathException, IOException { long newMountId = IdUtils.createMountId(); // lock sync manager to ensure no sync point is added before the mount point is removed try (LockResource r = new LockResource(mSyncManager.getLock())) { List<AlluxioURI> syncPoints = mSyncManager.getFilterList(mountInfo.getMountId()); if (syncPoints != null && !syncPoints.isEmpty()) { throw new InvalidArgumentException("Updating a mount point with ActiveSync enabled is not" + " supported. Please remove all sync'ed paths from the mount point and try again."); } AlluxioURI alluxioPath = inodePath.getUri(); // validate new UFS client before updating the mount table mUfsManager.addMount(newMountId, new AlluxioURI(ufsPath.toString()), UnderFileSystemConfiguration.defaults(ServerConfiguration.global()) .setReadOnly(context.getOptions().getReadOnly()) .setShared(context.getOptions().getShared()) .createMountSpecificConf(context.getOptions().getPropertiesMap())); prepareForMount(ufsPath, newMountId, context); // old ufsClient is removed as part of the mount table update process mMountTable.update(journalContext, alluxioPath, newMountId, context.getOptions().build()); } catch (FileAlreadyExistsException | InvalidPathException | IOException e) { // revert everything mUfsManager.removeMount(newMountId); throw e; } } @Override public void updateMount(AlluxioURI alluxioPath, MountContext context) throws FileAlreadyExistsException, FileDoesNotExistException, InvalidPathException, IOException, AccessControlException { LockingScheme lockingScheme = createLockingScheme(alluxioPath, context.getOptions().getCommonOptions(), LockPattern.WRITE_EDGE); try (RpcContext rpcContext = createRpcContext(context); LockedInodePath inodePath = mInodeTree .lockInodePath(lockingScheme.getPath(), lockingScheme.getPattern()); FileSystemMasterAuditContext auditContext = createAuditContext( "updateMount", alluxioPath, null, inodePath.getParentInodeOrNull())) { try { mPermissionChecker.checkParentPermission(Mode.Bits.WRITE, inodePath); } catch (AccessControlException e) { auditContext.setAllowed(false); throw e; } MountInfo mountInfo = mMountTable.getMountTable().get(alluxioPath.getPath()); if (mountInfo == null) { throw new InvalidPathException("Failed to update mount properties for " + inodePath.getUri() + ". 
Please ensure the path is an existing mount point."); } updateMountInternal(rpcContext, inodePath, mountInfo.getUfsUri(), mountInfo, context); auditContext.setSucceeded(true); } } @Override public void mount(AlluxioURI alluxioPath, AlluxioURI ufsPath, MountContext context) throws FileAlreadyExistsException, FileDoesNotExistException, InvalidPathException, IOException, AccessControlException { Metrics.MOUNT_OPS.inc(); try (RpcContext rpcContext = createRpcContext(context); FileSystemMasterAuditContext auditContext = createAuditContext("mount", alluxioPath, null, null)) { ufsPath = new AlluxioURI(PathUtils.normalizePath(ufsPath.toString(), AlluxioURI.SEPARATOR)); syncMetadata(rpcContext, alluxioPath, context.getOptions().getCommonOptions(), DescendantType.ONE, auditContext, LockedInodePath::getParentInodeOrNull, (inodePath, permChecker) -> permChecker.checkParentPermission(Mode.Bits.WRITE, inodePath) ); LockingScheme lockingScheme = createLockingScheme(alluxioPath, context.getOptions().getCommonOptions(), LockPattern.WRITE_EDGE); try (LockedInodePath inodePath = mInodeTree.lockInodePath(lockingScheme)) { auditContext.setSrcInode(inodePath.getParentInodeOrNull()); try { mPermissionChecker.checkParentPermission(Mode.Bits.WRITE, inodePath); } catch (AccessControlException e) { auditContext.setAllowed(false); throw e; } mMountTable.checkUnderWritableMountPoint(alluxioPath); mountInternal(rpcContext, inodePath, ufsPath, context); auditContext.setSucceeded(true); Metrics.PATHS_MOUNTED.inc(); } } } /** * Mounts a UFS path onto an Alluxio path. * * @param rpcContext the rpc context * @param inodePath the Alluxio path to mount to * @param ufsPath the UFS path to mount * @param context the mount context */ private void mountInternal(RpcContext rpcContext, LockedInodePath inodePath, AlluxioURI ufsPath, MountContext context) throws InvalidPathException, FileAlreadyExistsException, FileDoesNotExistException, IOException, AccessControlException { // Check that the Alluxio Path does not exist if (inodePath.fullPathExists()) { // TODO(calvin): Add a test to validate this (ALLUXIO-1831) throw new InvalidPathException( ExceptionMessage.MOUNT_POINT_ALREADY_EXISTS.getMessage(inodePath.getUri())); } long mountId = IdUtils.createMountId(); mountInternal(rpcContext, inodePath, ufsPath, mountId, context); boolean loadMetadataSucceeded = false; try { // This will create the directory at alluxioPath InodeSyncStream.loadDirectoryMetadata(rpcContext, inodePath, LoadMetadataContext.mergeFrom( LoadMetadataPOptions.newBuilder().setCreateAncestors(false)), mMountTable, this); loadMetadataSucceeded = true; } finally { if (!loadMetadataSucceeded) { mMountTable.delete(rpcContext, inodePath.getUri(), true); } } } /** * Updates the mount table with the specified mount point. The mount options may be updated during * this method. 
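 * <p>
 * For example, {@code prepareForMount} marks the mount as read-only when the UFS is a
 * web-based store, so the options in {@code context} may differ from what the caller
 * originally passed in.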
* * @param journalContext the journal context * @param inodePath the Alluxio mount point * @param ufsPath the UFS endpoint to mount * @param mountId the mount id * @param context the mount context (may be updated) */ private void mountInternal(Supplier<JournalContext> journalContext, LockedInodePath inodePath, AlluxioURI ufsPath, long mountId, MountContext context) throws FileAlreadyExistsException, InvalidPathException, IOException { AlluxioURI alluxioPath = inodePath.getUri(); // Adding the mount point will not create the UFS instance and thus not connect to UFS mUfsManager.addMount(mountId, new AlluxioURI(ufsPath.toString()), UnderFileSystemConfiguration.defaults(ServerConfiguration.global()) .setReadOnly(context.getOptions().getReadOnly()) .setShared(context.getOptions().getShared()) .createMountSpecificConf(context.getOptions().getPropertiesMap())); try { prepareForMount(ufsPath, mountId, context); // Check that the alluxioPath we're creating doesn't shadow a path in the parent UFS MountTable.Resolution resolution = mMountTable.resolve(alluxioPath); try (CloseableResource<UnderFileSystem> ufsResource = resolution.acquireUfsResource()) { String ufsResolvedPath = resolution.getUri().getPath(); if (ufsResource.get().exists(ufsResolvedPath)) { throw new IOException( ExceptionMessage.MOUNT_PATH_SHADOWS_PARENT_UFS.getMessage(alluxioPath, ufsResolvedPath)); } } // Add the mount point. This will only succeed if we are not mounting a prefix of an existing // mount. mMountTable.add(journalContext, alluxioPath, ufsPath, mountId, context.getOptions().build()); } catch (Exception e) { mUfsManager.removeMount(mountId); throw e; } } @Override public void unmount(AlluxioURI alluxioPath) throws FileDoesNotExistException, InvalidPathException, IOException, AccessControlException { Metrics.UNMOUNT_OPS.inc(); // Unmount should lock the parent to remove the child inode. try (RpcContext rpcContext = createRpcContext(); LockedInodePath inodePath = mInodeTree .lockInodePath(alluxioPath, LockPattern.WRITE_EDGE); FileSystemMasterAuditContext auditContext = createAuditContext("unmount", alluxioPath, null, inodePath.getInodeOrNull())) { try { mPermissionChecker.checkParentPermission(Mode.Bits.WRITE, inodePath); } catch (AccessControlException e) { auditContext.setAllowed(false); throw e; } unmountInternal(rpcContext, inodePath); auditContext.setSucceeded(true); Metrics.PATHS_UNMOUNTED.inc(); } } /** * Unmounts a UFS path previously mounted onto an Alluxio path. * * This method does not delete blocks; blocks can only be deleted after the inode deletion * journal entry has been written. We cannot delete blocks earlier because the inode deletion * may fail, leaving us with an inode containing deleted blocks. * * @param rpcContext the rpc context * @param inodePath the Alluxio path to unmount, must be a mount point */ private void unmountInternal(RpcContext rpcContext, LockedInodePath inodePath) throws InvalidPathException, FileDoesNotExistException, IOException { if (!inodePath.fullPathExists()) { throw new FileDoesNotExistException( "Failed to unmount: Path " + inodePath.getUri() + " does not exist"); } MountInfo mountInfo = mMountTable.getMountTable().get(inodePath.getUri().getPath()); if (mountInfo == null) { throw new InvalidPathException("Failed to unmount " + inodePath.getUri() + ". 
Please ensure" + " the path is an existing mount point."); } mSyncManager.stopSyncForMount(mountInfo.getMountId()); if (!mMountTable.delete(rpcContext, inodePath.getUri(), true)) { throw new InvalidPathException("Failed to unmount " + inodePath.getUri() + ". Please ensure" + " the path is an existing mount point and not root."); } try { // Use the internal delete API, setting {@code alluxioOnly} to true to prevent the delete // operations from being persisted in the UFS. deleteInternal(rpcContext, inodePath, DeleteContext .mergeFrom(DeletePOptions.newBuilder().setRecursive(true).setAlluxioOnly(true))); } catch (DirectoryNotEmptyException e) { throw new RuntimeException(String.format( "We should never see this exception because %s should never be thrown when recursive " + "is true.", e.getClass())); } } @Override public void setAcl(AlluxioURI path, SetAclAction action, List<AclEntry> entries, SetAclContext context) throws FileDoesNotExistException, AccessControlException, InvalidPathException, IOException { Metrics.SET_ACL_OPS.inc(); try (RpcContext rpcContext = createRpcContext(context); FileSystemMasterAuditContext auditContext = createAuditContext("setAcl", path, null, null)) { syncMetadata(rpcContext, path, context.getOptions().getCommonOptions(), context.getOptions().getRecursive() ? DescendantType.ALL : DescendantType.NONE, auditContext, LockedInodePath::getInodeOrNull, (inodePath, permChecker) -> permChecker.checkSetAttributePermission(inodePath, false, true, false) ); LockingScheme lockingScheme = createLockingScheme(path, context.getOptions().getCommonOptions(), LockPattern.WRITE_INODE); try (LockedInodePath inodePath = mInodeTree.lockInodePath(lockingScheme)) { mPermissionChecker.checkSetAttributePermission(inodePath, false, true, false); if (context.getOptions().getRecursive()) { try (LockedInodePathList descendants = mInodeTree.getDescendants(inodePath)) { for (LockedInodePath child : descendants) { mPermissionChecker.checkSetAttributePermission(child, false, true, false); } } catch (AccessControlException e) { auditContext.setAllowed(false); throw e; } } if (!inodePath.fullPathExists()) { throw new FileDoesNotExistException(ExceptionMessage .PATH_DOES_NOT_EXIST.getMessage(path)); } setAclInternal(rpcContext, action, inodePath, entries, context); auditContext.setSucceeded(true); } } } private void setAclInternal(RpcContext rpcContext, SetAclAction action, LockedInodePath inodePath, List<AclEntry> entries, SetAclContext context) throws IOException, FileDoesNotExistException { Preconditions.checkState(inodePath.getLockPattern().isWrite()); long opTimeMs = System.currentTimeMillis(); // Check inputs for setAcl switch (action) { case REPLACE: Set<AclEntryType> types = entries.stream().map(AclEntry::getType).collect(Collectors.toSet()); Set<AclEntryType> requiredTypes = Sets.newHashSet(AclEntryType.OWNING_USER, AclEntryType.OWNING_GROUP, AclEntryType.OTHER); requiredTypes.removeAll(types); // make sure the required entries are present if (!requiredTypes.isEmpty()) { throw new IOException(ExceptionMessage.ACL_BASE_REQUIRED.getMessage( String.join(", ", requiredTypes.stream().map(AclEntryType::toString).collect( Collectors.toList())))); } break; case MODIFY: // fall through case REMOVE: if (entries.isEmpty()) { // Nothing to do. 
return; } break; case REMOVE_ALL: break; case REMOVE_DEFAULT: break; default: } setAclRecursive(rpcContext, action, inodePath, entries, false, opTimeMs, context); } private void setUfsAcl(LockedInodePath inodePath) throws InvalidPathException, AccessControlException { Inode inode = inodePath.getInodeOrNull(); checkUfsMode(inodePath.getUri(), OperationType.WRITE); MountTable.Resolution resolution = mMountTable.resolve(inodePath.getUri()); String ufsUri = resolution.getUri().toString(); try (CloseableResource<UnderFileSystem> ufsResource = resolution.acquireUfsResource()) { UnderFileSystem ufs = ufsResource.get(); if (ufs.isObjectStorage()) { LOG.warn("SetAcl is not supported for object storage UFS via Alluxio. " + "UFS: " + ufsUri + ". This has no effect on the underlying object."); } else { try { List<AclEntry> entries = new ArrayList<>(inode.getACL().getEntries()); if (inode.isDirectory()) { entries.addAll(inode.asDirectory().getDefaultACL().getEntries()); } ufs.setAclEntries(ufsUri, entries); } catch (IOException e) { throw new AccessControlException("Could not setAcl for UFS file: " + ufsUri); } } } } private void setAclSingleInode(RpcContext rpcContext, SetAclAction action, LockedInodePath inodePath, List<AclEntry> entries, boolean replay, long opTimeMs) throws IOException, FileDoesNotExistException { Preconditions.checkState(inodePath.getLockPattern().isWrite()); Inode inode = inodePath.getInode(); // Check that we are not removing an extended mask. if (action == SetAclAction.REMOVE) { for (AclEntry entry : entries) { if ((entry.isDefault() && inode.getDefaultACL().hasExtended()) || (!entry.isDefault() && inode.getACL().hasExtended())) { if (entry.getType() == AclEntryType.MASK) { throw new InvalidArgumentException( "Deleting the mask for an extended ACL is not allowed. entry: " + entry); } } } } // Check that we are not setting a default ACL on a file if (inode.isFile()) { for (AclEntry entry : entries) { if (entry.isDefault()) { throw new UnsupportedOperationException("Cannot set default ACL for a file"); } } } mInodeTree.setAcl(rpcContext, SetAclEntry.newBuilder() .setId(inode.getId()) .setOpTimeMs(opTimeMs) .setAction(ProtoUtils.toProto(action)) .addAllEntries(entries.stream().map(ProtoUtils::toProto).collect(Collectors.toList())) .build()); try { if (!replay && inode.isPersisted()) { setUfsAcl(inodePath); } } catch (InvalidPathException | AccessControlException e) { LOG.warn("Setting ufs ACL failed for path: {}", inodePath.getUri(), e); // TODO(david): revert the acl and default acl to the initial state if writing to ufs failed.
} } private void setAclRecursive(RpcContext rpcContext, SetAclAction action, LockedInodePath inodePath, List<AclEntry> entries, boolean replay, long opTimeMs, SetAclContext context) throws IOException, FileDoesNotExistException { Preconditions.checkState(inodePath.getLockPattern().isWrite()); setAclSingleInode(rpcContext, action, inodePath, entries, replay, opTimeMs); if (context.getOptions().getRecursive()) { try (LockedInodePathList descendants = mInodeTree.getDescendants(inodePath)) { for (LockedInodePath childPath : descendants) { rpcContext.throwIfCancelled(); setAclSingleInode(rpcContext, action, childPath, entries, replay, opTimeMs); } } } } @Override public void setAttribute(AlluxioURI path, SetAttributeContext context) throws FileDoesNotExistException, AccessControlException, InvalidPathException, IOException { SetAttributePOptions.Builder options = context.getOptions(); Metrics.SET_ATTRIBUTE_OPS.inc(); // for chown boolean rootRequired = options.hasOwner(); // for chgrp, chmod boolean ownerRequired = (options.hasGroup()) || (options.hasMode()); // for other attributes boolean writeRequired = !rootRequired && !ownerRequired; if (options.hasOwner() && options.hasGroup()) { try { checkUserBelongsToGroup(options.getOwner(), options.getGroup()); } catch (IOException e) { throw new IOException(String.format("Could not update owner:group for %s to %s:%s. %s", path.toString(), options.getOwner(), options.getGroup(), e.toString()), e); } } String commandName; boolean checkWritableMountPoint = false; if (options.hasOwner()) { commandName = "chown"; checkWritableMountPoint = true; } else if (options.hasGroup()) { commandName = "chgrp"; checkWritableMountPoint = true; } else if (options.hasMode()) { commandName = "chmod"; checkWritableMountPoint = true; } else { commandName = "setAttribute"; } try (RpcContext rpcContext = createRpcContext(context); FileSystemMasterAuditContext auditContext = createAuditContext(commandName, path, null, null)) { // Force recursive sync metadata if it is a pinning and unpinning operation boolean recursiveSync = options.hasPinned() || options.getRecursive(); syncMetadata(rpcContext, path, context.getOptions().getCommonOptions(), recursiveSync ? DescendantType.ALL : DescendantType.ONE, auditContext, LockedInodePath::getInodeOrNull, (inodePath, permChecker) -> permChecker.checkSetAttributePermission( inodePath, rootRequired, ownerRequired, writeRequired) ); LockingScheme lockingScheme = createLockingScheme(path, options.getCommonOptions(), LockPattern.WRITE_INODE); try (LockedInodePath inodePath = mInodeTree.lockInodePath(lockingScheme)) { auditContext.setSrcInode(inodePath.getInodeOrNull()); if (checkWritableMountPoint) { mMountTable.checkUnderWritableMountPoint(path); } if (!inodePath.fullPathExists()) { throw new FileDoesNotExistException(ExceptionMessage .PATH_DOES_NOT_EXIST.getMessage(path)); } try { mPermissionChecker .checkSetAttributePermission(inodePath, rootRequired, ownerRequired, writeRequired); if (context.getOptions().getRecursive()) { try (LockedInodePathList descendants = mInodeTree.getDescendants(inodePath)) { for (LockedInodePath childPath : descendants) { mPermissionChecker .checkSetAttributePermission(childPath, rootRequired, ownerRequired, writeRequired); } } } } catch (AccessControlException e) { auditContext.setAllowed(false); throw e; } setAttributeInternal(rpcContext, inodePath, context); auditContext.setSucceeded(true); } } } /** * Checks whether the owner belongs to the group. 
* * @param owner the owner to check * @param group the group to check * @throws FailedPreconditionException if owner does not belong to group */ private void checkUserBelongsToGroup(String owner, String group) throws IOException { List<String> groups = CommonUtils.getGroups(owner, ServerConfiguration.global()); if (groups == null || !groups.contains(group)) { throw new FailedPreconditionException("Owner " + owner + " does not belong to the group " + group); } } /** * Sets the file attribute. * * @param rpcContext the rpc context * @param inodePath the {@link LockedInodePath} to set attribute for * @param context attributes to be set, see {@link SetAttributePOptions} */ private void setAttributeInternal(RpcContext rpcContext, LockedInodePath inodePath, SetAttributeContext context) throws InvalidPathException, FileDoesNotExistException, AccessControlException, IOException { Inode targetInode = inodePath.getInode(); long opTimeMs = System.currentTimeMillis(); if (context.getOptions().getRecursive() && targetInode.isDirectory()) { try (LockedInodePathList descendants = mInodeTree.getDescendants(inodePath)) { for (LockedInodePath childPath : descendants) { rpcContext.throwIfCancelled(); setAttributeSingleFile(rpcContext, childPath, true, opTimeMs, context); } } } setAttributeSingleFile(rpcContext, inodePath, true, opTimeMs, context); } @Override public void scheduleAsyncPersistence(AlluxioURI path, ScheduleAsyncPersistenceContext context) throws AlluxioException, UnavailableException { try (RpcContext rpcContext = createRpcContext(context); LockedInodePath inodePath = mInodeTree.lockFullInodePath(path, LockPattern.WRITE_INODE)) { scheduleAsyncPersistenceInternal(inodePath, context, rpcContext); } } private void scheduleAsyncPersistenceInternal(LockedInodePath inodePath, ScheduleAsyncPersistenceContext context, RpcContext rpcContext) throws InvalidPathException, FileDoesNotExistException { InodeFile inode = inodePath.getInodeFile(); if (!inode.isCompleted()) { throw new InvalidPathException( "Cannot persist an incomplete Alluxio file: " + inodePath.getUri()); } if (shouldPersistPath(inodePath.toString())) { mInodeTree.updateInode(rpcContext, UpdateInodeEntry.newBuilder().setId(inode.getId()) .setPersistenceState(PersistenceState.TO_BE_PERSISTED.name()).build()); mPersistRequests.put(inode.getId(), new alluxio.time.ExponentialTimer( ServerConfiguration.getMs(PropertyKey.MASTER_PERSISTENCE_INITIAL_INTERVAL_MS), ServerConfiguration.getMs(PropertyKey.MASTER_PERSISTENCE_MAX_INTERVAL_MS), context.getPersistenceWaitTime(), ServerConfiguration.getMs(PropertyKey.MASTER_PERSISTENCE_MAX_TOTAL_WAIT_TIME_MS))); } } /** * Actively sync metadata, based on a list of changed files. * * @param path the path to sync * @param changedFiles collection of files that are changed under the path to sync, if this is * null, force sync the entire directory * @param executorService executor to execute the parallel incremental sync */ public void activeSyncMetadata(AlluxioURI path, Collection<AlluxioURI> changedFiles, ExecutorService executorService) throws IOException { if (changedFiles == null) { LOG.info("Start an active full sync of {}", path.toString()); } else { LOG.info("Start an active incremental sync of {} files", changedFiles.size()); } long start = System.currentTimeMillis(); if (changedFiles != null && changedFiles.isEmpty()) { return; } try (RpcContext rpcContext = createRpcContext()) { if (changedFiles == null) { // full sync // Set sync interval to 0 to force a sync. 
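// (An interval of 0 treats any previously cached sync as stale, so the UFS is always consulted.)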
FileSystemMasterCommonPOptions options = FileSystemMasterCommonPOptions.newBuilder().setSyncIntervalMs(0).build(); LockingScheme scheme = createSyncLockingScheme(path, options, false); InodeSyncStream sync = new InodeSyncStream(scheme, this, rpcContext, DescendantType.ALL, options, false, false, false); if (!sync.sync()) { LOG.debug("Active full sync on {} didn't sync any paths.", path); } long end = System.currentTimeMillis(); LOG.info("Ended an active full sync of {} in {}ms", path.toString(), end - start); return; } else { // incremental sync Set<Callable<Void>> callables = new HashSet<>(); for (AlluxioURI changedFile : changedFiles) { callables.add(() -> { // Set sync interval to 0 to force a sync. FileSystemMasterCommonPOptions options = FileSystemMasterCommonPOptions.newBuilder().setSyncIntervalMs(0).build(); LockingScheme scheme = createSyncLockingScheme(changedFile, options, false); InodeSyncStream sync = new InodeSyncStream(scheme, this, rpcContext, DescendantType.ONE, options, false, false, false); if (!sync.sync()) { // Use debug because this can be a noisy log LOG.debug("Incremental sync on {} didn't sync any paths.", path); } return null; }); } executorService.invokeAll(callables); } } catch (InterruptedException e) { LOG.warn("InterruptedException during active sync: {}", e.toString()); Thread.currentThread().interrupt(); return; } catch (InvalidPathException | AccessControlException e) { LogUtils.warnWithException(LOG, "Failed to active sync on path {}", path, e); } if (changedFiles != null) { long end = System.currentTimeMillis(); LOG.info("Ended an active incremental sync of {} files in {}ms", changedFiles.size(), end - start); } } @Override public boolean recordActiveSyncTxid(long txId, long mountId) { MountInfo mountInfo = mMountTable.getMountInfo(mountId); if (mountInfo == null) { return false; } AlluxioURI mountPath = mountInfo.getAlluxioUri(); try (RpcContext rpcContext = createRpcContext(); LockedInodePath inodePath = mInodeTree .lockFullInodePath(mountPath, LockPattern.READ)) { File.ActiveSyncTxIdEntry txIdEntry = File.ActiveSyncTxIdEntry.newBuilder().setTxId(txId).setMountId(mountId).build(); rpcContext.journal(JournalEntry.newBuilder().setActiveSyncTxId(txIdEntry).build()); } catch (UnavailableException | InvalidPathException | FileDoesNotExistException e) { LOG.warn("Exception when recording activesync txid, path {}, exception {}", mountPath, e); return false; } return true; } private boolean syncMetadata(RpcContext rpcContext, AlluxioURI path, FileSystemMasterCommonPOptions options, DescendantType syncDescendantType, @Nullable FileSystemMasterAuditContext auditContext, @Nullable Function<LockedInodePath, Inode> auditContextSrcInodeFunc, @Nullable PermissionCheckFunction permissionCheckOperation) throws AccessControlException, InvalidPathException { return syncMetadata(rpcContext, path, options, syncDescendantType, auditContext, auditContextSrcInodeFunc, permissionCheckOperation, false); } /** * Sync metadata for an Alluxio path with the UFS. * * @param rpcContext the current RPC context * @param path the path to sync * @param options options included with the RPC * @param syncDescendantType how deep the sync should be performed * @param auditContextSrcInodeFunc the src inode for the audit context, if null, no source inode * is set on the audit context * @param permissionCheckOperation a consumer that accepts a locked inode path and a * {@link PermissionChecker}. 
The consumer is expected to call one * of the permission checker's functions with the given inode path. * If null, no permission checking is performed * @param isGetFileInfo true if syncing for a getFileInfo operation * @return true if the sync was performed */ private boolean syncMetadata(RpcContext rpcContext, AlluxioURI path, FileSystemMasterCommonPOptions options, DescendantType syncDescendantType, @Nullable FileSystemMasterAuditContext auditContext, @Nullable Function<LockedInodePath, Inode> auditContextSrcInodeFunc, @Nullable PermissionCheckFunction permissionCheckOperation, boolean isGetFileInfo) throws AccessControlException, InvalidPathException { LockingScheme syncScheme = createSyncLockingScheme(path, options, isGetFileInfo); if (!syncScheme.shouldSync()) { return false; } InodeSyncStream sync = new InodeSyncStream(syncScheme, this, rpcContext, syncDescendantType, options, auditContext, auditContextSrcInodeFunc, permissionCheckOperation, isGetFileInfo, false, false); return sync.sync(); } @FunctionalInterface interface PermissionCheckFunction { /** * Performs this operation on the given arguments. * * @param l the first input argument * @param c the second input argument */ void accept(LockedInodePath l, PermissionChecker c) throws AccessControlException, InvalidPathException; } ReadOnlyInodeStore getInodeStore() { return mInodeStore; } InodeTree getInodeTree() { return mInodeTree; } InodeLockManager getInodeLockManager() { return mInodeLockManager; } MountTable getMountTable() { return mMountTable; } UfsSyncPathCache getSyncPathCache() { return mUfsSyncPathCache; } PermissionChecker getPermissionChecker() { return mPermissionChecker; } @Override public FileSystemCommand workerHeartbeat(long workerId, List<Long> persistedFiles, WorkerHeartbeatContext context) throws IOException { List<String> persistedUfsFingerprints = context.getOptions().getPersistedFileFingerprintsList(); boolean hasPersistedFingerprints = persistedUfsFingerprints.size() == persistedFiles.size(); for (int i = 0; i < persistedFiles.size(); i++) { long fileId = persistedFiles.get(i); String ufsFingerprint = hasPersistedFingerprints ? persistedUfsFingerprints.get(i) : Constants.INVALID_UFS_FINGERPRINT; try { // Permission checking for each file is performed inside setAttribute setAttribute(getPath(fileId), SetAttributeContext .mergeFrom(SetAttributePOptions.newBuilder().setPersisted(true)) .setUfsFingerprint(ufsFingerprint)); } catch (FileDoesNotExistException | AccessControlException | InvalidPathException e) { LOG.error("Failed to set file {} as persisted, because {}", fileId, e); } } // TODO(zac) Clean up master and worker code since this is taken care of by job service now. // Worker should not persist any files. Instead, files are persisted through job service.
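// As a result, the command returned below always carries an empty list of files to persist.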
List<PersistFile> filesToPersist = new ArrayList<>(); FileSystemCommandOptions commandOptions = new FileSystemCommandOptions(); commandOptions.setPersistOptions(new PersistCommandOptions(filesToPersist)); return new FileSystemCommand(CommandType.Persist, commandOptions); } /** * @param inodePath the {@link LockedInodePath} to use * @param updateUfs whether to update the UFS with the attribute change * @param opTimeMs the operation time (in milliseconds) * @param context the method context */ protected void setAttributeSingleFile(RpcContext rpcContext, LockedInodePath inodePath, boolean updateUfs, long opTimeMs, SetAttributeContext context) throws FileDoesNotExistException, InvalidPathException, AccessControlException { Inode inode = inodePath.getInode(); SetAttributePOptions.Builder protoOptions = context.getOptions(); if (protoOptions.hasPinned()) { mInodeTree.setPinned(rpcContext, inodePath, context.getOptions().getPinned(), context.getOptions().getPinnedMediaList(), opTimeMs); } UpdateInodeEntry.Builder entry = UpdateInodeEntry.newBuilder().setId(inode.getId()); if (protoOptions.hasReplicationMax() || protoOptions.hasReplicationMin()) { Integer replicationMax = protoOptions.hasReplicationMax() ? protoOptions.getReplicationMax() : null; Integer replicationMin = protoOptions.hasReplicationMin() ? protoOptions.getReplicationMin() : null; mInodeTree.setReplication(rpcContext, inodePath, replicationMax, replicationMin, opTimeMs); } // protoOptions may not have both fields set if (protoOptions.hasCommonOptions()) { FileSystemMasterCommonPOptions commonOpts = protoOptions.getCommonOptions(); TtlAction action = commonOpts.hasTtlAction() ? commonOpts.getTtlAction() : null; Long ttl = commonOpts.hasTtl() ? commonOpts.getTtl() : null; boolean modified = false; if (ttl != null && inode.getTtl() != ttl) { entry.setTtl(ttl); modified = true; } if (action != null && inode.getTtlAction() != action) { entry.setTtlAction(ProtobufUtils.toProtobuf(action)); modified = true; } if (modified) { entry.setLastModificationTimeMs(opTimeMs); } } if (protoOptions.hasPersisted()) { Preconditions.checkArgument(inode.isFile(), PreconditionMessage.PERSIST_ONLY_FOR_FILE); Preconditions.checkArgument(inode.asFile().isCompleted(), PreconditionMessage.FILE_TO_PERSIST_MUST_BE_COMPLETE); // TODO(manugoyal) figure out valid behavior in the un-persist case Preconditions .checkArgument(protoOptions.getPersisted(), PreconditionMessage.ERR_SET_STATE_UNPERSIST); if (!inode.asFile().isPersisted()) { entry.setPersistenceState(PersistenceState.PERSISTED.name()); entry.setLastModificationTimeMs(context.getOperationTimeMs()); propagatePersistedInternal(rpcContext, inodePath); Metrics.FILES_PERSISTED.inc(); } } boolean ownerGroupChanged = (protoOptions.hasOwner()) || (protoOptions.hasGroup()); boolean modeChanged = protoOptions.hasMode(); // If the file is persisted in UFS, also update corresponding owner/group/permission. 
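// (The propagation below is skipped for incomplete files, whose UFS copy does not exist yet.)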
if ((ownerGroupChanged || modeChanged) && updateUfs && inode.isPersisted()) { if ((inode instanceof InodeFile) && !inode.asFile().isCompleted()) { LOG.debug("Alluxio does not propagate chown/chgrp/chmod to UFS for incomplete files."); } else { checkUfsMode(inodePath.getUri(), OperationType.WRITE); MountTable.Resolution resolution = mMountTable.resolve(inodePath.getUri()); String ufsUri = resolution.getUri().toString(); try (CloseableResource<UnderFileSystem> ufsResource = resolution.acquireUfsResource()) { UnderFileSystem ufs = ufsResource.get(); if (ufs.isObjectStorage()) { LOG.debug("setOwner/setMode is not supported to object storage UFS via Alluxio. " + "UFS: " + ufsUri + ". This has no effect on the underlying object."); } else { String owner = null; String group = null; String mode = null; if (ownerGroupChanged) { try { owner = protoOptions.getOwner() != null ? protoOptions.getOwner() : inode.getOwner(); group = protoOptions.getGroup() != null ? protoOptions.getGroup() : inode.getGroup(); ufs.setOwner(ufsUri, owner, group); } catch (IOException e) { throw new AccessControlException("Could not setOwner for UFS file " + ufsUri + " . Aborting the setAttribute operation in Alluxio.", e); } } if (modeChanged) { try { mode = String.valueOf(protoOptions.getMode()); ufs.setMode(ufsUri, ModeUtils.protoToShort(protoOptions.getMode())); } catch (IOException e) { throw new AccessControlException("Could not setMode for UFS file " + ufsUri + " . Aborting the setAttribute operation in Alluxio.", e); } } // Retrieve the ufs fingerprint after the ufs changes. String existingFingerprint = inode.getUfsFingerprint(); if (!existingFingerprint.equals(Constants.INVALID_UFS_FINGERPRINT)) { // Update existing fingerprint, since contents did not change Fingerprint fp = Fingerprint.parse(existingFingerprint); fp.putTag(Fingerprint.Tag.OWNER, owner); fp.putTag(Fingerprint.Tag.GROUP, group); fp.putTag(Fingerprint.Tag.MODE, mode); context.setUfsFingerprint(fp.serialize()); } else { // Need to retrieve the fingerprint from ufs. context.setUfsFingerprint(ufs.getFingerprint(ufsUri)); } } } } } if (!context.getUfsFingerprint().equals(Constants.INVALID_UFS_FINGERPRINT)) { entry.setUfsFingerprint(context.getUfsFingerprint()); } // Only commit the set permission to inode after the propagation to UFS succeeded. 
if (protoOptions.hasOwner()) { entry.setOwner(protoOptions.getOwner()); } if (protoOptions.hasGroup()) { entry.setGroup(protoOptions.getGroup()); } if (modeChanged) { entry.setMode(ModeUtils.protoToShort(protoOptions.getMode())); } mInodeTree.updateInode(rpcContext, entry.build()); } @Override public List<SyncPointInfo> getSyncPathList() { return mSyncManager.getSyncPathList(); } @Override public void startSync(AlluxioURI syncPoint) throws IOException, InvalidPathException, AccessControlException, ConnectionFailedException { LockingScheme lockingScheme = new LockingScheme(syncPoint, LockPattern.WRITE_EDGE, true); try (RpcContext rpcContext = createRpcContext(); LockedInodePath inodePath = mInodeTree .lockInodePath(lockingScheme.getPath(), lockingScheme.getPattern()); FileSystemMasterAuditContext auditContext = createAuditContext("startSync", syncPoint, null, inodePath.getParentInodeOrNull())) { try { mPermissionChecker.checkParentPermission(Mode.Bits.WRITE, inodePath); } catch (AccessControlException e) { auditContext.setAllowed(false); throw e; } mSyncManager.startSyncAndJournal(rpcContext, syncPoint); auditContext.setSucceeded(true); } } @Override public void stopSync(AlluxioURI syncPoint) throws IOException, InvalidPathException, AccessControlException { try (RpcContext rpcContext = createRpcContext()) { boolean isSuperUser = true; try { mPermissionChecker.checkSuperUser(); } catch (AccessControlException e) { isSuperUser = false; } if (isSuperUser) { // TODO(AM): Remove once we don't require a write lock on the sync point during a full sync // Stop sync w/o acquiring an inode lock to terminate an initial full scan (if running) mSyncManager.stopSyncAndJournal(rpcContext, syncPoint); } LockingScheme lockingScheme = new LockingScheme(syncPoint, LockPattern.READ, false); try (LockedInodePath inodePath = mInodeTree.lockInodePath(lockingScheme.getPath(), lockingScheme.getPattern()); FileSystemMasterAuditContext auditContext = createAuditContext("stopSync", syncPoint, null, inodePath.getParentInodeOrNull())) { try { mPermissionChecker.checkParentPermission(Mode.Bits.WRITE, inodePath); } catch (AccessControlException e) { auditContext.setAllowed(false); throw e; } if (!isSuperUser) { // Stop sync here only if not terminated w/o holding the inode lock mSyncManager.stopSyncAndJournal(rpcContext, syncPoint); } auditContext.setSucceeded(true); } } } @Override public List<WorkerInfo> getWorkerInfoList() throws UnavailableException { return mBlockMaster.getWorkerInfoList(); } /** * @param fileId file ID * @param jobId persist job ID * @param persistenceWaitTime persistence initial wait time * @param uri Alluxio Uri of the file * @param tempUfsPath temp UFS path */ private void addPersistJob(long fileId, long jobId, long persistenceWaitTime, AlluxioURI uri, String tempUfsPath) { alluxio.time.ExponentialTimer timer = mPersistRequests.remove(fileId); if (timer == null) { timer = new alluxio.time.ExponentialTimer( ServerConfiguration.getMs(PropertyKey.MASTER_PERSISTENCE_INITIAL_INTERVAL_MS), ServerConfiguration.getMs(PropertyKey.MASTER_PERSISTENCE_MAX_INTERVAL_MS), persistenceWaitTime, ServerConfiguration.getMs(PropertyKey.MASTER_PERSISTENCE_MAX_TOTAL_WAIT_TIME_MS)); } mPersistJobs.put(fileId, new PersistJob(jobId, fileId, uri, tempUfsPath, timer)); } private long getPersistenceWaitTime(long shouldPersistTime) { long currentTime = System.currentTimeMillis(); if (shouldPersistTime >= currentTime) { return shouldPersistTime - currentTime; } else { return 0; } } /** * Periodically schedules jobs to persist 
files and updates metadata accordingly. */ @NotThreadSafe private final class PersistenceScheduler implements alluxio.heartbeat.HeartbeatExecutor { private static final long MAX_QUIET_PERIOD_SECONDS = 64; /** * Quiet period for job service flow control (in seconds). When job service refuses starting new * jobs, we use exponential backoff to alleviate the job service pressure. */ private long mQuietPeriodSeconds; /** * Creates a new instance of {@link PersistenceScheduler}. */ PersistenceScheduler() { mQuietPeriodSeconds = 0; } @Override public void close() {} // Nothing to clean up /** * Updates the file system metadata to reflect the fact that the persist file request expired. * * @param fileId the file ID */ private void handleExpired(long fileId) throws AlluxioException, UnavailableException { try (JournalContext journalContext = createJournalContext(); LockedInodePath inodePath = mInodeTree .lockFullInodePath(fileId, LockPattern.WRITE_INODE)) { InodeFile inode = inodePath.getInodeFile(); switch (inode.getPersistenceState()) { case LOST: // fall through case NOT_PERSISTED: // fall through case PERSISTED: LOG.warn("File {} (id={}) persistence state is {} and will not be changed.", inodePath.getUri(), fileId, inode.getPersistenceState()); return; case TO_BE_PERSISTED: mInodeTree.updateInode(journalContext, UpdateInodeEntry.newBuilder() .setId(inode.getId()) .setPersistenceState(PersistenceState.NOT_PERSISTED.name()) .build()); mInodeTree.updateInodeFile(journalContext, UpdateInodeFileEntry.newBuilder() .setId(inode.getId()) .setPersistJobId(Constants.PERSISTENCE_INVALID_JOB_ID) .setTempUfsPath(Constants.PERSISTENCE_INVALID_UFS_PATH) .build()); break; default: throw new IllegalStateException( "Unrecognized persistence state: " + inode.getPersistenceState()); } } } /** * Attempts to schedule a persist job and updates the file system metadata accordingly. * * @param fileId the file ID */ private void handleReady(long fileId) throws AlluxioException, IOException { alluxio.time.ExponentialTimer timer = mPersistRequests.get(fileId); // Lookup relevant file information. AlluxioURI uri; String tempUfsPath; try (LockedInodePath inodePath = mInodeTree.lockFullInodePath(fileId, LockPattern.READ)) { InodeFile inode = inodePath.getInodeFile(); uri = inodePath.getUri(); switch (inode.getPersistenceState()) { case LOST: // fall through case NOT_PERSISTED: // fall through case PERSISTED: LOG.warn("File {} (id={}) persistence state is {} and will not be changed.", inodePath.getUri(), fileId, inode.getPersistenceState()); return; case TO_BE_PERSISTED: tempUfsPath = inodePath.getInodeFile().getTempUfsPath(); break; default: throw new IllegalStateException( "Unrecognized persistence state: " + inode.getPersistenceState()); } } MountTable.Resolution resolution = mMountTable.resolve(uri); try (CloseableResource<UnderFileSystem> ufsResource = resolution.acquireUfsResource()) { // If previous persist job failed, clean up the temporary file. cleanup(ufsResource.get(), tempUfsPath); // Generate a temporary path to be used by the persist job. 
// If the persist destination is on object store, let persist job copy files to destination // directly if (ServerConfiguration.getBoolean(PropertyKey.MASTER_UNSAFE_DIRECT_PERSIST_OBJECT_ENABLED) && ufsResource.get().isObjectStorage()) { tempUfsPath = resolution.getUri().toString(); } else { tempUfsPath = PathUtils.temporaryFileName( System.currentTimeMillis(), resolution.getUri().toString()); } } PersistConfig config = new PersistConfig(uri.getPath(), resolution.getMountId(), false, tempUfsPath); // Schedule the persist job. long jobId; JobMasterClient client = mJobMasterClientPool.acquire(); try { jobId = client.run(config); } finally { mJobMasterClientPool.release(client); } mQuietPeriodSeconds /= 2; mPersistJobs.put(fileId, new PersistJob(jobId, fileId, uri, tempUfsPath, timer)); // Update the inode and journal the change. try (JournalContext journalContext = createJournalContext(); LockedInodePath inodePath = mInodeTree .lockFullInodePath(fileId, LockPattern.WRITE_INODE)) { InodeFile inode = inodePath.getInodeFile(); mInodeTree.updateInodeFile(journalContext, UpdateInodeFileEntry.newBuilder() .setId(inode.getId()) .setPersistJobId(jobId) .setTempUfsPath(tempUfsPath) .build()); } } /** * {@inheritDoc} * * The method iterates through the set of files to be persisted (identified by their ID) and * attempts to schedule a file persist job. Each iteration removes the file ID from the set * of files to be persisted unless the execution sets the {@code remove} flag to false. * * @throws InterruptedException if the thread is interrupted */ @Override public void heartbeat() throws InterruptedException { java.util.concurrent.TimeUnit.SECONDS.sleep(mQuietPeriodSeconds); // Process persist requests. for (long fileId : mPersistRequests.keySet()) { // Throw if interrupted. if (Thread.interrupted()) { throw new InterruptedException("PersistenceScheduler interrupted."); } boolean remove = true; alluxio.time.ExponentialTimer timer = mPersistRequests.get(fileId); if (timer == null) { // This could occur if a key is removed from mPersistRequests while we are iterating. continue; } alluxio.time.ExponentialTimer.Result timerResult = timer.tick(); if (timerResult == alluxio.time.ExponentialTimer.Result.NOT_READY) { // operation is not ready to be scheduled continue; } AlluxioURI uri = null; try { try (LockedInodePath inodePath = mInodeTree .lockFullInodePath(fileId, LockPattern.READ)) { uri = inodePath.getUri(); } try { checkUfsMode(uri, OperationType.WRITE); } catch (Exception e) { LOG.warn("Unable to schedule persist request for path {}: {}", uri, e.getMessage()); // Retry when ufs mode permits operation remove = false; continue; } switch (timerResult) { case EXPIRED: handleExpired(fileId); break; case READY: handleReady(fileId); break; default: throw new IllegalStateException("Unrecognized timer state: " + timerResult); } } catch (FileDoesNotExistException | InvalidPathException e) { LOG.warn("The file {} (id={}) to be persisted was not found : {}", uri, fileId, e.getMessage()); LOG.debug("Exception: ", e); } catch (UnavailableException e) { LOG.warn("Failed to persist file {}, will retry later: {}", uri, e.toString()); remove = false; } catch (ResourceExhaustedException e) { LOG.warn("The job service is busy, will retry later: {}", e.toString()); LOG.debug("Exception: ", e); mQuietPeriodSeconds = (mQuietPeriodSeconds == 0) ? 1 : Math.min(MAX_QUIET_PERIOD_SECONDS, mQuietPeriodSeconds * 2); remove = false; // End the method here until the next heartbeat. 
No more jobs should be scheduled during // the current heartbeat if the job master is at full capacity. return; } catch (Exception e) { LOG.warn("Unexpected exception encountered when scheduling the persist job for file {} " + "(id={}) : {}", uri, fileId, e.getMessage()); LOG.debug("Exception: ", e); } finally { if (remove) { mPersistRequests.remove(fileId); } } } } } /** * Periodically polls for the result of the jobs and updates metadata accordingly. */ @NotThreadSafe private final class PersistenceChecker implements alluxio.heartbeat.HeartbeatExecutor { /** * Creates a new instance of {@link PersistenceChecker}. */ PersistenceChecker() {} @Override public void close() {} // nothing to clean up /** * Updates the file system metadata to reflect the fact that the persist job succeeded. * * NOTE: It is the responsibility of the caller to update {@link #mPersistJobs}. * * @param job the successful job */ private void handleSuccess(PersistJob job) { long fileId = job.getFileId(); String tempUfsPath = job.getTempUfsPath(); List<Long> blockIds = new ArrayList<>(); UfsManager.UfsClient ufsClient = null; try (JournalContext journalContext = createJournalContext(); LockedInodePath inodePath = mInodeTree .lockFullInodePath(fileId, LockPattern.WRITE_INODE)) { InodeFile inode = inodePath.getInodeFile(); MountTable.Resolution resolution = mMountTable.resolve(inodePath.getUri()); ufsClient = mUfsManager.get(resolution.getMountId()); switch (inode.getPersistenceState()) { case LOST: // fall through case NOT_PERSISTED: // fall through case PERSISTED: LOG.warn("File {} (id={}) persistence state is {}. Successful persist has no effect.", job.getUri(), fileId, inode.getPersistenceState()); break; case TO_BE_PERSISTED: UpdateInodeEntry.Builder builder = UpdateInodeEntry.newBuilder(); try (CloseableResource<UnderFileSystem> ufsResource = resolution.acquireUfsResource()) { UnderFileSystem ufs = ufsResource.get(); String ufsPath = resolution.getUri().toString(); ufs.setOwner(tempUfsPath, inode.getOwner(), inode.getGroup()); ufs.setMode(tempUfsPath, inode.getMode()); if (!ufsPath.equals(tempUfsPath)) { // Make rename only when tempUfsPath is different from final ufsPath. Note that, // on object store, we take the optimization to skip the rename by having // tempUfsPath the same as final ufsPath. if (!ufs.renameRenamableFile(tempUfsPath, ufsPath)) { throw new IOException( String.format("Failed to rename %s to %s.", tempUfsPath, ufsPath)); } } builder.setUfsFingerprint(ufs.getFingerprint(ufsPath)); } mInodeTree.updateInodeFile(journalContext, UpdateInodeFileEntry.newBuilder() .setId(inode.getId()) .setPersistJobId(Constants.PERSISTENCE_INVALID_JOB_ID) .setTempUfsPath(Constants.PERSISTENCE_INVALID_UFS_PATH) .build()); mInodeTree.updateInode(journalContext, builder .setId(inode.getId()) .setPersistenceState(PersistenceState.PERSISTED.name()) .build()); propagatePersistedInternal(journalContext, inodePath); Metrics.FILES_PERSISTED.inc(); // Save state for possible cleanup blockIds.addAll(inode.getBlockIds()); break; default: throw new IllegalStateException( "Unrecognized persistence state: " + inode.getPersistenceState()); } } catch (FileDoesNotExistException | InvalidPathException e) { LOG.warn("The file {} (id={}) to be persisted was not found: {}", job.getUri(), fileId, e.getMessage()); LOG.debug("Exception: ", e); // Cleanup the temporary file. 
if (ufsClient != null) { try (CloseableResource<UnderFileSystem> ufsResource = ufsClient.acquireUfsResource()) { cleanup(ufsResource.get(), tempUfsPath); } } } catch (Exception e) { LOG.warn( "Unexpected exception encountered when trying to complete persistence of a file {} " + "(id={}) : {}", job.getUri(), fileId, e.getMessage()); LOG.debug("Exception: ", e); if (ufsClient != null) { try (CloseableResource<UnderFileSystem> ufsResource = ufsClient.acquireUfsResource()) { cleanup(ufsResource.get(), tempUfsPath); } } mPersistRequests.put(fileId, job.getTimer()); } // Cleanup possible staging UFS blocks files due to fast durable write fallback. // Note that this is best effort if (ufsClient != null) { for (long blockId : blockIds) { String ufsBlockPath = alluxio.worker.BlockUtils.getUfsBlockPath(ufsClient, blockId); try (CloseableResource<UnderFileSystem> ufsResource = ufsClient.acquireUfsResource()) { alluxio.util.UnderFileSystemUtils.deleteFileIfExists(ufsResource.get(), ufsBlockPath); } catch (Exception e) { LOG.warn("Failed to clean up staging UFS block file {}: {}", ufsBlockPath, e.toString()); } } } } @Override public void heartbeat() throws InterruptedException { boolean queueEmpty = mPersistCheckerPool.getQueue().isEmpty(); // Check the progress of persist jobs. for (long fileId : mPersistJobs.keySet()) { // Throw if interrupted. if (Thread.interrupted()) { throw new InterruptedException("PersistenceChecker interrupted."); } final PersistJob job = mPersistJobs.get(fileId); if (job == null) { // This could happen if a key is removed from mPersistJobs while we are iterating. continue; } // Cancel any jobs marked as canceled switch (job.getCancelState()) { case NOT_CANCELED: break; case TO_BE_CANCELED: // Send the message to cancel this job JobMasterClient client = mJobMasterClientPool.acquire(); try { client.cancel(job.getId()); job.setCancelState(PersistJob.CancelState.CANCELING); } catch (alluxio.exception.status.NotFoundException e) { LOG.warn("Persist job (id={}) for file {} (id={}) to cancel was not found: {}", job.getId(), job.getUri(), fileId, e.getMessage()); LOG.debug("Exception: ", e); mPersistJobs.remove(fileId); continue; } catch (Exception e) { LOG.warn("Unexpected exception encountered when cancelling a persist job (id={}) for " + "file {} (id={}) : {}", job.getId(), job.getUri(), fileId, e.getMessage()); LOG.debug("Exception: ", e); } finally { mJobMasterClientPool.release(client); } continue; case CANCELING: break; default: throw new IllegalStateException("Unrecognized cancel state: " + job.getCancelState()); } if (!queueEmpty) { // There are tasks waiting in the queue, so do not try to schedule anything continue; } long jobId = job.getId(); JobMasterClient client = mJobMasterClientPool.acquire(); try { JobInfo jobInfo = client.getJobStatus(jobId); switch (jobInfo.getStatus()) { case RUNNING: // fall through case CREATED: break; case FAILED: LOG.warn("The persist job (id={}) for file {} (id={}) failed: {}", jobId, job.getUri(), fileId, jobInfo.getErrorMessage()); mPersistJobs.remove(fileId); mPersistRequests.put(fileId, job.getTimer()); break; case CANCELED: mPersistJobs.remove(fileId); break; case COMPLETED: mPersistJobs.remove(fileId); mPersistCheckerPool.execute(() -> handleSuccess(job)); break; default: throw new IllegalStateException("Unrecognized job status: " + jobInfo.getStatus()); } } catch (Exception e) { LOG.warn("Exception encountered when trying to retrieve the status of a " + " persist job (id={}) for file {} (id={}): {}.", jobId, job.getUri(), fileId, 
e.getMessage()); LOG.debug("Exception: ", e); mPersistJobs.remove(fileId); mPersistRequests.put(fileId, job.getTimer()); } finally { mJobMasterClientPool.release(client); } } } } @NotThreadSafe private final class TimeSeriesRecorder implements alluxio.heartbeat.HeartbeatExecutor { @Override public void heartbeat() throws InterruptedException { // TODO(calvin): Provide a better way to keep track of metrics collected as time series MetricRegistry registry = MetricsSystem.METRIC_REGISTRY; SortedMap<String, Gauge> gauges = registry.getGauges(); // % Alluxio space used Long masterCapacityTotal = (Long) gauges .get(MetricKey.CLUSTER_CAPACITY_TOTAL.getName()).getValue(); Long masterCapacityUsed = (Long) gauges .get(MetricKey.CLUSTER_CAPACITY_USED.getName()).getValue(); int percentAlluxioSpaceUsed = (masterCapacityTotal > 0) ? (int) (100L * masterCapacityUsed / masterCapacityTotal) : 0; mTimeSeriesStore.record("% Alluxio Space Used", percentAlluxioSpaceUsed); // % UFS space used Long masterUnderfsCapacityTotal = (Long) gauges .get(MetricKey.CLUSTER_ROOT_UFS_CAPACITY_TOTAL.getName()).getValue(); Long masterUnderfsCapacityUsed = (Long) gauges .get(MetricKey.CLUSTER_ROOT_UFS_CAPACITY_USED.getName()).getValue(); int percentUfsSpaceUsed = (masterUnderfsCapacityTotal > 0) ? (int) (100L * masterUnderfsCapacityUsed / masterUnderfsCapacityTotal) : 0; mTimeSeriesStore.record("% UFS Space Used", percentUfsSpaceUsed); // Bytes read Long bytesReadLocalThroughput = (Long) gauges.get( MetricKey.CLUSTER_BYTES_READ_LOCAL_THROUGHPUT.getName()).getValue(); Long bytesReadDomainSocketThroughput = (Long) gauges .get(MetricKey.CLUSTER_BYTES_READ_DOMAIN_THROUGHPUT.getName()).getValue(); Long bytesReadRemoteThroughput = (Long) gauges .get(MetricKey.CLUSTER_BYTES_READ_ALLUXIO_THROUGHPUT.getName()).getValue(); Long bytesReadUfsThroughput = (Long) gauges .get(MetricKey.CLUSTER_BYTES_READ_UFS_THROUGHPUT.getName()).getValue(); mTimeSeriesStore.record(MetricKey.CLUSTER_BYTES_READ_LOCAL_THROUGHPUT.getName(), bytesReadLocalThroughput); mTimeSeriesStore.record(MetricKey.CLUSTER_BYTES_READ_DOMAIN_THROUGHPUT.getName(), bytesReadDomainSocketThroughput); mTimeSeriesStore.record(MetricKey.CLUSTER_BYTES_READ_ALLUXIO_THROUGHPUT.getName(), bytesReadRemoteThroughput); mTimeSeriesStore.record(MetricKey.CLUSTER_BYTES_READ_UFS_THROUGHPUT.getName(), bytesReadUfsThroughput); // Bytes written Long bytesWrittenLocalThroughput = (Long) gauges .get(MetricKey.CLUSTER_BYTES_WRITTEN_LOCAL_THROUGHPUT.getName()) .getValue(); Long bytesWrittenAlluxioThroughput = (Long) gauges .get(MetricKey.CLUSTER_BYTES_WRITTEN_ALLUXIO_THROUGHPUT.getName()).getValue(); Long bytesWrittenDomainSocketThroughput = (Long) gauges.get( MetricKey.CLUSTER_BYTES_WRITTEN_DOMAIN_THROUGHPUT.getName()).getValue(); Long bytesWrittenUfsThroughput = (Long) gauges .get(MetricKey.CLUSTER_BYTES_WRITTEN_UFS_THROUGHPUT.getName()).getValue(); mTimeSeriesStore.record(MetricKey.CLUSTER_BYTES_WRITTEN_LOCAL_THROUGHPUT.getName(), bytesWrittenLocalThroughput); mTimeSeriesStore.record(MetricKey.CLUSTER_BYTES_WRITTEN_ALLUXIO_THROUGHPUT.getName(), bytesWrittenAlluxioThroughput); mTimeSeriesStore.record(MetricKey.CLUSTER_BYTES_WRITTEN_DOMAIN_THROUGHPUT.getName(), bytesWrittenDomainSocketThroughput); mTimeSeriesStore.record(MetricKey.CLUSTER_BYTES_WRITTEN_UFS_THROUGHPUT.getName(), bytesWrittenUfsThroughput); } @Override public void close() {} // Nothing to clean up. 
} private static void cleanup(UnderFileSystem ufs, String ufsPath) { final String errMessage = "Failed to delete UFS file {}."; if (!ufsPath.isEmpty()) { try { if (!ufs.deleteExistingFile(ufsPath)) { LOG.warn(errMessage, ufsPath); } } catch (IOException e) { LOG.warn(errMessage, ufsPath, e); } } } @Override public void updateUfsMode(AlluxioURI ufsPath, UfsMode ufsMode) throws InvalidPathException, InvalidArgumentException, UnavailableException, AccessControlException { // TODO(adit): Create new fsadmin audit context try (RpcContext rpcContext = createRpcContext(); FileSystemMasterAuditContext auditContext = createAuditContext("updateUfsMode", ufsPath, null, null)) { mUfsManager.setUfsMode(rpcContext, ufsPath, ufsMode); auditContext.setSucceeded(true); } } /** * Check if the specified operation type is allowed to the ufs. * * @param alluxioPath the Alluxio path * @param opType the operation type */ private void checkUfsMode(AlluxioURI alluxioPath, OperationType opType) throws AccessControlException, InvalidPathException { MountTable.Resolution resolution = mMountTable.resolve(alluxioPath); try (CloseableResource<UnderFileSystem> ufsResource = resolution.acquireUfsResource()) { UnderFileSystem ufs = ufsResource.get(); UfsMode ufsMode = ufs.getOperationMode(mUfsManager.getPhysicalUfsState(ufs.getPhysicalStores())); switch (ufsMode) { case NO_ACCESS: throw new AccessControlException(ExceptionMessage.UFS_OP_NOT_ALLOWED.getMessage(opType, resolution.getUri(), UfsMode.NO_ACCESS)); case READ_ONLY: if (opType == OperationType.WRITE) { throw new AccessControlException(ExceptionMessage.UFS_OP_NOT_ALLOWED.getMessage(opType, resolution.getUri(), UfsMode.READ_ONLY)); } break; default: // All operations are allowed break; } } } /** * The operation type. This class is used to check if an operation to the under storage is allowed * during maintenance. */ enum OperationType { READ, WRITE, } /** * Class that contains metrics for FileSystemMaster. * This class is public because the counter names are referenced in * {@link alluxio.web.WebInterfaceAbstractMetricsServlet}. 
*/ public static final class Metrics { private static final Counter DIRECTORIES_CREATED = MetricsSystem.counter(MetricKey.MASTER_DIRECTORIES_CREATED.getName()); private static final Counter FILE_BLOCK_INFOS_GOT = MetricsSystem.counter(MetricKey.MASTER_FILE_BLOCK_INFOS_GOT.getName()); private static final Counter FILE_INFOS_GOT = MetricsSystem.counter(MetricKey.MASTER_FILE_INFOS_GOT.getName()); private static final Counter FILES_COMPLETED = MetricsSystem.counter(MetricKey.MASTER_FILES_COMPLETED.getName()); private static final Counter FILES_CREATED = MetricsSystem.counter(MetricKey.MASTER_FILES_CREATED.getName()); private static final Counter FILES_FREED = MetricsSystem.counter(MetricKey.MASTER_FILES_FREED.getName()); private static final Counter FILES_PERSISTED = MetricsSystem.counter(MetricKey.MASTER_FILES_PERSISTED.getName()); private static final Counter NEW_BLOCKS_GOT = MetricsSystem.counter(MetricKey.MASTER_NEW_BLOCKS_GOT.getName()); private static final Counter PATHS_DELETED = MetricsSystem.counter(MetricKey.MASTER_PATHS_DELETED.getName()); private static final Counter PATHS_MOUNTED = MetricsSystem.counter(MetricKey.MASTER_PATHS_MOUNTED.getName()); private static final Counter PATHS_RENAMED = MetricsSystem.counter(MetricKey.MASTER_PATHS_RENAMED.getName()); private static final Counter PATHS_UNMOUNTED = MetricsSystem.counter(MetricKey.MASTER_PATHS_UNMOUNTED.getName()); // TODO(peis): Increment the RPCs OPs at the place where we receive the RPCs. private static final Counter COMPLETE_FILE_OPS = MetricsSystem.counter(MetricKey.MASTER_COMPLETE_FILE_OPS.getName()); private static final Counter CREATE_DIRECTORIES_OPS = MetricsSystem.counter(MetricKey.MASTER_CREATE_DIRECTORIES_OPS.getName()); private static final Counter CREATE_FILES_OPS = MetricsSystem.counter(MetricKey.MASTER_CREATE_FILES_OPS.getName()); private static final Counter DELETE_PATHS_OPS = MetricsSystem.counter(MetricKey.MASTER_DELETE_PATHS_OPS.getName()); private static final Counter FREE_FILE_OPS = MetricsSystem.counter(MetricKey.MASTER_FREE_FILE_OPS.getName()); private static final Counter GET_FILE_BLOCK_INFO_OPS = MetricsSystem.counter(MetricKey.MASTER_GET_FILE_BLOCK_INFO_OPS.getName()); private static final Counter GET_FILE_INFO_OPS = MetricsSystem.counter(MetricKey.MASTER_GET_FILE_INFO_OPS.getName()); private static final Counter GET_NEW_BLOCK_OPS = MetricsSystem.counter(MetricKey.MASTER_GET_NEW_BLOCK_OPS.getName()); private static final Counter MOUNT_OPS = MetricsSystem.counter(MetricKey.MASTER_MOUNT_OPS.getName()); private static final Counter RENAME_PATH_OPS = MetricsSystem.counter(MetricKey.MASTER_RENAME_PATH_OPS.getName()); private static final Counter SET_ACL_OPS = MetricsSystem.counter(MetricKey.MASTER_SET_ACL_OPS.getName()); private static final Counter SET_ATTRIBUTE_OPS = MetricsSystem.counter(MetricKey.MASTER_SET_ATTRIBUTE_OPS.getName()); private static final Counter UNMOUNT_OPS = MetricsSystem.counter(MetricKey.MASTER_UNMOUNT_OPS.getName()); private static final Map<String, Map<UFSOps, Counter>> SAVED_UFS_OPS = new ConcurrentHashMap<>(); /** * UFS operations enum. */ public enum UFSOps { CREATE_FILE, GET_FILE_INFO, DELETE_FILE, LIST_STATUS } /** * Get operations saved per ufs counter. 
* * @param ufsPath ufsPath * @param ufsOp ufs operation * @return the counter object */ @VisibleForTesting public static Counter getUfsCounter(String ufsPath, UFSOps ufsOp) { return SAVED_UFS_OPS.compute(ufsPath, (k, v) -> { if (v != null) { return v; } else { return new ConcurrentHashMap<>(); } }).compute(ufsOp, (k, v) -> { if (v != null) { return v; } else { return MetricsSystem.counter( Metric.getMetricNameWithTags(UFS_OP_SAVED_PREFIX + ufsOp.name(), MetricInfo.TAG_UFS, MetricsSystem.escape(new AlluxioURI(ufsPath)))); } }); } /** * Register some file system master related gauges. * * @param master the file system master * @param ufsManager the under filesystem manager */ @VisibleForTesting public static void registerGauges( final FileSystemMaster master, final UfsManager ufsManager) { MetricsSystem.registerGaugeIfAbsent(MetricKey.MASTER_FILES_PINNED.getName(), master::getNumberOfPinnedFiles); MetricsSystem.registerGaugeIfAbsent(MetricKey.MASTER_TOTAL_PATHS.getName(), () -> master.getInodeCount()); final String ufsDataFolder = ServerConfiguration.get(PropertyKey.MASTER_MOUNT_TABLE_ROOT_UFS); MetricsSystem.registerGaugeIfAbsent(MetricKey.CLUSTER_ROOT_UFS_CAPACITY_TOTAL.getName(), () -> { try (CloseableResource<UnderFileSystem> ufsResource = ufsManager.getRoot().acquireUfsResource()) { UnderFileSystem ufs = ufsResource.get(); return ufs.getSpace(ufsDataFolder, UnderFileSystem.SpaceType.SPACE_TOTAL); } catch (IOException e) { LOG.error(e.getMessage(), e); return Stream.empty(); } }); MetricsSystem.registerGaugeIfAbsent(MetricKey.CLUSTER_ROOT_UFS_CAPACITY_USED.getName(), () -> { try (CloseableResource<UnderFileSystem> ufsResource = ufsManager.getRoot().acquireUfsResource()) { UnderFileSystem ufs = ufsResource.get(); return ufs.getSpace(ufsDataFolder, UnderFileSystem.SpaceType.SPACE_USED); } catch (IOException e) { LOG.error(e.getMessage(), e); return Stream.empty(); } }); MetricsSystem.registerGaugeIfAbsent(MetricKey.CLUSTER_ROOT_UFS_CAPACITY_FREE.getName(), () -> { long ret = 0L; try (CloseableResource<UnderFileSystem> ufsResource = ufsManager.getRoot().acquireUfsResource()) { UnderFileSystem ufs = ufsResource.get(); ret = ufs.getSpace(ufsDataFolder, UnderFileSystem.SpaceType.SPACE_FREE); } catch (IOException e) { LOG.error(e.getMessage(), e); } return ret; }); } private Metrics() {} // prevent instantiation } /** * Creates a {@link FileSystemMasterAuditContext} instance. 
* * @param command the command to be logged by this {@link AuditContext} * @param srcPath the source path of this command * @param dstPath the destination path of this command * @param srcInode the source inode of this command * @return newly-created {@link FileSystemMasterAuditContext} instance */ private FileSystemMasterAuditContext createAuditContext(String command, AlluxioURI srcPath, @Nullable AlluxioURI dstPath, @Nullable Inode srcInode) { FileSystemMasterAuditContext auditContext = new FileSystemMasterAuditContext(mAsyncAuditLogWriter); if (mAsyncAuditLogWriter != null) { String user = null; String ugi = ""; try { user = AuthenticatedClientUser.getClientUser(ServerConfiguration.global()); } catch (AccessControlException e) { ugi = "N/A"; } if (user != null) { try { String primaryGroup = CommonUtils.getPrimaryGroupName(user, ServerConfiguration.global()); ugi = user + "," + primaryGroup; } catch (IOException e) { LOG.debug("Failed to get primary group for user {}.", user); ugi = user + ",N/A"; } } AuthType authType = ServerConfiguration.getEnum(PropertyKey.SECURITY_AUTHENTICATION_TYPE, AuthType.class); auditContext.setUgi(ugi) .setAuthType(authType) .setIp(ClientIpAddressInjector.getIpAddress()) .setCommand(command).setSrcPath(srcPath).setDstPath(dstPath) .setSrcInode(srcInode).setAllowed(true); } return auditContext; } private BlockDeletionContext createBlockDeletionContext() { return new DefaultBlockDeletionContext(this::removeBlocks, blocks -> blocks.forEach(mUfsBlockLocationCache::invalidate)); } private void removeBlocks(List<Long> blocks) throws IOException { if (blocks.isEmpty()) { return; } RetryPolicy retry = new CountingRetry(3); IOException lastThrown = null; while (retry.attempt()) { try { mBlockMaster.removeBlocks(blocks, true); return; } catch (UnavailableException e) { lastThrown = e; } } throw new IOException("Failed to remove deleted blocks from block master", lastThrown); } /** * @return a context for executing an RPC */ @VisibleForTesting public RpcContext createRpcContext() throws UnavailableException { return createRpcContext(new InternalOperationContext()); } /** * @param operationContext the operation context * @return a context for executing an RPC */ @VisibleForTesting public RpcContext createRpcContext(OperationContext operationContext) throws UnavailableException { return new RpcContext(createBlockDeletionContext(), createJournalContext(), operationContext.withTracker(mStateLockCallTracker)); } private LockingScheme createLockingScheme(AlluxioURI path, FileSystemMasterCommonPOptions options, LockPattern desiredLockMode) { return new LockingScheme(path, desiredLockMode, options, mUfsSyncPathCache, false); } private LockingScheme createSyncLockingScheme(AlluxioURI path, FileSystemMasterCommonPOptions options, boolean isGetFileInfo) { return new LockingScheme(path, LockPattern.READ, options, mUfsSyncPathCache, isGetFileInfo); } boolean isAclEnabled() { return ServerConfiguration.getBoolean(PropertyKey.SECURITY_AUTHORIZATION_PERMISSION_ENABLED); } @Override public List<TimeSeries> getTimeSeries() { return mTimeSeriesStore.getTimeSeries(); } @Override public AlluxioURI reverseResolve(AlluxioURI ufsUri) throws InvalidPathException { MountTable.ReverseResolution resolution = mMountTable.reverseResolve(ufsUri); if (resolution == null) { throw new InvalidPathException(ufsUri.toString() + " is not a valid ufs uri"); } return resolution.getUri(); } @Override @Nullable public String getRootInodeOwner() { return mInodeTree.getRootUserName(); } }
EvilMcJerkface/alluxio
core/server/master/src/main/java/alluxio/master/file/DefaultFileSystemMaster.java
Java
apache-2.0
193983
$.mockjax({
    url: "*",
    response: function(options) {
        this.responseText = ExampleData.exampleData;
    },
    responseTime: 0
});

$(function() {
    $("#tree1").tree();
});
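// Note: matching url "*" makes mockjax intercept every AJAX request on the page
// and answer it with ExampleData.exampleData (defined elsewhere in the example),
// so tree() presumably loads its node data from this canned response instead of
// a real server endpoint.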
mbraak/jqTree
static/examples/load_json_data_from_server.js
JavaScript
apache-2.0
188
namespace GraphSharp.Algorithms.Layout.Simple.FDP
{
    public enum FRCoolingFunction
    {
        Linear,
        Exponential
    }
}
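// Presumably selects how the Fruchterman-Reingold ("FR") layout cools its
// temperature between iterations: a linear or an exponential decay schedule.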
FTSRG/seviz
Source/Graph#/Algorithms/Layout/Simple/FDP/FRCoolingFunction.cs
C#
apache-2.0
139
package main

import "fmt"

//var (
//	samples = []int{}
//	b       = 1
//)

func main() {
	var samples = []int{}
	samples = append(samples, 1)
	fmt.Println(samples)
}

// Output:
// [1]
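// The function-local declaration replaces the commented-out package-level var
// block, keeping the samples slice scoped to main; append grows it from the
// empty slice before printing.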
containous/yaegi
_test/a9.go
Go
apache-2.0
187
/**
 * @author Hincu Andrei (andreih1981@gmail.com) on 01.12.2017.
 * @version $Id$.
 * @since 0.1.
 */
package ru.job4j.jdbc;
andreiHi/hincuA
chapter_008/src/main/java/ru/job4j/jdbc/package-info.java
Java
apache-2.0
125
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.drill.exec; import org.apache.drill.exec.physical.impl.common.HashTable; import org.apache.drill.exec.rpc.user.InboundImpersonationManager; import org.apache.drill.exec.server.options.OptionValidator; import org.apache.drill.exec.server.options.TypeValidators.BooleanValidator; import org.apache.drill.exec.server.options.TypeValidators.DoubleValidator; import org.apache.drill.exec.server.options.TypeValidators.EnumeratedStringValidator; import org.apache.drill.exec.server.options.TypeValidators.LongValidator; import org.apache.drill.exec.server.options.TypeValidators.MaxWidthValidator; import org.apache.drill.exec.server.options.TypeValidators.PositiveLongValidator; import org.apache.drill.exec.server.options.TypeValidators.PowerOfTwoLongValidator; import org.apache.drill.exec.server.options.TypeValidators.RangeDoubleValidator; import org.apache.drill.exec.server.options.TypeValidators.RangeLongValidator; import org.apache.drill.exec.server.options.TypeValidators.StringValidator; import org.apache.drill.exec.server.options.TypeValidators.AdminUsersValidator; import org.apache.drill.exec.server.options.TypeValidators.AdminUserGroupsValidator; import org.apache.drill.exec.testing.ExecutionControls; public final class ExecConstants { private ExecConstants() { // Don't allow instantiation } public static final String ZK_RETRY_TIMES = "drill.exec.zk.retry.count"; public static final String ZK_RETRY_DELAY = "drill.exec.zk.retry.delay"; public static final String ZK_CONNECTION = "drill.exec.zk.connect"; public static final String ZK_TIMEOUT = "drill.exec.zk.timeout"; public static final String ZK_ROOT = "drill.exec.zk.root"; public static final String ZK_REFRESH = "drill.exec.zk.refresh"; public static final String BIT_RETRY_TIMES = "drill.exec.rpc.bit.server.retry.count"; public static final String BIT_RETRY_DELAY = "drill.exec.rpc.bit.server.retry.delay"; public static final String BIT_TIMEOUT = "drill.exec.bit.timeout" ; public static final String SERVICE_NAME = "drill.exec.cluster-id"; public static final String INITIAL_BIT_PORT = "drill.exec.rpc.bit.server.port"; public static final String INITIAL_DATA_PORT = "drill.exec.rpc.bit.server.dataport"; public static final String BIT_RPC_TIMEOUT = "drill.exec.rpc.bit.timeout"; public static final String INITIAL_USER_PORT = "drill.exec.rpc.user.server.port"; public static final String USER_RPC_TIMEOUT = "drill.exec.rpc.user.timeout"; public static final String METRICS_CONTEXT_NAME = "drill.exec.metrics.context"; public static final String USE_IP_ADDRESS = "drill.exec.rpc.use.ip"; public static final String CLIENT_RPC_THREADS = "drill.exec.rpc.user.client.threads"; public static final String BIT_SERVER_RPC_THREADS = "drill.exec.rpc.bit.server.threads"; public 
static final String USER_SERVER_RPC_THREADS = "drill.exec.rpc.user.server.threads"; public static final String FRAG_RUNNER_RPC_TIMEOUT = "drill.exec.rpc.fragrunner.timeout"; public static final PositiveLongValidator FRAG_RUNNER_RPC_TIMEOUT_VALIDATOR = new PositiveLongValidator(FRAG_RUNNER_RPC_TIMEOUT, Long.MAX_VALUE); public static final String TRACE_DUMP_DIRECTORY = "drill.exec.trace.directory"; public static final String TRACE_DUMP_FILESYSTEM = "drill.exec.trace.filesystem"; public static final String TEMP_DIRECTORIES = "drill.exec.tmp.directories"; public static final String TEMP_FILESYSTEM = "drill.exec.tmp.filesystem"; public static final String INCOMING_BUFFER_IMPL = "drill.exec.buffer.impl"; /** incoming buffer size (number of batches) */ public static final String INCOMING_BUFFER_SIZE = "drill.exec.buffer.size"; public static final String SPOOLING_BUFFER_DELETE = "drill.exec.buffer.spooling.delete"; public static final String SPOOLING_BUFFER_MEMORY = "drill.exec.buffer.spooling.size"; public static final String BATCH_PURGE_THRESHOLD = "drill.exec.sort.purge.threshold"; // Spill boot-time Options common to all spilling operators // (Each individual operator may override the common options) public static final String SPILL_FILESYSTEM = "drill.exec.spill.fs"; public static final String SPILL_DIRS = "drill.exec.spill.directories"; public static final String OUTPUT_BATCH_SIZE = "drill.exec.memory.operator.output_batch_size"; // Output Batch Size in Bytes. We have a small lower bound so we can test with unit tests without the // need to produce very large batches that take up lot of memory. public static final LongValidator OUTPUT_BATCH_SIZE_VALIDATOR = new RangeLongValidator(OUTPUT_BATCH_SIZE, 128, 512 * 1024 * 1024); // External Sort Boot configuration public static final String EXTERNAL_SORT_TARGET_SPILL_BATCH_SIZE = "drill.exec.sort.external.spill.batch.size"; public static final String EXTERNAL_SORT_SPILL_GROUP_SIZE = "drill.exec.sort.external.spill.group.size"; public static final String EXTERNAL_SORT_SPILL_THRESHOLD = "drill.exec.sort.external.spill.threshold"; public static final String EXTERNAL_SORT_SPILL_DIRS = "drill.exec.sort.external.spill.directories"; public static final String EXTERNAL_SORT_SPILL_FILESYSTEM = "drill.exec.sort.external.spill.fs"; public static final String EXTERNAL_SORT_SPILL_FILE_SIZE = "drill.exec.sort.external.spill.file_size"; public static final String EXTERNAL_SORT_MSORT_MAX_BATCHSIZE = "drill.exec.sort.external.msort.batch.maxsize"; public static final String EXTERNAL_SORT_DISABLE_MANAGED = "drill.exec.sort.external.disable_managed"; public static final String EXTERNAL_SORT_MERGE_LIMIT = "drill.exec.sort.external.merge_limit"; public static final String EXTERNAL_SORT_SPILL_BATCH_SIZE = "drill.exec.sort.external.spill.spill_batch_size"; public static final String EXTERNAL_SORT_MERGE_BATCH_SIZE = "drill.exec.sort.external.spill.merge_batch_size"; public static final String EXTERNAL_SORT_MAX_MEMORY = "drill.exec.sort.external.mem_limit"; public static final String EXTERNAL_SORT_BATCH_LIMIT = "drill.exec.sort.external.batch_limit"; // External Sort Runtime options public static final BooleanValidator EXTERNAL_SORT_DISABLE_MANAGED_OPTION = new BooleanValidator("exec.sort.disable_managed"); // Hash Aggregate Options public static final String HASHAGG_NUM_PARTITIONS_KEY = "exec.hashagg.num_partitions"; public static final LongValidator HASHAGG_NUM_PARTITIONS_VALIDATOR = new RangeLongValidator(HASHAGG_NUM_PARTITIONS_KEY, 1, 128); // 1 means - no spilling 
public static final String HASHAGG_MAX_MEMORY_KEY = "exec.hashagg.mem_limit"; public static final LongValidator HASHAGG_MAX_MEMORY_VALIDATOR = new RangeLongValidator(HASHAGG_MAX_MEMORY_KEY, 0, Integer.MAX_VALUE); // min batches is used for tuning (each partition needs so many batches when planning the number of partitions, // or reserve this number when calculating whether the remaining available memory is too small and requires a spill.) // Low value may OOM (e.g., when incoming rows become wider), higher values use fewer partitions but are safer public static final String HASHAGG_MIN_BATCHES_PER_PARTITION_KEY = "exec.hashagg.min_batches_per_partition"; public static final LongValidator HASHAGG_MIN_BATCHES_PER_PARTITION_VALIDATOR = new RangeLongValidator(HASHAGG_MIN_BATCHES_PER_PARTITION_KEY, 1, 5); // Can be turned off mainly for testing. Memory prediction is used to decide on when to spill to disk; with this option off, // spill would be triggered only by another mechanism -- "catch OOMs and then spill". public static final String HASHAGG_USE_MEMORY_PREDICTION_KEY = "exec.hashagg.use_memory_prediction"; public static final BooleanValidator HASHAGG_USE_MEMORY_PREDICTION_VALIDATOR = new BooleanValidator(HASHAGG_USE_MEMORY_PREDICTION_KEY); public static final String HASHAGG_SPILL_DIRS = "drill.exec.hashagg.spill.directories"; public static final String HASHAGG_SPILL_FILESYSTEM = "drill.exec.hashagg.spill.fs"; public static final String HASHAGG_FALLBACK_ENABLED_KEY = "drill.exec.hashagg.fallback.enabled"; public static final BooleanValidator HASHAGG_FALLBACK_ENABLED_VALIDATOR = new BooleanValidator(HASHAGG_FALLBACK_ENABLED_KEY); public static final String SSL_PROVIDER = "drill.exec.ssl.provider"; // valid values are "JDK", "OPENSSL" // default JDK public static final String SSL_PROTOCOL = "drill.exec.ssl.protocol"; // valid values are SSL, SSLV2, SSLV3, TLS, TLSV1, TLSv1.1, TLSv1.2(default) public static final String SSL_KEYSTORE_TYPE = "drill.exec.ssl.keyStoreType"; public static final String SSL_KEYSTORE_PATH = "drill.exec.ssl.keyStorePath"; // path to keystore. default : $JRE_HOME/lib/security/keystore.jks public static final String SSL_KEYSTORE_PASSWORD = "drill.exec.ssl.keyStorePassword"; // default: changeit public static final String SSL_KEY_PASSWORD = "drill.exec.ssl.keyPassword"; // public static final String SSL_TRUSTSTORE_TYPE = "drill.exec.ssl.trustStoreType"; // valid values are jks(default), jceks, pkcs12 public static final String SSL_TRUSTSTORE_PATH = "drill.exec.ssl.trustStorePath"; // path to keystore. default : $JRE_HOME/lib/security/cacerts.jks public static final String SSL_TRUSTSTORE_PASSWORD = "drill.exec.ssl.trustStorePassword"; // default: changeit public static final String SSL_USE_HADOOP_CONF = "drill.exec.ssl.useHadoopConfig"; // Initialize ssl params from hadoop if not provided by drill. 
default: true public static final String SSL_HANDSHAKE_TIMEOUT = "drill.exec.security.user.encryption.ssl.handshakeTimeout"; // Default 10 seconds public static final String TEXT_LINE_READER_BATCH_SIZE = "drill.exec.storage.file.text.batch.size"; public static final String TEXT_LINE_READER_BUFFER_SIZE = "drill.exec.storage.file.text.buffer.size"; public static final String HAZELCAST_SUBNETS = "drill.exec.cache.hazel.subnets"; public static final String HTTP_ENABLE = "drill.exec.http.enabled"; public static final String HTTP_MAX_PROFILES = "drill.exec.http.max_profiles"; public static final String HTTP_PORT = "drill.exec.http.port"; public static final String HTTP_PORT_HUNT = "drill.exec.http.porthunt"; public static final String HTTP_ENABLE_SSL = "drill.exec.http.ssl_enabled"; public static final String HTTP_CORS_ENABLED = "drill.exec.http.cors.enabled"; public static final String HTTP_CORS_ALLOWED_ORIGINS = "drill.exec.http.cors.allowedOrigins"; public static final String HTTP_CORS_ALLOWED_METHODS = "drill.exec.http.cors.allowedMethods"; public static final String HTTP_CORS_ALLOWED_HEADERS = "drill.exec.http.cors.allowedHeaders"; public static final String HTTP_CORS_CREDENTIALS = "drill.exec.http.cors.credentials"; public static final String HTTP_SESSION_MEMORY_RESERVATION = "drill.exec.http.session.memory.reservation"; public static final String HTTP_SESSION_MEMORY_MAXIMUM = "drill.exec.http.session.memory.maximum"; public static final String HTTP_SESSION_MAX_IDLE_SECS = "drill.exec.http.session_max_idle_secs"; public static final String HTTP_KEYSTORE_PATH = SSL_KEYSTORE_PATH; public static final String HTTP_KEYSTORE_PASSWORD = SSL_KEYSTORE_PASSWORD; public static final String HTTP_TRUSTSTORE_PATH = SSL_TRUSTSTORE_PATH; public static final String HTTP_TRUSTSTORE_PASSWORD = SSL_TRUSTSTORE_PASSWORD; public static final String HTTP_AUTHENTICATION_MECHANISMS = "drill.exec.http.auth.mechanisms"; public static final String HTTP_SPNEGO_PRINCIPAL = "drill.exec.http.auth.spnego.principal"; public static final String HTTP_SPNEGO_KEYTAB = "drill.exec.http.auth.spnego.keytab"; public static final String SYS_STORE_PROVIDER_CLASS = "drill.exec.sys.store.provider.class"; public static final String SYS_STORE_PROVIDER_LOCAL_PATH = "drill.exec.sys.store.provider.local.path"; public static final String SYS_STORE_PROVIDER_LOCAL_ENABLE_WRITE = "drill.exec.sys.store.provider.local.write"; public static final String PROFILES_STORE_INMEMORY = "drill.exec.profiles.store.inmemory"; public static final String PROFILES_STORE_CAPACITY = "drill.exec.profiles.store.capacity"; public static final String IMPERSONATION_ENABLED = "drill.exec.impersonation.enabled"; public static final String IMPERSONATION_MAX_CHAINED_USER_HOPS = "drill.exec.impersonation.max_chained_user_hops"; public static final String AUTHENTICATION_MECHANISMS = "drill.exec.security.auth.mechanisms"; public static final String USER_AUTHENTICATION_ENABLED = "drill.exec.security.user.auth.enabled"; public static final String USER_AUTHENTICATOR_IMPL = "drill.exec.security.user.auth.impl"; public static final String PAM_AUTHENTICATOR_PROFILES = "drill.exec.security.user.auth.pam_profiles"; public static final String BIT_AUTHENTICATION_ENABLED = "drill.exec.security.bit.auth.enabled"; public static final String BIT_AUTHENTICATION_MECHANISM = "drill.exec.security.bit.auth.mechanism"; public static final String USE_LOGIN_PRINCIPAL = "drill.exec.security.bit.auth.use_login_principal"; public static final String USER_ENCRYPTION_SASL_ENABLED = 
"drill.exec.security.user.encryption.sasl.enabled"; public static final String USER_ENCRYPTION_SASL_MAX_WRAPPED_SIZE = "drill.exec.security.user.encryption.sasl.max_wrapped_size"; public static final String WEB_SERVER_THREAD_POOL_MAX = "drill.exec.web_server.thread_pool_max"; public static final String USER_SSL_ENABLED = "drill.exec.security.user.encryption.ssl.enabled"; public static final String BIT_ENCRYPTION_SASL_ENABLED = "drill.exec.security.bit.encryption.sasl.enabled"; public static final String BIT_ENCRYPTION_SASL_MAX_WRAPPED_SIZE = "drill.exec.security.bit.encryption.sasl.max_wrapped_size"; /** Size of JDBC batch queue (in batches) above which throttling begins. */ public static final String JDBC_BATCH_QUEUE_THROTTLING_THRESHOLD = "drill.jdbc.batch_queue_throttling_threshold"; // Thread pool size for scan threads. Used by the Parquet scan. public static final String SCAN_THREADPOOL_SIZE = "drill.exec.scan.threadpool_size"; // The size of the thread pool used by a scan to decode the data. Used by Parquet public static final String SCAN_DECODE_THREADPOOL_SIZE = "drill.exec.scan.decode_threadpool_size"; /** * Currently if a query is cancelled, but one of the fragments reports the status as FAILED instead of CANCELLED or * FINISHED we report the query result as CANCELLED by swallowing the failures occurred in fragments. This BOOT * setting allows the user to see the query status as failure. Useful for developers/testers. */ public static final String RETURN_ERROR_FOR_FAILURE_IN_CANCELLED_FRAGMENTS = "drill.exec.debug.return_error_for_failure_in_cancelled_fragments"; public static final String CLIENT_SUPPORT_COMPLEX_TYPES = "drill.client.supports-complex-types"; /** * Configuration properties connected with dynamic UDFs support */ public static final String UDF_RETRY_ATTEMPTS = "drill.exec.udf.retry-attempts"; public static final String UDF_DIRECTORY_LOCAL = "drill.exec.udf.directory.local"; public static final String UDF_DIRECTORY_FS = "drill.exec.udf.directory.fs"; public static final String UDF_DIRECTORY_ROOT = "drill.exec.udf.directory.root"; public static final String UDF_DIRECTORY_STAGING = "drill.exec.udf.directory.staging"; public static final String UDF_DIRECTORY_REGISTRY = "drill.exec.udf.directory.registry"; public static final String UDF_DIRECTORY_TMP = "drill.exec.udf.directory.tmp"; public static final String UDF_DISABLE_DYNAMIC = "drill.exec.udf.disable_dynamic"; /** * Local temporary directory is used as base for temporary storage of Dynamic UDF jars. */ public static final String DRILL_TMP_DIR = "drill.tmp-dir"; /** * Temporary tables can be created ONLY in default temporary workspace. 
*/ public static final String DEFAULT_TEMPORARY_WORKSPACE = "drill.exec.default_temporary_workspace"; public static final String OUTPUT_FORMAT_OPTION = "store.format"; public static final OptionValidator OUTPUT_FORMAT_VALIDATOR = new StringValidator(OUTPUT_FORMAT_OPTION); public static final String PARQUET_BLOCK_SIZE = "store.parquet.block-size"; public static final String PARQUET_WRITER_USE_SINGLE_FS_BLOCK = "store.parquet.writer.use_single_fs_block"; public static final OptionValidator PARQUET_WRITER_USE_SINGLE_FS_BLOCK_VALIDATOR = new BooleanValidator( PARQUET_WRITER_USE_SINGLE_FS_BLOCK); public static final OptionValidator PARQUET_BLOCK_SIZE_VALIDATOR = new PositiveLongValidator(PARQUET_BLOCK_SIZE, Integer.MAX_VALUE); public static final String PARQUET_PAGE_SIZE = "store.parquet.page-size"; public static final OptionValidator PARQUET_PAGE_SIZE_VALIDATOR = new PositiveLongValidator(PARQUET_PAGE_SIZE, Integer.MAX_VALUE); public static final String PARQUET_DICT_PAGE_SIZE = "store.parquet.dictionary.page-size"; public static final OptionValidator PARQUET_DICT_PAGE_SIZE_VALIDATOR = new PositiveLongValidator(PARQUET_DICT_PAGE_SIZE, Integer.MAX_VALUE); public static final String PARQUET_WRITER_COMPRESSION_TYPE = "store.parquet.compression"; public static final OptionValidator PARQUET_WRITER_COMPRESSION_TYPE_VALIDATOR = new EnumeratedStringValidator( PARQUET_WRITER_COMPRESSION_TYPE, "snappy", "gzip", "none"); public static final String PARQUET_WRITER_ENABLE_DICTIONARY_ENCODING = "store.parquet.enable_dictionary_encoding"; public static final OptionValidator PARQUET_WRITER_ENABLE_DICTIONARY_ENCODING_VALIDATOR = new BooleanValidator( PARQUET_WRITER_ENABLE_DICTIONARY_ENCODING); public static final String PARQUET_VECTOR_FILL_THRESHOLD = "store.parquet.vector_fill_threshold"; public static final OptionValidator PARQUET_VECTOR_FILL_THRESHOLD_VALIDATOR = new PositiveLongValidator(PARQUET_VECTOR_FILL_THRESHOLD, 99l); public static final String PARQUET_VECTOR_FILL_CHECK_THRESHOLD = "store.parquet.vector_fill_check_threshold"; public static final OptionValidator PARQUET_VECTOR_FILL_CHECK_THRESHOLD_VALIDATOR = new PositiveLongValidator(PARQUET_VECTOR_FILL_CHECK_THRESHOLD, 100l); public static final String PARQUET_NEW_RECORD_READER = "store.parquet.use_new_reader"; public static final OptionValidator PARQUET_RECORD_READER_IMPLEMENTATION_VALIDATOR = new BooleanValidator(PARQUET_NEW_RECORD_READER); public static final String PARQUET_READER_INT96_AS_TIMESTAMP = "store.parquet.reader.int96_as_timestamp"; public static final OptionValidator PARQUET_READER_INT96_AS_TIMESTAMP_VALIDATOR = new BooleanValidator(PARQUET_READER_INT96_AS_TIMESTAMP); public static final String PARQUET_PAGEREADER_ASYNC = "store.parquet.reader.pagereader.async"; public static final OptionValidator PARQUET_PAGEREADER_ASYNC_VALIDATOR = new BooleanValidator(PARQUET_PAGEREADER_ASYNC); // Number of pages the Async Parquet page reader will read before blocking public static final String PARQUET_PAGEREADER_QUEUE_SIZE = "store.parquet.reader.pagereader.queuesize"; public static final OptionValidator PARQUET_PAGEREADER_QUEUE_SIZE_VALIDATOR = new PositiveLongValidator(PARQUET_PAGEREADER_QUEUE_SIZE, Integer.MAX_VALUE); public static final String PARQUET_PAGEREADER_ENFORCETOTALSIZE = "store.parquet.reader.pagereader.enforceTotalSize"; public static final OptionValidator PARQUET_PAGEREADER_ENFORCETOTALSIZE_VALIDATOR = new BooleanValidator(PARQUET_PAGEREADER_ENFORCETOTALSIZE); public static final String PARQUET_COLUMNREADER_ASYNC = 
"store.parquet.reader.columnreader.async"; public static final OptionValidator PARQUET_COLUMNREADER_ASYNC_VALIDATOR = new BooleanValidator(PARQUET_COLUMNREADER_ASYNC); // Use a buffering reader for Parquet page reader public static final String PARQUET_PAGEREADER_USE_BUFFERED_READ = "store.parquet.reader.pagereader.bufferedread"; public static final OptionValidator PARQUET_PAGEREADER_USE_BUFFERED_READ_VALIDATOR = new BooleanValidator(PARQUET_PAGEREADER_USE_BUFFERED_READ); // Size in MiB of the buffer the Parquet page reader will use to read from disk. Default is 1 MiB public static final String PARQUET_PAGEREADER_BUFFER_SIZE = "store.parquet.reader.pagereader.buffersize"; public static final OptionValidator PARQUET_PAGEREADER_BUFFER_SIZE_VALIDATOR = new LongValidator(PARQUET_PAGEREADER_BUFFER_SIZE); // try to use fadvise if available public static final String PARQUET_PAGEREADER_USE_FADVISE = "store.parquet.reader.pagereader.usefadvise"; public static final OptionValidator PARQUET_PAGEREADER_USE_FADVISE_VALIDATOR = new BooleanValidator(PARQUET_PAGEREADER_USE_FADVISE); public static final OptionValidator COMPILE_SCALAR_REPLACEMENT = new BooleanValidator("exec.compile.scalar_replacement"); public static final String JSON_ALL_TEXT_MODE = "store.json.all_text_mode"; public static final BooleanValidator JSON_READER_ALL_TEXT_MODE_VALIDATOR = new BooleanValidator(JSON_ALL_TEXT_MODE); public static final BooleanValidator JSON_EXTENDED_TYPES = new BooleanValidator("store.json.extended_types"); public static final BooleanValidator JSON_WRITER_UGLIFY = new BooleanValidator("store.json.writer.uglify"); public static final BooleanValidator JSON_WRITER_SKIPNULLFIELDS = new BooleanValidator("store.json.writer.skip_null_fields"); public static final String JSON_READER_SKIP_INVALID_RECORDS_FLAG = "store.json.reader.skip_invalid_records"; public static final BooleanValidator JSON_SKIP_MALFORMED_RECORDS_VALIDATOR = new BooleanValidator(JSON_READER_SKIP_INVALID_RECORDS_FLAG); public static final String JSON_READER_PRINT_INVALID_RECORDS_LINE_NOS_FLAG = "store.json.reader.print_skipped_invalid_record_number"; public static final BooleanValidator JSON_READER_PRINT_INVALID_RECORDS_LINE_NOS_FLAG_VALIDATOR = new BooleanValidator(JSON_READER_PRINT_INVALID_RECORDS_LINE_NOS_FLAG); public static final DoubleValidator TEXT_ESTIMATED_ROW_SIZE = new RangeDoubleValidator("store.text.estimated_row_size_bytes", 1, Long.MAX_VALUE); /** * Json writer option for writing `NaN` and `Infinity` tokens as numbers (not enclosed with double quotes) */ public static final String JSON_WRITER_NAN_INF_NUMBERS = "store.json.writer.allow_nan_inf"; public static final BooleanValidator JSON_WRITER_NAN_INF_NUMBERS_VALIDATOR = new BooleanValidator(JSON_WRITER_NAN_INF_NUMBERS); /** * Json reader option that enables parser to read `NaN` and `Infinity` tokens as numbers */ public static final String JSON_READER_NAN_INF_NUMBERS = "store.json.reader.allow_nan_inf"; public static final BooleanValidator JSON_READER_NAN_INF_NUMBERS_VALIDATOR = new BooleanValidator(JSON_READER_NAN_INF_NUMBERS); /** * The column label (for directory levels) in results when querying files in a directory * E.g. 
labels: dir0 dir1<pre> * structure: foo * |- bar - a.parquet * |- baz - b.parquet</pre> */ public static final String FILESYSTEM_PARTITION_COLUMN_LABEL = "drill.exec.storage.file.partition.column.label"; public static final StringValidator FILESYSTEM_PARTITION_COLUMN_LABEL_VALIDATOR = new StringValidator(FILESYSTEM_PARTITION_COLUMN_LABEL); /** * Implicit file columns */ public static final String IMPLICIT_FILENAME_COLUMN_LABEL = "drill.exec.storage.implicit.filename.column.label"; public static final OptionValidator IMPLICIT_FILENAME_COLUMN_LABEL_VALIDATOR = new StringValidator(IMPLICIT_FILENAME_COLUMN_LABEL); public static final String IMPLICIT_SUFFIX_COLUMN_LABEL = "drill.exec.storage.implicit.suffix.column.label"; public static final OptionValidator IMPLICIT_SUFFIX_COLUMN_LABEL_VALIDATOR = new StringValidator(IMPLICIT_SUFFIX_COLUMN_LABEL); public static final String IMPLICIT_FQN_COLUMN_LABEL = "drill.exec.storage.implicit.fqn.column.label"; public static final OptionValidator IMPLICIT_FQN_COLUMN_LABEL_VALIDATOR = new StringValidator(IMPLICIT_FQN_COLUMN_LABEL); public static final String IMPLICIT_FILEPATH_COLUMN_LABEL = "drill.exec.storage.implicit.filepath.column.label"; public static final OptionValidator IMPLICIT_FILEPATH_COLUMN_LABEL_VALIDATOR = new StringValidator(IMPLICIT_FILEPATH_COLUMN_LABEL); public static final String JSON_READ_NUMBERS_AS_DOUBLE = "store.json.read_numbers_as_double"; public static final BooleanValidator JSON_READ_NUMBERS_AS_DOUBLE_VALIDATOR = new BooleanValidator(JSON_READ_NUMBERS_AS_DOUBLE); public static final String MONGO_ALL_TEXT_MODE = "store.mongo.all_text_mode"; public static final OptionValidator MONGO_READER_ALL_TEXT_MODE_VALIDATOR = new BooleanValidator(MONGO_ALL_TEXT_MODE); public static final String MONGO_READER_READ_NUMBERS_AS_DOUBLE = "store.mongo.read_numbers_as_double"; public static final OptionValidator MONGO_READER_READ_NUMBERS_AS_DOUBLE_VALIDATOR = new BooleanValidator(MONGO_READER_READ_NUMBERS_AS_DOUBLE); public static final String MONGO_BSON_RECORD_READER = "store.mongo.bson.record.reader"; public static final OptionValidator MONGO_BSON_RECORD_READER_VALIDATOR = new BooleanValidator(MONGO_BSON_RECORD_READER); public static final String ENABLE_UNION_TYPE_KEY = "exec.enable_union_type"; public static final BooleanValidator ENABLE_UNION_TYPE = new BooleanValidator(ENABLE_UNION_TYPE_KEY); // Kafka plugin related options. public static final String KAFKA_ALL_TEXT_MODE = "store.kafka.all_text_mode"; public static final OptionValidator KAFKA_READER_ALL_TEXT_MODE_VALIDATOR = new BooleanValidator(KAFKA_ALL_TEXT_MODE); public static final String KAFKA_READER_READ_NUMBERS_AS_DOUBLE = "store.kafka.read_numbers_as_double"; public static final OptionValidator KAFKA_READER_READ_NUMBERS_AS_DOUBLE_VALIDATOR = new BooleanValidator( KAFKA_READER_READ_NUMBERS_AS_DOUBLE); public static final String KAFKA_RECORD_READER = "store.kafka.record.reader"; public static final OptionValidator KAFKA_RECORD_READER_VALIDATOR = new StringValidator(KAFKA_RECORD_READER); public static final String KAFKA_POLL_TIMEOUT = "store.kafka.poll.timeout"; public static final PositiveLongValidator KAFKA_POLL_TIMEOUT_VALIDATOR = new PositiveLongValidator(KAFKA_POLL_TIMEOUT, Long.MAX_VALUE); // TODO: We need to add a feature that enables storage plugins to add their own options. Currently we have to declare // in core which is not right. Move this option and above two mongo plugin related options once we have the feature. 
public static final String HIVE_OPTIMIZE_SCAN_WITH_NATIVE_READERS = "store.hive.optimize_scan_with_native_readers"; public static final OptionValidator HIVE_OPTIMIZE_SCAN_WITH_NATIVE_READERS_VALIDATOR = new BooleanValidator(HIVE_OPTIMIZE_SCAN_WITH_NATIVE_READERS); public static final String SLICE_TARGET = "planner.slice_target"; public static final long SLICE_TARGET_DEFAULT = 100000l; public static final PositiveLongValidator SLICE_TARGET_OPTION = new PositiveLongValidator(SLICE_TARGET, Long.MAX_VALUE); public static final String CAST_TO_NULLABLE_NUMERIC = "drill.exec.functions.cast_empty_string_to_null"; public static final BooleanValidator CAST_TO_NULLABLE_NUMERIC_OPTION = new BooleanValidator(CAST_TO_NULLABLE_NUMERIC); /** * HashTable runtime settings */ public static final String MIN_HASH_TABLE_SIZE_KEY = "exec.min_hash_table_size"; public static final PositiveLongValidator MIN_HASH_TABLE_SIZE = new PositiveLongValidator(MIN_HASH_TABLE_SIZE_KEY, HashTable.MAXIMUM_CAPACITY); public static final String MAX_HASH_TABLE_SIZE_KEY = "exec.max_hash_table_size"; public static final PositiveLongValidator MAX_HASH_TABLE_SIZE = new PositiveLongValidator(MAX_HASH_TABLE_SIZE_KEY, HashTable.MAXIMUM_CAPACITY); /** * Limits the maximum level of parallelization to this factor times the number of Drillbits */ public static final String CPU_LOAD_AVERAGE_KEY = "planner.cpu_load_average"; public static final DoubleValidator CPU_LOAD_AVERAGE = new DoubleValidator(CPU_LOAD_AVERAGE_KEY); public static final String MAX_WIDTH_PER_NODE_KEY = "planner.width.max_per_node"; public static final MaxWidthValidator MAX_WIDTH_PER_NODE = new MaxWidthValidator(MAX_WIDTH_PER_NODE_KEY); /** * The maximum level of parallelization any stage of the query can do. Note that while this * might be the number of active Drillbits, realistically, this could be well beyond that * number if we want to do things like speed up the return of results. */ public static final String MAX_WIDTH_GLOBAL_KEY = "planner.width.max_per_query"; public static final OptionValidator MAX_WIDTH_GLOBAL = new PositiveLongValidator(MAX_WIDTH_GLOBAL_KEY, Integer.MAX_VALUE); /** * Factor by which a node with endpoint affinity will be favored while creating assignments */ public static final String AFFINITY_FACTOR_KEY = "planner.affinity_factor"; public static final OptionValidator AFFINITY_FACTOR = new DoubleValidator(AFFINITY_FACTOR_KEY); public static final String EARLY_LIMIT0_OPT_KEY = "planner.enable_limit0_optimization"; public static final BooleanValidator EARLY_LIMIT0_OPT = new BooleanValidator(EARLY_LIMIT0_OPT_KEY); public static final String ENABLE_MEMORY_ESTIMATION_KEY = "planner.memory.enable_memory_estimation"; public static final OptionValidator ENABLE_MEMORY_ESTIMATION = new BooleanValidator(ENABLE_MEMORY_ESTIMATION_KEY); /** * Maximum query memory per node (in MB). Re-plan with cheaper operators if * memory estimation exceeds this limit. * <p/> * DEFAULT: 2048 MB */ public static final String MAX_QUERY_MEMORY_PER_NODE_KEY = "planner.memory.max_query_memory_per_node"; public static final LongValidator MAX_QUERY_MEMORY_PER_NODE = new RangeLongValidator(MAX_QUERY_MEMORY_PER_NODE_KEY, 1024 * 1024, Long.MAX_VALUE); /** * Alternative way to compute per-query-per-node memory as a percent * of the total available system memory. * <p> * Suggestion for computation. * <ul> * <li>Assume an allowance for non-managed operators. Default assumption: * 50%</li> * <li>Assume a desired number of concurrent queries.
Default assumption: * 10.</li> * <li>The value of this parameter is<br> * (1 - non-managed allowance) / concurrency</li> * </ul> * Doing the math produces the default 5% number. The actual number * given is no less than the <tt>max_query_memory_per_node</tt> * amount. * <p> * This number is used only when throttling is disabled. Setting the * number to 0 effectively disables this technique as it will always * produce values lower than <tt>max_query_memory_per_node</tt>. * <p> * DEFAULT: 5% */ public static String PERCENT_MEMORY_PER_QUERY_KEY = "planner.memory.percent_per_query"; public static DoubleValidator PERCENT_MEMORY_PER_QUERY = new RangeDoubleValidator( PERCENT_MEMORY_PER_QUERY_KEY, 0, 1.0); /** * Minimum memory allocated to each buffered operator instance. * <p/> * DEFAULT: 40 MB */ public static final String MIN_MEMORY_PER_BUFFERED_OP_KEY = "planner.memory.min_memory_per_buffered_op"; public static final LongValidator MIN_MEMORY_PER_BUFFERED_OP = new RangeLongValidator(MIN_MEMORY_PER_BUFFERED_OP_KEY, 1024 * 1024, Long.MAX_VALUE); /** * Extra query memory per node for non-blocking operators. * NOTE: This option is currently used only for memory estimation. * <p/> * DEFAULT: 64 MB * MAXIMUM: 2048 MB */ public static final String NON_BLOCKING_OPERATORS_MEMORY_KEY = "planner.memory.non_blocking_operators_memory"; public static final OptionValidator NON_BLOCKING_OPERATORS_MEMORY = new PowerOfTwoLongValidator( NON_BLOCKING_OPERATORS_MEMORY_KEY, 1 << 11); public static final String HASH_JOIN_TABLE_FACTOR_KEY = "planner.memory.hash_join_table_factor"; public static final OptionValidator HASH_JOIN_TABLE_FACTOR = new DoubleValidator(HASH_JOIN_TABLE_FACTOR_KEY); public static final String HASH_AGG_TABLE_FACTOR_KEY = "planner.memory.hash_agg_table_factor"; public static final OptionValidator HASH_AGG_TABLE_FACTOR = new DoubleValidator(HASH_AGG_TABLE_FACTOR_KEY); public static final String AVERAGE_FIELD_WIDTH_KEY = "planner.memory.average_field_width"; public static final OptionValidator AVERAGE_FIELD_WIDTH = new PositiveLongValidator(AVERAGE_FIELD_WIDTH_KEY, Long.MAX_VALUE); // Mux Exchange options. public static final String ORDERED_MUX_EXCHANGE = "planner.enable_ordered_mux_exchange"; // Resource management boot-time options. public static final String MAX_MEMORY_PER_NODE = "drill.exec.rm.memory_per_node"; public static final String MAX_CPUS_PER_NODE = "drill.exec.rm.cpus_per_node"; // Resource management system run-time options. // Enables queues. When running embedded, enables an in-process queue. When // running distributed, enables the Zookeeper-based distributed queue. public static final BooleanValidator ENABLE_QUEUE = new BooleanValidator("exec.queue.enable"); public static final LongValidator LARGE_QUEUE_SIZE = new PositiveLongValidator("exec.queue.large", 10_000); public static final LongValidator SMALL_QUEUE_SIZE = new PositiveLongValidator("exec.queue.small", 100_000); public static final LongValidator QUEUE_THRESHOLD_SIZE = new PositiveLongValidator("exec.queue.threshold", Long.MAX_VALUE); public static final LongValidator QUEUE_TIMEOUT = new PositiveLongValidator("exec.queue.timeout_millis", Long.MAX_VALUE); // Ratio of memory for small queries vs. large queries. // Each small query gets 1 unit, each large query gets QUEUE_MEMORY_RATIO units. // A lower limit of 1 enforces the intuition that a large query should never get // *less* memory than a small one. 
public static final DoubleValidator QUEUE_MEMORY_RATIO = new RangeDoubleValidator("exec.queue.memory_ratio", 1.0, 1000); public static final DoubleValidator QUEUE_MEMORY_RESERVE = new RangeDoubleValidator("exec.queue.memory_reserve_ratio", 0, 1.0); public static final String ENABLE_VERBOSE_ERRORS_KEY = "exec.errors.verbose"; public static final OptionValidator ENABLE_VERBOSE_ERRORS = new BooleanValidator(ENABLE_VERBOSE_ERRORS_KEY); public static final String ENABLE_NEW_TEXT_READER_KEY = "exec.storage.enable_new_text_reader"; public static final OptionValidator ENABLE_NEW_TEXT_READER = new BooleanValidator(ENABLE_NEW_TEXT_READER_KEY); public static final String BOOTSTRAP_STORAGE_PLUGINS_FILE = "bootstrap-storage-plugins.json"; public static final String DRILL_SYS_FILE_SUFFIX = ".sys.drill"; public static final String ENABLE_WINDOW_FUNCTIONS = "window.enable"; public static final OptionValidator ENABLE_WINDOW_FUNCTIONS_VALIDATOR = new BooleanValidator(ENABLE_WINDOW_FUNCTIONS); public static final String DRILLBIT_CONTROL_INJECTIONS = "drill.exec.testing.controls"; public static final OptionValidator DRILLBIT_CONTROLS_VALIDATOR = new ExecutionControls.ControlsOptionValidator(DRILLBIT_CONTROL_INJECTIONS, 1); public static final String NEW_VIEW_DEFAULT_PERMS_KEY = "new_view_default_permissions"; public static final OptionValidator NEW_VIEW_DEFAULT_PERMS_VALIDATOR = new StringValidator(NEW_VIEW_DEFAULT_PERMS_KEY); public static final String CTAS_PARTITIONING_HASH_DISTRIBUTE = "store.partition.hash_distribute"; public static final BooleanValidator CTAS_PARTITIONING_HASH_DISTRIBUTE_VALIDATOR = new BooleanValidator(CTAS_PARTITIONING_HASH_DISTRIBUTE); public static final String ENABLE_BULK_LOAD_TABLE_LIST_KEY = "exec.enable_bulk_load_table_list"; public static final BooleanValidator ENABLE_BULK_LOAD_TABLE_LIST = new BooleanValidator(ENABLE_BULK_LOAD_TABLE_LIST_KEY); /** * When getting Hive Table information with exec.enable_bulk_load_table_list set to true, * use the exec.bulk_load_table_list.bulk_size to determine how many tables to fetch from HiveMetaStore * at a time. (The number of tables can get to be quite large.) */ public static final String BULK_LOAD_TABLE_LIST_BULK_SIZE_KEY = "exec.bulk_load_table_list.bulk_size"; public static final PositiveLongValidator BULK_LOAD_TABLE_LIST_BULK_SIZE = new PositiveLongValidator(BULK_LOAD_TABLE_LIST_BULK_SIZE_KEY, Integer.MAX_VALUE); /** * Option whose value is a comma separated list of admin usernames. Admin users are users who have special privileges * such as changing system options. */ public static final String ADMIN_USERS_KEY = "security.admin.users"; public static final AdminUsersValidator ADMIN_USERS_VALIDATOR = new AdminUsersValidator(ADMIN_USERS_KEY); /** * Option whose value is a comma separated list of admin usergroups. */ public static final String ADMIN_USER_GROUPS_KEY = "security.admin.user_groups"; public static final AdminUserGroupsValidator ADMIN_USER_GROUPS_VALIDATOR = new AdminUserGroupsValidator(ADMIN_USER_GROUPS_KEY); /** * Option whose value is a string representing list of inbound impersonation policies. * * Impersonation policy format: * [ * { * proxy_principals : { users : [“...”], groups : [“...”] }, * target_principals : { users : [“...”], groups : [“...”] } * }, * ... 
* ] */ public static final String IMPERSONATION_POLICIES_KEY = "exec.impersonation.inbound_policies"; public static final StringValidator IMPERSONATION_POLICY_VALIDATOR = new InboundImpersonationManager.InboundImpersonationPolicyValidator(IMPERSONATION_POLICIES_KEY); /** * Web settings */ public static final String WEB_LOGS_MAX_LINES = "web.logs.max_lines"; public static final OptionValidator WEB_LOGS_MAX_LINES_VALIDATOR = new PositiveLongValidator(WEB_LOGS_MAX_LINES, Integer.MAX_VALUE); public static final String CODE_GEN_EXP_IN_METHOD_SIZE = "exec.java.compiler.exp_in_method_size"; public static final LongValidator CODE_GEN_EXP_IN_METHOD_SIZE_VALIDATOR = new LongValidator(CODE_GEN_EXP_IN_METHOD_SIZE); /** * Timeout for a create prepare statement request. If the request exceeds this timeout, then the request is timed out. * Default value is 10 minutes. */ public static final String CREATE_PREPARE_STATEMENT_TIMEOUT_MILLIS = "prepare.statement.create_timeout_ms"; public static final OptionValidator CREATE_PREPARE_STATEMENT_TIMEOUT_MILLIS_VALIDATOR = new PositiveLongValidator(CREATE_PREPARE_STATEMENT_TIMEOUT_MILLIS, Integer.MAX_VALUE); public static final String DYNAMIC_UDF_SUPPORT_ENABLED = "exec.udf.enable_dynamic_support"; public static final BooleanValidator DYNAMIC_UDF_SUPPORT_ENABLED_VALIDATOR = new BooleanValidator(DYNAMIC_UDF_SUPPORT_ENABLED); /** * Option to save query profiles. If false, no query profile will be saved * for any query. */ public static final String ENABLE_QUERY_PROFILE_OPTION = "exec.query_profile.save"; public static final BooleanValidator ENABLE_QUERY_PROFILE_VALIDATOR = new BooleanValidator(ENABLE_QUERY_PROFILE_OPTION); /** * Profiles are normally written after the last client message to reduce latency. * When running tests, however, we want the profile written <i>before</i> the * return so that the client can immediately read the profile for test * verification. */ public static final String QUERY_PROFILE_DEBUG_OPTION = "exec.query_profile.debug_mode"; public static final BooleanValidator QUERY_PROFILE_DEBUG_VALIDATOR = new BooleanValidator(QUERY_PROFILE_DEBUG_OPTION); public static final String USE_DYNAMIC_UDFS_KEY = "exec.udf.use_dynamic"; public static final BooleanValidator USE_DYNAMIC_UDFS = new BooleanValidator(USE_DYNAMIC_UDFS_KEY); public static final String QUERY_TRANSIENT_STATE_UPDATE_KEY = "exec.query.progress.update"; public static final BooleanValidator QUERY_TRANSIENT_STATE_UPDATE = new BooleanValidator(QUERY_TRANSIENT_STATE_UPDATE_KEY); public static final String PERSISTENT_TABLE_UMASK = "exec.persistent_table.umask"; public static final StringValidator PERSISTENT_TABLE_UMASK_VALIDATOR = new StringValidator(PERSISTENT_TABLE_UMASK); /** * Enables batch iterator (operator) validation. Validation is normally enabled * only when assertions are enabled. This option enables iterator validation even * if assertions are not enabled. That is, it allows iterator validation even on * a "production" Drill instance. */ public static final String ENABLE_ITERATOR_VALIDATION_OPTION = "debug.validate_iterators"; public static final BooleanValidator ENABLE_ITERATOR_VALIDATOR = new BooleanValidator(ENABLE_ITERATOR_VALIDATION_OPTION); /** * Boot-time config option to enable validation. Primarily used for tests. * If true, overrides the above. (That is, validation is done if assertions are on, * if the above session option is set to true, or if this config option is set to true.)
*/ public static final String ENABLE_ITERATOR_VALIDATION = "drill.exec.debug.validate_iterators"; /** * When iterator validation is enabled, additionally validates the vectors in * each batch passed to each iterator. */ public static final String ENABLE_VECTOR_VALIDATION_OPTION = "debug.validate_vectors"; public static final BooleanValidator ENABLE_VECTOR_VALIDATOR = new BooleanValidator(ENABLE_VECTOR_VALIDATION_OPTION); /** * Boot-time config option to enable vector validation. Primarily used for * tests. Add the following to the command line to enable:<br> * <tt>-ea -Ddrill.exec.debug.validate_vectors=true</tt> */ public static final String ENABLE_VECTOR_VALIDATION = "drill.exec.debug.validate_vectors"; public static final String OPTION_DEFAULTS_ROOT = "drill.exec.options."; public static String bootDefaultFor(String name) { return OPTION_DEFAULTS_ROOT + name; } /** * Boot-time config option provided to modify the duration of the grace period. * The grace period is the amount of time during which the drillbit accepts work after * the shutdown request is triggered. The primary use of the grace period is to * avoid the race conditions caused by ZooKeeper delay in updating the state * information of the drillbit that is shutting down. So, it is advisable * to have a grace period that is at least twice the ZooKeeper * refresh time. */ public static final String GRACE_PERIOD = "drill.exec.grace_period_ms"; public static final String DRILL_PORT_HUNT = "drill.exec.port_hunt"; }
KulykRoman/drill
exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java
Java
apache-2.0
42,918
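The keys in ExecConstants are ordinary dotted paths into Drill's Typesafe-Config-backed boot configuration. A minimal sketch of reading two of the boot-time keys above with the plain Typesafe Config API; the standalone demo class and the direct use of com.typesafe.config are assumptions (Drill itself wraps this in DrillConfig, which is not shown here):

import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;

public class BootOptionDemo {
    public static void main(String[] args) {
        // Loads reference.conf/application.conf from the classpath; Drill
        // layers drill-override.conf on top of drill-default.conf instead.
        Config config = ConfigFactory.load();

        // Boot-time keys are plain dotted paths, e.g. ExecConstants.HTTP_PORT.
        int httpPort = config.getInt("drill.exec.http.port");
        String zkConnect = config.getString("drill.exec.zk.connect");

        System.out.println("web UI port: " + httpPort + ", zk: " + zkConnect);
    }
}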
from flask import Flask

app = Flask(__name__)


@app.get("/")
def index():
    return "hello, world"


if __name__ == "__main__":
    # Dev only: run "python main.py" and open http://localhost:8080
    app.run(host="localhost", port=8080, debug=True)
GoogleCloudPlatform/buildpack-samples
sample-python/main.py
Python
apache-2.0
252
/*
 * Copyright 2012 Mike Adamson
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.assemblade.opendj.acis;

import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class AciFactory implements AciPatterns {
    private static Pattern headerPattern = Pattern.compile(header);
    private static Pattern targetPattern = Pattern.compile(target);
    private static Pattern bodyPattern = Pattern.compile(body);

    public static AccessControlItem parse(String aci) {
        String name = null;
        String targets = null;
        String rules = null;
        Matcher headerMatcher = headerPattern.matcher(aci);
        if (headerMatcher.find()) {
            targets = aci.substring(0, headerMatcher.start());
            name = headerMatcher.group(1);
            rules = aci.substring(headerMatcher.end());
        }
        List<Target> targetList = new ArrayList<Target>();
        Matcher targetMatcher = targetPattern.matcher(targets);
        while (targetMatcher.find()) {
            String keyword = targetMatcher.group(1);
            String operator = targetMatcher.group(2);
            String expression = targetMatcher.group(3);
            targetList.add(new Target(keyword, operator, expression));
        }
        List<Permission> ruleList = new ArrayList<Permission>();
        Matcher bodyMatcher = bodyPattern.matcher(rules);
        while (bodyMatcher.find()) {
            String permission = bodyMatcher.group(1);
            String rights = bodyMatcher.group(2);
            String rule = bodyMatcher.group(3);
            ruleList.add(new Permission(permission, rights, Subject.parse(rule)));
        }
        return new AccessControlItem(name, targetList, ruleList);
    }
}
assemblade/CAT
cat-directory/src/main/java/com/assemblade/opendj/acis/AciFactory.java
Java
apache-2.0
2,298
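A hypothetical call site for the parser above. The ACI literal is an illustrative OpenDJ-style string invented for this demo, and whether it actually matches depends on the regexes defined in AciPatterns, which this record does not include:

import com.assemblade.opendj.acis.AccessControlItem;
import com.assemblade.opendj.acis.AciFactory;

public class AciFactoryDemo {
    public static void main(String[] args) {
        // Made-up OpenDJ-style ACI for illustration; not taken from the repo.
        String aci = "(targetattr=\"*\")(version 3.0; acl \"read-all\";"
                + " allow (read,search) userdn=\"ldap:///anyone\";)";
        AccessControlItem item = AciFactory.parse(aci);
        System.out.println(item);
    }
}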
//*******************************************************************************************//
//                                                                                           //
// Download Free Evaluation Version From: https://bytescout.com/download/web-installer      //
//                                                                                           //
// Also available as Web API! Get Your Free API Key: https://app.pdf.co/signup              //
//                                                                                           //
// Copyright © 2017-2020 ByteScout, Inc. All rights reserved.                                //
// https://www.bytescout.com                                                                 //
// https://pdf.co                                                                            //
//                                                                                           //
//*******************************************************************************************//

var myHeaders = new Headers();
myHeaders.append("Content-Type", "application/json");
myHeaders.append("x-api-key", "");

// You can also upload your own file into PDF.co and use it as url.
// Check "Upload File" samples for code snippets:
// https://github.com/bytescout/pdf-co-api-samples/tree/master/File%20Upload/
var raw = JSON.stringify({
  "url": "https://bytescout-com.s3-us-west-2.amazonaws.com/files/demo-files/cloud-api/document-parser/sample-invoice.pdf",
  "rulescsv": "Amazon,Amazon Web Services Invoice|Amazon CloudFront\nDigital Ocean,DigitalOcean|DOInvoice\nAcme,ACME Inc.|1540 Long Street, Jacksonville, 32099",
  "caseSensitive": "true",
  "async": false,
  "encrypt": "false",
  "inline": "true",
  "password": "",
  "profiles": ""
});

var requestOptions = {
  method: 'POST',
  headers: myHeaders,
  body: raw,
  redirect: 'follow'
};

fetch("https://api.pdf.co/v1/pdf/classifier", requestOptions)
  .then(response => response.text())
  .then(result => console.log(result))
  .catch(error => console.log('error', error));
bytescout/ByteScout-SDK-SourceCode
PDF.co Web API/PDF Classifier/JavaScript/Classify PDF From URL (jQuery)/program.js
JavaScript
apache-2.0
2,113
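The same request can be issued from Java 11+ with java.net.http.HttpClient. This sketch assumes the endpoint and JSON payload shown in the JavaScript sample above, with a placeholder API key and a trimmed rulescsv:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class PdfClassifierDemo {
    public static void main(String[] args) throws Exception {
        // Same endpoint and (abbreviated) payload as the JavaScript sample;
        // "YOUR-API-KEY" is a placeholder you must replace.
        String body = "{\"url\": \"https://bytescout-com.s3-us-west-2.amazonaws.com/files/demo-files/cloud-api/document-parser/sample-invoice.pdf\","
                + " \"rulescsv\": \"Amazon,Amazon Web Services Invoice|Amazon CloudFront\","
                + " \"async\": false, \"inline\": \"true\"}";

        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create("https://api.pdf.co/v1/pdf/classifier"))
                .header("Content-Type", "application/json")
                .header("x-api-key", "YOUR-API-KEY")
                .POST(HttpRequest.BodyPublishers.ofString(body))
                .build();

        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.body());
    }
}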
/*
Copyright 2015 Verloka Vadim, http://ogy.pp.ua

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

using System;
using System.Collections.Generic;

namespace JesusPassword.assets.core
{
    [Serializable]
    public struct Site
    {
        public string Name { get; set; }
        public string Address { get; set; }
        public string Login { get; set; }
        public string Password { get; set; }
        public string Mail { get; set; }
        public Dictionary<string, string> CustomFields { get; set; }
        public DateTime DateAdd { get; set; }
    }
}
ogycode/JesusPassword
src/Windows/Unsupported projects/Jesus password/assets/core/Site.cs
C#
apache-2.0
1,134
/*
 * Copyright 2017 Benedikt Ritter
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.github.britter.bootifytestpyramid.domain;

import org.junit.jupiter.api.Nested;
import org.junit.jupiter.api.Test;

import java.math.BigDecimal;

import static com.github.britter.bootifytestpyramid.domain.WeightTemplates.ONE;
import static com.github.britter.bootifytestpyramid.domain.WeightTemplates.TWO;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertAll;
import static org.junit.jupiter.api.Assertions.assertThrows;

class WeightTest {

    @Nested
    class Invariants {

        @Test
        void should_throw_exception_when_passing_null_value() {
            assertThrows(NullPointerException.class, () -> new Weight(null));
        }

        @Test
        void should_throw_exception_when_passing_negative_value() {
            assertAll(
                    () -> assertThrows(IllegalArgumentException.class, () -> new Weight(BigDecimal.valueOf(-1))),
                    () -> assertThrows(IllegalArgumentException.class, () -> new Weight(-1))
            );
        }
    }

    @Nested
    class Calculations {

        @Nested
        class Add {

            @Test
            void should_add_weights() {
                assertThat(ONE.add(ONE)).isEqualTo(TWO);
            }
        }

        @Nested
        class Multiply {

            @Test
            void should_multiply_weights() {
                assertThat(ONE.multiply(2)).isEqualTo(TWO);
            }

            @Test
            void should_throw_exception_when_multiply_with_negative_factor() {
                assertThrows(IllegalArgumentException.class, () -> ONE.multiply(-2));
            }
        }
    }

    @Nested
    class Comparing {

        @Test
        void should_compare_to_other_weights() {
            assertAll(
                    () -> assertThat(ONE.compareTo(ONE)).isEqualTo(0),
                    () -> assertThat(ONE.compareTo(TWO)).isLessThan(0),
                    () -> assertThat(TWO.compareTo(ONE)).isGreaterThan(0)
            );
        }
    }
}
britter/bootify-testpyramid
src/test/java/com/github/britter/bootifytestpyramid/domain/WeightTest.java
Java
apache-2.0
2,642
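The Weight class under test is not part of this record. What follows is a minimal sketch of an implementation consistent with the assertions above, a reconstruction rather than the repository's actual code:

import java.math.BigDecimal;
import java.util.Objects;

// Hypothetical reconstruction of the Weight class exercised by WeightTest;
// the real implementation lives elsewhere in the bootify-testpyramid repo.
public final class Weight implements Comparable<Weight> {
    private final BigDecimal value;

    public Weight(BigDecimal value) {
        Objects.requireNonNull(value, "value");  // null -> NullPointerException
        if (value.signum() < 0) {
            throw new IllegalArgumentException("negative weight: " + value);
        }
        this.value = value;
    }

    public Weight(long value) {
        this(BigDecimal.valueOf(value));  // negative longs fail in the BigDecimal ctor
    }

    public Weight add(Weight other) {
        return new Weight(value.add(other.value));
    }

    public Weight multiply(long factor) {
        if (factor < 0) {
            throw new IllegalArgumentException("negative factor: " + factor);
        }
        return new Weight(value.multiply(BigDecimal.valueOf(factor)));
    }

    @Override
    public int compareTo(Weight other) {
        return value.compareTo(other.value);
    }

    @Override
    public boolean equals(Object o) {
        // compareTo-based equality so 2 and 2.0 compare equal, as the add test expects
        return o instanceof Weight && value.compareTo(((Weight) o).value) == 0;
    }

    @Override
    public int hashCode() {
        return value.stripTrailingZeros().hashCode();
    }
}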
package com.occar.test.rest;

import javax.ws.rs.FormParam;
import javax.ws.rs.POST;
import javax.ws.rs.Path;
import javax.ws.rs.Produces;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;

@Path("/db")
public interface DBRestClient {

    @POST
    @Produces(MediaType.APPLICATION_JSON)
    public Response query(@FormParam("q") String query, @FormParam("uid") String uid);
}
richygreat/service
src/test/java/com/occar/test/rest/DBRestClient.java
Java
apache-2.0
385
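With a JAX-RS implementation such as RESTEasy on the classpath, an interface like this can be turned into a typed client proxy. The base URL and the choice of RESTEasy are assumptions, since the record does not show the client setup:

import javax.ws.rs.client.ClientBuilder;
import javax.ws.rs.core.Response;
import org.jboss.resteasy.client.jaxrs.ResteasyClient;
import org.jboss.resteasy.client.jaxrs.ResteasyWebTarget;

public class DBRestClientDemo {
    public static void main(String[] args) {
        // RESTEasy backs ClientBuilder when it is the JAX-RS provider;
        // the base URL below is a placeholder.
        ResteasyClient client = (ResteasyClient) ClientBuilder.newClient();
        ResteasyWebTarget target = client.target("http://localhost:8080/api");
        DBRestClient db = target.proxy(DBRestClient.class);

        Response response = db.query("select 1", "user-42");
        System.out.println(response.getStatus());
        response.close();
        client.close();
    }
}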
/******************************************************************************* * * This file is part of iBioSim. Please visit <http://www.async.ece.utah.edu/ibiosim> * for the latest version of iBioSim. * * Copyright (C) 2017 University of Utah * * This library is free software; you can redistribute it and/or modify it * under the terms of the Apache License. A copy of the license agreement is provided * in the file named "LICENSE.txt" included with this software distribution * and also available online at <http://www.async.ece.utah.edu/ibiosim/License>. * *******************************************************************************/ // $ANTLR 3.4 /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g 2013-06-26 17:00:36 package edu.utah.ece.async.lema.verification.platu.platuLpn.io; import org.antlr.runtime.*; import java.util.Stack; import java.util.List; import java.util.ArrayList; @SuppressWarnings({"all", "warnings", "unchecked"}) public class PlatuGrammarLexer extends Lexer { public static final int EOF=-1; public static final int T__57=57; public static final int T__58=58; public static final int T__59=59; public static final int T__60=60; public static final int T__61=61; public static final int T__62=62; public static final int T__63=63; public static final int AND=4; public static final int BITWISE_AND=5; public static final int BITWISE_LSHIFT=6; public static final int BITWISE_NEGATION=7; public static final int BITWISE_OR=8; public static final int BITWISE_RSHIFT=9; public static final int BITWISE_XOR=10; public static final int COLON=11; public static final int COMMA=12; public static final int COMMENT=13; public static final int DIGIT=14; public static final int DIV=15; public static final int EQUALS=16; public static final int EQUIV=17; public static final int FALSE=18; public static final int GREATER=19; public static final int GREATER_EQUAL=20; public static final int ID=21; public static final int IGNORE=22; public static final int IMPLICATION=23; public static final int INPUT=24; public static final int INT=25; public static final int INTERNAL=26; public static final int LABEL=27; public static final int LESS=28; public static final int LESS_EQUAL=29; public static final int LETTER=30; public static final int LPAREN=31; public static final int MARKING=32; public static final int MINUS=33; public static final int MOD=34; public static final int MODULE=35; public static final int MULTILINECOMMENT=36; public static final int NAME=37; public static final int NEGATION=38; public static final int NOT_EQUIV=39; public static final int OR=40; public static final int OUTPUT=41; public static final int PERIOD=42; public static final int PLUS=43; public static final int POSTSET=44; public static final int PRESET=45; public static final int QMARK=46; public static final int QUOTE=47; public static final int RPAREN=48; public static final int SEMICOLON=49; public static final int STATE_VECTOR=50; public static final int TIMES=51; public static final int TRANSITION=52; public static final int TRUE=53; public static final int UNDERSCORE=54; public static final int WS=55; public static final int XMLCOMMENT=56; // delegates // delegators public Lexer[] getDelegates() { return new Lexer[] {}; } public PlatuGrammarLexer() {} public PlatuGrammarLexer(CharStream input) { this(input, new RecognizerSharedState()); } public PlatuGrammarLexer(CharStream input, RecognizerSharedState state) { super(input,state); } public String getGrammarFileName() { return 
"/Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g"; } // $ANTLR start "T__57" public final void mT__57() throws RecognitionException { try { int _type = T__57; int _channel = DEFAULT_TOKEN_CHANNEL; // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:11:7: ( '[' ) // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:11:9: '[' { match('['); } state.type = _type; state.channel = _channel; } finally { // do for sure before leaving } } // $ANTLR end "T__57" // $ANTLR start "T__58" public final void mT__58() throws RecognitionException { try { int _type = T__58; int _channel = DEFAULT_TOKEN_CHANNEL; // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:12:7: ( ']' ) // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:12:9: ']' { match(']'); } state.type = _type; state.channel = _channel; } finally { // do for sure before leaving } } // $ANTLR end "T__58" // $ANTLR start "T__59" public final void mT__59() throws RecognitionException { try { int _type = T__59; int _channel = DEFAULT_TOKEN_CHANNEL; // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:13:7: ( 'assert' ) // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:13:9: 'assert' { match("assert"); } state.type = _type; state.channel = _channel; } finally { // do for sure before leaving } } // $ANTLR end "T__59" // $ANTLR start "T__60" public final void mT__60() throws RecognitionException { try { int _type = T__60; int _channel = DEFAULT_TOKEN_CHANNEL; // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:14:7: ( 'const' ) // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:14:9: 'const' { match("const"); } state.type = _type; state.channel = _channel; } finally { // do for sure before leaving } } // $ANTLR end "T__60" // $ANTLR start "T__61" public final void mT__61() throws RecognitionException { try { int _type = T__61; int _channel = DEFAULT_TOKEN_CHANNEL; // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:15:7: ( 'inf' ) // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:15:9: 'inf' { match("inf"); } state.type = _type; state.channel = _channel; } finally { // do for sure before leaving } } // $ANTLR end "T__61" // $ANTLR start "T__62" public final void mT__62() throws RecognitionException { try { int _type = T__62; int _channel = DEFAULT_TOKEN_CHANNEL; // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:16:7: ( 'inst' ) // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:16:9: 'inst' { match("inst"); } state.type = _type; state.channel = _channel; } finally { // do for sure before leaving } } // $ANTLR end "T__62" // $ANTLR start "T__63" public final void mT__63() throws RecognitionException { try { int _type = T__63; int _channel = DEFAULT_TOKEN_CHANNEL; // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:17:7: ( 'main' ) // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:17:9: 'main' { match("main"); } state.type = _type; state.channel = _channel; } finally { // do for sure before leaving } } // $ANTLR end "T__63" // $ANTLR start "LPAREN" public final void mLPAREN() throws RecognitionException { try { int _type = LPAREN; int _channel = DEFAULT_TOKEN_CHANNEL; // 
/Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1343:7: ( '(' ) // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1343:9: '(' { match('('); } state.type = _type; state.channel = _channel; } finally { // do for sure before leaving } } // $ANTLR end "LPAREN" // $ANTLR start "RPAREN" public final void mRPAREN() throws RecognitionException { try { int _type = RPAREN; int _channel = DEFAULT_TOKEN_CHANNEL; // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1344:7: ( ')' ) // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1344:9: ')' { match(')'); } state.type = _type; state.channel = _channel; } finally { // do for sure before leaving } } // $ANTLR end "RPAREN" // $ANTLR start "QMARK" public final void mQMARK() throws RecognitionException { try { int _type = QMARK; int _channel = DEFAULT_TOKEN_CHANNEL; // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1345:6: ( '?' ) // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1345:8: '?' { match('?'); } state.type = _type; state.channel = _channel; } finally { // do for sure before leaving } } // $ANTLR end "QMARK" // $ANTLR start "COLON" public final void mCOLON() throws RecognitionException { try { int _type = COLON; int _channel = DEFAULT_TOKEN_CHANNEL; // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1346:6: ( ':' ) // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1346:8: ':' { match(':'); } state.type = _type; state.channel = _channel; } finally { // do for sure before leaving } } // $ANTLR end "COLON" // $ANTLR start "SEMICOLON" public final void mSEMICOLON() throws RecognitionException { try { int _type = SEMICOLON; int _channel = DEFAULT_TOKEN_CHANNEL; // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1347:10: ( ';' ) // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1347:12: ';' { match(';'); } state.type = _type; state.channel = _channel; } finally { // do for sure before leaving } } // $ANTLR end "SEMICOLON" // $ANTLR start "PERIOD" public final void mPERIOD() throws RecognitionException { try { int _type = PERIOD; int _channel = DEFAULT_TOKEN_CHANNEL; // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1348:7: ( '.' ) // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1348:9: '.' 
{ match('.'); } state.type = _type; state.channel = _channel; } finally { // do for sure before leaving } } // $ANTLR end "PERIOD" // $ANTLR start "UNDERSCORE" public final void mUNDERSCORE() throws RecognitionException { try { int _type = UNDERSCORE; int _channel = DEFAULT_TOKEN_CHANNEL; // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1349:11: ( '_' ) // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1349:13: '_' { match('_'); } state.type = _type; state.channel = _channel; } finally { // do for sure before leaving } } // $ANTLR end "UNDERSCORE" // $ANTLR start "COMMA" public final void mCOMMA() throws RecognitionException { try { int _type = COMMA; int _channel = DEFAULT_TOKEN_CHANNEL; // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1350:6: ( ',' ) // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1350:8: ',' { match(','); } state.type = _type; state.channel = _channel; } finally { // do for sure before leaving } } // $ANTLR end "COMMA" // $ANTLR start "QUOTE" public final void mQUOTE() throws RecognitionException { try { int _type = QUOTE; int _channel = DEFAULT_TOKEN_CHANNEL; // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1351:6: ( '\"' ) // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1351:8: '\"' { match('\"'); } state.type = _type; state.channel = _channel; } finally { // do for sure before leaving } } // $ANTLR end "QUOTE" // $ANTLR start "MODULE" public final void mMODULE() throws RecognitionException { try { int _type = MODULE; int _channel = DEFAULT_TOKEN_CHANNEL; // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1354:7: ( 'mod' ) // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1354:9: 'mod' { match("mod"); } state.type = _type; state.channel = _channel; } finally { // do for sure before leaving } } // $ANTLR end "MODULE" // $ANTLR start "NAME" public final void mNAME() throws RecognitionException { try { int _type = NAME; int _channel = DEFAULT_TOKEN_CHANNEL; // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1355:5: ( 'name' ) // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1355:7: 'name' { match("name"); } state.type = _type; state.channel = _channel; } finally { // do for sure before leaving } } // $ANTLR end "NAME" // $ANTLR start "INPUT" public final void mINPUT() throws RecognitionException { try { int _type = INPUT; int _channel = DEFAULT_TOKEN_CHANNEL; // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1356:6: ( 'input' ) // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1356:8: 'input' { match("input"); } state.type = _type; state.channel = _channel; } finally { // do for sure before leaving } } // $ANTLR end "INPUT" // $ANTLR start "OUTPUT" public final void mOUTPUT() throws RecognitionException { try { int _type = OUTPUT; int _channel = DEFAULT_TOKEN_CHANNEL; // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1357:7: ( 'output' ) // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1357:9: 'output' { match("output"); } state.type = _type; state.channel = _channel; } finally { // do for sure before leaving } } // $ANTLR end "OUTPUT" // $ANTLR start "INTERNAL" public final void mINTERNAL() throws RecognitionException { try 
{ int _type = INTERNAL; int _channel = DEFAULT_TOKEN_CHANNEL; // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1358:9: ( 'var' ) // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1358:11: 'var' { match("var"); } state.type = _type; state.channel = _channel; } finally { // do for sure before leaving } } // $ANTLR end "INTERNAL" // $ANTLR start "MARKING" public final void mMARKING() throws RecognitionException { try { int _type = MARKING; int _channel = DEFAULT_TOKEN_CHANNEL; // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1359:8: ( 'marking' ) // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1359:10: 'marking' { match("marking"); } state.type = _type; state.channel = _channel; } finally { // do for sure before leaving } } // $ANTLR end "MARKING" // $ANTLR start "STATE_VECTOR" public final void mSTATE_VECTOR() throws RecognitionException { try { int _type = STATE_VECTOR; int _channel = DEFAULT_TOKEN_CHANNEL; // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1360:13: ( 'statevector' ) // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1360:15: 'statevector' { match("statevector"); } state.type = _type; state.channel = _channel; } finally { // do for sure before leaving } } // $ANTLR end "STATE_VECTOR" // $ANTLR start "TRANSITION" public final void mTRANSITION() throws RecognitionException { try { int _type = TRANSITION; int _channel = DEFAULT_TOKEN_CHANNEL; // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1361:11: ( 'transition' ) // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1361:13: 'transition' { match("transition"); } state.type = _type; state.channel = _channel; } finally { // do for sure before leaving } } // $ANTLR end "TRANSITION" // $ANTLR start "LABEL" public final void mLABEL() throws RecognitionException { try { int _type = LABEL; int _channel = DEFAULT_TOKEN_CHANNEL; // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1362:6: ( 'label' ) // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1362:8: 'label' { match("label"); } state.type = _type; state.channel = _channel; } finally { // do for sure before leaving } } // $ANTLR end "LABEL" // $ANTLR start "PRESET" public final void mPRESET() throws RecognitionException { try { int _type = PRESET; int _channel = DEFAULT_TOKEN_CHANNEL; // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1363:7: ( 'preset' ) // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1363:9: 'preset' { match("preset"); } state.type = _type; state.channel = _channel; } finally { // do for sure before leaving } } // $ANTLR end "PRESET" // $ANTLR start "POSTSET" public final void mPOSTSET() throws RecognitionException { try { int _type = POSTSET; int _channel = DEFAULT_TOKEN_CHANNEL; // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1364:8: ( 'postset' ) // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1364:10: 'postset' { match("postset"); } state.type = _type; state.channel = _channel; } finally { // do for sure before leaving } } // $ANTLR end "POSTSET" // $ANTLR start "TRUE" public final void mTRUE() throws RecognitionException { try { int _type = TRUE; int _channel = DEFAULT_TOKEN_CHANNEL; // 
/Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1365:5: ( 'true' ) // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1365:7: 'true' { match("true"); } state.type = _type; state.channel = _channel; } finally { // do for sure before leaving } } // $ANTLR end "TRUE" // $ANTLR start "FALSE" public final void mFALSE() throws RecognitionException { try { int _type = FALSE; int _channel = DEFAULT_TOKEN_CHANNEL; // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1366:6: ( 'false' ) // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1366:8: 'false' { match("false"); } state.type = _type; state.channel = _channel; } finally { // do for sure before leaving } } // $ANTLR end "FALSE" // $ANTLR start "PLUS" public final void mPLUS() throws RecognitionException { try { int _type = PLUS; int _channel = DEFAULT_TOKEN_CHANNEL; // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1369:5: ( '+' ) // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1369:7: '+' { match('+'); } state.type = _type; state.channel = _channel; } finally { // do for sure before leaving } } // $ANTLR end "PLUS" // $ANTLR start "MINUS" public final void mMINUS() throws RecognitionException { try { int _type = MINUS; int _channel = DEFAULT_TOKEN_CHANNEL; // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1370:6: ( '-' ) // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1370:8: '-' { match('-'); } state.type = _type; state.channel = _channel; } finally { // do for sure before leaving } } // $ANTLR end "MINUS" // $ANTLR start "TIMES" public final void mTIMES() throws RecognitionException { try { int _type = TIMES; int _channel = DEFAULT_TOKEN_CHANNEL; // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1371:6: ( '*' ) // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1371:8: '*' { match('*'); } state.type = _type; state.channel = _channel; } finally { // do for sure before leaving } } // $ANTLR end "TIMES" // $ANTLR start "DIV" public final void mDIV() throws RecognitionException { try { int _type = DIV; int _channel = DEFAULT_TOKEN_CHANNEL; // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1372:4: ( '/' ) // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1372:6: '/' { match('/'); } state.type = _type; state.channel = _channel; } finally { // do for sure before leaving } } // $ANTLR end "DIV" // $ANTLR start "MOD" public final void mMOD() throws RecognitionException { try { int _type = MOD; int _channel = DEFAULT_TOKEN_CHANNEL; // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1373:4: ( '%' ) // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1373:6: '%' { match('%'); } state.type = _type; state.channel = _channel; } finally { // do for sure before leaving } } // $ANTLR end "MOD" // $ANTLR start "EQUALS" public final void mEQUALS() throws RecognitionException { try { int _type = EQUALS; int _channel = DEFAULT_TOKEN_CHANNEL; // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1374:7: ( '=' ) // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1374:9: '=' { match('='); } state.type = _type; state.channel = _channel; } finally { // do for sure before 
leaving } } // $ANTLR end "EQUALS" // $ANTLR start "GREATER" public final void mGREATER() throws RecognitionException { try { int _type = GREATER; int _channel = DEFAULT_TOKEN_CHANNEL; // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1377:8: ( '>' ) // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1377:10: '>' { match('>'); } state.type = _type; state.channel = _channel; } finally { // do for sure before leaving } } // $ANTLR end "GREATER" // $ANTLR start "LESS" public final void mLESS() throws RecognitionException { try { int _type = LESS; int _channel = DEFAULT_TOKEN_CHANNEL; // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1378:5: ( '<' ) // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1378:7: '<' { match('<'); } state.type = _type; state.channel = _channel; } finally { // do for sure before leaving } } // $ANTLR end "LESS" // $ANTLR start "GREATER_EQUAL" public final void mGREATER_EQUAL() throws RecognitionException { try { int _type = GREATER_EQUAL; int _channel = DEFAULT_TOKEN_CHANNEL; // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1379:14: ( '>=' ) // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1379:16: '>=' { match(">="); } state.type = _type; state.channel = _channel; } finally { // do for sure before leaving } } // $ANTLR end "GREATER_EQUAL" // $ANTLR start "LESS_EQUAL" public final void mLESS_EQUAL() throws RecognitionException { try { int _type = LESS_EQUAL; int _channel = DEFAULT_TOKEN_CHANNEL; // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1380:11: ( '<=' ) // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1380:13: '<=' { match("<="); } state.type = _type; state.channel = _channel; } finally { // do for sure before leaving } } // $ANTLR end "LESS_EQUAL" // $ANTLR start "EQUIV" public final void mEQUIV() throws RecognitionException { try { int _type = EQUIV; int _channel = DEFAULT_TOKEN_CHANNEL; // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1381:6: ( '==' ) // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1381:8: '==' { match("=="); } state.type = _type; state.channel = _channel; } finally { // do for sure before leaving } } // $ANTLR end "EQUIV" // $ANTLR start "NOT_EQUIV" public final void mNOT_EQUIV() throws RecognitionException { try { int _type = NOT_EQUIV; int _channel = DEFAULT_TOKEN_CHANNEL; // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1382:10: ( '!=' ) // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1382:12: '!=' { match("!="); } state.type = _type; state.channel = _channel; } finally { // do for sure before leaving } } // $ANTLR end "NOT_EQUIV" // $ANTLR start "NEGATION" public final void mNEGATION() throws RecognitionException { try { int _type = NEGATION; int _channel = DEFAULT_TOKEN_CHANNEL; // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1385:9: ( '!' ) // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1385:11: '!' 
{ match('!'); } state.type = _type; state.channel = _channel; } finally { // do for sure before leaving } } // $ANTLR end "NEGATION" // $ANTLR start "AND" public final void mAND() throws RecognitionException { try { int _type = AND; int _channel = DEFAULT_TOKEN_CHANNEL; // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1386:4: ( '&&' ) // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1386:6: '&&' { match("&&"); } state.type = _type; state.channel = _channel; } finally { // do for sure before leaving } } // $ANTLR end "AND" // $ANTLR start "OR" public final void mOR() throws RecognitionException { try { int _type = OR; int _channel = DEFAULT_TOKEN_CHANNEL; // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1387:3: ( '||' ) // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1387:5: '||' { match("||"); } state.type = _type; state.channel = _channel; } finally { // do for sure before leaving } } // $ANTLR end "OR" // $ANTLR start "IMPLICATION" public final void mIMPLICATION() throws RecognitionException { try { int _type = IMPLICATION; int _channel = DEFAULT_TOKEN_CHANNEL; // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1388:12: ( '->' ) // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1388:14: '->' { match("->"); } state.type = _type; state.channel = _channel; } finally { // do for sure before leaving } } // $ANTLR end "IMPLICATION" // $ANTLR start "BITWISE_NEGATION" public final void mBITWISE_NEGATION() throws RecognitionException { try { int _type = BITWISE_NEGATION; int _channel = DEFAULT_TOKEN_CHANNEL; // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1391:17: ( '~' ) // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1391:19: '~' { match('~'); } state.type = _type; state.channel = _channel; } finally { // do for sure before leaving } } // $ANTLR end "BITWISE_NEGATION" // $ANTLR start "BITWISE_AND" public final void mBITWISE_AND() throws RecognitionException { try { int _type = BITWISE_AND; int _channel = DEFAULT_TOKEN_CHANNEL; // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1392:12: ( '&' ) // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1392:14: '&' { match('&'); } state.type = _type; state.channel = _channel; } finally { // do for sure before leaving } } // $ANTLR end "BITWISE_AND" // $ANTLR start "BITWISE_OR" public final void mBITWISE_OR() throws RecognitionException { try { int _type = BITWISE_OR; int _channel = DEFAULT_TOKEN_CHANNEL; // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1393:11: ( '|' ) // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1393:13: '|' { match('|'); } state.type = _type; state.channel = _channel; } finally { // do for sure before leaving } } // $ANTLR end "BITWISE_OR" // $ANTLR start "BITWISE_XOR" public final void mBITWISE_XOR() throws RecognitionException { try { int _type = BITWISE_XOR; int _channel = DEFAULT_TOKEN_CHANNEL; // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1394:12: ( '^' ) // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1394:14: '^' { match('^'); } state.type = _type; state.channel = _channel; } finally { // do for sure before leaving } } // $ANTLR end "BITWISE_XOR" // $ANTLR start 
"BITWISE_LSHIFT" public final void mBITWISE_LSHIFT() throws RecognitionException { try { int _type = BITWISE_LSHIFT; int _channel = DEFAULT_TOKEN_CHANNEL; // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1395:15: ( '<<' ) // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1395:17: '<<' { match("<<"); } state.type = _type; state.channel = _channel; } finally { // do for sure before leaving } } // $ANTLR end "BITWISE_LSHIFT" // $ANTLR start "BITWISE_RSHIFT" public final void mBITWISE_RSHIFT() throws RecognitionException { try { int _type = BITWISE_RSHIFT; int _channel = DEFAULT_TOKEN_CHANNEL; // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1396:15: ( '>>' ) // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1396:17: '>>' { match(">>"); } state.type = _type; state.channel = _channel; } finally { // do for sure before leaving } } // $ANTLR end "BITWISE_RSHIFT" // $ANTLR start "LETTER" public final void mLETTER() throws RecognitionException { try { // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1398:16: ( ( 'a' .. 'z' | 'A' .. 'Z' ) ) // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g: { if ( (input.LA(1) >= 'A' && input.LA(1) <= 'Z')||(input.LA(1) >= 'a' && input.LA(1) <= 'z') ) { input.consume(); } else { MismatchedSetException mse = new MismatchedSetException(null,input); recover(mse); throw mse; } } } finally { // do for sure before leaving } } // $ANTLR end "LETTER" // $ANTLR start "DIGIT" public final void mDIGIT() throws RecognitionException { try { // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1399:15: ( '0' .. '9' ) // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g: { if ( (input.LA(1) >= '0' && input.LA(1) <= '9') ) { input.consume(); } else { MismatchedSetException mse = new MismatchedSetException(null,input); recover(mse); throw mse; } } } finally { // do for sure before leaving } } // $ANTLR end "DIGIT" // $ANTLR start "INT" public final void mINT() throws RecognitionException { try { int _type = INT; int _channel = DEFAULT_TOKEN_CHANNEL; // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1400:4: ( ( '-' )? ( DIGIT )+ ) // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1400:6: ( '-' )? ( DIGIT )+ { // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1400:6: ( '-' )? 
int alt1=2; int LA1_0 = input.LA(1); if ( (LA1_0=='-') ) { alt1=1; } switch (alt1) { case 1 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1400:6: '-' { match('-'); } break; } // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1400:11: ( DIGIT )+ int cnt2=0; loop2: do { int alt2=2; int LA2_0 = input.LA(1); if ( ((LA2_0 >= '0' && LA2_0 <= '9')) ) { alt2=1; } switch (alt2) { case 1 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g: { if ( (input.LA(1) >= '0' && input.LA(1) <= '9') ) { input.consume(); } else { MismatchedSetException mse = new MismatchedSetException(null,input); recover(mse); throw mse; } } break; default : if ( cnt2 >= 1 ) break loop2; EarlyExitException eee = new EarlyExitException(2, input); throw eee; } cnt2++; } while (true); } state.type = _type; state.channel = _channel; } finally { // do for sure before leaving } } // $ANTLR end "INT" // $ANTLR start "ID" public final void mID() throws RecognitionException { try { int _type = ID; int _channel = DEFAULT_TOKEN_CHANNEL; // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1401:3: ( LETTER ( ( UNDERSCORE | PERIOD )? ( LETTER | DIGIT ) )* ) // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1401:5: LETTER ( ( UNDERSCORE | PERIOD )? ( LETTER | DIGIT ) )* { mLETTER(); // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1401:12: ( ( UNDERSCORE | PERIOD )? ( LETTER | DIGIT ) )* loop4: do { int alt4=2; int LA4_0 = input.LA(1); if ( (LA4_0=='.'||(LA4_0 >= '0' && LA4_0 <= '9')||(LA4_0 >= 'A' && LA4_0 <= 'Z')||LA4_0=='_'||(LA4_0 >= 'a' && LA4_0 <= 'z')) ) { alt4=1; } switch (alt4) { case 1 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1401:13: ( UNDERSCORE | PERIOD )? ( LETTER | DIGIT ) { // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1401:13: ( UNDERSCORE | PERIOD )? 
int alt3=2; int LA3_0 = input.LA(1); if ( (LA3_0=='.'||LA3_0=='_') ) { alt3=1; } switch (alt3) { case 1 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g: { if ( input.LA(1)=='.'||input.LA(1)=='_' ) { input.consume(); } else { MismatchedSetException mse = new MismatchedSetException(null,input); recover(mse); throw mse; } } break; } if ( (input.LA(1) >= '0' && input.LA(1) <= '9')||(input.LA(1) >= 'A' && input.LA(1) <= 'Z')||(input.LA(1) >= 'a' && input.LA(1) <= 'z') ) { input.consume(); } else { MismatchedSetException mse = new MismatchedSetException(null,input); recover(mse); throw mse; } } break; default : break loop4; } } while (true); } state.type = _type; state.channel = _channel; } finally { // do for sure before leaving } } // $ANTLR end "ID" // $ANTLR start "WS" public final void mWS() throws RecognitionException { try { int _type = WS; int _channel = DEFAULT_TOKEN_CHANNEL; // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1402:3: ( ( ' ' | '\\t' | '\\n' | '\\r' | '\\f' )+ ) // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1402:5: ( ' ' | '\\t' | '\\n' | '\\r' | '\\f' )+ { // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1402:5: ( ' ' | '\\t' | '\\n' | '\\r' | '\\f' )+ int cnt5=0; loop5: do { int alt5=2; int LA5_0 = input.LA(1); if ( ((LA5_0 >= '\t' && LA5_0 <= '\n')||(LA5_0 >= '\f' && LA5_0 <= '\r')||LA5_0==' ') ) { alt5=1; } switch (alt5) { case 1 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g: { if ( (input.LA(1) >= '\t' && input.LA(1) <= '\n')||(input.LA(1) >= '\f' && input.LA(1) <= '\r')||input.LA(1)==' ' ) { input.consume(); } else { MismatchedSetException mse = new MismatchedSetException(null,input); recover(mse); throw mse; } } break; default : if ( cnt5 >= 1 ) break loop5; EarlyExitException eee = new EarlyExitException(5, input); throw eee; } cnt5++; } while (true); _channel = HIDDEN; } state.type = _type; state.channel = _channel; } finally { // do for sure before leaving } } // $ANTLR end "WS" // $ANTLR start "COMMENT" public final void mCOMMENT() throws RecognitionException { try { int _type = COMMENT; int _channel = DEFAULT_TOKEN_CHANNEL; // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1403:8: ( '//' ( . )* ( '\\n' | '\\r' ) ) // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1403:10: '//' ( . )* ( '\\n' | '\\r' ) { match("//"); // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1403:15: ( . )* loop6: do { int alt6=2; int LA6_0 = input.LA(1); if ( (LA6_0=='\n'||LA6_0=='\r') ) { alt6=2; } else if ( ((LA6_0 >= '\u0000' && LA6_0 <= '\t')||(LA6_0 >= '\u000B' && LA6_0 <= '\f')||(LA6_0 >= '\u000E' && LA6_0 <= '\uFFFF')) ) { alt6=1; } switch (alt6) { case 1 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1403:15: . 
{ matchAny(); } break; default : break loop6; } } while (true); if ( input.LA(1)=='\n'||input.LA(1)=='\r' ) { input.consume(); } else { MismatchedSetException mse = new MismatchedSetException(null,input); recover(mse); throw mse; } _channel = HIDDEN; } state.type = _type; state.channel = _channel; } finally { // do for sure before leaving } } // $ANTLR end "COMMENT" // $ANTLR start "MULTILINECOMMENT" public final void mMULTILINECOMMENT() throws RecognitionException { try { int _type = MULTILINECOMMENT; int _channel = DEFAULT_TOKEN_CHANNEL; // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1404:17: ( '/*' ( . )* '*/' ) // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1404:19: '/*' ( . )* '*/' { match("/*"); // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1404:24: ( . )* loop7: do { int alt7=2; int LA7_0 = input.LA(1); if ( (LA7_0=='*') ) { int LA7_1 = input.LA(2); if ( (LA7_1=='/') ) { alt7=2; } else if ( ((LA7_1 >= '\u0000' && LA7_1 <= '.')||(LA7_1 >= '0' && LA7_1 <= '\uFFFF')) ) { alt7=1; } } else if ( ((LA7_0 >= '\u0000' && LA7_0 <= ')')||(LA7_0 >= '+' && LA7_0 <= '\uFFFF')) ) { alt7=1; } switch (alt7) { case 1 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1404:24: . { matchAny(); } break; default : break loop7; } } while (true); match("*/"); _channel = HIDDEN; } state.type = _type; state.channel = _channel; } finally { // do for sure before leaving } } // $ANTLR end "MULTILINECOMMENT" // $ANTLR start "XMLCOMMENT" public final void mXMLCOMMENT() throws RecognitionException { try { int _type = XMLCOMMENT; int _channel = DEFAULT_TOKEN_CHANNEL; // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1405:11: ( ( '<' '!' '-' '-' ) ( . )* ( '-' '-' '>' ) ) // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1405:13: ( '<' '!' '-' '-' ) ( . )* ( '-' '-' '>' ) { // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1405:13: ( '<' '!' '-' '-' ) // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1405:14: '<' '!' '-' '-' { match('<'); match('!'); match('-'); match('-'); } // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1405:31: ( . )* loop8: do { int alt8=2; int LA8_0 = input.LA(1); if ( (LA8_0=='-') ) { int LA8_1 = input.LA(2); if ( (LA8_1=='-') ) { int LA8_3 = input.LA(3); if ( (LA8_3=='>') ) { alt8=2; } else if ( ((LA8_3 >= '\u0000' && LA8_3 <= '=')||(LA8_3 >= '?' && LA8_3 <= '\uFFFF')) ) { alt8=1; } } else if ( ((LA8_1 >= '\u0000' && LA8_1 <= ',')||(LA8_1 >= '.' && LA8_1 <= '\uFFFF')) ) { alt8=1; } } else if ( ((LA8_0 >= '\u0000' && LA8_0 <= ',')||(LA8_0 >= '.' && LA8_0 <= '\uFFFF')) ) { alt8=1; } switch (alt8) { case 1 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1405:31: . 
{ matchAny(); } break; default : break loop8; } } while (true); // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1405:34: ( '-' '-' '>' ) // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1405:35: '-' '-' '>' { match('-'); match('-'); match('>'); } _channel = HIDDEN; } state.type = _type; state.channel = _channel; } finally { // do for sure before leaving } } // $ANTLR end "XMLCOMMENT" // $ANTLR start "IGNORE" public final void mIGNORE() throws RecognitionException { try { int _type = IGNORE; int _channel = DEFAULT_TOKEN_CHANNEL; // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1406:7: ( '<' '?' ( . )* '?' '>' ) // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1406:9: '<' '?' ( . )* '?' '>' { match('<'); match('?'); // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1406:17: ( . )* loop9: do { int alt9=2; int LA9_0 = input.LA(1); if ( (LA9_0=='?') ) { int LA9_1 = input.LA(2); if ( (LA9_1=='>') ) { alt9=2; } else if ( ((LA9_1 >= '\u0000' && LA9_1 <= '=')||(LA9_1 >= '?' && LA9_1 <= '\uFFFF')) ) { alt9=1; } } else if ( ((LA9_0 >= '\u0000' && LA9_0 <= '>')||(LA9_0 >= '@' && LA9_0 <= '\uFFFF')) ) { alt9=1; } switch (alt9) { case 1 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1406:17: . { matchAny(); } break; default : break loop9; } } while (true); match('?'); match('>'); _channel = HIDDEN; } state.type = _type; state.channel = _channel; } finally { // do for sure before leaving } } // $ANTLR end "IGNORE" public void mTokens() throws RecognitionException { // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1:8: ( T__57 | T__58 | T__59 | T__60 | T__61 | T__62 | T__63 | LPAREN | RPAREN | QMARK | COLON | SEMICOLON | PERIOD | UNDERSCORE | COMMA | QUOTE | MODULE | NAME | INPUT | OUTPUT | INTERNAL | MARKING | STATE_VECTOR | TRANSITION | LABEL | PRESET | POSTSET | TRUE | FALSE | PLUS | MINUS | TIMES | DIV | MOD | EQUALS | GREATER | LESS | GREATER_EQUAL | LESS_EQUAL | EQUIV | NOT_EQUIV | NEGATION | AND | OR | IMPLICATION | BITWISE_NEGATION | BITWISE_AND | BITWISE_OR | BITWISE_XOR | BITWISE_LSHIFT | BITWISE_RSHIFT | INT | ID | WS | COMMENT | MULTILINECOMMENT | XMLCOMMENT | IGNORE ) int alt10=58; alt10 = dfa10.predict(input); switch (alt10) { case 1 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1:10: T__57 { mT__57(); } break; case 2 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1:16: T__58 { mT__58(); } break; case 3 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1:22: T__59 { mT__59(); } break; case 4 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1:28: T__60 { mT__60(); } break; case 5 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1:34: T__61 { mT__61(); } break; case 6 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1:40: T__62 { mT__62(); } break; case 7 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1:46: T__63 { mT__63(); } break; case 8 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1:52: LPAREN { mLPAREN(); } break; case 9 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1:59: RPAREN { mRPAREN(); } break; case 10 : // 
/Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1:66: QMARK { mQMARK(); } break; case 11 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1:72: COLON { mCOLON(); } break; case 12 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1:78: SEMICOLON { mSEMICOLON(); } break; case 13 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1:88: PERIOD { mPERIOD(); } break; case 14 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1:95: UNDERSCORE { mUNDERSCORE(); } break; case 15 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1:106: COMMA { mCOMMA(); } break; case 16 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1:112: QUOTE { mQUOTE(); } break; case 17 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1:118: MODULE { mMODULE(); } break; case 18 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1:125: NAME { mNAME(); } break; case 19 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1:130: INPUT { mINPUT(); } break; case 20 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1:136: OUTPUT { mOUTPUT(); } break; case 21 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1:143: INTERNAL { mINTERNAL(); } break; case 22 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1:152: MARKING { mMARKING(); } break; case 23 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1:160: STATE_VECTOR { mSTATE_VECTOR(); } break; case 24 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1:173: TRANSITION { mTRANSITION(); } break; case 25 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1:184: LABEL { mLABEL(); } break; case 26 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1:190: PRESET { mPRESET(); } break; case 27 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1:197: POSTSET { mPOSTSET(); } break; case 28 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1:205: TRUE { mTRUE(); } break; case 29 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1:210: FALSE { mFALSE(); } break; case 30 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1:216: PLUS { mPLUS(); } break; case 31 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1:221: MINUS { mMINUS(); } break; case 32 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1:227: TIMES { mTIMES(); } break; case 33 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1:233: DIV { mDIV(); } break; case 34 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1:237: MOD { mMOD(); } break; case 35 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1:241: EQUALS { mEQUALS(); } break; case 36 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1:248: GREATER { mGREATER(); } break; case 37 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1:256: LESS { mLESS(); 
} break; case 38 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1:261: GREATER_EQUAL { mGREATER_EQUAL(); } break; case 39 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1:275: LESS_EQUAL { mLESS_EQUAL(); } break; case 40 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1:286: EQUIV { mEQUIV(); } break; case 41 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1:292: NOT_EQUIV { mNOT_EQUIV(); } break; case 42 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1:302: NEGATION { mNEGATION(); } break; case 43 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1:311: AND { mAND(); } break; case 44 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1:315: OR { mOR(); } break; case 45 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1:318: IMPLICATION { mIMPLICATION(); } break; case 46 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1:330: BITWISE_NEGATION { mBITWISE_NEGATION(); } break; case 47 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1:347: BITWISE_AND { mBITWISE_AND(); } break; case 48 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1:359: BITWISE_OR { mBITWISE_OR(); } break; case 49 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1:370: BITWISE_XOR { mBITWISE_XOR(); } break; case 50 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1:382: BITWISE_LSHIFT { mBITWISE_LSHIFT(); } break; case 51 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1:397: BITWISE_RSHIFT { mBITWISE_RSHIFT(); } break; case 52 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1:412: INT { mINT(); } break; case 53 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1:416: ID { mID(); } break; case 54 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1:419: WS { mWS(); } break; case 55 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1:422: COMMENT { mCOMMENT(); } break; case 56 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1:430: MULTILINECOMMENT { mMULTILINECOMMENT(); } break; case 57 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1:447: XMLCOMMENT { mXMLCOMMENT(); } break; case 58 : // /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g:1:458: IGNORE { mIGNORE(); } break; } } protected DFA10 dfa10 = new DFA10(this); static final String DFA10_eotS = "\3\uffff\4\46\11\uffff\10\46\1\uffff\1\67\1\uffff\1\72\1\uffff\1"+ "\74\1\77\1\104\1\106\1\110\1\112\5\uffff\16\46\25\uffff\2\46\1\137"+ "\4\46\1\144\2\46\1\147\11\46\1\uffff\1\161\1\46\1\163\1\46\1\uffff"+ "\1\165\1\46\1\uffff\2\46\1\171\5\46\1\177\1\uffff\1\u0080\1\uffff"+ "\1\46\1\uffff\3\46\1\uffff\1\u0085\2\46\1\u0088\1\u0089\2\uffff"+ "\1\46\1\u008b\2\46\1\uffff\1\u008e\1\46\2\uffff\1\u0090\1\uffff"+ "\2\46\1\uffff\1\u0093\1\uffff\2\46\1\uffff\3\46\1\u0099\1\u009a"+ "\2\uffff"; static final String DFA10_eofS = "\u009b\uffff"; static final String DFA10_minS = "\1\11\2\uffff\1\163\1\157\1\156\1\141\11\uffff\1\141\1\165\1\141"+ 
"\1\164\1\162\1\141\1\157\1\141\1\uffff\1\60\1\uffff\1\52\1\uffff"+ "\2\75\1\41\1\75\1\46\1\174\5\uffff\1\163\1\156\1\146\1\151\1\144"+ "\1\155\1\164\1\162\2\141\1\142\1\145\1\163\1\154\25\uffff\1\145"+ "\1\163\1\56\1\164\1\165\1\156\1\153\1\56\1\145\1\160\1\56\1\164"+ "\1\156\2\145\1\163\1\164\1\163\1\162\1\164\1\uffff\1\56\1\164\1"+ "\56\1\151\1\uffff\1\56\1\165\1\uffff\1\145\1\163\1\56\1\154\1\145"+ "\1\163\1\145\1\164\1\56\1\uffff\1\56\1\uffff\1\156\1\uffff\1\164"+ "\1\166\1\151\1\uffff\1\56\1\164\1\145\2\56\2\uffff\1\147\1\56\1"+ "\145\1\164\1\uffff\1\56\1\164\2\uffff\1\56\1\uffff\1\143\1\151\1"+ "\uffff\1\56\1\uffff\1\164\1\157\1\uffff\1\157\1\156\1\162\2\56\2"+ "\uffff"; static final String DFA10_maxS = "\1\176\2\uffff\1\163\1\157\1\156\1\157\11\uffff\1\141\1\165\1\141"+ "\1\164\1\162\1\141\1\162\1\141\1\uffff\1\76\1\uffff\1\57\1\uffff"+ "\1\75\1\76\1\77\1\75\1\46\1\174\5\uffff\1\163\1\156\1\163\1\162"+ "\1\144\1\155\1\164\1\162\1\141\1\165\1\142\1\145\1\163\1\154\25"+ "\uffff\1\145\1\163\1\172\1\164\1\165\1\156\1\153\1\172\1\145\1\160"+ "\1\172\1\164\1\156\2\145\1\163\1\164\1\163\1\162\1\164\1\uffff\1"+ "\172\1\164\1\172\1\151\1\uffff\1\172\1\165\1\uffff\1\145\1\163\1"+ "\172\1\154\1\145\1\163\1\145\1\164\1\172\1\uffff\1\172\1\uffff\1"+ "\156\1\uffff\1\164\1\166\1\151\1\uffff\1\172\1\164\1\145\2\172\2"+ "\uffff\1\147\1\172\1\145\1\164\1\uffff\1\172\1\164\2\uffff\1\172"+ "\1\uffff\1\143\1\151\1\uffff\1\172\1\uffff\1\164\1\157\1\uffff\1"+ "\157\1\156\1\162\2\172\2\uffff"; static final String DFA10_acceptS = "\1\uffff\1\1\1\2\4\uffff\1\10\1\11\1\12\1\13\1\14\1\15\1\16\1\17"+ "\1\20\10\uffff\1\36\1\uffff\1\40\1\uffff\1\42\6\uffff\1\56\1\61"+ "\1\64\1\65\1\66\16\uffff\1\55\1\37\1\67\1\70\1\41\1\50\1\43\1\46"+ "\1\63\1\44\1\47\1\62\1\71\1\72\1\45\1\51\1\52\1\53\1\57\1\54\1\60"+ "\24\uffff\1\5\4\uffff\1\21\2\uffff\1\25\11\uffff\1\6\1\uffff\1\7"+ "\1\uffff\1\22\3\uffff\1\34\5\uffff\1\4\1\23\4\uffff\1\31\2\uffff"+ "\1\35\1\3\1\uffff\1\24\2\uffff\1\32\1\uffff\1\26\2\uffff\1\33\5"+ "\uffff\1\30\1\27"; static final String DFA10_specialS = "\u009b\uffff}>"; static final String[] DFA10_transitionS = { "\2\47\1\uffff\2\47\22\uffff\1\47\1\40\1\17\2\uffff\1\34\1\41"+ "\1\uffff\1\7\1\10\1\32\1\30\1\16\1\31\1\14\1\33\12\45\1\12\1"+ "\13\1\37\1\35\1\36\1\11\1\uffff\32\46\1\1\1\uffff\1\2\1\44\1"+ "\15\1\uffff\1\3\1\46\1\4\2\46\1\27\2\46\1\5\2\46\1\25\1\6\1"+ "\20\1\21\1\26\2\46\1\23\1\24\1\46\1\22\4\46\1\uffff\1\42\1\uffff"+ "\1\43", "", "", "\1\50", "\1\51", "\1\52", "\1\53\15\uffff\1\54", "", "", "", "", "", "", "", "", "", "\1\55", "\1\56", "\1\57", "\1\60", "\1\61", "\1\62", "\1\64\2\uffff\1\63", "\1\65", "", "\12\45\4\uffff\1\66", "", "\1\71\4\uffff\1\70", "", "\1\73", "\1\75\1\76", "\1\102\32\uffff\1\101\1\100\1\uffff\1\103", "\1\105", "\1\107", "\1\111", "", "", "", "", "", "\1\113", "\1\114", "\1\115\11\uffff\1\117\2\uffff\1\116", "\1\120\10\uffff\1\121", "\1\122", "\1\123", "\1\124", "\1\125", "\1\126", "\1\127\23\uffff\1\130", "\1\131", "\1\132", "\1\133", "\1\134", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "\1\135", "\1\136", "\1\46\1\uffff\12\46\7\uffff\32\46\4\uffff\1\46\1\uffff\32\46", "\1\140", "\1\141", "\1\142", "\1\143", "\1\46\1\uffff\12\46\7\uffff\32\46\4\uffff\1\46\1\uffff\32\46", "\1\145", "\1\146", "\1\46\1\uffff\12\46\7\uffff\32\46\4\uffff\1\46\1\uffff\32\46", "\1\150", "\1\151", "\1\152", "\1\153", "\1\154", "\1\155", "\1\156", "\1\157", "\1\160", "", "\1\46\1\uffff\12\46\7\uffff\32\46\4\uffff\1\46\1\uffff\32\46", "\1\162", 
"\1\46\1\uffff\12\46\7\uffff\32\46\4\uffff\1\46\1\uffff\32\46", "\1\164", "", "\1\46\1\uffff\12\46\7\uffff\32\46\4\uffff\1\46\1\uffff\32\46", "\1\166", "", "\1\167", "\1\170", "\1\46\1\uffff\12\46\7\uffff\32\46\4\uffff\1\46\1\uffff\32\46", "\1\172", "\1\173", "\1\174", "\1\175", "\1\176", "\1\46\1\uffff\12\46\7\uffff\32\46\4\uffff\1\46\1\uffff\32\46", "", "\1\46\1\uffff\12\46\7\uffff\32\46\4\uffff\1\46\1\uffff\32\46", "", "\1\u0081", "", "\1\u0082", "\1\u0083", "\1\u0084", "", "\1\46\1\uffff\12\46\7\uffff\32\46\4\uffff\1\46\1\uffff\32\46", "\1\u0086", "\1\u0087", "\1\46\1\uffff\12\46\7\uffff\32\46\4\uffff\1\46\1\uffff\32\46", "\1\46\1\uffff\12\46\7\uffff\32\46\4\uffff\1\46\1\uffff\32\46", "", "", "\1\u008a", "\1\46\1\uffff\12\46\7\uffff\32\46\4\uffff\1\46\1\uffff\32\46", "\1\u008c", "\1\u008d", "", "\1\46\1\uffff\12\46\7\uffff\32\46\4\uffff\1\46\1\uffff\32\46", "\1\u008f", "", "", "\1\46\1\uffff\12\46\7\uffff\32\46\4\uffff\1\46\1\uffff\32\46", "", "\1\u0091", "\1\u0092", "", "\1\46\1\uffff\12\46\7\uffff\32\46\4\uffff\1\46\1\uffff\32\46", "", "\1\u0094", "\1\u0095", "", "\1\u0096", "\1\u0097", "\1\u0098", "\1\46\1\uffff\12\46\7\uffff\32\46\4\uffff\1\46\1\uffff\32\46", "\1\46\1\uffff\12\46\7\uffff\32\46\4\uffff\1\46\1\uffff\32\46", "", "" }; static final short[] DFA10_eot = DFA.unpackEncodedString(DFA10_eotS); static final short[] DFA10_eof = DFA.unpackEncodedString(DFA10_eofS); static final char[] DFA10_min = DFA.unpackEncodedStringToUnsignedChars(DFA10_minS); static final char[] DFA10_max = DFA.unpackEncodedStringToUnsignedChars(DFA10_maxS); static final short[] DFA10_accept = DFA.unpackEncodedString(DFA10_acceptS); static final short[] DFA10_special = DFA.unpackEncodedString(DFA10_specialS); static final short[][] DFA10_transition; static { int numStates = DFA10_transitionS.length; DFA10_transition = new short[numStates][]; for (int i=0; i<numStates; i++) { DFA10_transition[i] = DFA.unpackEncodedString(DFA10_transitionS[i]); } } class DFA10 extends DFA { public DFA10(BaseRecognizer recognizer) { this.recognizer = recognizer; this.decisionNumber = 10; this.eot = DFA10_eot; this.eof = DFA10_eof; this.min = DFA10_min; this.max = DFA10_max; this.accept = DFA10_accept; this.special = DFA10_special; this.transition = DFA10_transition; } public String getDescription() { return "1:1: Tokens : ( T__57 | T__58 | T__59 | T__60 | T__61 | T__62 | T__63 | LPAREN | RPAREN | QMARK | COLON | SEMICOLON | PERIOD | UNDERSCORE | COMMA | QUOTE | MODULE | NAME | INPUT | OUTPUT | INTERNAL | MARKING | STATE_VECTOR | TRANSITION | LABEL | PRESET | POSTSET | TRUE | FALSE | PLUS | MINUS | TIMES | DIV | MOD | EQUALS | GREATER | LESS | GREATER_EQUAL | LESS_EQUAL | EQUIV | NOT_EQUIV | NEGATION | AND | OR | IMPLICATION | BITWISE_NEGATION | BITWISE_AND | BITWISE_OR | BITWISE_XOR | BITWISE_LSHIFT | BITWISE_RSHIFT | INT | ID | WS | COMMENT | MULTILINECOMMENT | XMLCOMMENT | IGNORE );"; } } }
MyersResearchGroup/iBioSim
verification/src/main/java/edu/utah/ece/async/lema/verification/platu/platuLpn/io/PlatuGrammarLexer.java
Java
apache-2.0
77,940
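/*
 * A minimal sketch of driving the generated PlatuGrammarLexer above with the
 * ANTLR 3 runtime. Assumes the org.antlr.runtime jar is on the classpath; the
 * demo class name and the sample input string are illustrative only and are
 * not part of the generated file.
 */
import org.antlr.runtime.ANTLRStringStream;
import org.antlr.runtime.Token;

public class PlatuLexerDemo {
    public static void main(String[] args) {
        // Keywords (mod, name, var), QUOTE, ID, EQUALS and INT are all token
        // rules defined by the lexer above.
        ANTLRStringStream input = new ANTLRStringStream("mod name \"demo\" var x1 = 5");
        PlatuGrammarLexer lexer = new PlatuGrammarLexer(input);
        // Pull tokens until EOF; whitespace and comments are emitted on the
        // HIDDEN channel rather than being discarded.
        for (Token t = lexer.nextToken(); t.getType() != Token.EOF; t = lexer.nextToken()) {
            System.out.println(t.getType() + " -> '" + t.getText() + "'");
        }
    }
}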
/*
 * Copyright 2015 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package demo;

import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.context.annotation.ComponentScan;

import transform.LoggingTransformer;

@SpringBootApplication
@ComponentScan(basePackageClasses=LoggingTransformer.class)
public class TransformApplication {

    public static void main(String[] args) {
        SpringApplication.run(TransformApplication.class, args);
    }
}
ericbottard/spring-cloud-stream
spring-cloud-stream-samples/transform/src/main/java/demo/TransformApplication.java
Java
apache-2.0
1,079
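/*
 * The application above component-scans the package of transform.LoggingTransformer,
 * which is not included in this record. Below is a minimal sketch of what such a
 * transformer could look like, assuming the annotation-based spring-cloud-stream
 * Processor binding of that era; the payload handling is hypothetical, not the
 * sample's actual implementation.
 */
package transform;

import org.springframework.cloud.stream.annotation.EnableBinding;
import org.springframework.cloud.stream.messaging.Processor;
import org.springframework.integration.annotation.Transformer;

@EnableBinding(Processor.class)
public class LoggingTransformer {

    // Logs each payload arriving on the input channel and forwards a
    // transformed copy to the output channel.
    @Transformer(inputChannel = Processor.INPUT, outputChannel = Processor.OUTPUT)
    public String transform(String payload) {
        System.out.println("Received: " + payload);
        return payload.toUpperCase();
    }
}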
import json
import random
from datetime import datetime, timedelta
import hashlib

from django.http import HttpResponse, JsonResponse
from django.shortcuts import render_to_response
from django.template import loader
from django.utils import encoding

from core.grafana.GrafanaES import Grafana
from core.grafana.QueryES import Query
from core.grafana.data_tranformation import stacked_hist, pledges_merging
from core.libs.cache import setCacheEntry, getCacheEntry
from core.oauth.utils import login_customrequired
from core.views import initRequest, DateTimeEncoder, DateEncoder

colours_codes = {
    "0": "#AE3C51",
    "1": "#6298FF",
    "2": "#D97529",
    "3": "#009246",
    "AOD": "#006019",
    "Analysis": "#FF00FF",
    "CA": "#FF1F1F",
    "CAF processing": "#CAD141",
    "CERN": "#AE3C51",
    "Custodial": "#FF0000",
    "DE": "#000000",
    "DESD": "#4189FF",
    "DPD": "#FEF100",
    "Data Processing": "#FFFF00",
    "Data Processing (XP)": "#008800",
    "Default": "#808080",
    "ES": "#EDBF00",
    "ESD": "#001640",
    "Extra Production": "#FF0000",
    "FR": "#0055A5",
    "Group Analysis": "#808080",
    "Group Production": "#008800",
    "HITS": "#FF6666",
    "IT": "#009246",
    "MC Event Generation": "#356C20",
    "MC Production": "#0000FF",
    "MC Reconstruction": "#00006B",
    "MC Reconstruction (XP)": "#D97529",
    "MC Simulation": "#0000FF",
    "MC Simulation (XP)": "#AE3C51",
    "MC Simulation Fast": "#0099CC",
    "MC Simulation Fast (XP)": "#0099CC",
    "MC Simulation Full": "#00CCCC",
    "MC Simulation Full (XP)": "#00CCCC",
    "ND": "#6298FF",
    "NL": "#D97529",
    "Other": "#66008D",
    "Others": "#00FFFF",
    "Others (XP)": "#009246",
    "Primary": "#FFA500",
    "RAW": "#FF0000",
    "RU": "#66008D",
    "Rest": "#625D5D",
    "Secondary": "#00FFFF",
    "T0 processing": "#DB9900",
    "TW": "#89000F",
    "Testing": "#00FF00",
    "ToBeDeleted": "#FFFF00",
    "UK": "#356C20",
    "UNKNOWN": "#FFA500",
    "US": "#00006B",
    "User Analysis": "#FF00FF",
    "Validation": "#000000",
    "analysis": "#FF0000",
    "bstream": "#0055A5",
    "cancelled": "#FF9933",
    "closed": "#808080",
    "evgen": "#D97529",
    "evgentx": "#AE3C51",
    "failed": "#bf1b00",
    "filter": "#DB9900",
    "finished": "#248F24",
    "ganga": "#1433CC",
    "gangarobot": "#006666",
    "gangarobot-64": "#009999",
    "gangarobot-filestager": "#00CCCC",
    "gangarobot-new": "#00FFFF",
    "gangarobot-nightly": "#99FF00",
    "gangarobot-pft": "#99CC33",
    "gangarobot-pft-trial": "#999966",
    "gangarobot-rctest": "#996699",
    "gangarobot-root": "#CC0000",
    "gangarobot-squid": "#CC0066",
    "gangarobotnew": "#CC3399",
    "hammercloud": "#A5D3CA",
    "merge": "#FFA600",
    "merging": "#47D147",
    "non-panda_analysis": "#CCCCCC",
    "pandamover": "#FFE920",
    "pile": "#FF00FF",
    "prod_test": "#B4D1B6",
    "production": "#CAD141",
    "ptest": "#89C7FF",
    "rc_test": "#A5FF8A",
    "reco": "#00006B",
    "reprocessing": "#008800",
    "running": "#47D147",
    "simul": "#0000FF",
    "software": "#FFCFA4",
    "t0_caf": "#CAD141",
    "t0_processing": "#FFA600",
    "test": "#00FF00",
    "transfering": "#47D147",
    "txtgen": "#29AFD6",
    "validation": "#000000"
}


@login_customrequired
def index(request):
    """The main page containing drop-down menus to select group by options etc.
    Data is delivered asynchronously by a request to the grafana_api view."""
    valid, response = initRequest(request)
    # all possible group by options and plots to build
    group_by = {'dst_federation': 'Federation'}
    split_series = {'adcactivity': 'ADC Activity', 'jobstatus': 'Job status'}
    plots = {'cpuconsumption': 'CPU Consumption', 'wallclockhepspec06': 'WallClock HEPSPEC06'}
    data = {
        'group_by': group_by,
        'split_series': split_series,
        'plots': plots,
    }
    response = render_to_response('grafana-api-plots.html', data, content_type='text/html')
    return response


def chartjs(request):
    """The main page containing drop-down menus to select group by options etc.
    Data is delivered asynchronously by a request to the grafana_api view."""
    valid, response = initRequest(request)
    # all possible group by options and plots to build
    group_by = {'dst_federation': 'Federation'}
    split_series = {'adcactivity': 'ADC Activity', 'jobstatus': 'Job status'}
    plots = {'cpuconsumption': 'CPU Consumption', 'wallclockhepspec06': 'WallClock HEPSPEC06'}
    data = {
        'group_by': group_by,
        'split_series': split_series,
        'plots': plots,
    }
    response = render_to_response('grafana-chartjs-plots.html', data, content_type='text/html')
    return response


def grafana_api(request):
    valid, response = initRequest(request)
    group_by = None
    split_series = None
    if 'groupby' in request.session['requestParams']:
        groupby_params = request.session['requestParams']['groupby'].split(',')
        if 'time' in groupby_params:
            pass
        else:
            group_by = groupby_params[0]
            if len(groupby_params) > 1:
                split_series = groupby_params[1]
    result = []
    q = Query()
    q = q.request_to_query(request)
    last_pledges = Query(agg_func='last', table='pledges_last', field='value', grouping='real_federation')
    # /api/datasources/proxy/9267/query?db=monit_production_rebus
    # sum_pledges = Query(agg_func='sum', table='pledges', field='atlas', grouping='time(1m),real_federation')
    try:
        if q.table == 'pledges_last' or q.table == 'pledges_sum' or q.table == 'pledges_hs06sec':
            result = Grafana(database='monit_production_rebus').get_data(q)
        else:
            result = Grafana().get_data(q)
        # last_pledges = Grafana().get_data(last_pledges)
        if 'type' in request.session['requestParams'] and request.session['requestParams']['type'] == 'd3js':
            data = stacked_hist(result['results'][0]['series'], group_by, split_series)
            return JsonResponse(data)
        if 'type' in request.session['requestParams'] and request.session['requestParams']['type'] == 'chartjs':
            last_pledges = Grafana(database='monit_production_rebus').get_data(last_pledges)
            data = {}
            data = stacked_hist(result['results'][0]['series'], group_by, split_series)
            last_pledges = stacked_hist(last_pledges['results'][0]['series'], 'real_federation')
            labels = list(data.keys())
            pledges_keys = list(last_pledges.keys())
            datasets = []
            elements = {}
            for object in data:
                for element in data[object]:
                    elements.setdefault(element, []).append(data[object][element])
                if object in pledges_keys:
                    elements.setdefault('pledges', []).append(last_pledges[object]['all'] * 7 * 24 * 60 * 60)
                else:
                    elements.setdefault('pledges', []).append(0)
            background = ''
            for key in elements:
                if key in colours_codes:
                    background = colours_codes[key]
                else:
                    r = lambda: random.randint(0, 255)
                    background = '#%02X%02X%02X' % (r(), r(), r())
                if key != 'pledges':
                    datasets.append(
                        {'label': key, 'stack': 'Stack 0', 'data': elements[key], 'backgroundColor': background})
                else:
                    datasets.append(
                        {'label': key, 'stack': 'Stack 1', 'data': elements[key], 'backgroundColor': '#FF0000'})
            data = {'labels': labels, 'datasets': datasets}
            return HttpResponse(json.dumps(data, cls=DateTimeEncoder), content_type='application/json')
        if 'export' in request.session['requestParams']:
            if request.session['requestParams']['export'] == 'csv':
                data = stacked_hist(result['results'][0]['series'], group_by, split_series)
                import csv
                import copy
                response = HttpResponse(content_type='text/csv')
                column_titles = copy.deepcopy(groupby_params)
                column_titles.append('value')
                response['Content-Disposition'] = 'attachment; filename={0}.csv'.format('_'.join(groupby_params))
                writer = csv.writer(response, delimiter=";")
                writer.writerow(column_titles)
                csvList = []
                if len(groupby_params) > 1:
                    csvList = grab_children(data)
                else:
                    for key, value in data.items():
                        csvList.append([key, value['all']])
                writer.writerows(csvList)
                return response
    except Exception as ex:
        result.append(ex)
    return JsonResponse(result)


def grab_children(data, parent=None, child=None):
    if child is None:
        child = []
    for key, value in data.items():
        if isinstance(value, dict):
            grab_children(value, key, child)
        else:
            child.append([parent, key, value])
    return child


#@login_customrequired
def pledges(request):
    valid, response = initRequest(request)
    if 'date_from' in request.session['requestParams'] and 'date_to' in request.session['requestParams']:
        starttime = request.session['requestParams']['date_from']
        endtime = request.session['requestParams']['date_to']
        date_to = datetime.strptime(endtime, "%d.%m.%Y %H:%M:%S")
        date_from = datetime.strptime(starttime, "%d.%m.%Y %H:%M:%S")
        total_seconds = (date_to - date_from).total_seconds()
        total_days = (date_to - date_from).days
        date_list = []
        if (date_to - date_from).days > 30:
            n = 20
            while True:
                start_date = date_from
                end_date = (start_date + timedelta(days=n))
                end_date = end_date - timedelta(minutes=1)
                if end_date >= date_to:
                    end_date = date_to - timedelta(minutes=1)
                    date_list.append([start_date.strftime("%d.%m.%Y %H:%M:%S"),
                                      end_date.strftime("%d.%m.%Y %H:%M:%S")])
                    break
                else:
                    date_list.append([start_date.strftime("%d.%m.%Y %H:%M:%S"),
                                      end_date.strftime("%d.%m.%Y %H:%M:%S")])
                    date_from = end_date + timedelta(minutes=1)
        else:
            newendtime = (date_to - timedelta(minutes=1)).strftime("%d.%m.%Y %H:%M:%S")
            date_list.append([starttime, newendtime])
    else:
        timebefore = timedelta(days=7)
        endtime = (datetime.utcnow()).replace(minute=00, hour=00, second=00, microsecond=000)
        starttime = (endtime - timebefore).replace(minute=00, hour=00, second=00, microsecond=000)
        total_seconds = (starttime - endtime).total_seconds()
        total_days = (endtime - starttime).days
        endtime = endtime - timedelta(minutes=1)
        endtime = endtime.strftime("%d.%m.%Y %H:%M:%S")
        starttime = starttime.strftime("%d.%m.%Y %H:%M:%S")
    if 'type' in request.session['requestParams'] and request.session['requestParams']['type'] == 'federation':
        key = hashlib.md5(encoding.force_bytes("{0}_{1}_federation".format(starttime, endtime)))
        key = key.hexdigest()
        federations = getCacheEntry(request, key, isData=True)
        if federations is not None:
            federations = json.loads(federations)
            return HttpResponse(json.dumps(federations), content_type='text/json')
        pledges_dict = {}
        pledges_list = []
        federations_info = {}
        if len(date_list) > 1:
            for date in date_list:
                hs06sec = Query(agg_func='sum', table='completed',
                                field=['sum_hs06sec', 'sum_count', 'sum_cpuconsumptiontime', 'sum_walltime'],
                                grouping='time,dst_federation,dst_tier,dst_experiment_site,computingsite',
                                starttime=date[0], endtime=date[1])
                hs06sec = Grafana().get_data(hs06sec)
                pledges_sum = Query(agg_func='mean', table='pledges_hs06sec', field='value',
                                    grouping='time,real_federation,tier',
                                    starttime=date[0], endtime=date[1])
                pledges_sum = Grafana(database='monit_production_rebus').get_data(pledges_sum)
                pledges_dict, federations_info = pledges_merging(hs06sec, pledges_sum, total_seconds,
                                                                 pledges_dict, federations_info)
        else:
            hs06sec = Query(agg_func='sum', table='completed',
                            field=['sum_hs06sec', 'sum_count', 'sum_cpuconsumptiontime', 'sum_walltime'],
                            grouping='time,dst_federation,dst_tier,dst_experiment_site,computingsite',
                            starttime=date_list[0][0], endtime=date_list[0][1])
            hs06sec = Grafana().get_data(hs06sec)
            pledges_sum = Query(agg_func='mean', table='pledges_hs06sec', field='value',
                                grouping='time,real_federation,tier',
                                starttime=date_list[0][0], endtime=date_list[0][1])
            pledges_sum = Grafana(database='monit_production_rebus').get_data(pledges_sum)
            pledges_dict, federations_info = pledges_merging(hs06sec, pledges_sum, total_seconds,
                                                             pledges_dict, federations_info)
        for pledges in pledges_dict:
            if pledges == 'NULL':
                continue
            else:
                # pledges_list.append(
                #     {type: pledges, "hs06sec": pledges_dict[pledges]['hs06sec'],
                #      'pledges': pledges_dict[pledges]['pledges']})
                pledges_list.append({"dst_federation": pledges,
                                     "hs06sec": int(round(float(pledges_dict[pledges]['hs06sec']) / 86400, 2)),
                                     'pledges': int(round(float(pledges_dict[pledges]['pledges']) / 86400, 2)),
                                     'tier': pledges_dict[pledges]['tier'],
                                     'federation_info': federations_info[pledges] if pledges in federations_info else None}
                                    )
        setCacheEntry(request, key, json.dumps(pledges_list), 60 * 60 * 24 * 30, isData=True)
        return HttpResponse(json.dumps(pledges_list), content_type='text/json')
    elif 'type' in request.session['requestParams'] and request.session['requestParams']['type'] == 'country':
        key = hashlib.md5(encoding.force_bytes("{0}_{1}_country".format(starttime, endtime)))
        key = key.hexdigest()
        countries = getCacheEntry(request, key, isData=True)
        if countries is not None:
            countries = json.loads(countries)
            return HttpResponse(json.dumps(countries), content_type='text/json')
        federations_info = {}
        pledges_dict = {}
        pledges_list = []
        if len(date_list) > 1:
            for date in date_list:
                hs06sec = Query(agg_func='sum', table='completed', field='sum_hs06sec',
                                grouping='time,dst_federation,dst_country',
                                starttime=date[0], endtime=date[1])
                hs06sec = Grafana().get_data(hs06sec)
                pledges_sum = Query(agg_func='mean', table='pledges_hs06sec', field='value',
                                    grouping='time,real_federation,country',
                                    starttime=date[0], endtime=date[1])
                pledges_sum = Grafana(database='monit_production_rebus').get_data(pledges_sum)
                pledges_dict = pledges_merging(hs06sec, pledges_sum, total_seconds,
                                               pledges_dict, federations_info, type='dst_country')
        else:
            hs06sec = Query(agg_func='sum', table='completed', field='sum_hs06sec',
                            grouping='time,dst_federation,dst_country',
                            starttime=date_list[0][0], endtime=date_list[0][1])
            hs06sec = Grafana().get_data(hs06sec)
            pledges_sum = Query(agg_func='mean', table='pledges_hs06sec', field='value',
                                grouping='time,real_federation,country',
                                starttime=date_list[0][0], endtime=date_list[0][1])
            pledges_sum = Grafana(database='monit_production_rebus').get_data(pledges_sum)
            pledges_dict = pledges_merging(hs06sec, pledges_sum, total_seconds,
                                           pledges_dict, federations_info, type='dst_country')
        for pledges in pledges_dict:
            if pledges == 'NULL':
                continue
            else:
                pledges_list.append(
                    {"dst_country": pledges,
                     "hs06sec": int(round(float(pledges_dict[pledges]['hs06sec']) / 86400, 2)),
                     'pledges': int(round(float(pledges_dict[pledges]['pledges']) / 86400, 2))})
        setCacheEntry(request, key, json.dumps(pledges_list), 60 * 60 * 24 * 30, isData=True)
        return HttpResponse(json.dumps(pledges_list), content_type='text/json')
    else:
        data = getCacheEntry(request, "pledges")
        # data = None
        if data is not None:
            data = json.loads(data)
            t = loader.get_template('grafana-pledges.html')
            return HttpResponse(t.render(data, request), content_type='text/html')
        else:
            key_fed = hashlib.md5(encoding.force_bytes("{0}_{1}_federation".format(starttime, endtime)))
            key_country = hashlib.md5(encoding.force_bytes("{0}_{1}_country".format(starttime, endtime)))
            key_fed = key_fed.hexdigest()
            key_country = key_country.hexdigest()
            setCacheEntry(request, key_fed, None, 60, isData=True)
            setCacheEntry(request, key_country, None, 60, isData=True)
            t = loader.get_template('grafana-pledges.html')
            data = {
                'request': request,
                'date_from': starttime,
                'date_to': endtime,
                'days': total_days,
                'info': "This page was cached: {0}".format(str(datetime.utcnow()))
            }
            setCacheEntry(request, "pledges", json.dumps(data, cls=DateEncoder), 60 * 60 * 24 * 30)
            return HttpResponse(t.render({"date_from": starttime, "date_to": endtime, "days": total_days}, request),
                                content_type='text/html')


def grafana_api_es(request):
    valid, response = initRequest(request)
    group_by = None
    split_series = None
    if 'groupby' in request.session['requestParams']:
        groupby_params = request.session['requestParams']['groupby'].split(',')
        if 'time' in groupby_params:
            pass
        else:
            group_by = groupby_params[0]
            if len(groupby_params) > 1:
                split_series = groupby_params[1]
            else:
                split_series = group_by
    result = []
    q = Query()
    q = q.request_to_query(request)
    result = Grafana().get_data(q)
    return JsonResponse(result)
PanDAWMS/panda-bigmon-core
core/grafana/views.py
Python
apache-2.0
19,477
package org.stellasql.stella.session;

public interface ResultTabHandler {

    public void closeSelectedTab();

    public void selectNextTab();

    public void selectPreviousTab();
}
shocksm/stella
src/main/java/org/stellasql/stella/session/ResultTabHandler.java
Java
apache-2.0
187
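/*
 * A minimal sketch of implementing the ResultTabHandler interface above
 * against an in-memory tab model; the fields and the fixed initial tab count
 * are hypothetical, purely for illustration.
 */
package org.stellasql.stella.session;

public class SimpleResultTabHandler implements ResultTabHandler {
    private int tabCount = 3; // hypothetical number of open result tabs
    private int selected = 0;

    @Override
    public void closeSelectedTab() {
        if (tabCount > 0) {
            tabCount--;
            // Clamp the selection to the remaining tabs.
            selected = Math.min(selected, Math.max(tabCount - 1, 0));
        }
    }

    @Override
    public void selectNextTab() {
        if (tabCount > 0) {
            selected = (selected + 1) % tabCount;
        }
    }

    @Override
    public void selectPreviousTab() {
        if (tabCount > 0) {
            selected = (selected - 1 + tabCount) % tabCount;
        }
    }
}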
package com.sequenceiq.cloudbreak.service.user;

import javax.inject.Inject;

import org.springframework.stereotype.Service;

import com.sequenceiq.cloudbreak.api.endpoint.v4.userprofile.responses.UserProfileV4Response;
import com.sequenceiq.cloudbreak.auth.crn.Crn;
import com.sequenceiq.cloudbreak.auth.altus.EntitlementService;

@Service
public class UserProfileDecorator {

    @Inject
    private EntitlementService entitlementService;

    public UserProfileV4Response decorate(UserProfileV4Response userProfileV4Response, String userCrn) {
        userProfileV4Response.setEntitlements(entitlementService.getEntitlements(Crn.safeFromString(userCrn).getAccountId()));
        return userProfileV4Response;
    }
}
hortonworks/cloudbreak
core/src/main/java/com/sequenceiq/cloudbreak/service/user/UserProfileDecorator.java
Java
apache-2.0
720
# coding=utf-8 # Copyright 2020 The TF-Agents Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for trajectory.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import from tf_agents.drivers import dynamic_episode_driver from tf_agents.drivers import test_utils as drivers_test_utils from tf_agents.environments import tf_py_environment from tf_agents.trajectories import time_step as ts from tf_agents.trajectories import trajectory from tf_agents.utils import test_utils class TrajectoryTest(test_utils.TestCase): def testFirstTensors(self): observation = () action = () policy_info = () reward = tf.constant([1.0, 1.0, 2.0]) discount = tf.constant([1.0, 1.0, 1.0]) traj = trajectory.first(observation, action, policy_info, reward, discount) self.assertTrue(tf.is_tensor(traj.step_type)) traj_val = self.evaluate(traj) self.assertAllEqual(traj_val.step_type, [ts.StepType.FIRST] * 3) self.assertAllEqual(traj_val.next_step_type, [ts.StepType.MID] * 3) def testFirstArrays(self): observation = () action = () policy_info = () reward = np.array([1.0, 1.0, 2.0]) discount = np.array([1.0, 1.0, 1.0]) traj = trajectory.first(observation, action, policy_info, reward, discount) self.assertFalse(tf.is_tensor(traj.step_type)) self.assertAllEqual(traj.step_type, [ts.StepType.FIRST] * 3) self.assertAllEqual(traj.next_step_type, [ts.StepType.MID] * 3) def testMidTensors(self): observation = () action = () policy_info = () reward = tf.constant([1.0, 1.0, 2.0]) discount = tf.constant([1.0, 1.0, 1.0]) traj = trajectory.mid(observation, action, policy_info, reward, discount) self.assertTrue(tf.is_tensor(traj.step_type)) traj_val = self.evaluate(traj) self.assertAllEqual(traj_val.step_type, [ts.StepType.MID] * 3) self.assertAllEqual(traj_val.next_step_type, [ts.StepType.MID] * 3) def testMidArrays(self): observation = () action = () policy_info = () reward = np.array([1.0, 1.0, 2.0]) discount = np.array([1.0, 1.0, 1.0]) traj = trajectory.mid(observation, action, policy_info, reward, discount) self.assertFalse(tf.is_tensor(traj.step_type)) self.assertAllEqual(traj.step_type, [ts.StepType.MID] * 3) self.assertAllEqual(traj.next_step_type, [ts.StepType.MID] * 3) def testLastTensors(self): observation = () action = () policy_info = () reward = tf.constant([1.0, 1.0, 2.0]) discount = tf.constant([1.0, 1.0, 1.0]) traj = trajectory.last(observation, action, policy_info, reward, discount) self.assertTrue(tf.is_tensor(traj.step_type)) traj_val = self.evaluate(traj) self.assertAllEqual(traj_val.step_type, [ts.StepType.MID] * 3) self.assertAllEqual(traj_val.next_step_type, [ts.StepType.LAST] * 3) def testLastArrays(self): observation = () action = () policy_info = () reward = np.array([1.0, 1.0, 2.0]) discount = np.array([1.0, 1.0, 1.0]) traj = trajectory.last(observation, action, policy_info, reward, discount) self.assertFalse(tf.is_tensor(traj.step_type)) 
self.assertAllEqual(traj.step_type, [ts.StepType.MID] * 3) self.assertAllEqual(traj.next_step_type, [ts.StepType.LAST] * 3) def testSingleStepTensors(self): observation = () action = () policy_info = () reward = tf.constant([1.0, 1.0, 2.0]) discount = tf.constant([1.0, 1.0, 1.0]) traj = trajectory.single_step(observation, action, policy_info, reward, discount) self.assertTrue(tf.is_tensor(traj.step_type)) traj_val = self.evaluate(traj) self.assertAllEqual(traj_val.step_type, [ts.StepType.FIRST] * 3) self.assertAllEqual(traj_val.next_step_type, [ts.StepType.LAST] * 3) def testSingleStepArrays(self): observation = () action = () policy_info = () reward = np.array([1.0, 1.0, 2.0]) discount = np.array([1.0, 1.0, 1.0]) traj = trajectory.single_step(observation, action, policy_info, reward, discount) self.assertFalse(tf.is_tensor(traj.step_type)) self.assertAllEqual(traj.step_type, [ts.StepType.FIRST] * 3) self.assertAllEqual(traj.next_step_type, [ts.StepType.LAST] * 3) def testFromEpisodeTensor(self): observation = tf.random.uniform((4, 5)) action = () policy_info = () reward = tf.random.uniform((4,)) traj = trajectory.from_episode( observation, action, policy_info, reward, discount=None) self.assertTrue(tf.is_tensor(traj.step_type)) traj_val, obs_val, reward_val = self.evaluate((traj, observation, reward)) first = ts.StepType.FIRST mid = ts.StepType.MID last = ts.StepType.LAST self.assertAllEqual( traj_val.step_type, [first, mid, mid, mid]) self.assertAllEqual( traj_val.next_step_type, [mid, mid, mid, last]) self.assertAllClose(traj_val.observation, obs_val) self.assertAllEqual(traj_val.reward, reward_val) self.assertAllEqual(traj_val.discount, [1.0, 1.0, 1.0, 1.0]) def testFromEpisodeWithCompositeTensorOfTensors(self): observation = tf.SparseTensor( indices=tf.random.uniform((7, 2), maxval=9, dtype=tf.int64), values=tf.random.uniform((7,)), dense_shape=[4, 10]) # The 4 is important, it must match reward length. action = () policy_info = () reward = tf.random.uniform((4,)) traj = trajectory.from_episode( observation, action, policy_info, reward, discount=None) self.assertTrue(tf.is_tensor(traj.step_type)) traj_val, obs_val, reward_val = self.evaluate((traj, observation, reward)) first = ts.StepType.FIRST mid = ts.StepType.MID last = ts.StepType.LAST self.assertAllEqual( traj_val.step_type, [first, mid, mid, mid]) self.assertAllEqual( traj_val.next_step_type, [mid, mid, mid, last]) self.assertAllClose(traj_val.observation, obs_val) self.assertAllEqual(traj_val.reward, reward_val) self.assertAllEqual(traj_val.discount, [1.0, 1.0, 1.0, 1.0]) def testFromEpisodeArray(self): observation = np.random.rand(4, 5) action = () policy_info = () reward = np.random.rand(4) traj = trajectory.from_episode( observation, action, policy_info, reward, discount=None) self.assertFalse(tf.is_tensor(traj.step_type)) first = ts.StepType.FIRST mid = ts.StepType.MID last = ts.StepType.LAST self.assertAllEqual( traj.step_type, [first, mid, mid, mid]) self.assertAllEqual( traj.next_step_type, [mid, mid, mid, last]) self.assertAllEqual(traj.observation, observation) self.assertAllEqual(traj.reward, reward) self.assertAllEqual(traj.discount, [1.0, 1.0, 1.0, 1.0]) def testToTransition(self): first = ts.StepType.FIRST mid = ts.StepType.MID last = ts.StepType.LAST # Define a batch size 1, 3-step trajectory. 
traj = trajectory.Trajectory( step_type=np.array([[first, mid, last]]), next_step_type=np.array([[mid, last, first]]), observation=np.array([[10.0, 20.0, 30.0]]), action=np.array([[11.0, 22.0, 33.0]]), # reward at step 2 is an invalid dummy reward. reward=np.array([[0.0, 1.0, 2.0]]), discount=np.array([[1.0, 1.0, 0.0]]), policy_info=np.array([[1.0, 2.0, 3.0]])) transition = trajectory.to_transition(traj) self.assertIsInstance(transition, trajectory.Transition) time_steps, policy_steps, next_time_steps = transition self.assertAllEqual(time_steps.step_type, np.array([[first, mid]])) self.assertAllEqual(time_steps.observation, np.array([[10.0, 20.0]])) # reward and discount are filled with zero (dummy) values self.assertAllEqual(time_steps.reward, np.array([[0.0, 0.0]])) self.assertAllEqual(time_steps.discount, np.array([[0.0, 0.0]])) self.assertAllEqual(next_time_steps.step_type, np.array([[mid, last]])) self.assertAllEqual(next_time_steps.observation, np.array([[20.0, 30.0]])) self.assertAllEqual(next_time_steps.reward, np.array([[0.0, 1.0]])) self.assertAllEqual(next_time_steps.discount, np.array([[1.0, 1.0]])) self.assertAllEqual(policy_steps.action, np.array([[11.0, 22.0]])) self.assertAllEqual(policy_steps.info, np.array([[1.0, 2.0]])) def testToNStepTransitionForNEquals1(self): first = ts.StepType.FIRST last = ts.StepType.LAST # Define a batch size 1, 2-step trajectory. traj = trajectory.Trajectory( step_type=np.array([[first, last]]), next_step_type=np.array([[last, first]]), observation=np.array([[10.0, 20.0]]), action=np.array([[11.0, 22.0]]), # reward & discount values at step 1 are invalid dummy values. reward=np.array([[-1.0, 0.0]]), discount=np.array([[0.9, 0.0]]), policy_info=np.array([[10.0, 20.0]])) transition = trajectory.to_n_step_transition(traj, gamma=0.5) self.assertIsInstance(transition, trajectory.Transition) time_steps, policy_steps, next_time_steps = transition self.assertAllEqual(time_steps.step_type, np.array([first])) self.assertAllEqual(time_steps.observation, np.array([10.0])) self.assertAllEqual(time_steps.reward, np.array([np.nan])) self.assertAllEqual(time_steps.discount, np.array([np.nan])) self.assertAllEqual(next_time_steps.step_type, np.array([last])) self.assertAllEqual(next_time_steps.observation, np.array([20.0])) # r0 self.assertAllEqual(next_time_steps.reward, np.array([-1.0])) # d0 self.assertAllEqual(next_time_steps.discount, np.array([0.9])) self.assertAllEqual(policy_steps.action, np.array([11.0])) self.assertAllEqual(policy_steps.info, np.array([10.0])) def testToNStepTransition(self): first = ts.StepType.FIRST mid = ts.StepType.MID last = ts.StepType.LAST gamma = 0.5 # Define a batch size 1, 4-step trajectory. traj = trajectory.Trajectory( step_type=np.array([[first, mid, mid, last]]), next_step_type=np.array([[mid, mid, last, first]]), observation=np.array([[10.0, 20.0, 30.0, 40.0]]), action=np.array([[11.0, 22.0, 33.0, 44.0]]), # reward & discount values at step 3 are invalid dummy values.
reward=np.array([[-1.0, 1.0, 2.0, 0.0]]), discount=np.array([[0.9, 0.95, 1.0, 0.0]]), policy_info=np.array([[10.0, 20.0, 30.0, 40.0]])) transition = trajectory.to_n_step_transition(traj, gamma=gamma) self.assertIsInstance(transition, trajectory.Transition) time_steps, policy_steps, next_time_steps = transition self.assertAllEqual(time_steps.step_type, np.array([first])) self.assertAllEqual(time_steps.observation, np.array([10.0])) self.assertAllEqual(time_steps.reward, np.array([np.nan])) self.assertAllEqual(time_steps.discount, np.array([np.nan])) self.assertAllEqual(next_time_steps.step_type, np.array([last])) self.assertAllEqual(next_time_steps.observation, np.array([40.0])) # r0 + r1 * g * d0 + r2 * g * d0 * d1 # == -1.0 + 1.0*0.5*(0.9) + 2.0*(0.5**2)*(0.9*0.95) self.assertAllEqual( next_time_steps.reward, np.array([-1.0 + 1.0 * gamma * 0.9 + 2.0 * gamma**2 * 0.9 * 0.95])) # gamma**2 * (d0 * d1 * d2) self.assertAllEqual( next_time_steps.discount, np.array([gamma**2 * (0.9 * 0.95 * 1.0)])) self.assertAllEqual(policy_steps.action, np.array([11.0])) self.assertAllEqual(policy_steps.info, np.array([10.0])) def testToTransitionHandlesTrajectoryFromDriverCorrectly(self): env = tf_py_environment.TFPyEnvironment( drivers_test_utils.PyEnvironmentMock()) policy = drivers_test_utils.TFPolicyMock( env.time_step_spec(), env.action_spec()) replay_buffer = drivers_test_utils.make_replay_buffer(policy) driver = dynamic_episode_driver.DynamicEpisodeDriver( env, policy, num_episodes=3, observers=[replay_buffer.add_batch]) run_driver = driver.run() rb_gather_all = replay_buffer.gather_all() self.evaluate(tf.compat.v1.global_variables_initializer()) self.evaluate(run_driver) trajectories = self.evaluate(rb_gather_all) transitions = trajectory.to_transition(trajectories) self.assertIsInstance(transitions, trajectory.Transition) time_steps, policy_step, next_time_steps = transitions self.assertAllEqual(time_steps.observation, trajectories.observation[:, :-1]) self.assertAllEqual(time_steps.step_type, trajectories.step_type[:, :-1]) self.assertAllEqual(next_time_steps.observation, trajectories.observation[:, 1:]) self.assertAllEqual(next_time_steps.step_type, trajectories.step_type[:, 1:]) self.assertAllEqual(next_time_steps.reward, trajectories.reward[:, :-1]) self.assertAllEqual(next_time_steps.discount, trajectories.discount[:, :-1]) self.assertAllEqual(policy_step.action, trajectories.action[:, :-1]) self.assertAllEqual(policy_step.info, trajectories.policy_info[:, :-1]) def testToTransitionSpec(self): env = tf_py_environment.TFPyEnvironment( drivers_test_utils.PyEnvironmentMock()) policy = drivers_test_utils.TFPolicyMock( env.time_step_spec(), env.action_spec()) trajectory_spec = policy.trajectory_spec transition_spec = trajectory.to_transition_spec(trajectory_spec) self.assertIsInstance(transition_spec, trajectory.Transition) ts_spec, ps_spec, nts_spec = transition_spec self.assertAllEqual(ts_spec, env.time_step_spec()) self.assertAllEqual(ps_spec.action, env.action_spec()) self.assertAllEqual(nts_spec, env.time_step_spec()) if __name__ == '__main__': tf.test.main()
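# A minimal usage sketch (hypothetical, not part of the test suite above): to_transition
# pairs each step with its successor, so a [B, T] Trajectory yields a Transition whose
# components have T-1 steps along the time axis:
#
#   traj = trajectory.mid((), (), (), tf.zeros([1, 3]), tf.ones([1, 3]))  # B=1, T=3
#   time_steps, policy_steps, next_time_steps = trajectory.to_transition(traj)
#   # time_steps covers steps [0, T-2]; next_time_steps covers steps [1, T-1].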
tensorflow/agents
tf_agents/trajectories/trajectory_test.py
Python
apache-2.0
14,383
package com.hunt.system.exception; /** * @Author ouyangan * @Date 2016/10/29/17:32 * @Description */ public class ForbiddenIpException extends Exception { /** * Constructs a new exception with the specified detail message. The * cause is not initialized, and may subsequently be initialized by * a call to {@link #initCause}. * * @param message the detail message. The detail message is saved for * later retrieval by the {@link #getMessage()} method. */ public ForbiddenIpException(String message) { super(message); } }
Ouyangan/hunt-admin
hunt-web/src/main/java/com/hunt/system/exception/ForbiddenIpException.java
Java
apache-2.0
597
/* Copyright 2015 Chris Hannon Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ namespace FluentBoilerplate.Traits { /// <summary> /// Represents a trait that allows contract requirements for roles /// </summary> /// <typeparam name="TContext">The context type</typeparam> public interface IRolesBasedTrait<TContext> { /// <summary> /// Indicates that the current identity must have a set of roles prior to performing a context action /// </summary> /// <param name="roles">The required roles</param> /// <returns>An instance of <typeparamref name="TContext"/> that contains the new requirements</returns> TContext RequireRoles(params IRole[] roles); /// <summary> /// Indicates that the current identity must not have a set of roles prior to performing a context action /// </summary> /// <param name="roles">The restricted roles</param> /// <returns>An instance of <typeparamref name="TContext"/> that contains the new requirements</returns> TContext MustNotHaveRoles(params IRole[] roles); } }
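// A minimal usage sketch (the context value and role identifiers below are hypothetical
// illustrations, not part of this file). Any TContext implementing this trait can chain
// both requirements before performing a context action:
//
//   context = context.RequireRoles(adminRole)          // identity must hold adminRole
//                    .MustNotHaveRoles(bannedRole);    // and must not hold bannedRole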
Norhaven/FluentBoilerplate
DotNet/FluentBoilerplate/PublicContract/Traits/IRolesBasedTrait.cs
C#
apache-2.0
1,638
class yamMessageHandlerBase(object): """ Base class for message handlers for a :class:`ZMQProcess`. Inheriting classes only need to implement a handler function for each message type. It must assign the protobuf Message class to self.cls and create a mapping of message types to handler functions """ def __init__(self, rep_stream, stop): self._rep_stream = rep_stream self._stop = stop self.cls = None self.funcMap = {} self.subMessageHandler = False def __call__(self, msg): """ Gets called when a message is received by the stream this handler is registered at. *msg* is a list as returned by :meth:`zmq.core.socket.Socket.recv_multipart`. """ if self.subMessageHandler: yamMessage = msg else: yamMessage = self.cls() fullMsg = "".join(msg) yamMessage.ParseFromString(fullMsg) handlerFunc = self.funcMap[yamMessage.type] responseMessage = handlerFunc(yamMessage) return responseMessage
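# A minimal subclass sketch following the docstring above (MyMessage and its PING
# constant are hypothetical protobuf artifacts, not part of this file):
#
# class MyMessageHandler(yamMessageHandlerBase):
#     def __init__(self, rep_stream, stop):
#         super(MyMessageHandler, self).__init__(rep_stream, stop)
#         self.cls = MyMessage                           # protobuf Message class to parse into
#         self.funcMap = {MyMessage.PING: self.onPing}   # message type -> handler function
#
#     def onPing(self, message):
#         # Build and return the response message for a PING request.
#         return self.cls()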
dpquigl/YAM
src/pyyam/yam/handlers/yamMessageHandlerBase.py
Python
apache-2.0
1,148
/* * Copyright 2016-present Facebook, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. You may obtain * a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. */ package com.facebook.buck.versions; import com.facebook.buck.log.Logger; import com.facebook.buck.model.BuildTarget; import com.facebook.buck.model.Flavor; import com.facebook.buck.model.InternalFlavor; import com.facebook.buck.rules.TargetGraph; import com.facebook.buck.rules.TargetGraphAndBuildTargets; import com.facebook.buck.rules.TargetNode; import com.facebook.buck.util.MoreCollectors; import com.google.common.base.Charsets; import com.google.common.base.Preconditions; import com.google.common.base.Predicate; import com.google.common.base.Predicates; import com.google.common.base.Throwables; import com.google.common.cache.CacheBuilder; import com.google.common.cache.CacheLoader; import com.google.common.cache.LoadingCache; import com.google.common.collect.FluentIterable; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Iterables; import com.google.common.collect.Sets; import com.google.common.hash.Hasher; import com.google.common.hash.Hashing; import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.SortedMap; import java.util.TreeMap; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutionException; import java.util.concurrent.ForkJoinPool; import java.util.concurrent.RecursiveAction; import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.StreamSupport; /** * Takes a regular {@link TargetGraph}, resolves any versioned nodes, and returns a new graph with * the versioned nodes removed. */ public class VersionedTargetGraphBuilder { private static final Logger LOG = Logger.get(VersionedTargetGraphBuilder.class); private final ForkJoinPool pool; private final VersionSelector versionSelector; private final TargetGraphAndBuildTargets unversionedTargetGraphAndBuildTargets; /** * The resolved version graph being built. */ private final VersionedTargetGraph.Builder targetGraphBuilder = VersionedTargetGraph.builder(); /** * Map of the build targets to nodes in the resolved graph. */ private final ConcurrentHashMap<BuildTarget, TargetNode<?, ?>> index; /** * Fork-join actions for each root node. */ private final ConcurrentHashMap<BuildTarget, RootAction> rootActions; /** * Intermediate version info for each node. */ private final ConcurrentHashMap<BuildTarget, VersionInfo> versionInfo; /** * Count of root nodes. 
*/ private final AtomicInteger roots = new AtomicInteger(); VersionedTargetGraphBuilder( ForkJoinPool pool, VersionSelector versionSelector, TargetGraphAndBuildTargets unversionedTargetGraphAndBuildTargets) { this.pool = pool; this.versionSelector = versionSelector; this.unversionedTargetGraphAndBuildTargets = unversionedTargetGraphAndBuildTargets; this.index = new ConcurrentHashMap<>( unversionedTargetGraphAndBuildTargets.getTargetGraph().getNodes().size() * 4, 0.75f, pool.getParallelism()); this.rootActions = new ConcurrentHashMap<>( unversionedTargetGraphAndBuildTargets.getTargetGraph().getNodes().size() / 2, 0.75f, pool.getParallelism()); this.versionInfo = new ConcurrentHashMap<>( 2 * unversionedTargetGraphAndBuildTargets.getTargetGraph().getNodes().size(), 0.75f, pool.getParallelism()); } private TargetNode<?, ?> getNode(BuildTarget target) { return unversionedTargetGraphAndBuildTargets.getTargetGraph().get(target); } private Optional<TargetNode<?, ?>> getNodeOptional(BuildTarget target) { return unversionedTargetGraphAndBuildTargets.getTargetGraph().getOptional(target); } private TargetNode<?, ?> indexPutIfAbsent(TargetNode<?, ?> node) { return index.putIfAbsent(node.getBuildTarget(), node); } /** * Get/cache the transitive version info for this node. */ private VersionInfo getVersionInfo(TargetNode<?, ?> node) { VersionInfo info = this.versionInfo.get(node.getBuildTarget()); if (info != null) { return info; } Map<BuildTarget, ImmutableSet<Version>> versionDomain = new HashMap<>(); Optional<TargetNode<VersionedAliasDescription.Arg, ?>> versionedNode = TargetGraphVersionTransformations.getVersionedNode(node); if (versionedNode.isPresent()) { ImmutableMap<Version, BuildTarget> versions = versionedNode.get().getConstructorArg().versions; // Merge in the versioned deps and the version domain. versionDomain.put(node.getBuildTarget(), versions.keySet()); // If this version has only one possible choice, there's no need to wrap the constraints from // its transitive deps in an implication constraint. if (versions.size() == 1) { Map.Entry<Version, BuildTarget> ent = versions.entrySet().iterator().next(); VersionInfo depInfo = getVersionInfo(getNode(ent.getValue())); versionDomain.putAll(depInfo.getVersionDomain()); } else { // For each version choice, inherit the transitive constraints by wrapping them in an // implication dependent on the specific version that pulls them in. for (Map.Entry<Version, BuildTarget> ent : versions.entrySet()) { VersionInfo depInfo = getVersionInfo(getNode(ent.getValue())); versionDomain.putAll(depInfo.getVersionDomain()); } } } else { // Merge in the constraints and version domain/deps from transitive deps. for (BuildTarget depTarget : TargetGraphVersionTransformations.getDeps(node)) { TargetNode<?, ?> dep = getNode(depTarget); if (TargetGraphVersionTransformations.isVersionPropagator(dep) || TargetGraphVersionTransformations.getVersionedNode(dep).isPresent()) { VersionInfo depInfo = getVersionInfo(dep); versionDomain.putAll(depInfo.getVersionDomain()); } } } info = VersionInfo.of(versionDomain); this.versionInfo.put(node.getBuildTarget(), info); return info; } /** * @return a flavor which summarizes the given version selections.
*/ static Flavor getVersionedFlavor(SortedMap<BuildTarget, Version> versions) { Preconditions.checkArgument(!versions.isEmpty()); Hasher hasher = Hashing.md5().newHasher(); for (Map.Entry<BuildTarget, Version> ent : versions.entrySet()) { hasher.putString(ent.getKey().toString(), Charsets.UTF_8); hasher.putString(ent.getValue().getName(), Charsets.UTF_8); } return InternalFlavor.of("v" + hasher.hash().toString().substring(0, 7)); } private TargetNode<?, ?> resolveVersions( TargetNode<?, ?> node, ImmutableMap<BuildTarget, Version> selectedVersions) { Optional<TargetNode<VersionedAliasDescription.Arg, ?>> versionedNode = node.castArg(VersionedAliasDescription.Arg.class); if (versionedNode.isPresent()) { node = getNode( Preconditions.checkNotNull( versionedNode.get().getConstructorArg().versions.get( selectedVersions.get(node.getBuildTarget())))); } return node; } /** * @return the {@link BuildTarget} to use in the resolved target graph, formed by adding a * flavor generated from the given version selections. */ private Optional<BuildTarget> getTranslateBuildTarget( TargetNode<?, ?> node, ImmutableMap<BuildTarget, Version> selectedVersions) { BuildTarget originalTarget = node.getBuildTarget(); node = resolveVersions(node, selectedVersions); BuildTarget newTarget = node.getBuildTarget(); if (TargetGraphVersionTransformations.isVersionPropagator(node)) { VersionInfo info = getVersionInfo(node); Collection<BuildTarget> versionedDeps = info.getVersionDomain().keySet(); TreeMap<BuildTarget, Version> versions = new TreeMap<>(); for (BuildTarget depTarget : versionedDeps) { versions.put(depTarget, selectedVersions.get(depTarget)); } if (!versions.isEmpty()) { Flavor versionedFlavor = getVersionedFlavor(versions); newTarget = node.getBuildTarget().withAppendedFlavors(versionedFlavor); } } return newTarget.equals(originalTarget) ? Optional.empty() : Optional.of(newTarget); } public TargetGraph build() throws VersionException, InterruptedException { LOG.debug( "Starting version target graph transformation (nodes %d)", unversionedTargetGraphAndBuildTargets.getTargetGraph().getNodes().size()); long start = System.currentTimeMillis(); // Walk through explicit built targets, separating them into root and non-root nodes. ImmutableList<RootAction> actions = unversionedTargetGraphAndBuildTargets.getBuildTargets().stream() .map(this::getNode) .map(RootAction::new) .collect(MoreCollectors.toImmutableList()); // Add actions to the `rootActions` member for bookkeeping. actions.forEach(a -> rootActions.put(a.getRoot().getBuildTarget(), a)); // Kick off the jobs to process the root nodes. actions.forEach(pool::submit); // Wait for actions to complete. for (RootAction action : actions) { action.getChecked(); } long end = System.currentTimeMillis(); LOG.debug( "Finished version target graph transformation in %.2f (nodes %d, roots: %d)", (end - start) / 1000.0, index.size(), roots.get()); return targetGraphBuilder.build(); } public static TargetGraphAndBuildTargets transform( VersionSelector versionSelector, TargetGraphAndBuildTargets unversionedTargetGraphAndBuildTargets, ForkJoinPool pool) throws VersionException, InterruptedException { return unversionedTargetGraphAndBuildTargets.withTargetGraph( new VersionedTargetGraphBuilder( pool, versionSelector, unversionedTargetGraphAndBuildTargets) .build()); } /** * Transform a version sub-graph at the given root node. 
*/ private class RootAction extends RecursiveAction { private final TargetNode<?, ?> node; RootAction(TargetNode<?, ?> node) { this.node = node; } private final Predicate<BuildTarget> isVersionPropagator = target -> TargetGraphVersionTransformations.isVersionPropagator(getNode(target)); private final Predicate<BuildTarget> isVersioned = target -> TargetGraphVersionTransformations.getVersionedNode(getNode(target)).isPresent(); /** * Process a non-root node in the graph. */ private TargetNode<?, ?> processNode(TargetNode<?, ?> node) throws VersionException { // If we've already processed this node, exit now. TargetNode<?, ?> processed = index.get(node.getBuildTarget()); if (processed != null) { return processed; } // Add the node to the graph and recurse on its deps. TargetNode<?, ?> oldNode = indexPutIfAbsent(node); if (oldNode != null) { node = oldNode; } else { targetGraphBuilder.addNode(node.getBuildTarget().withFlavors(), node); for (TargetNode<?, ?> dep : process(node.getParseDeps())) { targetGraphBuilder.addEdge(node, dep); } } return node; } /** * Dispatch new jobs to transform the given nodes in parallel and wait for their results. */ private Iterable<TargetNode<?, ?>> process(Iterable<BuildTarget> targets) throws VersionException { int size = Iterables.size(targets); List<RootAction> newActions = new ArrayList<>(size); List<RootAction> oldActions = new ArrayList<>(size); List<TargetNode<?, ?>> nonRootNodes = new ArrayList<>(size); for (BuildTarget target : targets) { TargetNode<?, ?> node = getNode(target); // If we see a root node, create an action to process it using the pool, since it's // potentially heavy-weight. if (TargetGraphVersionTransformations.isVersionRoot(node)) { RootAction oldAction = rootActions.get(target); if (oldAction != null) { oldActions.add(oldAction); } else { RootAction newAction = new RootAction(getNode(target)); oldAction = rootActions.putIfAbsent(target, newAction); if (oldAction == null) { newActions.add(newAction); } else { oldActions.add(oldAction); } } } else { nonRootNodes.add(node); } } // Kick off all new rootActions in parallel. invokeAll(newActions); // For non-root nodes, just process them in-place, as they are inexpensive. for (TargetNode<?, ?> node : nonRootNodes) { processNode(node); } // Wait for any existing rootActions to finish. for (RootAction action : oldActions) { action.join(); } // Now that everything is ready, return all the results. return StreamSupport.stream(targets.spliterator(), false) .map(index::get) .collect(MoreCollectors.toImmutableList()); } public Void getChecked() throws VersionException, InterruptedException { try { return get(); } catch (ExecutionException e) { Throwable rootCause = Throwables.getRootCause(e); Throwables.throwIfInstanceOf(rootCause, VersionException.class); Throwables.throwIfInstanceOf(rootCause, RuntimeException.class); throw new IllegalStateException( String.format("Unexpected exception: %s: %s", e.getClass(), e.getMessage()), e); } } @SuppressWarnings("unchecked") private TargetNode<?, ?> processVersionSubGraphNode( TargetNode<?, ?> node, ImmutableMap<BuildTarget, Version> selectedVersions, TargetNodeTranslator targetTranslator) throws VersionException { Optional<BuildTarget> newTarget = targetTranslator.translateBuildTarget(node.getBuildTarget()); TargetNode<?, ?> processed = index.get(newTarget.orElse(node.getBuildTarget())); if (processed != null) { return processed; } // Create the new target node, with the new target and deps. 
TargetNode<?, ?> newNode = ((Optional<TargetNode<?, ?>>) (Optional<?>) targetTranslator.translateNode(node)) .orElse(node); LOG.verbose( "%s: new node declared deps %s, extra deps %s, arg %s", newNode.getBuildTarget(), newNode.getDeclaredDeps(), newNode.getExtraDeps(), newNode.getConstructorArg()); // Add the new node, and its dep edges, to the new graph. TargetNode<?, ?> oldNode = indexPutIfAbsent(newNode); if (oldNode != null) { newNode = oldNode; } else { // Insert the node into the graph, indexing it by a base target containing only the version // flavor, if one exists. targetGraphBuilder.addNode( node.getBuildTarget().withFlavors( Sets.difference( newNode.getBuildTarget().getFlavors(), node.getBuildTarget().getFlavors())), newNode); for (BuildTarget depTarget : FluentIterable.from(node.getParseDeps()) .filter(Predicates.or(isVersionPropagator, isVersioned))) { targetGraphBuilder.addEdge( newNode, processVersionSubGraphNode( resolveVersions(getNode(depTarget), selectedVersions), selectedVersions, targetTranslator)); } for (TargetNode<?, ?> dep : process( FluentIterable.from(node.getParseDeps()) .filter(Predicates.not(Predicates.or(isVersionPropagator, isVersioned))))) { targetGraphBuilder.addEdge(newNode, dep); } } return newNode; } // Transform a root node and its version sub-graph. private TargetNode<?, ?> processRoot(TargetNode<?, ?> root) throws VersionException { // If we've already processed this root, exit now. final TargetNode<?, ?> processedRoot = index.get(root.getBuildTarget()); if (processedRoot != null) { return processedRoot; } // For stats collection. roots.incrementAndGet(); VersionInfo versionInfo = getVersionInfo(root); // Select the versions to use for this sub-graph. final ImmutableMap<BuildTarget, Version> selectedVersions = versionSelector.resolve( root.getBuildTarget(), versionInfo.getVersionDomain()); // Build a target translator object to translate build targets. ImmutableList<TargetTranslator<?>> translators = ImmutableList.of( new QueryTargetTranslator()); TargetNodeTranslator targetTranslator = new TargetNodeTranslator(translators) { private final LoadingCache<BuildTarget, Optional<BuildTarget>> cache = CacheBuilder.newBuilder() .build( CacheLoader.from( target -> { // If we're handling the root node, there's nothing to translate. if (root.getBuildTarget().equals(target)) { return Optional.empty(); } // If this target isn't in the target graph, which can be the case // of build targets in the `tests` parameter, don't do any // translation. Optional<TargetNode<?, ?>> node = getNodeOptional(target); if (!node.isPresent()) { return Optional.empty(); } return getTranslateBuildTarget(getNode(target), selectedVersions); })); @Override public Optional<BuildTarget> translateBuildTarget(BuildTarget target) { return cache.getUnchecked(target); } @Override public Optional<ImmutableMap<BuildTarget, Version>> getSelectedVersions( BuildTarget target) { ImmutableMap.Builder<BuildTarget, Version> builder = ImmutableMap.builder(); for (BuildTarget dep : getVersionInfo(getNode(target)).getVersionDomain().keySet()) { builder.put(dep, selectedVersions.get(dep)); } return Optional.of(builder.build()); } }; return processVersionSubGraphNode(root, selectedVersions, targetTranslator); } @Override protected void compute() { try { processRoot(node); } catch (VersionException e) { completeExceptionally(e); } } public TargetNode<?, ?> getRoot() { return node; } } }
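/*
 * A minimal usage sketch of the static transform(...) entry point above (the selector
 * and graph values are hypothetical; only the method itself is taken from this file):
 *
 *   TargetGraphAndBuildTargets versioned =
 *       VersionedTargetGraphBuilder.transform(
 *           versionSelector,                // some VersionSelector implementation
 *           unversionedGraphAndTargets,     // graph still containing versioned nodes
 *           new ForkJoinPool());
 */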
vschs007/buck
src/com/facebook/buck/versions/VersionedTargetGraphBuilder.java
Java
apache-2.0
19,928
package org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.policies; import org.eclipse.gef.commands.Command; import org.eclipse.gmf.runtime.emf.type.core.requests.CreateElementRequest; import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.APIResourceEndpointCreateCommand; import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.AddressEndPointCreateCommand; import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.AddressingEndpointCreateCommand; import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.AggregateMediatorCreateCommand; import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.BAMMediatorCreateCommand; import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.BeanMediatorCreateCommand; import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.BuilderMediatorCreateCommand; import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.CacheMediatorCreateCommand; import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.CallMediatorCreateCommand; import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.CallTemplateMediatorCreateCommand; import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.CalloutMediatorCreateCommand; import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.ClassMediatorCreateCommand; import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.CloneMediatorCreateCommand; import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.CloudConnectorCreateCommand; import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.CloudConnectorOperationCreateCommand; import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.CommandMediatorCreateCommand; import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.ConditionalRouterMediatorCreateCommand; import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.DBLookupMediatorCreateCommand; import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.DBReportMediatorCreateCommand; import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.DataMapperMediatorCreateCommand; import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.DefaultEndPointCreateCommand; import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.DropMediatorCreateCommand; import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.EJBMediatorCreateCommand; import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.EnqueueMediatorCreateCommand; import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.EnrichMediatorCreateCommand; import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.EntitlementMediatorCreateCommand; import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.EventMediatorCreateCommand; import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.FailoverEndPointCreateCommand; import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.FastXSLTMediatorCreateCommand; import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.FaultMediatorCreateCommand; import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.FilterMediatorCreateCommand; import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.HTTPEndpointCreateCommand; import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.HeaderMediatorCreateCommand; import 
org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.IterateMediatorCreateCommand; import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.LoadBalanceEndPointCreateCommand; import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.LogMediatorCreateCommand; import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.LoopBackMediatorCreateCommand; import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.NamedEndpointCreateCommand; import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.OAuthMediatorCreateCommand; import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.PayloadFactoryMediatorCreateCommand; import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.PropertyMediatorCreateCommand; import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.RMSequenceMediatorCreateCommand; import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.RecipientListEndPointCreateCommand; import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.RespondMediatorCreateCommand; import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.RouterMediatorCreateCommand; import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.RuleMediatorCreateCommand; import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.ScriptMediatorCreateCommand; import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.SendMediatorCreateCommand; import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.SequenceCreateCommand; import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.SmooksMediatorCreateCommand; import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.SpringMediatorCreateCommand; import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.StoreMediatorCreateCommand; import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.SwitchMediatorCreateCommand; import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.TemplateEndpointCreateCommand; import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.ThrottleMediatorCreateCommand; import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.TransactionMediatorCreateCommand; import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.URLRewriteMediatorCreateCommand; import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.ValidateMediatorCreateCommand; import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.WSDLEndPointCreateCommand; import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.XQueryMediatorCreateCommand; import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.XSLTMediatorCreateCommand; import org.wso2.developerstudio.eclipse.gmf.esb.diagram.providers.EsbElementTypes; /** * @generated */ public class MediatorFlowMediatorFlowCompartment14ItemSemanticEditPolicy extends EsbBaseItemSemanticEditPolicy { /** * @generated */ public MediatorFlowMediatorFlowCompartment14ItemSemanticEditPolicy() { super(EsbElementTypes.MediatorFlow_3627); } /** * @generated */ protected Command getCreateCommand(CreateElementRequest req) { if (EsbElementTypes.DropMediator_3491 == req.getElementType()) { return getGEFWrapper(new DropMediatorCreateCommand(req)); } if (EsbElementTypes.PropertyMediator_3492 == req.getElementType()) { return getGEFWrapper(new PropertyMediatorCreateCommand(req)); } if (EsbElementTypes.ThrottleMediator_3493 == req.getElementType()) { 
return getGEFWrapper(new ThrottleMediatorCreateCommand(req)); } if (EsbElementTypes.FilterMediator_3494 == req.getElementType()) { return getGEFWrapper(new FilterMediatorCreateCommand(req)); } if (EsbElementTypes.LogMediator_3495 == req.getElementType()) { return getGEFWrapper(new LogMediatorCreateCommand(req)); } if (EsbElementTypes.EnrichMediator_3496 == req.getElementType()) { return getGEFWrapper(new EnrichMediatorCreateCommand(req)); } if (EsbElementTypes.XSLTMediator_3497 == req.getElementType()) { return getGEFWrapper(new XSLTMediatorCreateCommand(req)); } if (EsbElementTypes.SwitchMediator_3498 == req.getElementType()) { return getGEFWrapper(new SwitchMediatorCreateCommand(req)); } if (EsbElementTypes.Sequence_3503 == req.getElementType()) { return getGEFWrapper(new SequenceCreateCommand(req)); } if (EsbElementTypes.EventMediator_3504 == req.getElementType()) { return getGEFWrapper(new EventMediatorCreateCommand(req)); } if (EsbElementTypes.EntitlementMediator_3505 == req.getElementType()) { return getGEFWrapper(new EntitlementMediatorCreateCommand(req)); } if (EsbElementTypes.ClassMediator_3506 == req.getElementType()) { return getGEFWrapper(new ClassMediatorCreateCommand(req)); } if (EsbElementTypes.SpringMediator_3507 == req.getElementType()) { return getGEFWrapper(new SpringMediatorCreateCommand(req)); } if (EsbElementTypes.ScriptMediator_3508 == req.getElementType()) { return getGEFWrapper(new ScriptMediatorCreateCommand(req)); } if (EsbElementTypes.FaultMediator_3509 == req.getElementType()) { return getGEFWrapper(new FaultMediatorCreateCommand(req)); } if (EsbElementTypes.XQueryMediator_3510 == req.getElementType()) { return getGEFWrapper(new XQueryMediatorCreateCommand(req)); } if (EsbElementTypes.CommandMediator_3511 == req.getElementType()) { return getGEFWrapper(new CommandMediatorCreateCommand(req)); } if (EsbElementTypes.DBLookupMediator_3512 == req.getElementType()) { return getGEFWrapper(new DBLookupMediatorCreateCommand(req)); } if (EsbElementTypes.DBReportMediator_3513 == req.getElementType()) { return getGEFWrapper(new DBReportMediatorCreateCommand(req)); } if (EsbElementTypes.SmooksMediator_3514 == req.getElementType()) { return getGEFWrapper(new SmooksMediatorCreateCommand(req)); } if (EsbElementTypes.SendMediator_3515 == req.getElementType()) { return getGEFWrapper(new SendMediatorCreateCommand(req)); } if (EsbElementTypes.HeaderMediator_3516 == req.getElementType()) { return getGEFWrapper(new HeaderMediatorCreateCommand(req)); } if (EsbElementTypes.CloneMediator_3517 == req.getElementType()) { return getGEFWrapper(new CloneMediatorCreateCommand(req)); } if (EsbElementTypes.CacheMediator_3518 == req.getElementType()) { return getGEFWrapper(new CacheMediatorCreateCommand(req)); } if (EsbElementTypes.IterateMediator_3519 == req.getElementType()) { return getGEFWrapper(new IterateMediatorCreateCommand(req)); } if (EsbElementTypes.CalloutMediator_3520 == req.getElementType()) { return getGEFWrapper(new CalloutMediatorCreateCommand(req)); } if (EsbElementTypes.TransactionMediator_3521 == req.getElementType()) { return getGEFWrapper(new TransactionMediatorCreateCommand(req)); } if (EsbElementTypes.RMSequenceMediator_3522 == req.getElementType()) { return getGEFWrapper(new RMSequenceMediatorCreateCommand(req)); } if (EsbElementTypes.RuleMediator_3523 == req.getElementType()) { return getGEFWrapper(new RuleMediatorCreateCommand(req)); } if (EsbElementTypes.OAuthMediator_3524 == req.getElementType()) { return getGEFWrapper(new OAuthMediatorCreateCommand(req)); } if 
(EsbElementTypes.AggregateMediator_3525 == req.getElementType()) { return getGEFWrapper(new AggregateMediatorCreateCommand(req)); } if (EsbElementTypes.StoreMediator_3588 == req.getElementType()) { return getGEFWrapper(new StoreMediatorCreateCommand(req)); } if (EsbElementTypes.BuilderMediator_3591 == req.getElementType()) { return getGEFWrapper(new BuilderMediatorCreateCommand(req)); } if (EsbElementTypes.CallTemplateMediator_3594 == req.getElementType()) { return getGEFWrapper(new CallTemplateMediatorCreateCommand(req)); } if (EsbElementTypes.PayloadFactoryMediator_3597 == req.getElementType()) { return getGEFWrapper(new PayloadFactoryMediatorCreateCommand(req)); } if (EsbElementTypes.EnqueueMediator_3600 == req.getElementType()) { return getGEFWrapper(new EnqueueMediatorCreateCommand(req)); } if (EsbElementTypes.URLRewriteMediator_3620 == req.getElementType()) { return getGEFWrapper(new URLRewriteMediatorCreateCommand(req)); } if (EsbElementTypes.ValidateMediator_3623 == req.getElementType()) { return getGEFWrapper(new ValidateMediatorCreateCommand(req)); } if (EsbElementTypes.RouterMediator_3628 == req.getElementType()) { return getGEFWrapper(new RouterMediatorCreateCommand(req)); } if (EsbElementTypes.ConditionalRouterMediator_3635 == req .getElementType()) { return getGEFWrapper(new ConditionalRouterMediatorCreateCommand(req)); } if (EsbElementTypes.BAMMediator_3680 == req.getElementType()) { return getGEFWrapper(new BAMMediatorCreateCommand(req)); } if (EsbElementTypes.BeanMediator_3683 == req.getElementType()) { return getGEFWrapper(new BeanMediatorCreateCommand(req)); } if (EsbElementTypes.EJBMediator_3686 == req.getElementType()) { return getGEFWrapper(new EJBMediatorCreateCommand(req)); } if (EsbElementTypes.DefaultEndPoint_3609 == req.getElementType()) { return getGEFWrapper(new DefaultEndPointCreateCommand(req)); } if (EsbElementTypes.AddressEndPoint_3610 == req.getElementType()) { return getGEFWrapper(new AddressEndPointCreateCommand(req)); } if (EsbElementTypes.FailoverEndPoint_3611 == req.getElementType()) { return getGEFWrapper(new FailoverEndPointCreateCommand(req)); } if (EsbElementTypes.RecipientListEndPoint_3692 == req.getElementType()) { return getGEFWrapper(new RecipientListEndPointCreateCommand(req)); } if (EsbElementTypes.WSDLEndPoint_3612 == req.getElementType()) { return getGEFWrapper(new WSDLEndPointCreateCommand(req)); } if (EsbElementTypes.NamedEndpoint_3660 == req.getElementType()) { return getGEFWrapper(new NamedEndpointCreateCommand(req)); } if (EsbElementTypes.LoadBalanceEndPoint_3613 == req.getElementType()) { return getGEFWrapper(new LoadBalanceEndPointCreateCommand(req)); } if (EsbElementTypes.APIResourceEndpoint_3674 == req.getElementType()) { return getGEFWrapper(new APIResourceEndpointCreateCommand(req)); } if (EsbElementTypes.AddressingEndpoint_3689 == req.getElementType()) { return getGEFWrapper(new AddressingEndpointCreateCommand(req)); } if (EsbElementTypes.HTTPEndpoint_3709 == req.getElementType()) { return getGEFWrapper(new HTTPEndpointCreateCommand(req)); } if (EsbElementTypes.TemplateEndpoint_3716 == req.getElementType()) { return getGEFWrapper(new TemplateEndpointCreateCommand(req)); } if (EsbElementTypes.CloudConnector_3719 == req.getElementType()) { return getGEFWrapper(new CloudConnectorCreateCommand(req)); } if (EsbElementTypes.CloudConnectorOperation_3722 == req .getElementType()) { return getGEFWrapper(new CloudConnectorOperationCreateCommand(req)); } if (EsbElementTypes.LoopBackMediator_3736 == req.getElementType()) { return 
getGEFWrapper(new LoopBackMediatorCreateCommand(req)); } if (EsbElementTypes.RespondMediator_3739 == req.getElementType()) { return getGEFWrapper(new RespondMediatorCreateCommand(req)); } if (EsbElementTypes.CallMediator_3742 == req.getElementType()) { return getGEFWrapper(new CallMediatorCreateCommand(req)); } if (EsbElementTypes.DataMapperMediator_3761 == req.getElementType()) { return getGEFWrapper(new DataMapperMediatorCreateCommand(req)); } if (EsbElementTypes.FastXSLTMediator_3764 == req.getElementType()) { return getGEFWrapper(new FastXSLTMediatorCreateCommand(req)); } return super.getCreateCommand(req); } }
rajeevanv89/developer-studio
esb/org.wso2.developerstudio.eclipse.gmf.esb.diagram/src/org/wso2/developerstudio/eclipse/gmf/esb/diagram/edit/policies/MediatorFlowMediatorFlowCompartment14ItemSemanticEditPolicy.java
Java
apache-2.0
15,236
using Nancy; public class HomeModule : NancyModule { public HomeModule() { Get("/", args => "Aloha from .NET, using the NancyFX framework. This is version 2.0 of this program."); Get("/os", x => { return System.Runtime.InteropServices.RuntimeInformation.OSDescription; }); } }
redhat-dotnet-msa/aloha
HomeModule.cs
C#
apache-2.0
375
package com.planet_ink.coffee_mud.Abilities.Spells; import com.planet_ink.coffee_mud.core.interfaces.*; import com.planet_ink.coffee_mud.core.*; import com.planet_ink.coffee_mud.core.collections.*; import com.planet_ink.coffee_mud.Abilities.interfaces.*; import com.planet_ink.coffee_mud.Areas.interfaces.*; import com.planet_ink.coffee_mud.Behaviors.interfaces.*; import com.planet_ink.coffee_mud.CharClasses.interfaces.*; import com.planet_ink.coffee_mud.Commands.interfaces.*; import com.planet_ink.coffee_mud.Common.interfaces.*; import com.planet_ink.coffee_mud.Exits.interfaces.*; import com.planet_ink.coffee_mud.Items.interfaces.*; import com.planet_ink.coffee_mud.Libraries.interfaces.*; import com.planet_ink.coffee_mud.Locales.interfaces.*; import com.planet_ink.coffee_mud.MOBS.interfaces.*; import com.planet_ink.coffee_mud.Races.interfaces.*; import java.util.*; /* Copyright 2002-2016 Bo Zimmerman Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ public class Spell_CombatPrecognition extends Spell { @Override public String ID() { return "Spell_CombatPrecognition"; } private final static String localizedName = CMLib.lang().L("Combat Precognition"); @Override public String name() { return localizedName; } private final static String localizedStaticDisplay = CMLib.lang().L("(Combat Precognition)"); @Override public String displayText() { return localizedStaticDisplay; } @Override public int abstractQuality(){return Ability.QUALITY_BENEFICIAL_SELF;} @Override protected int canAffectCode(){return CAN_MOBS;} @Override protected int overrideMana(){return 100;} boolean lastTime=false; @Override public int classificationCode(){ return Ability.ACODE_SPELL|Ability.DOMAIN_DIVINATION;} @Override public boolean okMessage(final Environmental myHost, final CMMsg msg) { if(!(affected instanceof MOB)) return true; final MOB mob=(MOB)affected; if(msg.amITarget(mob) &&(mob.location()!=null) &&(CMLib.flags().isAliveAwakeMobile(mob,true))) { if(msg.targetMinor()==CMMsg.TYP_WEAPONATTACK) { final CMMsg msg2=CMClass.getMsg(mob,msg.source(),null,CMMsg.MSG_QUIETMOVEMENT,L("<S-NAME> avoid(s) the attack by <T-NAME>!")); if((proficiencyCheck(null,mob.charStats().getStat(CharStats.STAT_DEXTERITY)-60,false)) &&(!lastTime) &&(msg.source().getVictim()==mob) &&(msg.source().rangeToTarget()==0) &&(mob.location().okMessage(mob,msg2))) { lastTime=true; mob.location().send(mob,msg2); helpProficiency(mob, 0); return false; } lastTime=false; } else if((msg.value()<=0) &&(CMath.bset(msg.targetMajor(),CMMsg.MASK_MALICIOUS)) &&((mob.fetchAbility(ID())==null)||proficiencyCheck(null,mob.charStats().getStat(CharStats.STAT_DEXTERITY)-50,false))) { String tool=null; if((msg.tool() instanceof Ability)) tool=((Ability)msg.tool()).name(); CMMsg msg2=null; switch(msg.targetMinor()) { case CMMsg.TYP_JUSTICE: if((CMath.bset(msg.targetMajor(),CMMsg.MASK_MOVE)) &&(tool!=null)) msg2=CMClass.getMsg(mob,msg.source(),CMMsg.MSG_NOISYMOVEMENT,L("<S-NAME> avoid(s) the @x1 from <T-NAME>.",tool)); break; case CMMsg.TYP_GAS: 
msg2=CMClass.getMsg(mob,msg.source(),CMMsg.MSG_NOISYMOVEMENT,L("<S-NAME> avoid(s) the @x1 from <T-NAME>.",((tool==null)?"noxious fumes":tool))); break; case CMMsg.TYP_COLD: msg2=CMClass.getMsg(mob,msg.source(),CMMsg.MSG_NOISYMOVEMENT,L("<S-NAME> avoid(s) the @x1 from <T-NAME>.",((tool==null)?"cold blast":tool))); break; case CMMsg.TYP_ELECTRIC: msg2=CMClass.getMsg(mob,msg.source(),CMMsg.MSG_NOISYMOVEMENT,L("<S-NAME> avoid(s) the @x1 from <T-NAME>.",((tool==null)?"electrical attack":tool))); break; case CMMsg.TYP_FIRE: msg2=CMClass.getMsg(mob,msg.source(),CMMsg.MSG_NOISYMOVEMENT,L("<S-NAME> avoid(s) the @x1 from <T-NAME>.",((tool==null)?"blast of heat":tool))); break; case CMMsg.TYP_WATER: msg2=CMClass.getMsg(mob,msg.source(),CMMsg.MSG_NOISYMOVEMENT,L("<S-NAME> avoid(s) the @x1 from <T-NAME>.",((tool==null)?"wet blast":tool))); break; case CMMsg.TYP_ACID: msg2=CMClass.getMsg(mob,msg.source(),CMMsg.MSG_NOISYMOVEMENT,L("<S-NAME> avoid(s) the @x1 from <T-NAME>.",((tool==null)?"acid attack":tool))); break; case CMMsg.TYP_SONIC: msg2=CMClass.getMsg(mob,msg.source(),CMMsg.MSG_NOISYMOVEMENT,L("<S-NAME> avoid(s) the @x1 from <T-NAME>.",((tool==null)?"sonic attack":tool))); break; case CMMsg.TYP_LASER: msg2=CMClass.getMsg(mob,msg.source(),CMMsg.MSG_NOISYMOVEMENT,L("<S-NAME> avoid(s) the @x1 from <T-NAME>.",((tool==null)?"laser attack":tool))); break; } if((msg2!=null)&&(mob.location()!=null)&&(mob.location().okMessage(mob,msg2))) { mob.location().send(mob,msg2); return false; } } } return true; } @Override public void unInvoke() { // undo the effects of this spell if(!(affected instanceof MOB)) return; final MOB mob=(MOB)affected; super.unInvoke(); mob.tell(L("Your combat precognition fades away.")); } @Override public boolean invoke(MOB mob, List<String> commands, Physical givenTarget, boolean auto, int asLevel) { MOB target=mob; if((auto)&&(givenTarget!=null)&&(givenTarget instanceof MOB)) target=(MOB)givenTarget; if(target.fetchEffect(ID())!=null) { mob.tell(target,null,null,L("<S-NAME> already <S-HAS-HAVE> the sight.")); return false; } if(!super.invoke(mob,commands,givenTarget,auto,asLevel)) return false; final boolean success=proficiencyCheck(mob,0,auto); if(success) { invoker=mob; final CMMsg msg=CMClass.getMsg(mob,target,this,verbalCastCode(mob,target,auto),L(auto?"<T-NAME> shout(s) combatively!":"^S<S-NAME> shout(s) a combative spell!^?")); if(mob.location().okMessage(mob,msg)) { mob.location().send(mob,msg); beneficialAffect(mob,target,asLevel,0); } } else return beneficialWordsFizzle(mob,target,L("<S-NAME> shout(s) combatively, but nothing more happens.")); // return whether it worked return success; } }
oriontribunal/CoffeeMud
com/planet_ink/coffee_mud/Abilities/Spells/Spell_CombatPrecognition.java
Java
apache-2.0
6,760
package edu.neu.coe.info6205; import java.io.BufferedReader; import java.io.IOException; import java.io.InputStreamReader; import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Scanner; /* * This is a program to check whether tickets have been used by students. It takes all the tickets as input. * After this, the user enters the ticket numbers used by students. * The program can be terminated by entering 7889 (the exit code). * * Input Format-- * * [ticket_number_1,ticket_number_2,ticket_number_3,ticket_number_4,ticket_number_5....ticket_number_n ] * * Output-- * * ======================================================================== * Final Tally of tickets * Tickets Used Status * ======================================================================== * 182051 1 * 167929 2 * 154421 Not Used * 160561 Not Used * * */ class Checker { public void checkValid(int[] nums) { Scanner input = new Scanner(System.in); System.out.println("Total tickets: " + nums.length); HashMap<Integer, Integer> ticketCounter = new HashMap<>(); for (int num : nums) { ticketCounter.put(num, 0); } while (true) { System.out.println("Enter the ticket number: "); int ticket = input.nextInt(); if (ticket == 7889) break; if (!ticketCounter.containsKey(ticket)) { System.out.println("Invalid Ticket: " + ticket); } else { int value = ticketCounter.get(ticket); if (value == 0) { ticketCounter.put(ticket, value + 1); System.out.println("Valid Ticket: " + ticket); } else { ticketCounter.put(ticket, value + 1); System.out.println("Ticket already used by another user"); System.out.println("Number of users: " + ticketCounter.get(ticket)); } } } System.out.println("========================================================================"); System.out.println("Final Tally of tickets"); System.out.println("Tickets Used Status"); System.out.println("========================================================================"); for (int num : nums) { System.out.println(num + " " + (ticketCounter.get(num) == 0 ? "Not Used" : ticketCounter.get(num))); } } } public class TicketChecker { public static int[] stringToIntegerArray(String input) { input = input.trim(); input = input.substring(1, input.length() - 1); if (input.length() == 0) { return new int[0]; } String[] parts = input.split(","); int[] output = new int[parts.length]; for (int index = 0; index < parts.length; index++) { String part = parts[index].trim(); output[index] = Integer.parseInt(part); } return output; } public static String integerArrayListToString(List<Integer> nums, int length) { if (length == 0) { return "[]"; } StringBuilder result = new StringBuilder(); for (int index = 0; index < length; index++) { Integer number = nums.get(index); result.append(number).append(", "); } return "[" + result.substring(0, result.length() - 2) + "]"; } public static String integerArrayListToString(List<Integer> nums) { return integerArrayListToString(nums, nums.size()); } public static String int2dListToString(Collection<List<Integer>> nums) { StringBuilder sb = new StringBuilder("["); for (List<Integer> list : nums) { sb.append(integerArrayListToString(list)); sb.append(","); } sb.setCharAt(sb.length() - 1, ']'); return sb.toString(); } public static void main(String[] args) throws IOException { System.out.println("Enter the total tickets"); BufferedReader in = new BufferedReader(new InputStreamReader(System.in)); String line; // TODO figure out what was meant here: while does not loop!
while ((line = in.readLine()) != null) { int[] nums = stringToIntegerArray(line); new Checker().checkValid(nums); break; } } }
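/*
 * An illustrative session, matching the header comment (ticket numbers are examples):
 *
 *   Enter the total tickets
 *   [182051,167929,154421]
 *   Total tickets: 3
 *   182051  -> Valid Ticket: 182051
 *   182051  -> Ticket already used by another user (Number of users: 2)
 *   7889    -> terminates input and prints the final tally table
 */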
rchillyard/INFO6205
src/main/java/edu/neu/coe/info6205/TicketChecker.java
Java
apache-2.0
4,562
// GUI Animator FREE // Version: 1.1.0 // Compatible: Unity 5.4.0 or higher, see more info in Readme.txt file. // // Developer: Gold Experience Team (https://www.ge-team.com) // // Unity Asset Store: https://www.assetstore.unity3d.com/en/#!/content/58843 // GE Store: https://www.ge-team.com/en/products/gui-animator-free/ // Full version on Unity Asset Store: https://www.assetstore.unity3d.com/en/#!/content/28709 // Full version on GE Store: https://www.ge-team.com/en/products/gui-animator-for-unity-ui/ // // Please direct any bugs/comments/suggestions to geteamdev@gmail.com #region Namespaces using UnityEngine; using System.Collections; #endregion // Namespaces // ###################################################################### // GA_FREE_OpenOtherScene class // This class handles 8 buttons for changing scenes. // ###################################################################### public class GA_FREE_OpenOtherScene : MonoBehaviour { // ######################################## // MonoBehaviour Functions // ######################################## #region MonoBehaviour // Start is called on the frame when a script is enabled just before any of the Update methods is called the first time. // http://docs.unity3d.com/ScriptReference/MonoBehaviour.Start.html void Start () { } // Update is called every frame, if the MonoBehaviour is enabled. // http://docs.unity3d.com/ScriptReference/MonoBehaviour.Update.html void Update () { } #endregion // MonoBehaviour // ######################################## // UI Responder functions // ######################################## #region UI Responder // Open Demo Scene 1 public void ButtonOpenDemoScene1 () { // Disable all buttons GUIAnimSystemFREE.Instance.EnableAllButtons(false); // Waits 1.5 secs for Moving Out animation then load next level GUIAnimSystemFREE.Instance.LoadLevel("GA FREE - Demo01 (960x600px)", 1.5f); gameObject.SendMessage("HideAllGUIs"); } // Open Demo Scene 2 public void ButtonOpenDemoScene2 () { // Disable all buttons GUIAnimSystemFREE.Instance.EnableAllButtons(false); // Waits 1.5 secs for Moving Out animation then load next level GUIAnimSystemFREE.Instance.LoadLevel("GA FREE - Demo02 (960x600px)", 1.5f); gameObject.SendMessage("HideAllGUIs"); } // Open Demo Scene 3 public void ButtonOpenDemoScene3 () { // Disable all buttons GUIAnimSystemFREE.Instance.EnableAllButtons(false); // Waits 1.5 secs for Moving Out animation then load next level GUIAnimSystemFREE.Instance.LoadLevel("GA FREE - Demo03 (960x600px)", 1.5f); gameObject.SendMessage("HideAllGUIs"); } // Open Demo Scene 4 public void ButtonOpenDemoScene4 () { // Disable all buttons GUIAnimSystemFREE.Instance.EnableAllButtons(false); // Waits 1.5 secs for Moving Out animation then load next level GUIAnimSystemFREE.Instance.LoadLevel("GA FREE - Demo04 (960x600px)", 1.5f); gameObject.SendMessage("HideAllGUIs"); } // Open Demo Scene 5 public void ButtonOpenDemoScene5 () { // Disable all buttons GUIAnimSystemFREE.Instance.EnableAllButtons(false); // Waits 1.5 secs for Moving Out animation then load next level GUIAnimSystemFREE.Instance.LoadLevel("GA FREE - Demo05 (960x600px)", 1.5f); gameObject.SendMessage("HideAllGUIs"); } // Open Demo Scene 6 public void ButtonOpenDemoScene6 () { // Disable all buttons GUIAnimSystemFREE.Instance.EnableAllButtons(false); // Waits 1.5 secs for Moving Out animation then load next level GUIAnimSystemFREE.Instance.LoadLevel("GA FREE - Demo06 (960x600px)", 1.5f); gameObject.SendMessage("HideAllGUIs"); } // Open Demo Scene 7 public void
ButtonOpenDemoScene7 () { // Disable all buttons GUIAnimSystemFREE.Instance.EnableAllButtons(false); // Waits 1.5 secs for the Moving Out animation, then loads the next level GUIAnimSystemFREE.Instance.LoadLevel("GA FREE - Demo07 (960x600px)", 1.5f); gameObject.SendMessage("HideAllGUIs"); } // Open Demo Scene 8 public void ButtonOpenDemoScene8 () { // Disable all buttons GUIAnimSystemFREE.Instance.EnableAllButtons(false); // Waits 1.5 secs for the Moving Out animation, then loads the next level GUIAnimSystemFREE.Instance.LoadLevel("GA FREE - Demo08 (960x600px)", 1.5f); gameObject.SendMessage("HideAllGUIs"); } #endregion // UI Responder }
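The eight button handlers above differ only in the scene name they pass to LoadLevel; a minimal consolidation sketch follows (the OpenScene helper name is hypothetical and not part of the original asset):

// Hypothetical helper: collapses the repeated handlers into one parameterized method.
public void OpenScene(string sceneName)
{
	// Disable all buttons while the Moving Out animation plays
	GUIAnimSystemFREE.Instance.EnableAllButtons(false);
	// Waits 1.5 secs for the Moving Out animation, then loads the given scene
	GUIAnimSystemFREE.Instance.LoadLevel(sceneName, 1.5f);
	gameObject.SendMessage("HideAllGUIs");
}
// Each Unity button could then pass its own scene name, e.g. OpenScene("GA FREE - Demo01 (960x600px)").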
Kuraikari/Modern-Times
Modern Time (J)RPG/Assets/Plugins/GUI Animator/GUI Animator FREE/Demo (CSharp)/Scripts/GA_FREE_OpenOtherScene.cs
C#
apache-2.0
4,394
// +build linux package notification import "os/exec" func Send(title, summary string) error { return exec.Command("notify-send", title, summary).Run() }
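A minimal usage sketch for Send above; the import path is inferred from the repo metadata below (an assumption) and it presumes a Linux desktop with notify-send on PATH:

package main

import (
	"log"

	"github.com/jamesrr39/goutil/notification" // assumed import path, inferred from the repo metadata
)

func main() {
	// Send shells out to notify-send; it returns an error if the binary is missing or exits non-zero.
	if err := notification.Send("Build finished", "All tests passed"); err != nil {
		log.Fatal(err)
	}
}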
jamesrr39/goutil
notification/notification_linux.go
Go
apache-2.0
157
define(function(require, exports, module) { var EditorManager = brackets.getModule("editor/EditorManager"); var ExtensionUtils = brackets.getModule("utils/ExtensionUtils"); var HTMLUtils = brackets.getModule("language/HTMLUtils"); var PreferencesManager = brackets.getModule("preferences/PreferencesManager"); function wrapBrackets(str) { if (typeof str !== "string") { return null; } var result = str; if (!result.startsWith("<")) { result = "<" + result; } if (!result.endsWith(">")) { result = result + ">"; } return result; } var TauDocumentParser; module.exports = TauDocumentParser = (function() { function TauDocumentParser() { this.tauAPIs = {}; this.tauHTML = {}; this.tauGuideData = {}; this.tauGuidePaths = {}; this.readJson(); } TauDocumentParser.prototype.readJson = function() { var self = this; ExtensionUtils.loadFile(module, "tau-document-config.json").done( function (data) { self.tauGuideData = data; self.setTauGuideData(); } ); }; TauDocumentParser.prototype.setTauGuideData = function() { var profile, version; profile = PreferencesManager.getViewState("projectProfile"); version = PreferencesManager.getViewState("projectVersion"); this.tauGuidePaths = this.tauGuideData[version][profile].doc; this.tauAPIs = this.tauGuideData[version][profile].api; this.tauHTML = this.tauGuideData[version][profile].html; return this.tauAPIs; }; TauDocumentParser.prototype.parse = function() { var api = this.tauAPIs; var html = this.tauHTML; var href = null; var name = null; var editor = EditorManager.getFocusedEditor(); var language = editor.getLanguageForSelection(); var langId = language.getId(); var pos = editor.getSelection(); var line = editor.document.getLine(pos.start.line); if (langId === "html") { var tagInfo = HTMLUtils.getTagInfo(editor, editor.getCursorPos()); if (tagInfo.position.tokenType === HTMLUtils.TAG_NAME || tagInfo.position.tokenType === HTMLUtils.ATTR_VALUE) { var start = 0; var end = 0; // Find a start tag for (var cur = pos.start.ch; cur >= 0; cur--) { if (line[cur] === "<") { start = cur; break; } } // Find an end tag for (var cur = pos.start.ch; cur < line.length; cur++) { if (line[cur] === ">" || line[cur] === "/") { end = cur; break; } } var result = line.slice(start, end); result = wrapBrackets(result); var element = $.parseHTML(result); if (element && element.length > 0) { Object.keys(html).forEach((value) => { if (element[0].matches(value)) { if (html[value].href) { href = this.tauGuidePaths.local + html[value].href; name = html[value].name; } } }); } } } else if (langId === "javascript") { var start = line.lastIndexOf("tau."); var end = 0; if (start === -1) { return null; } for (var cur = pos.start.ch; cur < line.length; cur++) { if (line[cur] === " " || line[cur] === "(" || line[cur] === ".") { end = cur; break; } } var data = line.slice(start, end); if (data) { data = data.split("."); for (var i=0; i<data.length; i++) { api = api[data[i]]; if (!api) { break; } } if (api && api.href) { // TODO: Should change the href to use the network // href = this.tauGuidePaths.network + api.href; href = this.tauGuidePaths.local + api.href; name = api.name; } } } return { href: href, name: name }; }; return TauDocumentParser; }()); });
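The wrapBrackets helper above only normalizes missing angle brackets before the text is handed to $.parseHTML; its behavior in isolation (illustrative values):

// wrapBrackets("div")   -> "<div>"  (both brackets added)
// wrapBrackets("<div")  -> "<div>"  (closing bracket added)
// wrapBrackets("<div>") -> "<div>"  (already wrapped, returned unchanged)
// wrapBrackets(42)      -> null     (non-string input is rejected)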
HunseopJeong/WATT
libs/brackets-server/embedded-ext/tau-document/tau-document-parser.js
JavaScript
apache-2.0
5,250
/* * Copyright © 2013-2018 camunda services GmbH and various authors (info@camunda.com) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.camunda.bpm.container.impl.ejb; import java.util.List; import java.util.Set; import javax.annotation.PostConstruct; import javax.ejb.EJB; import javax.ejb.Local; import javax.ejb.Stateless; import javax.ejb.TransactionAttribute; import javax.ejb.TransactionAttributeType; import org.camunda.bpm.ProcessEngineService; import org.camunda.bpm.engine.ProcessEngine; /** * <p>Exposes the {@link ProcessEngineService} as EJB inside the container.</p> * * @author Daniel Meyer * */ @Stateless(name="ProcessEngineService", mappedName="ProcessEngineService") @Local(ProcessEngineService.class) @TransactionAttribute(TransactionAttributeType.SUPPORTS) public class EjbProcessEngineService implements ProcessEngineService { @EJB protected EjbBpmPlatformBootstrap ejbBpmPlatform; /** the processEngineServiceDelegate */ protected ProcessEngineService processEngineServiceDelegate; @PostConstruct protected void initProcessEngineServiceDelegate() { processEngineServiceDelegate = ejbBpmPlatform.getProcessEngineService(); } public ProcessEngine getDefaultProcessEngine() { return processEngineServiceDelegate.getDefaultProcessEngine(); } public List<ProcessEngine> getProcessEngines() { return processEngineServiceDelegate.getProcessEngines(); } public Set<String> getProcessEngineNames() { return processEngineServiceDelegate.getProcessEngineNames(); } public ProcessEngine getProcessEngine(String name) { return processEngineServiceDelegate.getProcessEngine(name); } }
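A hedged sketch of how a co-deployed client might look this bean up via JNDI; the exact JNDI name is container-specific, and the binding used here is an assumption, not from the source:

// Hypothetical client-side lookup; the actual JNDI name varies by container.
import javax.naming.InitialContext;
import javax.naming.NamingException;
import org.camunda.bpm.ProcessEngineService;
import org.camunda.bpm.engine.ProcessEngine;

public class ProcessEngineClient {
  public ProcessEngine lookupDefaultEngine() throws NamingException {
    InitialContext ctx = new InitialContext();
    // The "java:global/..." binding below is an assumption; check the server's binding log.
    ProcessEngineService service = (ProcessEngineService)
        ctx.lookup("java:global/camunda-bpm-platform/ProcessEngineService");
    return service.getDefaultProcessEngine();
  }
}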
xasx/camunda-bpm-platform
javaee/ejb-service/src/main/java/org/camunda/bpm/container/impl/ejb/EjbProcessEngineService.java
Java
apache-2.0
2,196
package fr.sii.ogham.core.util; import static java.util.stream.Collectors.toList; import java.util.ArrayList; import java.util.List; /** * Helper class that registers objects with associated priority. Each registered * object is then returned as a list ordered by priority. The higher priority * value comes first in the list. * * @author Aurélien Baudet * * @param <P> * the type of priorized objects */ public class PriorizedList<P> { private final List<WithPriority<P>> priorities; /** * Initializes with an empty list */ public PriorizedList() { this(new ArrayList<>()); } /** * Initializes with some priorized objects * * @param priorities * the priorized objects */ public PriorizedList(List<WithPriority<P>> priorities) { super(); this.priorities = priorities; } /** * Registers a new priorized object * * @param priorized * the wrapped object with its priority * @return this instance for fluent chaining */ public PriorizedList<P> register(WithPriority<P> priorized) { priorities.add(priorized); return this; } /** * Registers an object with its priority * * @param priorized * the object to register * @param priority * the associated priority * @return this instance for fluent chaining */ public PriorizedList<P> register(P priorized, int priority) { priorities.add(new WithPriority<>(priorized, priority)); return this; } /** * Merge all priorities of another {@link PriorizedList} into this one. * * @param other * the priority list * @return this instance for fluent chaining */ public PriorizedList<P> register(PriorizedList<P> other) { priorities.addAll(other.getPriorities()); return this; } /** * Returns true if this list contains no elements. * * @return if this list contains no elements */ public boolean isEmpty() { return priorities.isEmpty(); } /** * Get the list of priorities ordered by priority * * @return ordered list of priorities */ public List<WithPriority<P>> getPriorities() { return sort(); } /** * Get the list of priorized objects ordered by highest priority. * * @return list of objects ordered by highest priority */ public List<P> getOrdered() { return sort().stream().map(WithPriority::getPriorized).collect(toList()); } private List<WithPriority<P>> sort() { priorities.sort(WithPriority.comparator()); return priorities; } }
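A short usage sketch for PriorizedList (values are illustrative; the highest priority comes first, as the javadoc states):

// Minimal usage sketch; assumes WithPriority and PriorizedList from the file above.
PriorizedList<String> handlers = new PriorizedList<>();
handlers.register("fallback", 0)
        .register("primary", 100)
        .register("secondary", 50);
// getOrdered() sorts by priority, highest first: [primary, secondary, fallback]
java.util.List<String> ordered = handlers.getOrdered();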
groupe-sii/ogham
ogham-core/src/main/java/fr/sii/ogham/core/util/PriorizedList.java
Java
apache-2.0
2,496
/* * Copyright 2016-present Open Networking Laboratory * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.onosproject.drivers.lumentum; import com.google.common.collect.Lists; import org.apache.commons.lang3.tuple.Pair; import org.onosproject.net.ChannelSpacing; import org.onosproject.net.GridType; import org.onosproject.net.OchSignal; import org.onosproject.net.OchSignalType; import org.onosproject.net.Port; import org.onosproject.net.PortNumber; import org.onosproject.net.device.DeviceService; import org.onosproject.net.driver.AbstractHandlerBehaviour; import org.onosproject.net.flow.DefaultFlowEntry; import org.onosproject.net.flow.DefaultFlowRule; import org.onosproject.net.flow.DefaultTrafficSelector; import org.onosproject.net.flow.DefaultTrafficTreatment; import org.onosproject.net.flow.FlowEntry; import org.onosproject.net.flow.FlowId; import org.onosproject.net.flow.FlowRule; import org.onosproject.net.flow.FlowRuleProgrammable; import org.onosproject.net.flow.TrafficSelector; import org.onosproject.net.flow.TrafficTreatment; import org.onosproject.net.flow.criteria.Criteria; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.snmp4j.PDU; import org.snmp4j.event.ResponseEvent; import org.snmp4j.smi.Integer32; import org.snmp4j.smi.OID; import org.snmp4j.smi.UnsignedInteger32; import org.snmp4j.smi.VariableBinding; import org.snmp4j.util.TreeEvent; import java.io.IOException; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.LinkedList; import java.util.List; import java.util.Objects; import java.util.stream.Collectors; import static com.google.common.base.Preconditions.checkArgument; // TODO: need to convert between OChSignal and XC channel number public class LumentumSdnRoadmFlowRuleProgrammable extends AbstractHandlerBehaviour implements FlowRuleProgrammable { private static final Logger log = LoggerFactory.getLogger(LumentumSdnRoadmFlowRuleProgrammable.class); // Default values private static final int DEFAULT_TARGET_GAIN_PREAMP = 150; private static final int DEFAULT_TARGET_GAIN_BOOSTER = 200; private static final int DISABLE_CHANNEL_TARGET_POWER = -650; private static final int DEFAULT_CHANNEL_TARGET_POWER = -30; private static final int DISABLE_CHANNEL_ABSOLUTE_ATTENUATION = 160; private static final int DEFAULT_CHANNEL_ABSOLUTE_ATTENUATION = 50; private static final int DISABLE_CHANNEL_ADD_DROP_PORT_INDEX = 1; private static final int OUT_OF_SERVICE = 1; private static final int IN_SERVICE = 2; private static final int OPEN_LOOP = 1; private static final int CLOSED_LOOP = 2; // First 20 ports are add/mux ports, next 20 are drop/demux private static final int DROP_PORT_OFFSET = 20; // OIDs private static final String CTRL_AMP_MODULE_SERVICE_STATE_PREAMP = ".1.3.6.1.4.1.46184.1.4.4.1.2.1"; private static final String CTRL_AMP_MODULE_SERVICE_STATE_BOOSTER = ".1.3.6.1.4.1.46184.1.4.4.1.2.2"; private static final String CTRL_AMP_MODULE_TARGET_GAIN_PREAMP = ".1.3.6.1.4.1.46184.1.4.4.1.8.1"; private static final String 
CTRL_AMP_MODULE_TARGET_GAIN_BOOSTER = ".1.3.6.1.4.1.46184.1.4.4.1.8.2"; private static final String CTRL_CHANNEL_STATE = ".1.3.6.1.4.1.46184.1.4.2.1.3."; private static final String CTRL_CHANNEL_MODE = ".1.3.6.1.4.1.46184.1.4.2.1.4."; private static final String CTRL_CHANNEL_TARGET_POWER = ".1.3.6.1.4.1.46184.1.4.2.1.6."; private static final String CTRL_CHANNEL_ADD_DROP_PORT_INDEX = ".1.3.6.1.4.1.46184.1.4.2.1.13."; private static final String CTRL_CHANNEL_ABSOLUTE_ATTENUATION = ".1.3.6.1.4.1.46184.1.4.2.1.5."; private LumentumSnmpDevice snmp; @Override public Collection<FlowEntry> getFlowEntries() { try { snmp = new LumentumSnmpDevice(handler().data().deviceId()); } catch (IOException e) { log.error("Failed to connect to device: ", e); return Collections.emptyList(); } // Line in is last but one port, line out is last DeviceService deviceService = this.handler().get(DeviceService.class); List<Port> ports = deviceService.getPorts(data().deviceId()); if (ports.size() < 2) { return Collections.emptyList(); } PortNumber lineIn = ports.get(ports.size() - 2).number(); PortNumber lineOut = ports.get(ports.size() - 1).number(); Collection<FlowEntry> entries = Lists.newLinkedList(); // Add rules OID addOid = new OID(CTRL_CHANNEL_STATE + "1"); entries.addAll( fetchRules(addOid, true, lineOut).stream() .map(fr -> new DefaultFlowEntry(fr, FlowEntry.FlowEntryState.ADDED, 0, 0, 0)) .collect(Collectors.toList()) ); // Drop rules OID dropOid = new OID(CTRL_CHANNEL_STATE + "2"); entries.addAll( fetchRules(dropOid, false, lineIn).stream() .map(fr -> new DefaultFlowEntry(fr, FlowEntry.FlowEntryState.ADDED, 0, 0, 0)) .collect(Collectors.toList()) ); return entries; } @Override public Collection<FlowRule> applyFlowRules(Collection<FlowRule> rules) { try { snmp = new LumentumSnmpDevice(data().deviceId()); } catch (IOException e) { log.error("Failed to connect to device: ", e); } // Line ports DeviceService deviceService = this.handler().get(DeviceService.class); List<Port> ports = deviceService.getPorts(data().deviceId()); List<PortNumber> linePorts = ports.subList(ports.size() - 2, ports.size()).stream() .map(p -> p.number()) .collect(Collectors.toList()); // Apply the valid rules on the device Collection<FlowRule> added = rules.stream() .map(r -> new CrossConnectFlowRule(r, linePorts)) .filter(xc -> installCrossConnect(xc)) .collect(Collectors.toList()); // Cache the cookie/priority CrossConnectCache cache = this.handler().get(CrossConnectCache.class); added.forEach(xc -> cache.set( Objects.hash(data().deviceId(), xc.selector(), xc.treatment()), xc.id(), xc.priority())); return added; } @Override public Collection<FlowRule> removeFlowRules(Collection<FlowRule> rules) { try { snmp = new LumentumSnmpDevice(data().deviceId()); } catch (IOException e) { log.error("Failed to connect to device: ", e); } // Line ports DeviceService deviceService = this.handler().get(DeviceService.class); List<Port> ports = deviceService.getPorts(data().deviceId()); List<PortNumber> linePorts = ports.subList(ports.size() - 2, ports.size()).stream() .map(p -> p.number()) .collect(Collectors.toList()); // Apply the valid rules on the device Collection<FlowRule> removed = rules.stream() .map(r -> new CrossConnectFlowRule(r, linePorts)) .filter(xc -> removeCrossConnect(xc)) .collect(Collectors.toList()); // Remove flow rule from cache CrossConnectCache cache = this.handler().get(CrossConnectCache.class); removed.forEach(xc -> cache.remove( Objects.hash(data().deviceId(), xc.selector(), xc.treatment()))); return removed; } // Installs 
cross connect on device private boolean installCrossConnect(CrossConnectFlowRule xc) { int channel = toChannel(xc.ochSignal()); long addDrop = xc.addDrop().toLong(); if (!xc.isAddRule()) { addDrop -= DROP_PORT_OFFSET; } // Create the PDU object PDU pdu = new PDU(); pdu.setType(PDU.SET); // Enable preamp & booster List<OID> oids = Arrays.asList(new OID(CTRL_AMP_MODULE_SERVICE_STATE_PREAMP), new OID(CTRL_AMP_MODULE_SERVICE_STATE_BOOSTER)); oids.forEach( oid -> pdu.add(new VariableBinding(oid, new Integer32(IN_SERVICE))) ); // Set target gain on preamp & booster OID ctrlAmpModuleTargetGainPreamp = new OID(CTRL_AMP_MODULE_TARGET_GAIN_PREAMP); pdu.add(new VariableBinding(ctrlAmpModuleTargetGainPreamp, new Integer32(DEFAULT_TARGET_GAIN_PREAMP))); OID ctrlAmpModuleTargetGainBooster = new OID(CTRL_AMP_MODULE_TARGET_GAIN_BOOSTER); pdu.add(new VariableBinding(ctrlAmpModuleTargetGainBooster, new Integer32(DEFAULT_TARGET_GAIN_BOOSTER))); // Make cross connect OID ctrlChannelAddDropPortIndex = new OID(CTRL_CHANNEL_ADD_DROP_PORT_INDEX + (xc.isAddRule() ? "1." : "2.") + channel); pdu.add(new VariableBinding(ctrlChannelAddDropPortIndex, new UnsignedInteger32(addDrop))); // Add rules use closed loop, drop rules open loop // Add rules are set to target power, drop rules are attenuated if (xc.isAddRule()) { OID ctrlChannelMode = new OID(CTRL_CHANNEL_MODE + "1." + channel); pdu.add(new VariableBinding(ctrlChannelMode, new Integer32(CLOSED_LOOP))); OID ctrlChannelTargetPower = new OID(CTRL_CHANNEL_TARGET_POWER + "1." + channel); pdu.add(new VariableBinding(ctrlChannelTargetPower, new Integer32(DEFAULT_CHANNEL_TARGET_POWER))); } else { OID ctrlChannelMode = new OID(CTRL_CHANNEL_MODE + "2." + channel); pdu.add(new VariableBinding(ctrlChannelMode, new Integer32(OPEN_LOOP))); OID ctrlChannelAbsoluteAttenuation = new OID(CTRL_CHANNEL_ABSOLUTE_ATTENUATION + "2." + channel); pdu.add(new VariableBinding( ctrlChannelAbsoluteAttenuation, new UnsignedInteger32(DEFAULT_CHANNEL_ABSOLUTE_ATTENUATION))); } // Final step is to enable the channel OID ctrlChannelState = new OID(CTRL_CHANNEL_STATE + (xc.isAddRule() ? "1." : "2.") + channel); pdu.add(new VariableBinding(ctrlChannelState, new Integer32(IN_SERVICE))); try { ResponseEvent response = snmp.set(pdu); // TODO: parse response } catch (IOException e) { log.error("Failed to create cross connect, unable to connect to device: ", e); } return true; } // Removes cross connect on device private boolean removeCrossConnect(CrossConnectFlowRule xc) { int channel = toChannel(xc.ochSignal()); // Create the PDU object PDU pdu = new PDU(); pdu.setType(PDU.SET); // Disable the channel OID ctrlChannelState = new OID(CTRL_CHANNEL_STATE + (xc.isAddRule() ? "1." : "2.") + channel); pdu.add(new VariableBinding(ctrlChannelState, new Integer32(OUT_OF_SERVICE))); // Put cross connect back into default port 1 OID ctrlChannelAddDropPortIndex = new OID(CTRL_CHANNEL_ADD_DROP_PORT_INDEX + (xc.isAddRule() ? "1." : "2.") + channel); pdu.add(new VariableBinding(ctrlChannelAddDropPortIndex, new UnsignedInteger32(DISABLE_CHANNEL_ADD_DROP_PORT_INDEX))); // Put port/channel back to open loop OID ctrlChannelMode = new OID(CTRL_CHANNEL_MODE + (xc.isAddRule() ? "1." : "2.") + channel); pdu.add(new VariableBinding(ctrlChannelMode, new Integer32(OPEN_LOOP))); // Add rules are set to target power, drop rules are attenuated if (xc.isAddRule()) { OID ctrlChannelTargetPower = new OID(CTRL_CHANNEL_TARGET_POWER + "1." 
+ channel); pdu.add(new VariableBinding(ctrlChannelTargetPower, new Integer32(DISABLE_CHANNEL_TARGET_POWER))); } else { OID ctrlChannelAbsoluteAttenuation = new OID(CTRL_CHANNEL_ABSOLUTE_ATTENUATION + "2." + channel); pdu.add(new VariableBinding( ctrlChannelAbsoluteAttenuation, new UnsignedInteger32(DISABLE_CHANNEL_ABSOLUTE_ATTENUATION))); } try { ResponseEvent response = snmp.set(pdu); // TODO: parse response } catch (IOException e) { log.error("Failed to remove cross connect, unable to connect to device: ", e); return false; } return true; } /** * Convert OCh signal to Lumentum channel ID. * * @param ochSignal OCh signal * @return Lumentum channel ID */ public static int toChannel(OchSignal ochSignal) { // FIXME: move to cross connect validation checkArgument(ochSignal.channelSpacing() == ChannelSpacing.CHL_50GHZ); checkArgument(LumentumSnmpDevice.START_CENTER_FREQ.compareTo(ochSignal.centralFrequency()) <= 0); checkArgument(LumentumSnmpDevice.END_CENTER_FREQ.compareTo(ochSignal.centralFrequency()) >= 0); return ochSignal.spacingMultiplier() + LumentumSnmpDevice.MULTIPLIER_SHIFT; } /** * Convert Lumentum channel ID to OCh signal. * * @param channel Lumentum channel ID * @return OCh signal */ public static OchSignal toOchSignal(int channel) { checkArgument(1 <= channel); checkArgument(channel <= 96); return new OchSignal(GridType.DWDM, ChannelSpacing.CHL_50GHZ, channel - LumentumSnmpDevice.MULTIPLIER_SHIFT, 4); } // Returns the currently configured add/drop port for the given channel. private PortNumber getAddDropPort(int channel, boolean isAddPort) { OID oid = new OID(CTRL_CHANNEL_ADD_DROP_PORT_INDEX + (isAddPort ? "1" : "2")); for (TreeEvent event : snmp.get(oid)) { if (event == null) { return null; } VariableBinding[] varBindings = event.getVariableBindings(); for (VariableBinding varBinding : varBindings) { if (varBinding.getOid().last() == channel) { int port = varBinding.getVariable().toInt(); if (!isAddPort) { port += DROP_PORT_OFFSET; } return PortNumber.portNumber(port); } } } return null; } // Returns the currently installed flow entries on the device. private List<FlowRule> fetchRules(OID oid, boolean isAdd, PortNumber linePort) { List<FlowRule> rules = new LinkedList<>(); for (TreeEvent event : snmp.get(oid)) { if (event == null) { continue; } VariableBinding[] varBindings = event.getVariableBindings(); for (VariableBinding varBinding : varBindings) { CrossConnectCache cache = this.handler().get(CrossConnectCache.class); if (varBinding.getVariable().toInt() == IN_SERVICE) { int channel = varBinding.getOid().removeLast(); PortNumber addDropPort = getAddDropPort(channel, isAdd); if (addDropPort == null) { continue; } TrafficSelector selector = DefaultTrafficSelector.builder() .matchInPort(isAdd ? addDropPort : linePort) .add(Criteria.matchOchSignalType(OchSignalType.FIXED_GRID)) .add(Criteria.matchLambda(toOchSignal(channel))) .build(); TrafficTreatment treatment = DefaultTrafficTreatment.builder() .setOutput(isAdd ? linePort : addDropPort) .build(); // Lookup flow ID and priority int hash = Objects.hash(data().deviceId(), selector, treatment); Pair<FlowId, Integer> lookup = cache.get(hash); if (lookup == null) { continue; } FlowRule fr = DefaultFlowRule.builder() .forDevice(data().deviceId()) .makePermanent() .withSelector(selector) .withTreatment(treatment) .withPriority(lookup.getRight()) .withCookie(lookup.getLeft().value()) .build(); rules.add(fr); } } } return rules; } }
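toChannel and toOchSignal above are inverse mappings over the 96-channel, 50 GHz fixed grid; a small round-trip sketch (channel number is illustrative):

// Round-trip sketch: Lumentum channel -> OchSignal -> channel.
int channel = 7; // any value in the checked range 1..96
OchSignal signal = LumentumSdnRoadmFlowRuleProgrammable.toOchSignal(channel);
// toOchSignal builds a fixed-grid 50 GHz DWDM signal; toChannel undoes the multiplier shift.
assert LumentumSdnRoadmFlowRuleProgrammable.toChannel(signal) == channel;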
donNewtonAlpha/onos
drivers/lumentum/src/main/java/org/onosproject/drivers/lumentum/LumentumSdnRoadmFlowRuleProgrammable.java
Java
apache-2.0
17,118
/* * DBeaver - Universal Database Manager * Copyright (C) 2010-2017 Serge Rider (serge@jkiss.org) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.jkiss.dbeaver.ext.sqlite.model; import org.jkiss.dbeaver.ext.generic.model.GenericSQLDialect; import org.jkiss.dbeaver.model.exec.jdbc.JDBCDatabaseMetaData; import org.jkiss.dbeaver.model.impl.jdbc.JDBCDataSource; import org.jkiss.dbeaver.model.impl.sql.BasicSQLDialect; import org.jkiss.dbeaver.model.sql.SQLConstants; public class SQLiteSQLDialect extends GenericSQLDialect { public SQLiteSQLDialect() { super("SQLite"); } public void initDriverSettings(JDBCDataSource dataSource, JDBCDatabaseMetaData metaData) { super.initDriverSettings(dataSource, metaData); } public String[][] getIdentifierQuoteStrings() { return BasicSQLDialect.DEFAULT_QUOTE_STRINGS; } }
ruspl-afed/dbeaver
plugins/org.jkiss.dbeaver.ext.sqlite/src/org/jkiss/dbeaver/ext/sqlite/model/SQLiteSQLDialect.java
Java
apache-2.0
1,391
// Copyright 2017 MongoDB Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #pragma once #include "../microbench.hpp" #include <algorithm> #include <fstream> #include <bsoncxx/stdx/make_unique.hpp> #include <bsoncxx/stdx/optional.hpp> #include <mongocxx/client.hpp> #include <mongocxx/gridfs/bucket.hpp> #include <mongocxx/instance.hpp> #include <mongocxx/uri.hpp> namespace benchmark { using bsoncxx::builder::basic::kvp; using bsoncxx::builder::basic::make_document; using bsoncxx::stdx::make_unique; class gridfs_download : public microbench { public: // The task size comes from the Driver Performance Benchmarking Reference Doc. gridfs_download(std::string file_name) : microbench{"TestGridFsDownload", 52.43, std::set<benchmark_type>{benchmark_type::multi_bench, benchmark_type::read_bench}}, _conn{mongocxx::uri{}}, _file_name{std::move(file_name)} {} void setup(); void teardown(); protected: void task(); private: mongocxx::client _conn; mongocxx::gridfs::bucket _bucket; bsoncxx::stdx::optional<bsoncxx::types::bson_value::view> _id; std::string _file_name; }; void gridfs_download::setup() { mongocxx::database db = _conn["perftest"]; db.drop(); std::ifstream stream{_file_name}; _bucket = db.gridfs_bucket(); auto result = _bucket.upload_from_stream(_file_name, &stream); _id = result.id(); } void gridfs_download::teardown() { _conn["perftest"].drop(); } void gridfs_download::task() { auto downloader = _bucket.open_download_stream(_id.value()); auto file_length = downloader.file_length(); auto buffer_size = std::min(file_length, static_cast<std::int64_t>(downloader.chunk_size())); auto buffer = make_unique<std::uint8_t[]>(static_cast<std::size_t>(buffer_size)); while (auto length_read = downloader.read(buffer.get(), static_cast<std::size_t>(buffer_size))) { } } } // namespace benchmark
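A minimal driver sketch for the benchmark class above; the file name is an assumption, a local mongod is assumed reachable at the default URI, and the real harness (microbench.hpp, not shown) presumably times task() itself since it is protected:

#include <mongocxx/instance.hpp>
#include "gridfs_download.hpp"

int main() {
    mongocxx::instance inst{};  // must be created once and outlive all other mongocxx objects
    // File name is illustrative; setup() uploads it so task() has something to download.
    benchmark::gridfs_download bench{"gridfs_large.bin"};
    bench.setup();
    // ... the benchmark harness would repeatedly time task() here ...
    bench.teardown();  // drops the perftest database
    return 0;
}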
mongodb/mongo-cxx-driver
benchmark/multi_doc/gridfs_download.hpp
C++
apache-2.0
2,561
// Copyright 2016 PlanBase Inc. & Glen Peterson // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package org.organicdesign.fp.tuple; import java.io.Serializable; import java.util.Objects; import static org.organicdesign.fp.FunctionUtils.stringify; // ====================================================================================== // THIS CLASS IS GENERATED BY /tupleGenerator/TupleGenerator.java. DO NOT EDIT MANUALLY! // ====================================================================================== /** Holds 12 items of potentially different types. Designed to let you easily create immutable subclasses (to give your data structures meaningful names) with correct equals(), hashCode(), and toString() methods. */ public class Tuple12<A,B,C,D,E,F,G,H,I,J,K,L> implements Serializable { // For serializable. Make sure to change whenever internal data format changes. // Implemented because implementing serializable only on a sub-class of an // immutable class requires a serialization proxy. That's probably worse than // the conceptual burden of all tuples being Serializable. private static final long serialVersionUID = 20160906065500L; // Fields are protected so that sub-classes can make accessor methods with meaningful names. protected final A _1; protected final B _2; protected final C _3; protected final D _4; protected final E _5; protected final F _6; protected final G _7; protected final H _8; protected final I _9; protected final J _10; protected final K _11; protected final L _12; /** Constructor is protected (not public) for easy inheritance. Josh Bloch's "Item 1" says public static factory methods are better than constructors because they have names, they can return an existing object instead of a new one, and they can return a sub-type. Therefore, you have more flexibility with a static factory as part of your public API than with a public constructor. 
*/ protected Tuple12(A a, B b, C c, D d, E e, F f, G g, H h, I i, J j, K k, L l) { _1 = a; _2 = b; _3 = c; _4 = d; _5 = e; _6 = f; _7 = g; _8 = h; _9 = i; _10 = j; _11 = k; _12 = l; } /** Public static factory method */ public static <A,B,C,D,E,F,G,H,I,J,K,L> Tuple12<A,B,C,D,E,F,G,H,I,J,K,L> of(A a, B b, C c, D d, E e, F f, G g, H h, I i, J j, K k, L l) { return new Tuple12<>(a, b, c, d, e, f, g, h, i, j, k, l); } /** Returns the 1st field */ public A _1() { return _1; } /** Returns the 2nd field */ public B _2() { return _2; } /** Returns the 3rd field */ public C _3() { return _3; } /** Returns the 4th field */ public D _4() { return _4; } /** Returns the 5th field */ public E _5() { return _5; } /** Returns the 6th field */ public F _6() { return _6; } /** Returns the 7th field */ public G _7() { return _7; } /** Returns the 8th field */ public H _8() { return _8; } /** Returns the 9th field */ public I _9() { return _9; } /** Returns the 10th field */ public J _10() { return _10; } /** Returns the 11th field */ public K _11() { return _11; } /** Returns the 12th field */ public L _12() { return _12; } @Override public String toString() { return getClass().getSimpleName() + "(" + stringify(_1) + "," + stringify(_2) + "," + stringify(_3) + "," + stringify(_4) + "," + stringify(_5) + "," + stringify(_6) + "," + stringify(_7) + "," + stringify(_8) + "," + stringify(_9) + "," + stringify(_10) + "," + stringify(_11) + "," + stringify(_12) + ")"; } @Override public boolean equals(Object other) { // Cheapest operation first... if (this == other) { return true; } if (!(other instanceof Tuple12)) { return false; } // Details... @SuppressWarnings("rawtypes") final Tuple12 that = (Tuple12) other; return Objects.equals(this._1, that._1()) && Objects.equals(this._2, that._2()) && Objects.equals(this._3, that._3()) && Objects.equals(this._4, that._4()) && Objects.equals(this._5, that._5()) && Objects.equals(this._6, that._6()) && Objects.equals(this._7, that._7()) && Objects.equals(this._8, that._8()) && Objects.equals(this._9, that._9()) && Objects.equals(this._10, that._10()) && Objects.equals(this._11, that._11()) && Objects.equals(this._12, that._12()); } @Override public int hashCode() { // First 2 fields match Tuple2 which implements java.util.Map.Entry as part of the map // contract and therefore must match java.util.HashMap.Node.hashCode(). int ret = 0; if (_1 != null) { ret = _1.hashCode(); } if (_2 != null) { ret = ret ^ _2.hashCode(); } if (_3 != null) { ret = ret + _3.hashCode(); } if (_4 != null) { ret = ret + _4.hashCode(); } if (_5 != null) { ret = ret + _5.hashCode(); } if (_6 != null) { ret = ret + _6.hashCode(); } if (_7 != null) { ret = ret + _7.hashCode(); } if (_8 != null) { ret = ret + _8.hashCode(); } if (_9 != null) { ret = ret + _9.hashCode(); } if (_10 != null) { ret = ret + _10.hashCode(); } if (_11 != null) { ret = ret + _11.hashCode(); } if (_12 != null) { ret = ret + _12.hashCode(); } return ret; } }
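A quick usage sketch for the generated tuple above (illustrative values):

// Create via the static factory, read fields via the _N() accessors.
Tuple12<String,Integer,Boolean,Long,Double,Float,Short,Byte,Character,String,Integer,String> t =
        Tuple12.of("a", 2, true, 4L, 5.0, 6f, (short) 7, (byte) 8, '9', "ten", 11, "twelve");
String first = t._1();   // "a"
String last = t._12();   // "twelve"
// equals/hashCode/toString cover all twelve fields, so tuples behave sensibly as map keys.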
GlenKPeterson/UncleJim
src/main/java/org/organicdesign/fp/tuple/Tuple12.java
Java
apache-2.0
6,112
package com.zhongdan.lobby.bl.ai.chinesechess.engine; import java.io.BufferedReader; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; import java.io.RandomAccessFile; import java.net.URL; import java.util.Calendar; import java.util.Random; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; public class SearchEngine { private static Log log = LogFactory.getLog(SearchEngine.class); public static final int MaxBookMove = 40;// maximum number of moves taken from the opening book public static final int MaxKiller = 4;// maximum number of killer moves to search private static final int BookUnique = 1;// node-type flags, same below private static final int BookMulti = 2; private static final int HashAlpha = 4; private static final int HashBeta = 8; private static final int HashPv = 16; private static final int ObsoleteValue = -CCEvalue.MaxValue - 1; private static final int UnknownValue = -CCEvalue.MaxValue - 2; // private static final int BookUniqueValue = CCEvalue.MaxValue + 1; // private static final int BookMultiValue = CCEvalue.MaxValue + 2;// prefer the opening book; the value must be large enough public static final int CLOCK_S = 1000;// 1 second = 1000 milliseconds public static final int CLOCK_M = 1000 * 60;// 1 minute = 60 seconds private static final Random rand = new Random(); private MoveNode bestMove = null; // for search control private int depth; private long properTimer, limitTimer; // Global state of the search, including: // 1. search tree and history table private ActiveBoard activeBoard; private int histTab[][]; public void setActiveBoard(ActiveBoard activeBoard) { this.activeBoard = activeBoard; } // 2. search options private int selectMask, style;// playing style, default = EngineOption.Normal; private boolean wideQuiesc, futility, nullMove; // SelectMask: randomness , WideQuiesc (conservative; true if Style == EngineOption.solid) // Futility (true if Style == EngineOption.risky, i.e. aggressive) // NullMove: whether to apply null-move pruning private boolean ponder; // 3. time-control parameters private long startTimer, minTimer, maxTimer; private int startMove; private boolean stop; // 4. statistics: Main Search Nodes, Quiescence Search Nodes and Hash Nodes private int nodes, nullNodes, hashNodes, killerNodes, betaNodes, pvNodes, alphaNodes, mateNodes, leafNodes; private int quiescNullNodes, quiescBetaNodes, quiescPvNodes, quiescAlphaNodes, quiescMateNodes; private int hitBeta, hitPv, hitAlpha; // 5. search results private int lastScore, pvLineNum; private MoveNode pvLine[] = new MoveNode[ActiveBoard.MAX_MOVE_NUM]; // 6. 
Hash and Book Structure private int hashMask, maxBookPos, bookPosNum; private HashRecord[] hashList; private BookRecord[] bookList; public SearchEngine(ActiveBoard chessP) { this(); activeBoard = chessP; } public SearchEngine() { int i; // Position = new ChessPosition(); histTab = new int[90][90]; nodes = nullNodes = hashNodes = killerNodes = betaNodes = pvNodes = alphaNodes = mateNodes = leafNodes = 0; selectMask = 0;// 1<<10-1; // randomness style = EngineOption.Normal; wideQuiesc = style == EngineOption.Solid; futility = style == EngineOption.Risky; nullMove = true; // Search results lastScore = 0; pvLineNum = 0; MoveNode PvLine[] = new MoveNode[ActiveBoard.MAX_MOVE_NUM]; for (i = 0; i < ActiveBoard.MAX_MOVE_NUM; i++) { PvLine[i] = new MoveNode(); } newHash(17, 14); // set the timeout and the iterative search depth // depth = 8; // properTimer = CLOCK_M * 1; // limitTimer = CLOCK_M * 20; depth = 8; properTimer = CLOCK_S * 2; limitTimer = CLOCK_S * 2; } // Begin History and Hash Table Procedures public void newHash(int HashScale, int BookScale) { histTab = new int[90][90]; hashMask = (1 << HashScale) - 1; maxBookPos = 1 << BookScale; hashList = new HashRecord[hashMask + 1]; for (int i = 0; i < hashMask + 1; i++) { hashList[i] = new HashRecord(); } bookList = new BookRecord[maxBookPos]; // for (int i=0; i< MaxBookPos; i++){ // BookList[i]=new BookRecord(); // } clearHistTab(); clearHash(); // BookRand = rand.nextLong();//(unsigned long) time(NULL); } public void delHash() { histTab = null; hashList = null; bookList = null; } public void clearHistTab() { int i, j; for (i = 0; i < 90; i++) { for (j = 0; j < 90; j++) { histTab[i][j] = 0; } } } public void clearHash() { int i; for (i = 0; i <= hashMask; i++) { hashList[i].flag = 0; } } private int probeHash(MoveNode HashMove, int Alpha, int Beta, int Depth) { boolean MateNode; HashRecord TempHash; int tmpInt = (int) (activeBoard.getZobristKey() & hashMask); long tmpLong1 = activeBoard.getZobristLock(), tmpLong2; TempHash = hashList[(int) (activeBoard.getZobristKey() & hashMask)]; tmpLong2 = TempHash.zobristLock; if (TempHash.flag != 0 && TempHash.zobristLock == activeBoard.getZobristLock()) { MateNode = false; if (TempHash.value > CCEvalue.MaxValue - ActiveBoard.MAX_MOVE_NUM / 2) { TempHash.value -= activeBoard.getMoveNum() - startMove; MateNode = true; } else if (TempHash.value < ActiveBoard.MAX_MOVE_NUM / 2 - CCEvalue.MaxValue) { TempHash.value += activeBoard.getMoveNum() - startMove; MateNode = true; } if (MateNode || TempHash.depth >= Depth) { if ((TempHash.flag & HashBeta) != 0) { if (TempHash.value >= Beta) { hitBeta++; return TempHash.value; } } else if ((TempHash.flag & HashAlpha) != 0) { if (TempHash.value <= Alpha) { hitAlpha++; return TempHash.value; } } else if ((TempHash.flag & HashPv) != 0) { hitPv++; return TempHash.value; } else { return UnknownValue; } } if (TempHash.bestMove.src == -1) { return UnknownValue; } else { HashMove = TempHash.bestMove; return ObsoleteValue; } } return UnknownValue; } private void recordHash(MoveNode hashMove, int hashFlag, int value, int depth) { HashRecord tempHash; tempHash = hashList[(int) (activeBoard.getZobristKey() & hashMask)]; if ((tempHash.flag != 0) && tempHash.depth > depth) { return; } tempHash.zobristLock = activeBoard.getZobristLock(); tempHash.flag = hashFlag; tempHash.depth = depth; tempHash.value = value; if (tempHash.value > CCEvalue.MaxValue - ActiveBoard.MAX_MOVE_NUM / 2) { tempHash.value += activeBoard.getMoveNum() - startMove; } else if (tempHash.value < ActiveBoard.MAX_MOVE_NUM / 2 - 
CCEvalue.MaxValue) { tempHash.value -= activeBoard.getMoveNum() - startMove; } tempHash.bestMove = hashMove; hashList[(int) (activeBoard.getZobristKey() & hashMask)] = tempHash; } private void GetPvLine() { HashRecord tempHash; tempHash = hashList[(int) (activeBoard.getZobristKey() & hashMask)]; if ((tempHash.flag != 0) && tempHash.bestMove.src != -1 && tempHash.zobristLock == activeBoard.getZobristLock()) { pvLine[pvLineNum] = tempHash.bestMove; activeBoard.movePiece(tempHash.bestMove); pvLineNum++; if (activeBoard.isLoop(1) == 0) {// ??????? GetPvLine(); } activeBoard.undoMove(); } } // record example: i0h0 4 rnbakabr1/9/4c1c1n/p1p1N3p/9/6p2/P1P1P3P/2N1C2C1/9/R1BAKAB1R w - - 0 7 // i0h0:Move , 4: evalue, other: FEN String public void loadBook(final String bookFile) throws IOException {// opening book int bookMoveNum, value, i; BufferedReader inFile; String lineStr; // LineStr; int index = 0; MoveNode bookMove = new MoveNode();// note:wrong HashRecord tempHash; ActiveBoard BookPos = new ActiveBoard();// note:wrong InputStream is = SearchEngine.class.getResourceAsStream(bookFile); inFile = new BufferedReader(new InputStreamReader(is), 1024 * 1024); if (inFile == null) return; bookPosNum = 0; int recordedToHash = 0;// for test while ((lineStr = inFile.readLine()) != null) { bookMove = new MoveNode(); bookMove.move(lineStr); index = 0; if (bookMove.src != -1) { index += 5; while (lineStr.charAt(index) == ' ') { index++; } BookPos.loadFen(lineStr.substring(index)); long tmpZob = BookPos.getZobristKey(); int tmp = BookPos.getSquares(bookMove.src);// for test if (BookPos.getSquares(bookMove.src) != 0) { tempHash = hashList[(int) (BookPos.getZobristKey() & hashMask)]; if (tempHash.flag != 0) {// slot occupied if (tempHash.zobristLock == BookPos.getZobristLock()) {// same position if ((tempHash.flag & BookMulti) != 0) {// multiple book moves for this position bookMoveNum = bookList[tempHash.value].moveNum; if (bookMoveNum < MaxBookMove) { bookList[tempHash.value].moveList[bookMoveNum] = bookMove; bookList[tempHash.value].moveNum++; recordedToHash++;// for test } } else { if (bookPosNum < maxBookPos) { tempHash.flag = BookMulti; bookList[bookPosNum] = new BookRecord(); bookList[bookPosNum].moveNum = 2; bookList[bookPosNum].moveList[0] = tempHash.bestMove; bookList[bookPosNum].moveList[1] = bookMove; tempHash.value = bookPosNum; bookPosNum++; hashList[(int) (BookPos.getZobristKey() & hashMask)] = tempHash; recordedToHash++;// for test } } } } else { tempHash.zobristLock = BookPos.getZobristLock(); tempHash.flag = BookUnique; tempHash.depth = 0; tempHash.value = 0; tempHash.bestMove = bookMove; hashList[(int) (BookPos.getZobristKey() & hashMask)] = tempHash; recordedToHash++; } } } } inFile.close(); } // End History and Hash Tables Procedures // Begin Search Procedures // Search Procedures private int RAdapt(int depth) { // Adjusting R to the situation is known as "Adaptive Null-Move Pruning", // first published by Ernst Heinz in the ICCA Journal in 1999. It can be summarized as: // a. when depth <= 6, search with R = 2 null-move pruning // b. when depth > 8, use R = 3; // c. when depth is 6 or 7, use R = 3 if each side has at least 3 pieces, otherwise R = 2. if (depth <= 6) { return 2; } else if (depth <= 8) { return activeBoard.getEvalue(0) < CCEvalue.EndgameMargin || activeBoard.getEvalue(1) < CCEvalue.EndgameMargin ? 2 : 3; } else { return 3; } } private int quiesc(int Alpha, int Beta) {// captures only int i, bestValue, thisAlpha, thisValue; boolean inCheck, movable; MoveNode thisMove; SortedMoveNodes moveSort = new SortedMoveNodes(); // 1. 
Return if a Loop position is detected if (activeBoard.getMoveNum() > startMove) { thisValue = activeBoard.isLoop(1);// note:wrong if (thisValue != 0) { return activeBoard.loopValue(thisValue, activeBoard.getMoveNum() - startMove); } } // 2. Initialize inCheck = activeBoard.lastMove().chk; movable = false; bestValue = -CCEvalue.MaxValue; thisAlpha = Alpha; // 3. For non-check position, try Null-Move before generate moves if (!inCheck) { movable = true; thisValue = activeBoard.evaluation() + (selectMask != 0 ? (rand.nextInt() & selectMask) - (rand.nextInt() & selectMask) : 0); if (thisValue > bestValue) { if (thisValue >= Beta) { quiescNullNodes++; return thisValue; } bestValue = thisValue; if (thisValue > thisAlpha) { thisAlpha = thisValue; } } } // 4. Generate and sort all moves for check position, or capture moves for non-check position moveSort.GenMoves(activeBoard, inCheck ? histTab : null); for (i = 0; i < moveSort.MoveNum; i++) { moveSort.BubbleSortMax(i); thisMove = moveSort.MoveList[i]; if (inCheck || activeBoard.narrowCap(thisMove, wideQuiesc)) { if (activeBoard.movePiece(thisMove)) { movable = true; // 5. Call Quiescence Alpha-Beta Search for every legal move thisValue = -quiesc(-Beta, -thisAlpha); // for debug String tmpStr = ""; for (int k = 0; k < activeBoard.getMoveNum(); k++) { tmpStr = tmpStr + activeBoard.moveList[k] + ","; } tmpStr = tmpStr + "Value:" + thisValue + "\n"; activeBoard.undoMove(); // 6. Select the best move for Fail-Soft Alpha-Beta if (thisValue > bestValue) { if (thisValue >= Beta) { quiescBetaNodes++; return thisValue; } bestValue = thisValue; if (thisValue > thisAlpha) { thisAlpha = thisValue; } } } } } // 7. Return a losing value if there are no legal moves if (!movable) { quiescMateNodes++; return activeBoard.getMoveNum() - startMove - CCEvalue.MaxValue; } if (thisAlpha > Alpha) { quiescPvNodes++; } else { quiescAlphaNodes++; } return bestValue; } // Search algorithm, featuring // 1. Hash Table; // 2. fail-soft Alpha-Beta search // 3. adaptive null-move pruning // 4. selective extensions // 5. iterative deepening with the Hash Table; // 6. killer move table // 7. check extensions // 8. principal variation search // 9. history heuristic table private int search(KillerStruct KillerTab, int Alpha, int Beta, int Depth) { int i, j, thisDepth, futPrune, hashFlag; boolean inCheck, movable, searched; int hashValue, bestValue, thisAlpha, thisValue, futValue = 0; MoveNode thisMove = new MoveNode(); MoveNode bestMove = new MoveNode(); SortedMoveNodes moveSort = new SortedMoveNodes(); KillerStruct subKillerTab = new KillerStruct(); // Alpha-Beta Search: // 1. repetition detection if (activeBoard.getMoveNum() > startMove) { thisValue = activeBoard.isLoop(1);// if (thisValue != 0) { return activeBoard.loopValue(thisValue, activeBoard.getMoveNum() - startMove); } } // 2. decide whether to extend inCheck = activeBoard.lastMove().chk; thisDepth = Depth; if (inCheck) { thisDepth++; } // 3. Return if hit the Hash Table hashValue = probeHash(thisMove, Alpha, Beta, thisDepth); if (hashValue >= -CCEvalue.MaxValue && hashValue <= CCEvalue.MaxValue) { return hashValue; } // 4. Return if interrupted or timeout if (interrupt()) { return 0; } // 5. start the main search: if (thisDepth > 0) { movable = false; searched = false; bestValue = -CCEvalue.MaxValue; thisAlpha = Alpha; hashFlag = HashAlpha; subKillerTab.moveNum = 0; // 6. apply null-move pruning and futility pruning? futPrune = 0; if (futility) { // aggressive (futility) pruning if (thisDepth == 3 && !inCheck && activeBoard.evaluation() + CCEvalue.RazorMargin <= Alpha && activeBoard.getEvalue(activeBoard.getOppPlayer()) > CCEvalue.EndgameMargin) { thisDepth = 2; } if (thisDepth < 3) { futValue = activeBoard.evaluation() + (thisDepth == 2 ? 
CCEvalue.ExtFutMargin : CCEvalue.SelFutMargin); if (!inCheck && futValue <= Alpha) { futPrune = thisDepth; bestValue = futValue; } } } // 7. null-move pruning if (nullMove && futPrune == 0 && !inCheck && activeBoard.lastMove().src != -1 && activeBoard.getEvalue(activeBoard.getPlayer()) > CCEvalue.EndgameMargin) { activeBoard.nullMove(); thisValue = -search(subKillerTab, -Beta, 1 - Beta, thisDepth - 1 - RAdapt(thisDepth)); activeBoard.undoNull(); if (thisValue >= Beta) { nullNodes++; return Beta; } } // 8. search the hash-table move if (hashValue == ObsoleteValue) { // System.out.println(ThisMove.Coord()); if (activeBoard.movePiece(thisMove)) { movable = true; if (futPrune != 0 && -activeBoard.evaluation() + (futPrune == 2 ? CCEvalue.ExtFutMargin : CCEvalue.SelFutMargin) <= Alpha && activeBoard.lastMove().chk) { activeBoard.undoMove(); } else { thisValue = -search(subKillerTab, -Beta, -thisAlpha, thisDepth - 1); searched = true; activeBoard.undoMove(); if (stop) { return 0; } if (thisValue > bestValue) { if (thisValue >= Beta) { histTab[thisMove.src][thisMove.dst] += 1 << (thisDepth - 1); recordHash(thisMove, HashBeta, Beta, thisDepth); hashNodes++; return thisValue; } bestValue = thisValue; bestMove = thisMove; if (thisValue > thisAlpha) { thisAlpha = thisValue; hashFlag = HashPv; if (activeBoard.getMoveNum() == startMove) { recordHash(bestMove, hashFlag, thisAlpha, thisDepth); popInfo(thisAlpha, Depth); } } } } } } // 9. try the killer-table moves for (i = 0; i < KillerTab.moveNum; i++) { thisMove = KillerTab.moveList[i]; if (activeBoard.leagalMove(thisMove)) { if (activeBoard.movePiece(thisMove)) { movable = true; if (futPrune != 0 && -activeBoard.evaluation() + (futPrune == 2 ? CCEvalue.ExtFutMargin : CCEvalue.SelFutMargin) <= Alpha && activeBoard.lastMove().chk) { activeBoard.undoMove(); } else { if (searched) { thisValue = -search(subKillerTab, -thisAlpha - 1, -thisAlpha, thisDepth - 1); if (thisValue > thisAlpha && thisValue < Beta) { thisValue = -search(subKillerTab, -Beta, -thisAlpha, thisDepth - 1); } } else { thisValue = -search(subKillerTab, -Beta, -thisAlpha, thisDepth - 1); searched = true; } activeBoard.undoMove(); if (stop) { return 0; } if (thisValue > bestValue) { if (thisValue >= Beta) { killerNodes++; histTab[thisMove.src][thisMove.dst] += 1 << (thisDepth - 1); recordHash(thisMove, HashBeta, Beta, thisDepth); return thisValue; } bestValue = thisValue; bestMove = thisMove; if (thisValue > thisAlpha) { thisAlpha = thisValue; hashFlag = HashPv; if (activeBoard.getMoveNum() == startMove) { recordHash(bestMove, hashFlag, thisAlpha, thisDepth); popInfo(thisAlpha, Depth); } } } } } } } // 10. generate and sort all legal moves moveSort.GenMoves(activeBoard, histTab); nodes += moveSort.MoveNum; for (i = 0; i < moveSort.MoveNum; i++) { moveSort.BubbleSortMax(i); thisMove = moveSort.MoveList[i]; if (activeBoard.movePiece(thisMove)) { movable = true; // 11. Alpha-Beta Search if (futPrune != 0 && -activeBoard.evaluation() + (futPrune == 2 ? CCEvalue.ExtFutMargin : CCEvalue.SelFutMargin) <= Alpha && activeBoard.lastMove().chk) { activeBoard.undoMove(); } else { if (searched) { thisValue = -search(subKillerTab, -thisAlpha - 1, -thisAlpha, thisDepth - 1); if (thisValue > thisAlpha && thisValue < Beta) { thisValue = -search(subKillerTab, -Beta, -thisAlpha, thisDepth - 1); } } else { thisValue = -search(subKillerTab, -Beta, -thisAlpha, thisDepth - 1); searched = true; } activeBoard.undoMove(); if (stop) { return 0; } // 12. 
fail-soft Alpha-Beta if (thisValue > bestValue) { if (thisValue >= Beta) { betaNodes++; histTab[thisMove.src][thisMove.dst] += 1 << (thisDepth - 1); recordHash(thisMove, HashBeta, Beta, thisDepth); if (KillerTab.moveNum < MaxKiller) { KillerTab.moveList[KillerTab.moveNum] = thisMove; KillerTab.moveNum++; } return thisValue; } bestValue = thisValue; bestMove = thisMove; if (thisValue > thisAlpha) { thisAlpha = thisValue; hashFlag = HashPv; if (activeBoard.getMoveNum() == startMove) { recordHash(bestMove, hashFlag, thisAlpha, thisDepth); popInfo(thisAlpha, Depth); } } } } } } // 13. no moves available - the side to move loses! if (!movable) { mateNodes++; return activeBoard.getMoveNum() - startMove - CCEvalue.MaxValue; } // 14. Update History Tables and Hash Tables if (futPrune != 0 && bestValue == futValue) { bestMove.src = bestMove.dst = -1; } if ((hashFlag & HashAlpha) != 0) { alphaNodes++; } else { pvNodes++; histTab[bestMove.src][bestMove.dst] += 1 << (thisDepth - 1); if (KillerTab.moveNum < MaxKiller) { KillerTab.moveList[KillerTab.moveNum] = bestMove; KillerTab.moveNum++; } } recordHash(bestMove, hashFlag, thisAlpha, thisDepth); return bestValue; // 15. quiescence search } else { thisValue = quiesc(Alpha, Beta); thisMove.src = bestMove.dst = -1; if (thisValue <= Alpha) { recordHash(thisMove, HashAlpha, Alpha, 0); } else if (thisValue >= Beta) { recordHash(thisMove, HashBeta, Beta, 0); } else { recordHash(thisMove, HashPv, thisValue, 0); } leafNodes++; return thisValue; } } // End Search Procedures // Start Control Procedures private boolean interrupt() { if (stop) return true; return false; } public void stopSearch() { this.stop = true; } private void popInfo(int value, int depth) { int i, quiescNodes, nps, npsQuiesc; char[] moveStr; long tempLong; if (depth != 0) { String logString = "PVNode: depth=" + depth + ",score=" + value + ",Move: " + "\n"; pvLineNum = 0; GetPvLine(); for (i = 0; i < pvLineNum; i++) { moveStr = pvLine[i].location(); logString += " " + String.copyValueOf(moveStr) + "\n"; } if (ponder && System.currentTimeMillis() > minTimer && value + CCEvalue.InadequateValue > lastScore) { stop = true; } if (log.isDebugEnabled()) log.debug(logString); } } public void setupControl(int depth, long proper, long limit) { this.depth = depth; this.properTimer = proper; this.limitTimer = limit; } public void control() throws LostException { // int Depth, int ProperTimer, int LimitTimer) throws IOException { int i, MoveNum, ThisValue; char[] MoveStr; stop = false; bestMove = null; MoveNode ThisMove = new MoveNode(), UniqueMove = new MoveNode(); HashRecord TempHash; SortedMoveNodes MoveSort = new SortedMoveNodes(); KillerStruct SubKillerTab = new KillerStruct(); // The Computer Thinking Procedure: // 1. Search the moveNodes in Book int tmpInt = (int) (activeBoard.getZobristKey() & hashMask); TempHash = hashList[(int) (activeBoard.getZobristKey() & hashMask)]; if (TempHash.flag != 0 && TempHash.zobristLock == activeBoard.getZobristLock()) { if ((TempHash.flag == BookUnique)) { MoveStr = TempHash.bestMove.location(); bestMove = new MoveNode(String.copyValueOf(MoveStr)); return; } else if (TempHash.flag == BookMulti) { ThisValue = 0; i = Math.abs(rand.nextInt()) % (bookList[TempHash.value].moveNum); MoveStr = bookList[TempHash.value].moveList[i].location(); bestMove = new MoveNode(String.copyValueOf(MoveStr)); return; } } // 2. 
Initialize the timer and other counters startTimer = System.currentTimeMillis(); minTimer = startTimer + (properTimer >> 1); maxTimer = properTimer << 1; if (maxTimer > limitTimer) { maxTimer = limitTimer; } maxTimer += startTimer; stop = false; startMove = activeBoard.getMoveNum(); nodes = nullNodes = hashNodes = killerNodes = betaNodes = pvNodes = alphaNodes = mateNodes = leafNodes = 0; quiescNullNodes = quiescBetaNodes = quiescPvNodes = quiescAlphaNodes = quiescMateNodes = 0; hitBeta = hitPv = hitAlpha = 0; pvLineNum = 0; // 3. illegal: the side that just moved left its king in check if (activeBoard.checked(activeBoard.getOppPlayer())) { return; } ThisValue = activeBoard.isLoop(3); if (ThisValue != 0) { throw new LostException("Perpetual chase is not allowed!"); } if (activeBoard.getMoveNum() > ActiveBoard.MAX_CONSECUTIVE_MOVES) { throw new LostException("Maximum move count reached - draw!"); } // 4. test every move that answers the check if (activeBoard.lastMove().chk) { MoveNum = 0; MoveSort.GenMoves(activeBoard, histTab); for (i = 0; i < MoveSort.MoveNum; i++) { ThisMove = MoveSort.MoveList[i]; if (activeBoard.movePiece(ThisMove)) { activeBoard.undoMove(); UniqueMove = ThisMove; MoveNum++; if (MoveNum > 1) { break; } } } if (MoveNum == 0) { if (log.isDebugEnabled()) log.debug("score " + -CCEvalue.MaxValue + "\n"); } if (MoveNum == 1) { MoveStr = UniqueMove.location(); if (log.isDebugEnabled()) log.debug("bestmove " + String.copyValueOf(MoveStr) + "\n"); bestMove = new MoveNode(String.copyValueOf(MoveStr)); return; } } // 5. iterative deepening if (depth == 0) { return; } for (i = 4; i <= depth; i++) { if (log.isDebugEnabled()) log.debug("info depth " + i + "\n"); SubKillerTab.moveNum = 0; ThisValue = search(SubKillerTab, -CCEvalue.MaxValue, CCEvalue.MaxValue, i); popInfo(ThisValue, depth); if (stop) { break; } lastScore = ThisValue; // 6. Stop thinking if timeout or solved if (!ponder && System.currentTimeMillis() > minTimer) { break; } if (ThisValue > CCEvalue.MaxValue - ActiveBoard.MAX_MOVE_NUM / 2 || ThisValue < ActiveBoard.MAX_MOVE_NUM / 2 - CCEvalue.MaxValue) { break; } } // 7. 
get the best move and its principal variation if (pvLineNum != 0) { MoveStr = pvLine[0].location(); bestMove = new MoveNode(String.copyValueOf(MoveStr)); if (log.isDebugEnabled()) log.debug("bestmove: " + String.copyValueOf(MoveStr) + "\n"); if (pvLineNum > 1) { MoveStr = pvLine[1].location(); if (log.isDebugEnabled()) log.debug("ponder:" + String.copyValueOf(MoveStr) + "\n"); } } else { if (log.isDebugEnabled()) log.info("score:" + ThisValue); } } // End Control Procedures public MoveNode getBestMove() throws LostException { control(); MoveNode retVal = bestMove; return bestMove; } // for test public static void main(String[] args) throws IOException { long start, end; RandomAccessFile testResult; log.info("begin search, please wait......"); start = System.currentTimeMillis(); int steps = 8; ActiveBoard cp = new ActiveBoard(); String FenStr = "1c1k1abR1/4a4/4b4/6NP1/4P4/2C1n1P2/r5p2/4B4/4A4/2BAK4 w - - 0 20"; cp.loadFen(FenStr); SearchEngine searchMove = new SearchEngine(cp); searchMove.loadBook("/data/book.txt"); // System.out.println(cp.AllPieces); // searchMove.Control(steps,CLOCK_M*2,CLOCK_M*4); log.info(FenStr); end = System.currentTimeMillis(); long second = (end - start) / 1000; if (second == 0) second = 1; long minutes = second / 60; URL url = SearchEngine.class.getResource("/data/test.log"); String uri = url.toString().replaceAll("file:/", ""); testResult = new RandomAccessFile(uri, "rw"); Calendar c = Calendar.getInstance(); String tmpStr = "\n\n********************************************************************\n"; tmpStr = tmpStr + "[Test Time] " + c.getTime() + "\n"; tmpStr = tmpStr + "[Fen String] " + FenStr + "\n"; tmpStr = tmpStr + " Deep =" + steps + ",Used Time:" + minutes + ":" + second % 60 + "\n"; tmpStr = tmpStr + "[Nodes] " + searchMove.nodes + "\n"; tmpStr = tmpStr + "[AlphaNodes] " + searchMove.alphaNodes + "\n"; tmpStr = tmpStr + "[BetaNodes] " + searchMove.betaNodes + "\n"; tmpStr = tmpStr + "[HashNodes] " + searchMove.hashNodes + "\n"; tmpStr = tmpStr + "[KillerNodes] " + searchMove.killerNodes + "\n"; tmpStr = tmpStr + "[LeafNodes] " + searchMove.leafNodes + "\n"; tmpStr = tmpStr + "[NullNodes] " + searchMove.nullNodes + "\n"; tmpStr = tmpStr + "[QuiescAlphaNodes] " + searchMove.quiescAlphaNodes + "\n"; tmpStr = tmpStr + "[QuiescBetaNodesNodes] " + searchMove.quiescBetaNodes + "\n"; tmpStr = tmpStr + "[QuiescMateNodes] " + searchMove.quiescMateNodes + "\n"; tmpStr = tmpStr + "[QuiescNullNodes] " + searchMove.quiescNullNodes + "\n"; tmpStr = tmpStr + "[QuiescPvNodes] " + searchMove.quiescPvNodes + "\n"; tmpStr = tmpStr + "[HitAlpha] " + searchMove.hitAlpha + "\n"; tmpStr = tmpStr + "[HitBeta] " + searchMove.hitBeta + "\n"; tmpStr = tmpStr + "[HitPv] " + searchMove.hitPv + "\n"; tmpStr = tmpStr + "[BetaNode] " + searchMove.betaNodes + "\n"; tmpStr = tmpStr + "[BPS] " + searchMove.nodes / second; int count = 0; for (int i = 1; i < searchMove.hashList.length; i++) { if (searchMove.hashList[i].flag != 0) count++; } tmpStr = tmpStr + "[HashTable] length=" + searchMove.hashList.length + ", occupied=" + count; testResult.seek(testResult.length()); testResult.writeBytes(tmpStr); testResult.close(); System.out.println(tmpStr); searchMove = null; cp = null; System.gc(); } } class BookRecord { int moveNum; MoveNode[] moveList;// [MaxBookMove]; public BookRecord() { moveList = new MoveNode[SearchEngine.MaxBookMove]; moveNum = 0; } }; class KillerStruct { int moveNum; MoveNode[] moveList;// [MaxKiller]; public KillerStruct() { moveList = new MoveNode[
SearchEngine.MaxKiller; i++) moveList[i] = new MoveNode(); moveNum = 0; } }; class HashRecord { public HashRecord() { flag = 0; depth = 0; value = 0; zobristLock = 0; bestMove = new MoveNode(); } long zobristLock; int flag, depth; int value; MoveNode bestMove; };
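/* Editor's usage sketch (not part of the original file). It exercises only the API
   visible above -- ActiveBoard.loadFen(), SearchEngine#loadBook(), getBestMove() -- with
   a placeholder FEN and book path; search depth and time limits are whatever the engine
   defaults to. */
class SearchEngineUsageSketch {
    public static void main(String[] args) throws Exception {
        ActiveBoard board = new ActiveBoard();
        board.loadFen("1c1k1abR1/4a4/4b4/6NP1/4P4/2C1n1P2/r5p2/4B4/4A4/2BAK4 w - - 0 20");
        SearchEngine engine = new SearchEngine(board);
        engine.loadBook("/data/book.txt"); // opening book; optional
        try {
            MoveNode best = engine.getBestMove(); // runs control(): setup, then iterative deepening
            System.out.println("best move found: " + best);
        } catch (LostException e) {
            // control() throws on forced results (perpetual chase, move-limit draw)
            System.out.println(e.getMessage());
        }
    }
}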
dyzhxsl3897/goliveiptv
lobby/src/main/java/com/zhongdan/lobby/bl/ai/chinesechess/engine/SearchEngine.java
Java
apache-2.0
29,025
/* * Copyright 2017 Mahesh Gaya * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package edu.drake.research.android.lipswithmaps.adapter; import android.content.Context; import android.support.v7.widget.RecyclerView; import android.view.LayoutInflater; import android.view.View; import android.view.ViewGroup; import android.widget.TextView; import java.util.List; import butterknife.BindView; import butterknife.ButterKnife; import edu.drake.research.android.lipswithmaps.R; import edu.drake.research.lipswithmaps.WifiItem; /** * Created by Mahesh Gaya on 1/15/17. */ public class WifiAdapter extends RecyclerView.Adapter<WifiAdapter.ViewHolder> { private List<WifiItem> mWifiItemList; public WifiAdapter(List<WifiItem> wifiItemList){ this.mWifiItemList = wifiItemList; } @Override public ViewHolder onCreateViewHolder(ViewGroup parent, int viewType) { View rootView = LayoutInflater.from(parent.getContext()) .inflate(R.layout.list_wifi_item, parent, false); return new ViewHolder(rootView); } @Override public void onBindViewHolder(ViewHolder holder, int position) { WifiItem wifiItem = mWifiItemList.get(position); holder. levelTextView.setText(String.valueOf(wifiItem.getLevel())); holder.ssidTextView.setText(wifiItem.getSsid()); holder.bssidTextView.setText(wifiItem.getBssid()); } @Override public int getItemCount() { return mWifiItemList.size(); } public class ViewHolder extends RecyclerView.ViewHolder{ @BindView(R.id.textview_wifi_level)TextView levelTextView; @BindView(R.id.textview_wifi_ssid)TextView ssidTextView; @BindView(R.id.textview_wifi_bssid)TextView bssidTextView; public ViewHolder(View itemView) { super(itemView); ButterKnife.bind(this, itemView); } } }
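/* Editor's note: a minimal wiring sketch (not part of the original file) showing how
   WifiAdapter is typically attached to a RecyclerView. The layout id, view id and the
   empty item list are hypothetical; the support-library classes match the imports above. */
class WifiListActivity extends android.support.v7.app.AppCompatActivity {
    @Override
    protected void onCreate(android.os.Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_wifi_list); // hypothetical layout
        RecyclerView recyclerView = (RecyclerView) findViewById(R.id.recycler_wifi); // hypothetical id
        recyclerView.setLayoutManager(new android.support.v7.widget.LinearLayoutManager(this));
        recyclerView.setAdapter(new WifiAdapter(new java.util.ArrayList<WifiItem>()));
    }
}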
maheshgaya/lips-with-maps
android/src/main/java/edu/drake/research/android/lipswithmaps/adapter/WifiAdapter.java
Java
apache-2.0
2,440
import Vue from 'vue'; import axios from 'axios'; import VueAxios from 'vue-axios'; Vue.use(VueAxios, axios); /* login() is expected to be provided by the app's auth module; it is not defined in this file */ let ajax = (options) => { let p = new Promise(function(resolve, reject) { Vue.axios(options).catch(err => { if (err.code === 401) { // not logged in: authenticate, then retry the original request once login().catch(err => reject(err)) .then(() => ajax(options)) .catch(err => reject(err)) .then(data => resolve(data)); } else { /* propagate non-auth errors instead of silently swallowing them */ reject(err); } }).then(data => resolve(data)); }); return p; }; ajax.decorator = function(promiseFn, {locked, animated}) { /* not implemented yet */ }; export default ajax; /* export added so the module is consumable; the original file defined ajax without exporting it */
dgmpk/vue-music-app
src/assets/js/request.js
JavaScript
apache-2.0
659
import json import requests class Client(object): def __init__(self, tornado_server): self.tornado_server = tornado_server @property def base_url(self): return "http://localhost:{}/api/v1".format(self.tornado_server.port) def request(self, method, url, **kwargs): headers = {} if method.lower() in ("put", "post"): headers["Content-type"] = "application/json" return requests.request( method, self.base_url + url, headers=headers, **kwargs ) def get(self, url, **kwargs): return self.request("GET", url, **kwargs) def post(self, url, **kwargs): return self.request("POST", url, **kwargs) def put(self, url, **kwargs): return self.request("PUT", url, **kwargs) def delete(self, url, **kwargs): return self.request("DELETE", url, **kwargs) def create(self, url, **kwargs): return self.post(url, data=json.dumps(kwargs)) def update(self, url, **kwargs): return self.put(url, data=json.dumps(kwargs))
dropbox/notouch
tests/api_tests/util.py
Python
apache-2.0
1,077
package com.braulio.cassule.designfocus.fragment; import android.content.Intent; import android.net.Uri; import android.os.Bundle; import android.support.v4.app.Fragment; import android.support.v7.widget.LinearLayoutManager; import android.support.v7.widget.RecyclerView; import android.util.Log; import android.view.LayoutInflater; import android.view.View; import android.view.ViewGroup; import com.braulio.cassule.designfocus.ui.PostViewHolder; import com.braulio.cassule.designfocus.R; import com.braulio.cassule.designfocus.activity.PostDetailActivity; import com.braulio.cassule.designfocus.model.Post; import com.firebase.ui.database.FirebaseRecyclerAdapter; import com.google.firebase.auth.FirebaseAuth; import com.google.firebase.database.DataSnapshot; import com.google.firebase.database.DatabaseError; import com.google.firebase.database.DatabaseReference; import com.google.firebase.database.FirebaseDatabase; import com.google.firebase.database.MutableData; import com.google.firebase.database.Query; import com.google.firebase.database.Transaction; import com.squareup.picasso.Picasso; public abstract class PostListFragment extends Fragment { private static final String TAG = "PostListFragment"; // [START define_database_reference] private DatabaseReference mDatabase; // [END define_database_reference] private FirebaseRecyclerAdapter<Post, PostViewHolder> mAdapter; private RecyclerView mRecycler; private LinearLayoutManager mManager; public PostListFragment() {} @Override public View onCreateView (LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) { super.onCreateView(inflater, container, savedInstanceState); View rootView = inflater.inflate(R.layout.fragment_all_posts, container, false); // [START create_database_reference] mDatabase = FirebaseDatabase.getInstance().getReference(); // [END create_database_reference] mRecycler = (RecyclerView) rootView.findViewById(R.id.messages_list); mRecycler.setHasFixedSize(true); return rootView; } @Override public void onActivityCreated(Bundle savedInstanceState) { super.onActivityCreated(savedInstanceState); // Set up Layout Manager, reverse layout mManager = new LinearLayoutManager(getActivity()); mManager.setReverseLayout(true); mManager.setStackFromEnd(true); mRecycler.setLayoutManager(mManager); // Set up FirebaseRecyclerAdapter with the Query Query postsQuery = getQuery(mDatabase); mAdapter = new FirebaseRecyclerAdapter<Post, PostViewHolder>(Post.class, R.layout.item_post, PostViewHolder.class, postsQuery) { @Override protected void populateViewHolder(final PostViewHolder viewHolder, final Post model, final int position) { final DatabaseReference postRef = getRef(position); // Set click listener for the whole post view final String postKey = postRef.getKey(); viewHolder.itemView.setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { // Launch PostDetailActivity Intent intent = new Intent(getActivity(), PostDetailActivity.class); intent.putExtra(PostDetailActivity.EXTRA_POST_KEY, postKey); startActivity(intent); } }); if (model.image != null) { Picasso.with(getContext()).load(Uri.parse(model.image)).fit().centerCrop().into(viewHolder.imageView); } /* no early return when the image is missing: imageless posts still need the star state and bind below */ // Determine if the current user has liked this post and set UI accordingly if (model.stars.containsKey(getUid())) { viewHolder.starView.setImageResource(R.drawable.ic_toggle_star_fill_24); } else { viewHolder.starView.setImageResource(R.drawable.ic_toggle_star_outline_24); } // Bind Post to ViewHolder, setting OnClickListener for the
star button viewHolder.bindToPost(model, new View.OnClickListener() { @Override public void onClick(View starView) { // Need to write to both places the post is stored DatabaseReference globalPostRef = mDatabase.child("posts").child(postRef.getKey()); DatabaseReference userPostRef = mDatabase.child("user-posts").child(model.uid).child(postRef.getKey()); // Run two transactions onStarClicked(globalPostRef); onStarClicked(userPostRef); } }); } }; mRecycler.setAdapter(mAdapter); } // [START post_stars_transaction] private void onStarClicked(DatabaseReference postRef) { postRef.runTransaction(new Transaction.Handler() { @Override public Transaction.Result doTransaction(MutableData mutableData) { Post p = mutableData.getValue(Post.class); if (p == null) { return Transaction.success(mutableData); } if (p.stars.containsKey(getUid())) { // Unstar the post and remove self from stars p.starCount = p.starCount - 1; p.stars.remove(getUid()); } else { // Star the post and add self to stars p.starCount = p.starCount + 1; p.stars.put(getUid(), true); } // Set value and report transaction success mutableData.setValue(p); return Transaction.success(mutableData); } @Override public void onComplete(DatabaseError databaseError, boolean b, DataSnapshot dataSnapshot) { // Transaction completed Log.d(TAG, "postTransaction:onComplete:" + databaseError); } }); } // [END post_stars_transaction] @Override public void onDestroy() { super.onDestroy(); if (mAdapter != null) { mAdapter.cleanup(); } } public String getUid() { return FirebaseAuth.getInstance().getCurrentUser().getUid(); } public abstract Query getQuery(DatabaseReference databaseReference); }
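/* Editor's note: PostListFragment above is abstract; a concrete subclass only needs to
   supply the Firebase query to display. A minimal sketch assuming a top-level "posts"
   node (the same node the star transaction writes to); the limit of 100 is arbitrary. */
class RecentPostsFragment extends PostListFragment {
    @Override
    public Query getQuery(DatabaseReference databaseReference) {
        // Last 100 posts; the reversed LinearLayoutManager above shows newest first.
        return databaseReference.child("posts").limitToFirst(100);
    }
}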
braulio94/Quadro
app/src/main/java/com/braulio/cassule/designfocus/fragment/PostListFragment.java
Java
apache-2.0
6,726
using System; namespace Com.Koushikdutta.Async.Wrapper { partial interface IAsyncSocketWrapper { new void Close(); } }
thefactory/AndroidAsync-Sharp
Additions/IAsyncSocketWrapper.cs
C#
apache-2.0
141
/*global Phaser, Assets, Screen*/ var Player = function (game) { "use strict"; this.game = game; this.sprite = null; }; Player.DISTANCE_TO_BORDER = 50; Player.VELOCITY_X = 300; Player.SPRITE_ANCHOR_X = 0.5; Player.SPRITE_ANCHOR_Y = 0.5; Player.prototype = { create: function () { "use strict"; var y = Screen.HEIGHT - Player.DISTANCE_TO_BORDER; this.sprite = this.game.add.sprite(this.game.world.centerX, y, Assets.PLAYER_SPRITE_KEY); this.sprite.anchor.set(Player.SPRITE_ANCHOR_X, Player.SPRITE_ANCHOR_Y); this.game.physics.enable(this.sprite, Phaser.Physics.ARCADE); }, update: function () { "use strict"; if (this.game.input.keyboard.isDown(Phaser.Keyboard.LEFT)) { this.sprite.body.velocity.x = -Player.VELOCITY_X; } else if (this.game.input.keyboard.isDown(Phaser.Keyboard.RIGHT)) { this.sprite.body.velocity.x = Player.VELOCITY_X; } else { this.sprite.body.velocity.x = 0; } } };
fpbfabio/river-raid-remake
js/game/player.js
JavaScript
apache-2.0
1,075
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.druid.query.dimension; import com.fasterxml.jackson.annotation.JsonProperty; import com.google.common.base.Preconditions; import com.google.common.base.Predicate; import it.unimi.dsi.fastutil.ints.Int2IntMap; import it.unimi.dsi.fastutil.ints.Int2IntOpenHashMap; import org.apache.druid.common.config.NullHandling; import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.query.filter.DimFilterUtils; import org.apache.druid.segment.DimensionSelector; import javax.annotation.Nullable; import java.nio.ByteBuffer; import java.util.regex.Pattern; /** */ public class RegexFilteredDimensionSpec extends BaseFilteredDimensionSpec { private static final byte CACHE_TYPE_ID = 0x2; private final String pattern; private final Pattern compiledRegex; public RegexFilteredDimensionSpec( @JsonProperty("delegate") DimensionSpec delegate, @JsonProperty("pattern") String pattern //rows not matching the pattern will be discarded ) { super(delegate); this.pattern = Preconditions.checkNotNull(pattern, "pattern must not be null"); this.compiledRegex = Pattern.compile(pattern); } @JsonProperty public String getPattern() { return pattern; } @Override public DimensionSelector decorate(final DimensionSelector selector) { if (selector == null) { return null; } final int selectorCardinality = selector.getValueCardinality(); if (selectorCardinality < 0 || !selector.nameLookupPossibleInAdvance()) { return new PredicateFilteredDimensionSelector( selector, new Predicate<String>() { @Override public boolean apply(@Nullable String input) { return compiledRegex.matcher(NullHandling.nullToEmptyIfNeeded(input)).matches(); } } ); } int count = 0; final Int2IntOpenHashMap forwardMapping = new Int2IntOpenHashMap(); forwardMapping.defaultReturnValue(-1); for (int i = 0; i < selectorCardinality; i++) { String val = NullHandling.nullToEmptyIfNeeded(selector.lookupName(i)); if (val != null && compiledRegex.matcher(val).matches()) { forwardMapping.put(i, count++); } } final int[] reverseMapping = new int[forwardMapping.size()]; for (Int2IntMap.Entry e : forwardMapping.int2IntEntrySet()) { reverseMapping[e.getIntValue()] = e.getIntKey(); } return new ForwardingFilteredDimensionSelector(selector, forwardMapping, reverseMapping); } @Override public byte[] getCacheKey() { byte[] delegateCacheKey = delegate.getCacheKey(); byte[] regexBytes = StringUtils.toUtf8(pattern); return ByteBuffer.allocate(2 + delegateCacheKey.length + regexBytes.length) .put(CACHE_TYPE_ID) .put(delegateCacheKey) .put(DimFilterUtils.STRING_SEPARATOR) .put(regexBytes) .array(); } @Override public boolean equals(Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } RegexFilteredDimensionSpec that = 
(RegexFilteredDimensionSpec) o; if (!delegate.equals(that.delegate)) { return false; } return pattern.equals(that.pattern); } @Override public int hashCode() { int result = delegate.hashCode(); result = 31 * result + pattern.hashCode(); return result; } @Override public String toString() { return "RegexFilteredDimensionSpec{" + "pattern='" + pattern + '\'' + '}'; } }
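/* Editor's note: a construction sketch (not from the original file). It assumes Druid's
   companion DefaultDimensionSpec(dimension, outputName) as the delegate; the dimension
   name and regex are illustrative. Rows whose "datacenter" value does not match the
   pattern are discarded by the decorated selector. */
class RegexFilteredDimensionSpecSketch {
    static DimensionSpec usEastOnly() {
        return new RegexFilteredDimensionSpec(
                new DefaultDimensionSpec("datacenter", "datacenter"),
                "^us-east-.*");
    }
}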
dkhwangbo/druid
processing/src/main/java/org/apache/druid/query/dimension/RegexFilteredDimensionSpec.java
Java
apache-2.0
4,458
from django.conf.urls import patterns, url urlpatterns = patterns('accounts.views', url(r'^$', 'home_view', name='home'), url(r'^login/$', 'login_view', name='login'), url(r'^logout/$', 'logout_view', name='logout'), url(r'^register/$', 'register_view', name='register'), url(r'^password/$', 'password_view', name='password'), url(r'^profile/$', 'profile_view', name='profile'), url(r'^hello/$', 'hello_view', name='hello'), )
goncha/django-accounts
urls.py
Python
apache-2.0
449
/* * Copyright 2021 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.ads.googleads.v10.resources; import com.google.api.pathtemplate.PathTemplate; import com.google.api.resourcenames.ResourceName; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableMap; import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.Objects; import javax.annotation.Generated; // AUTO-GENERATED DOCUMENTATION AND CLASS. @Generated("by gapic-generator-java") public class BiddingStrategySimulationName implements ResourceName { private static final PathTemplate CUSTOMER_ID_BIDDING_STRATEGY_ID_TYPE_MODIFICATION_METHOD_START_DATE_END_DATE = PathTemplate.createWithoutUrlEncoding( "customers/{customer_id}/biddingStrategySimulations/{bidding_strategy_id}~{type}~{modification_method}~{start_date}~{end_date}"); private volatile Map<String, String> fieldValuesMap; private final String customerId; private final String biddingStrategyId; private final String type; private final String modificationMethod; private final String startDate; private final String endDate; @Deprecated protected BiddingStrategySimulationName() { customerId = null; biddingStrategyId = null; type = null; modificationMethod = null; startDate = null; endDate = null; } private BiddingStrategySimulationName(Builder builder) { customerId = Preconditions.checkNotNull(builder.getCustomerId()); biddingStrategyId = Preconditions.checkNotNull(builder.getBiddingStrategyId()); type = Preconditions.checkNotNull(builder.getType()); modificationMethod = Preconditions.checkNotNull(builder.getModificationMethod()); startDate = Preconditions.checkNotNull(builder.getStartDate()); endDate = Preconditions.checkNotNull(builder.getEndDate()); } public String getCustomerId() { return customerId; } public String getBiddingStrategyId() { return biddingStrategyId; } public String getType() { return type; } public String getModificationMethod() { return modificationMethod; } public String getStartDate() { return startDate; } public String getEndDate() { return endDate; } public static Builder newBuilder() { return new Builder(); } public Builder toBuilder() { return new Builder(this); } public static BiddingStrategySimulationName of( String customerId, String biddingStrategyId, String type, String modificationMethod, String startDate, String endDate) { return newBuilder() .setCustomerId(customerId) .setBiddingStrategyId(biddingStrategyId) .setType(type) .setModificationMethod(modificationMethod) .setStartDate(startDate) .setEndDate(endDate) .build(); } public static String format( String customerId, String biddingStrategyId, String type, String modificationMethod, String startDate, String endDate) { return newBuilder() .setCustomerId(customerId) .setBiddingStrategyId(biddingStrategyId) .setType(type) .setModificationMethod(modificationMethod) .setStartDate(startDate) .setEndDate(endDate) .build() .toString(); } public static BiddingStrategySimulationName parse(String formattedString) { if 
(formattedString.isEmpty()) { return null; } Map<String, String> matchMap = CUSTOMER_ID_BIDDING_STRATEGY_ID_TYPE_MODIFICATION_METHOD_START_DATE_END_DATE.validatedMatch( formattedString, "BiddingStrategySimulationName.parse: formattedString not in valid format"); return of( matchMap.get("customer_id"), matchMap.get("bidding_strategy_id"), matchMap.get("type"), matchMap.get("modification_method"), matchMap.get("start_date"), matchMap.get("end_date")); } public static List<BiddingStrategySimulationName> parseList(List<String> formattedStrings) { List<BiddingStrategySimulationName> list = new ArrayList<>(formattedStrings.size()); for (String formattedString : formattedStrings) { list.add(parse(formattedString)); } return list; } public static List<String> toStringList(List<BiddingStrategySimulationName> values) { List<String> list = new ArrayList<>(values.size()); for (BiddingStrategySimulationName value : values) { if (value == null) { list.add(""); } else { list.add(value.toString()); } } return list; } public static boolean isParsableFrom(String formattedString) { return CUSTOMER_ID_BIDDING_STRATEGY_ID_TYPE_MODIFICATION_METHOD_START_DATE_END_DATE.matches( formattedString); } @Override public Map<String, String> getFieldValuesMap() { if (fieldValuesMap == null) { synchronized (this) { if (fieldValuesMap == null) { ImmutableMap.Builder<String, String> fieldMapBuilder = ImmutableMap.builder(); if (customerId != null) { fieldMapBuilder.put("customer_id", customerId); } if (biddingStrategyId != null) { fieldMapBuilder.put("bidding_strategy_id", biddingStrategyId); } if (type != null) { fieldMapBuilder.put("type", type); } if (modificationMethod != null) { fieldMapBuilder.put("modification_method", modificationMethod); } if (startDate != null) { fieldMapBuilder.put("start_date", startDate); } if (endDate != null) { fieldMapBuilder.put("end_date", endDate); } fieldValuesMap = fieldMapBuilder.build(); } } } return fieldValuesMap; } public String getFieldValue(String fieldName) { return getFieldValuesMap().get(fieldName); } @Override public String toString() { return CUSTOMER_ID_BIDDING_STRATEGY_ID_TYPE_MODIFICATION_METHOD_START_DATE_END_DATE.instantiate( "customer_id", customerId, "bidding_strategy_id", biddingStrategyId, "type", type, "modification_method", modificationMethod, "start_date", startDate, "end_date", endDate); } @Override public boolean equals(Object o) { if (o == this) { return true; } if (o != null && getClass() == o.getClass()) { /* && here, not ||, which would NPE on null and wrongly match other types */ BiddingStrategySimulationName that = ((BiddingStrategySimulationName) o); return Objects.equals(this.customerId, that.customerId) && Objects.equals(this.biddingStrategyId, that.biddingStrategyId) && Objects.equals(this.type, that.type) && Objects.equals(this.modificationMethod, that.modificationMethod) && Objects.equals(this.startDate, that.startDate) && Objects.equals(this.endDate, that.endDate); } return false; } @Override public int hashCode() { int h = 1; h *= 1000003; h ^= Objects.hashCode(customerId); h *= 1000003; h ^= Objects.hashCode(biddingStrategyId); h *= 1000003; h ^= Objects.hashCode(type); h *= 1000003; h ^= Objects.hashCode(modificationMethod); h *= 1000003; h ^= Objects.hashCode(startDate); h *= 1000003; h ^= Objects.hashCode(endDate); return h; } /** * Builder for * customers/{customer_id}/biddingStrategySimulations/{bidding_strategy_id}~{type}~{modification_method}~{start_date}~{end_date}.
*/ public static class Builder { private String customerId; private String biddingStrategyId; private String type; private String modificationMethod; private String startDate; private String endDate; protected Builder() {} public String getCustomerId() { return customerId; } public String getBiddingStrategyId() { return biddingStrategyId; } public String getType() { return type; } public String getModificationMethod() { return modificationMethod; } public String getStartDate() { return startDate; } public String getEndDate() { return endDate; } public Builder setCustomerId(String customerId) { this.customerId = customerId; return this; } public Builder setBiddingStrategyId(String biddingStrategyId) { this.biddingStrategyId = biddingStrategyId; return this; } public Builder setType(String type) { this.type = type; return this; } public Builder setModificationMethod(String modificationMethod) { this.modificationMethod = modificationMethod; return this; } public Builder setStartDate(String startDate) { this.startDate = startDate; return this; } public Builder setEndDate(String endDate) { this.endDate = endDate; return this; } private Builder(BiddingStrategySimulationName biddingStrategySimulationName) { this.customerId = biddingStrategySimulationName.customerId; this.biddingStrategyId = biddingStrategySimulationName.biddingStrategyId; this.type = biddingStrategySimulationName.type; this.modificationMethod = biddingStrategySimulationName.modificationMethod; this.startDate = biddingStrategySimulationName.startDate; this.endDate = biddingStrategySimulationName.endDate; } public BiddingStrategySimulationName build() { return new BiddingStrategySimulationName(this); } } }
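/* Editor's note: a usage sketch for the resource-name class above; the id values are
   placeholders. format() and parse() round-trip through the path template shown in the
   Builder javadoc. */
class BiddingStrategySimulationNameSketch {
    public static void main(String[] args) {
        String name = BiddingStrategySimulationName.format(
                "1234567890", "987654", "TARGET_CPA", "UNIFORM", "2021-01-01", "2021-01-07");
        // -> customers/1234567890/biddingStrategySimulations/987654~TARGET_CPA~UNIFORM~2021-01-01~2021-01-07
        BiddingStrategySimulationName parsed = BiddingStrategySimulationName.parse(name);
        System.out.println(parsed.getBiddingStrategyId()); // 987654
    }
}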
googleads/google-ads-java
google-ads-stubs-v10/src/main/java/com/google/ads/googleads/v10/resources/BiddingStrategySimulationName.java
Java
apache-2.0
10,111
import React, { useContext } from 'react'; import { css, cx } from 'emotion'; import { CompletionItem, selectThemeVariant, ThemeContext } from '../..'; import { GrafanaTheme, renderMarkdown, textUtil } from '@savantly/sprout-api'; const getStyles = (theme: GrafanaTheme, height: number, visible: boolean) => { return { typeaheadItem: css` label: type-ahead-item; z-index: 11; padding: ${theme.spacing.sm} ${theme.spacing.sm} ${theme.spacing.sm} ${theme.spacing.md}; border-radius: ${theme.border.radius.md}; border: ${selectThemeVariant( { light: `solid 1px ${theme.palette.gray5}`, dark: `solid 1px ${theme.palette.dark1}` }, theme.type )}; overflow-y: scroll; overflow-x: hidden; outline: none; background: ${selectThemeVariant({ light: theme.palette.white, dark: theme.palette.dark4 }, theme.type)}; color: ${theme.colors.text}; box-shadow: ${selectThemeVariant( { light: `0 5px 10px 0 ${theme.palette.gray5}`, dark: `0 5px 10px 0 ${theme.palette.black}` }, theme.type )}; visibility: ${visible === true ? 'visible' : 'hidden'}; width: 250px; height: ${height + parseInt(theme.spacing.xxs, 10)}px; position: relative; word-break: break-word; `, }; }; interface Props { item: CompletionItem; height: number; } export const TypeaheadInfo: React.FC<Props> = ({ item, height }) => { const visible = item && !!item.documentation; const label = item ? item.label : ''; const documentation = textUtil.sanitize(renderMarkdown(item?.documentation)); const theme = useContext(ThemeContext); const styles = getStyles(theme, height, visible); return ( <div className={cx([styles.typeaheadItem])}> <b>{label}</b> <hr /> <div dangerouslySetInnerHTML={{ __html: documentation }} /> </div> ); };
savantly-net/sprout-platform
frontend/libs/sprout-ui/src/components/Typeahead/TypeaheadInfo.tsx
TypeScript
apache-2.0
1,884
/** * Copyright (C) 2011 Xavier Jodoin (xavier@jodoin.me) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * @author xjodoin * @version $Id: $Id */ package org.torpedoquery.jpa; import static org.torpedoquery.jpa.internal.TorpedoMagic.getTorpedoMethodHandler; import static org.torpedoquery.jpa.internal.TorpedoMagic.setQuery; import org.torpedoquery.core.QueryBuilder; import org.torpedoquery.jpa.internal.Selector; import org.torpedoquery.jpa.internal.TorpedoProxy; import org.torpedoquery.jpa.internal.functions.CoalesceFunction; import org.torpedoquery.jpa.internal.functions.DynamicInstantiationFunction; import org.torpedoquery.jpa.internal.handlers.ArrayCallHandler; import org.torpedoquery.jpa.internal.handlers.AscFunctionHandler; import org.torpedoquery.jpa.internal.handlers.AvgFunctionHandler; import org.torpedoquery.jpa.internal.handlers.ComparableConstantFunctionHandler; import org.torpedoquery.jpa.internal.handlers.ConstantFunctionHandler; import org.torpedoquery.jpa.internal.handlers.CustomFunctionHandler; import org.torpedoquery.jpa.internal.handlers.DescFunctionHandler; import org.torpedoquery.jpa.internal.handlers.DistinctFunctionHandler; import org.torpedoquery.jpa.internal.handlers.IndexFunctionHandler; import org.torpedoquery.jpa.internal.handlers.MathOperationHandler; import org.torpedoquery.jpa.internal.handlers.MaxFunctionHandler; import org.torpedoquery.jpa.internal.handlers.MinFunctionHandler; import org.torpedoquery.jpa.internal.handlers.SubstringFunctionHandler; import org.torpedoquery.jpa.internal.handlers.SumFunctionHandler; import org.torpedoquery.jpa.internal.handlers.ValueHandler; import org.torpedoquery.jpa.internal.utils.TorpedoMethodHandler; public class TorpedoFunction { // JPA Functions /** * <p>count.</p> * * @param object a {@link java.lang.Object} object. * @return a {@link org.torpedoquery.jpa.Function} object. */ public static Function<Long> count(Object object) { return function("count", Long.class, object); } /** * <p>sum.</p> * * @param number a T object. * @param <V> a V object. * @return a {@link org.torpedoquery.jpa.ComparableFunction} object. * @param <T> a T object. */ public static <V, T extends Comparable<V>> ComparableFunction<V> sum( T number) { return getTorpedoMethodHandler().handle( new SumFunctionHandler<V>(number)); } /** * <p>sum.</p> * * @param number a {@link org.torpedoquery.jpa.Function} object. * @param <V> a V object. * @return a {@link org.torpedoquery.jpa.ComparableFunction} object. * @param <T> a T object. */ public static <V, T extends Comparable<V>> ComparableFunction<V> sum( Function<T> number) { return getTorpedoMethodHandler().handle( new SumFunctionHandler<V>(number)); } /** * <p>min.</p> * * @param number a T object. * @param <V> a V object. * @return a {@link org.torpedoquery.jpa.ComparableFunction} object. * @param <T> a T object. 
*/ public static <V, T extends Comparable<V>> ComparableFunction<V> min( T number) { return getTorpedoMethodHandler().handle( new MinFunctionHandler<V>(number)); } /** * <p>min.</p> * * @param number a {@link org.torpedoquery.jpa.Function} object. * @param <V> a V object. * @return a {@link org.torpedoquery.jpa.ComparableFunction} object. * @param <T> a T object. */ public static <V, T extends Comparable<V>> ComparableFunction<V> min( Function<T> number) { return getTorpedoMethodHandler().handle( new MinFunctionHandler<V>(number)); } /** * <p>max.</p> * * @param number a T object. * @param <V> a V object. * @return a {@link org.torpedoquery.jpa.ComparableFunction} object. * @param <T> a T object. */ public static <V, T extends Comparable<V>> ComparableFunction<V> max( T number) { return getTorpedoMethodHandler().handle( new MaxFunctionHandler<V>(number)); } /** * <p>max.</p> * * @param number a {@link org.torpedoquery.jpa.Function} object. * @param <V> a V object. * @return a {@link org.torpedoquery.jpa.ComparableFunction} object. * @param <T> a T object. */ public static <V, T extends Comparable<V>> ComparableFunction<V> max( Function<T> number) { return getTorpedoMethodHandler().handle( new MaxFunctionHandler<V>(number)); } /** * <p>avg.</p> * * @param number a T object. * @param <V> a V object. * @return a {@link org.torpedoquery.jpa.ComparableFunction} object. * @param <T> a T object. */ public static <V, T extends Comparable<V>> ComparableFunction<V> avg( T number) { return getTorpedoMethodHandler().handle( new AvgFunctionHandler<V>(number)); } /** * <p>avg.</p> * * @param number a {@link org.torpedoquery.jpa.Function} object. * @param <V> a V object. * @return a {@link org.torpedoquery.jpa.ComparableFunction} object. * @param <T> a T object. */ public static <V, T extends Comparable<V>> ComparableFunction<V> avg( Function<T> number) { return getTorpedoMethodHandler().handle( new AvgFunctionHandler<V>(number)); } /** * <p>coalesce.</p> * * @param values a E object. * @param <T> a T object. * @return a E object. * @param <E> a E object. */ public static <T, E extends Function<T>> E coalesce(E... values) { CoalesceFunction<E> coalesceFunction = getCoalesceFunction(values); return (E) coalesceFunction; } /** * <p>coalesce.</p> * * @param values a T object. * @param <T> a T object. * @return a {@link org.torpedoquery.jpa.Function} object. */ public static <T> Function<T> coalesce(T... values) { return getCoalesceFunction(values); } private static <T> CoalesceFunction<T> getCoalesceFunction(T... 
values) { final CoalesceFunction coalesceFunction = new CoalesceFunction(); getTorpedoMethodHandler().handle( new ArrayCallHandler(new ValueHandler<Void>() { @Override public Void handle(TorpedoProxy proxy, QueryBuilder queryBuilder, Selector selector) { coalesceFunction.setQuery(proxy); coalesceFunction.addSelector(selector); return null; } }, values)); return coalesceFunction; } private static <T> DynamicInstantiationFunction<T> getDynamicInstantiationFunction(T val) { final DynamicInstantiationFunction<T> dynFunction = new DynamicInstantiationFunction<>(val); TorpedoMethodHandler torpedoMethodHandler = getTorpedoMethodHandler(); Object[] params = torpedoMethodHandler.params(); torpedoMethodHandler.handle( new ArrayCallHandler(new ValueHandler<Void>() { @Override public Void handle(TorpedoProxy proxy, QueryBuilder queryBuilder, Selector selector) { dynFunction.setQuery(proxy); dynFunction.addSelector(selector); return null; } }, params)); return dynFunction; } /** * * Hibernate calls this "dynamic instantiation". JPQL supports some of this feature and calls it a "constructor expression". * * dyn(new ProjectionEntity( * param(entity.getCode()), param(entity.getIntegerField()))) * * Important: you need to wrap each constructor parameter with a param() call * * @param object a T object. * @param <T> a T object. * @return a {@link org.torpedoquery.jpa.Function} object. */ public static <T> Function<T> dyn(T object) { return getDynamicInstantiationFunction(object); } /** * <p>distinct.</p> * * @param object a T object. * @param <T> a T object. * @return a {@link org.torpedoquery.jpa.Function} object. */ public static <T> Function<T> distinct(T object) { if (object instanceof TorpedoProxy) { setQuery((TorpedoProxy) object); } return getTorpedoMethodHandler().handle( new DistinctFunctionHandler<T>(object)); } /** * <p>constant.</p> * * @param constant a T object. * @param <T> a T object. * @return a {@link org.torpedoquery.jpa.Function} object. */ public static <T> Function<T> constant(T constant) { return getTorpedoMethodHandler().handle( new ConstantFunctionHandler<T>(constant)); } /** * <p>constant.</p> * * @param constant a T object. * @param <V> a V object. * @return a {@link org.torpedoquery.jpa.ComparableFunction} object. * @param <T> a T object. */ public static <V, T extends Comparable<V>> ComparableFunction<T> constant( T constant) { return getTorpedoMethodHandler().handle( new ComparableConstantFunctionHandler<T>(constant)); } /** * <p>index.</p> * * @param object a T object. * @param <T> a T object. * @return a {@link org.torpedoquery.jpa.ComparableFunction} object. */ public static <T> ComparableFunction<Integer> index(T object) { if (object instanceof TorpedoProxy) { setQuery((TorpedoProxy) object); } return getTorpedoMethodHandler().handle( new IndexFunctionHandler(object)); } /** * Use this method to call functions which are not supported natively by * Torpedo * * @return your custom function * @param name a {@link java.lang.String} object. * @param returnType a {@link java.lang.Class} object. * @param value a {@link java.lang.Object} object. * @param <T> a T object. */ public static <T> Function<T> function(String name, Class<T> returnType, Object value) { return getTorpedoMethodHandler().handle( new CustomFunctionHandler<T>(name, value)); } /** * <p>comparableFunction.</p> * * @param name a {@link java.lang.String} object. * @param returnType a {@link java.lang.Class} object. * @param value a {@link java.lang.Object} object. * @param <V> a V object.
* @return a {@link org.torpedoquery.jpa.ComparableFunction} object. * @param <T> a T object. */ public static <V, T extends Comparable<V>> ComparableFunction<V> comparableFunction( String name, Class<V> returnType, Object value) { return getTorpedoMethodHandler().handle( new CustomFunctionHandler<V>(name, value)); } // orderBy function /** * <p>asc.</p> * * @param object a T object. * @param <T> a T object. * @return a {@link org.torpedoquery.jpa.Function} object. */ public static <T> Function<T> asc(T object) { return getTorpedoMethodHandler().handle(new AscFunctionHandler<T>()); } /** * <p>desc.</p> * * @param object a T object. * @param <T> a T object. * @return a {@link org.torpedoquery.jpa.Function} object. */ public static <T> Function<T> desc(T object) { return getTorpedoMethodHandler().handle(new DescFunctionHandler<T>()); } // math operation /** * <p>operation.</p> * * @param left a T object. * @param <T> a T object. * @return a {@link org.torpedoquery.jpa.OnGoingMathOperation} object. */ public static <T> OnGoingMathOperation<T> operation(T left) { return getTorpedoMethodHandler().handle( new MathOperationHandler<T>(null)); } /** * <p>operation.</p> * * @param left a {@link org.torpedoquery.jpa.Function} object. * @param <T> a T object. * @return a {@link org.torpedoquery.jpa.OnGoingMathOperation} object. */ public static <T> OnGoingMathOperation<T> operation(Function<T> left) { return getTorpedoMethodHandler().handle( new MathOperationHandler<T>(left)); } // string functions // substring(), trim(), lower(), upper(), length() /** * <p>trim.</p> * * @param field a {@link java.lang.String} object. * @return a {@link org.torpedoquery.jpa.Function} object. */ public static Function<String> trim(String field) { return function("trim", String.class, field); } /** * <p>trim.</p> * * @param function a {@link org.torpedoquery.jpa.Function} object. * @return a {@link org.torpedoquery.jpa.Function} object. */ public static Function<String> trim(Function<String> function) { return function("trim", String.class, function); } /** * <p>lower.</p> * * @param field a {@link java.lang.String} object. * @return a {@link org.torpedoquery.jpa.Function} object. */ public static Function<String> lower(String field) { return function("lower", String.class, field); } /** * <p>lower.</p> * * @param function a {@link org.torpedoquery.jpa.Function} object. * @return a {@link org.torpedoquery.jpa.Function} object. */ public static Function<String> lower(Function<String> function) { return function("lower", String.class, function); } /** * <p>upper.</p> * * @param field a {@link java.lang.String} object. * @return a {@link org.torpedoquery.jpa.Function} object. */ public static Function<String> upper(String field) { return function("upper", String.class, field); } /** * <p>upper.</p> * * @param function a {@link org.torpedoquery.jpa.Function} object. * @return a {@link org.torpedoquery.jpa.Function} object. */ public static Function<String> upper(Function<String> function) { return function("upper", String.class, function); } /** * <p>length.</p> * * @param field a {@link java.lang.String} object. * @return a {@link org.torpedoquery.jpa.ComparableFunction} object. */ public static ComparableFunction<Integer> length(String field) { return comparableFunction("length", Integer.class, field); } /** * <p>length.</p> * * @param function a {@link org.torpedoquery.jpa.Function} object. * @return a {@link org.torpedoquery.jpa.ComparableFunction} object. 
*/ public static ComparableFunction<Integer> length(Function<String> function) { return comparableFunction("length", Integer.class, function); } /** * <p>substring.</p> * * @param param a {@link java.lang.String} object. * @param beginIndex a int. * @param endIndex a int. * @return a {@link org.torpedoquery.jpa.Function} object. */ public static Function<String> substring(String param, int beginIndex, int endIndex) { return getTorpedoMethodHandler().handle( new SubstringFunctionHandler(param, beginIndex, endIndex)); } /** * <p>substring.</p> * * @param param a {@link org.torpedoquery.jpa.Function} object. * @param beginIndex a int. * @param endIndex a int. * @return a {@link org.torpedoquery.jpa.Function} object. */ public static Function<String> substring(Function<String> param, int beginIndex, int endIndex) { return getTorpedoMethodHandler().handle( new SubstringFunctionHandler(param, beginIndex, endIndex)); } }
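/* Editor's note: a usage sketch for the helpers above, left as a comment because it
   depends on names that do not appear in this file: the standard Torpedo entry points
   from(...) and select(...) in org.torpedoquery.jpa.Torpedo, and some mapped Entity
   class. Under those assumptions a counting query would look roughly like:

       Entity entity = from(Entity.class);
       org.torpedoquery.jpa.Query<Long> query = select(count(entity));
       // query.getQuery() then yields HQL along the lines of
       // "select count(entity_0) from Entity entity_0"
*/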
xjodoin/torpedoquery
src/main/java/org/torpedoquery/jpa/TorpedoFunction.java
Java
apache-2.0
14,719
# pylint: skip-file # flake8: noqa # pylint: disable=too-many-lines # noqa: E301,E302,E303,T001 class OpenShiftCLIError(Exception): '''Exception class for openshiftcli''' pass ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')] def locate_oc_binary(): ''' Find and return oc binary file ''' # https://github.com/openshift/openshift-ansible/issues/3410 # oc can be in /usr/local/bin in some cases, but that may not # be in $PATH due to ansible/sudo paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS oc_binary = 'oc' # Use shutil.which if it is available, otherwise fallback to a naive path search try: which_result = shutil.which(oc_binary, path=os.pathsep.join(paths)) if which_result is not None: oc_binary = which_result except AttributeError: for path in paths: if os.path.exists(os.path.join(path, oc_binary)): oc_binary = os.path.join(path, oc_binary) break return oc_binary # pylint: disable=too-few-public-methods class OpenShiftCLI(object): ''' Class to wrap the command line tools ''' def __init__(self, namespace, kubeconfig='/etc/origin/master/admin.kubeconfig', verbose=False, all_namespaces=False): ''' Constructor for OpenshiftCLI ''' self.namespace = namespace self.verbose = verbose self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig) self.all_namespaces = all_namespaces self.oc_binary = locate_oc_binary() # Pylint allows only 5 arguments to be passed. # pylint: disable=too-many-arguments def _replace_content(self, resource, rname, content, force=False, sep='.'): ''' replace the current object with the content ''' res = self._get(resource, rname) if not res['results']: return res fname = Utils.create_tmpfile(rname + '-') yed = Yedit(fname, res['results'][0], separator=sep) changes = [] for key, value in content.items(): changes.append(yed.put(key, value)) if any([change[0] for change in changes]): yed.write() atexit.register(Utils.cleanup, [fname]) return self._replace(fname, force) return {'returncode': 0, 'updated': False} def _replace(self, fname, force=False): '''replace the current object with oc replace''' # We are removing the 'resourceVersion' to handle # a race condition when modifying oc objects yed = Yedit(fname) results = yed.delete('metadata.resourceVersion') if results[0]: yed.write() cmd = ['replace', '-f', fname] if force: cmd.append('--force') return self.openshift_cmd(cmd) def _create_from_content(self, rname, content): '''create a temporary file and then call oc create on it''' fname = Utils.create_tmpfile(rname + '-') yed = Yedit(fname, content=content) yed.write() atexit.register(Utils.cleanup, [fname]) return self._create(fname) def _create(self, fname): '''call oc create on a filename''' return self.openshift_cmd(['create', '-f', fname]) def _delete(self, resource, name=None, selector=None): '''call oc delete on a resource''' cmd = ['delete', resource] if selector is not None: cmd.append('--selector={}'.format(selector)) elif name is not None: cmd.append(name) else: raise OpenShiftCLIError('Either name or selector is required when calling delete.') return self.openshift_cmd(cmd) def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501 '''process a template template_name: the name of the template to process create: whether to send to oc create after processing params: the parameters for the template template_data: the incoming template's data; instead of a file ''' cmd = ['process'] if template_data: cmd.extend(['-f', '-']) else: cmd.append(template_name) if params: 
param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()] cmd.append('-v') cmd.extend(param_str) results = self.openshift_cmd(cmd, output=True, input_data=template_data) if results['returncode'] != 0 or not create: return results fname = Utils.create_tmpfile(template_name + '-') yed = Yedit(fname, results['results']) yed.write() atexit.register(Utils.cleanup, [fname]) return self.openshift_cmd(['create', '-f', fname]) def _get(self, resource, name=None, selector=None): '''return a resource by name ''' cmd = ['get', resource] if selector is not None: cmd.append('--selector={}'.format(selector)) elif name is not None: cmd.append(name) cmd.extend(['-o', 'json']) rval = self.openshift_cmd(cmd, output=True) # Ensure results are returned in an array if 'items' in rval: rval['results'] = rval['items'] elif not isinstance(rval['results'], list): rval['results'] = [rval['results']] return rval def _schedulable(self, node=None, selector=None, schedulable=True): ''' perform oadm manage-node schedulable ''' cmd = ['manage-node'] if node: cmd.extend(node) else: cmd.append('--selector={}'.format(selector)) cmd.append('--schedulable={}'.format(schedulable)) return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501 def _list_pods(self, node=None, selector=None, pod_selector=None): ''' perform oadm list pods node: the node in which to list pods selector: the label selector filter if provided pod_selector: the pod selector filter if provided ''' cmd = ['manage-node'] if node: cmd.extend(node) else: cmd.append('--selector={}'.format(selector)) if pod_selector: cmd.append('--pod-selector={}'.format(pod_selector)) cmd.extend(['--list-pods', '-o', 'json']) return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # pylint: disable=too-many-arguments def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False): ''' perform oadm manage-node evacuate ''' cmd = ['manage-node'] if node: cmd.extend(node) else: cmd.append('--selector={}'.format(selector)) if dry_run: cmd.append('--dry-run') if pod_selector: cmd.append('--pod-selector={}'.format(pod_selector)) if grace_period: cmd.append('--grace-period={}'.format(int(grace_period))) if force: cmd.append('--force') cmd.append('--evacuate') return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') def _version(self): ''' return the openshift version''' return self.openshift_cmd(['version'], output=True, output_type='raw') def _import_image(self, url=None, name=None, tag=None): ''' perform image import ''' cmd = ['import-image'] image = '{0}'.format(name) if tag: image += ':{0}'.format(tag) cmd.append(image) if url: cmd.append('--from={0}/{1}'.format(url, image)) cmd.append('-n{0}'.format(self.namespace)) cmd.append('--confirm') return self.openshift_cmd(cmd) def _run(self, cmds, input_data): ''' Actually executes the command. This makes mocking easier.
''' curr_env = os.environ.copy() curr_env.update({'KUBECONFIG': self.kubeconfig}) proc = subprocess.Popen(cmds, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=curr_env) stdout, stderr = proc.communicate(input_data) return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8') # pylint: disable=too-many-arguments,too-many-branches def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): '''Base command for oc ''' cmds = [self.oc_binary] if oadm: cmds.append('adm') cmds.extend(cmd) if self.all_namespaces: cmds.extend(['--all-namespaces']) elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']: # E501 cmds.extend(['-n', self.namespace]) if self.verbose: print(' '.join(cmds)) try: returncode, stdout, stderr = self._run(cmds, input_data) except OSError as ex: returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex) rval = {"returncode": returncode, "cmd": ' '.join(cmds)} if output_type == 'json': rval['results'] = {} if output and stdout: try: rval['results'] = json.loads(stdout) except ValueError as verr: if "No JSON object could be decoded" in verr.args: rval['err'] = verr.args elif output_type == 'raw': rval['results'] = stdout if output else '' if self.verbose: print("STDOUT: {0}".format(stdout)) print("STDERR: {0}".format(stderr)) if 'err' in rval or returncode != 0: rval.update({"stderr": stderr, "stdout": stdout}) return rval class Utils(object): ''' utilities for openshiftcli modules ''' @staticmethod def _write(filename, contents): ''' Actually write the file contents to disk. This helps with mocking. ''' with open(filename, 'w') as sfd: sfd.write(str(contents)) @staticmethod def create_tmp_file_from_contents(rname, data, ftype='yaml'): ''' create a file in tmp with name and contents''' tmp = Utils.create_tmpfile(prefix=rname) if ftype == 'yaml': # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage # pylint: disable=no-member if hasattr(yaml, 'RoundTripDumper'): Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper)) else: Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False)) elif ftype == 'json': Utils._write(tmp, json.dumps(data)) else: Utils._write(tmp, data) # Register cleanup when module is done atexit.register(Utils.cleanup, [tmp]) return tmp @staticmethod def create_tmpfile_copy(inc_file): '''create a temporary copy of a file''' tmpfile = Utils.create_tmpfile('lib_openshift-') Utils._write(tmpfile, open(inc_file).read()) # Cleanup the tmpfile atexit.register(Utils.cleanup, [tmpfile]) return tmpfile @staticmethod def create_tmpfile(prefix='tmp'): ''' Generates and returns a temporary file name ''' with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp: return tmp.name @staticmethod def create_tmp_files_from_contents(content, content_type=None): '''Turn an array of dict: filename, content into a files array''' if not isinstance(content, list): content = [content] files = [] for item in content: path = Utils.create_tmp_file_from_contents(item['path'] + '-', item['data'], ftype=content_type) files.append({'name': os.path.basename(item['path']), 'path': path}) return files @staticmethod def cleanup(files): '''Clean up on exit ''' for sfile in files: if os.path.exists(sfile): if os.path.isdir(sfile): shutil.rmtree(sfile) elif os.path.isfile(sfile): os.remove(sfile) @staticmethod def exists(results, _name): ''' Check to see if the results include the name ''' if not results: return False if
Utils.find_result(results, _name): return True return False @staticmethod def find_result(results, _name): ''' Find the specified result by name''' rval = None for result in results: if 'metadata' in result and result['metadata']['name'] == _name: rval = result break return rval @staticmethod def get_resource_file(sfile, sfile_type='yaml'): ''' return the service file ''' contents = None with open(sfile) as sfd: contents = sfd.read() if sfile_type == 'yaml': # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage # pylint: disable=no-member if hasattr(yaml, 'RoundTripLoader'): contents = yaml.load(contents, yaml.RoundTripLoader) else: contents = yaml.safe_load(contents) elif sfile_type == 'json': contents = json.loads(contents) return contents @staticmethod def filter_versions(stdout): ''' filter the oc version output ''' version_dict = {} version_search = ['oc', 'openshift', 'kubernetes'] for line in stdout.strip().split('\n'): for term in version_search: if not line: continue if line.startswith(term): version_dict[term] = line.split()[-1] # horrible hack to get openshift version in Openshift 3.2 # By default "oc version in 3.2 does not return an "openshift" version if "openshift" not in version_dict: version_dict["openshift"] = version_dict["oc"] return version_dict @staticmethod def add_custom_versions(versions): ''' create custom versions strings ''' versions_dict = {} for tech, version in versions.items(): # clean up "-" from version if "-" in version: version = version.split("-")[0] if version.startswith('v'): versions_dict[tech + '_numeric'] = version[1:].split('+')[0] # "v3.3.0.33" is what we have, we want "3.3" versions_dict[tech + '_short'] = version[1:4] return versions_dict @staticmethod def openshift_installed(): ''' check if openshift is installed ''' import rpm transaction_set = rpm.TransactionSet() rpmquery = transaction_set.dbMatch("name", "atomic-openshift") return rpmquery.count() > 0 # Disabling too-many-branches. This is a yaml dictionary comparison function # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements @staticmethod def check_def_equal(user_def, result_def, skip_keys=None, debug=False): ''' Given a user defined definition, compare it with the results given back by our query. 
''' # Currently these values are autogenerated and we do not need to check them skip = ['metadata', 'status'] if skip_keys: skip.extend(skip_keys) for key, value in result_def.items(): if key in skip: continue # Both are lists if isinstance(value, list): if key not in user_def: if debug: print('User data does not have key [%s]' % key) print('User data: %s' % user_def) return False if not isinstance(user_def[key], list): if debug: print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key])) return False if len(user_def[key]) != len(value): if debug: print("List lengths are not equal.") print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value))) print("user_def: %s" % user_def[key]) print("value: %s" % value) return False for values in zip(user_def[key], value): if isinstance(values[0], dict) and isinstance(values[1], dict): if debug: print('sending list - list') print(type(values[0])) print(type(values[1])) result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug) if not result: print('list compare returned false') return False elif value != user_def[key]: if debug: print('value should be identical') print(user_def[key]) print(value) return False # recurse on a dictionary elif isinstance(value, dict): if key not in user_def: if debug: print("user_def does not have key [%s]" % key) return False if not isinstance(user_def[key], dict): if debug: print("dict returned false: not instance of dict") return False # before passing ensure keys match api_values = set(value.keys()) - set(skip) user_values = set(user_def[key].keys()) - set(skip) if api_values != user_values: if debug: print("keys are not equal in dict") print(user_values) print(api_values) return False result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug) if not result: if debug: print("dict returned false") print(result) return False # Verify each key, value pair is the same else: if key not in user_def or value != user_def[key]: if debug: print("value not equal; user_def does not have key") print(key) print(value) if key in user_def: print(user_def[key]) return False if debug: print('returning true') return True class OpenShiftCLIConfig(object): '''Generic Config''' def __init__(self, rname, namespace, kubeconfig, options): self.kubeconfig = kubeconfig self.name = rname self.namespace = namespace self._options = options @property def config_options(self): ''' return config options ''' return self._options def to_option_list(self, ascommalist=''): '''return all options as a string if ascommalist is set to the name of a key, and the value of that key is a dict, format the dict as a list of comma delimited key=value pairs''' return self.stringify(ascommalist) def stringify(self, ascommalist=''): ''' return the options hash as cli params in a string if ascommalist is set to the name of a key, and the value of that key is a dict, format the dict as a list of comma delimited key=value pairs ''' rval = [] for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: val = data['value'] rval.append('--{}={}'.format(key.replace('_', '-'), val)) return rval
akubicharm/openshift-ansible
roles/lib_openshift/src/lib/base.py
Python
apache-2.0
21,165
package ru.job4j.testtask; /** * Class Account. */ public class Account { public double value; public String requisites; /** * Constructor. * @param value amount of money. * @param requisites user's account. */ public Account(double value, String requisites) { this.value = value; this.requisites = requisites; } @Override public boolean equals(Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } Account account = (Account) o; if (Double.compare(value, account.value) != 0) { /* Double.compare handles NaN and -0.0 consistently, unlike != */ return false; } return requisites.equals(account.requisites); } @Override public int hashCode() { int result = (int) value; result = 31 * result + requisites.hashCode(); return result; } }
TatyanaAlex/tfukova
chapter_003/src/main/java/ru/job4j/testtask/Account.java
Java
apache-2.0
919
# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import eventlet eventlet.monkey_patch() import uuid import mock from oslo.config import cfg from mistral.tests import base from mistral.openstack.common import log as logging from mistral.openstack.common import importutils from mistral.engine import states from mistral.db import api as db_api from mistral.actions import std_actions from mistral import engine from mistral.engine import executor # We need to make sure that all configuration properties are registered. importutils.import_module("mistral.config") LOG = logging.getLogger(__name__) # Use the set_default method to set value otherwise in certain test cases # the change in value is not permanent. cfg.CONF.set_default('auth_enable', False, group='pecan') WORKBOOK_NAME = 'my_workbook' TASK_NAME = 'create-vms' SAMPLE_WORKBOOK = { 'id': str(uuid.uuid4()), 'name': WORKBOOK_NAME, 'description': 'my description', 'definition': base.get_resource("test_rest.yaml"), 'tags': [], 'scope': 'public', 'updated_at': None, 'project_id': '123', 'trust_id': '1234' } SAMPLE_EXECUTION = { 'id': str(uuid.uuid4()), 'workbook_name': WORKBOOK_NAME, 'task': TASK_NAME, 'state': states.RUNNING, 'updated_at': None, 'context': None } SAMPLE_TASK = { 'name': TASK_NAME, 'workbook_name': WORKBOOK_NAME, 'action_spec': { 'name': 'my-action', 'class': 'std.http', 'base-parameters': { 'url': 'http://localhost:8989/v1/workbooks', 'method': 'GET' }, 'namespace': 'MyRest' }, 'task_spec': { 'action': 'MyRest.my-action', 'name': TASK_NAME}, 'requires': {}, 'state': states.IDLE} SAMPLE_CONTEXT = { 'user': 'admin', 'tenant': 'mistral' } class TestExecutor(base.DbTestCase): def __init__(self, *args, **kwargs): super(TestExecutor, self).__init__(*args, **kwargs) self.transport = base.get_fake_transport() @mock.patch.object( executor.ExecutorClient, 'handle_task', mock.MagicMock(side_effect=base.EngineTestCase.mock_handle_task)) @mock.patch.object( std_actions.HTTPAction, 'run', mock.MagicMock(return_value={})) @mock.patch.object( engine.EngineClient, 'convey_task_result', mock.MagicMock(side_effect=base.EngineTestCase.mock_task_result)) def test_handle_task(self): # Create a new workbook. workbook = db_api.workbook_create(SAMPLE_WORKBOOK) self.assertIsInstance(workbook, dict) # Create a new execution. execution = db_api.execution_create(SAMPLE_EXECUTION['workbook_name'], SAMPLE_EXECUTION) self.assertIsInstance(execution, dict) # Create a new task. SAMPLE_TASK['execution_id'] = execution['id'] task = db_api.task_create(SAMPLE_TASK['workbook_name'], SAMPLE_TASK['execution_id'], SAMPLE_TASK) self.assertIsInstance(task, dict) self.assertIn('id', task) # Send the task request to the Executor. ex_client = executor.ExecutorClient(self.transport) ex_client.handle_task(SAMPLE_CONTEXT, task=task) # Check task execution state. db_task = db_api.task_get(task['workbook_name'], task['execution_id'], task['id']) self.assertEqual(db_task['state'], states.SUCCESS)
dmitryilyin/mistral
mistral/tests/unit/engine/default/test_executor.py
Python
apache-2.0
4,084
package com.osiris.component.bootstrap.menu.render; import java.io.IOException; import javax.faces.component.UIComponent; import javax.faces.context.FacesContext; import javax.faces.context.ResponseWriter; import javax.faces.render.FacesRenderer; import org.primefaces.renderkit.CoreRenderer; import com.osiris.component.bootstrap.menu.UIMenuItem; import com.osiris.component.util.HTML; import com.osiris.component.util.HtmlConstants; /** * * Class that renders a single menu item. * * @author Cristian Urbainski<cristianurbainskips@gmail.com> * @since 13/07/2013 * @version 1.0 * */ @FacesRenderer(componentFamily = UIMenuItem.COMPONENT_FAMILY, rendererType = MenuItemRender.RENDERER_TYPE) public class MenuItemRender extends CoreRenderer { /** * Renderer type of this component. */ public static final String RENDERER_TYPE = "com.osiris.component.bootstrap.MenuItemRenderer"; @Override public void encodeEnd(FacesContext context, UIComponent component) throws IOException { UIMenuItem menuItem = (UIMenuItem) component; encodeMarkup(context, menuItem); } /** * Method responsible for building the HTML markup for this component. * * @param context the JSF context * @param menuItem the component to be rendered as HTML * @throws IOException exception that may occur while writing */ protected void encodeMarkup(FacesContext context, UIMenuItem menuItem) throws IOException { ResponseWriter writer = context.getResponseWriter(); String clientId = menuItem.getClientId(context); String style = menuItem.getStyle(); String styleClass = ""; if (menuItem.isActive()) { styleClass = "active"; } if (menuItem.getStyleClass() != null) { styleClass += " " + menuItem.getStyleClass(); } writer.startElement(HTML.LI_ELEM, null); writer.writeAttribute(HtmlConstants.ID_ATTRIBUTE, clientId, null); if (styleClass.length() > 0) { writer.writeAttribute(HtmlConstants.CLASS_ATTR, styleClass, null); } if (style != null) { writer.writeAttribute(HtmlConstants.STYLE_ATTRIBUTE, style, null); } writer.startElement(HTML.A_ELEM, null); writer.writeAttribute(HtmlConstants.HREF_ATTR, menuItem.getLocation(), null); writer.writeText(menuItem.getLabel(), null); writer.endElement(HTML.A_ELEM); writer.endElement(HTML.LI_ELEM); } }
CristianUrbainski/osiris-faces
osiris-faces/src/main/java/com/osiris/component/bootstrap/menu/render/MenuItemRender.java
Java
apache-2.0
2,535
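A quick sketch of the markup the MenuItemRender entry above writes through the ResponseWriter. The client id, label and location below are hypothetical values; this only illustrates the expected <li><a> structure, it is not part of the component itself.

// Illustrative only: the structure encodeMarkup(...) emits for an active item.
// "menu:home", "Home" and "/home" are made-up example values.
public class MenuItemMarkupSketch {
    public static void main(String[] args) {
        String clientId = "menu:home";  // hypothetical JSF client id
        String styleClass = "active";   // set when menuItem.isActive() is true
        String markup = "<li id=\"" + clientId + "\" class=\"" + styleClass + "\">"
                + "<a href=\"/home\">Home</a></li>";
        System.out.println(markup);
    }
}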
import codecs from pandas import read_csv import argparse import numpy as np import os FIELD_NAMES = ["context_id","target","target_pos","target_position","gold_sense_ids","predict_sense_ids", "golden_related","predict_related","context"] FIELD_TYPES = {"context_id":np.dtype(str),"target":np.dtype(str),"target_pos":np.dtype(str),"target_position":np.dtype(str),"gold_sense_ids":np.dtype(str),"predict_sense_ids":np.dtype(str), "golden_related":np.dtype(str),"predict_related":np.dtype(str),"context":np.dtype(str)} def cut_9_first(dataset_fpath, dataset_9_fpath): """ Cuts the first 9 columns of the dataset file to make it openable with read_csv. """ with codecs.open(dataset_fpath, "r", "utf-8") as in_dataset, codecs.open(dataset_9_fpath, "w", "utf-8") as out_dataset: for line in in_dataset: print >> out_dataset, "\t".join(line.split("\t")[:9]) def convert_dataset2semevalkey(dataset_fpath, output_fpath, no_header=False): with codecs.open(output_fpath, "w", encoding="utf-8") as output: if no_header: df = read_csv(dataset_fpath, sep='\t', encoding='utf8', header=None, names=FIELD_NAMES, dtype=FIELD_TYPES, doublequote=False, quotechar='\0') df.target = df.target.astype(str) else: df = read_csv(dataset_fpath, encoding='utf-8', delimiter="\t", error_bad_lines=False, doublequote=False, quotechar='\0') for i, row in df.iterrows(): predicted_senses = " ".join(unicode(row.predict_sense_ids).split(",")) print >> output, "%s %s %s" % (row.target + "." + row.target_pos, row.context_id, predicted_senses) print "Key file:", output_fpath def main(): parser = argparse.ArgumentParser(description='Convert lexical sample dataset to SemEval 2013 key format.') parser.add_argument('input', help='Path to the input file.') parser.add_argument('output', help='Output file.') parser.add_argument('--no_header', action='store_true', help='No headers. Default -- false.') args = parser.parse_args() print "Input: ", args.input print "Output: ", args.output print "No header:", args.no_header tmp_fpath = args.input + "-9-columns.csv" cut_9_first(args.input, tmp_fpath) convert_dataset2semevalkey(tmp_fpath, args.output, args.no_header) #os.remove(tmp_fpath) if __name__ == '__main__': main()
mpelevina/context-eval
semeval_2013_13/dataset2key.py
Python
apache-2.0
2,427
/* * $Id: HtmlTag.java 54929 2004-10-16 16:38:42Z germuska $ * * Copyright 1999-2004 The Apache Software Foundation. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.struts.taglib.html; import java.util.Locale; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpSession; import javax.servlet.jsp.JspException; import javax.servlet.jsp.PageContext; import javax.servlet.jsp.tagext.TagSupport; import org.apache.struts.Globals; import org.apache.struts.taglib.TagUtils; import org.apache.struts.util.MessageResources; /** * Renders an HTML <html> element with appropriate language attributes if * there is a current Locale available in the user's session. * * @version $Rev: 54929 $ $Date: 2004-10-17 01:38:42 +0900 (日, 17 10月 2004) $ */ public class HtmlTag extends TagSupport { // ------------------------------------------------------------- Properties /** * The message resources for this package. */ protected static MessageResources messages = MessageResources.getMessageResources(Constants.Package + ".LocalStrings"); /** * Should we set the current Locale for this user if needed? * @deprecated This will be removed after Struts 1.2. */ protected boolean locale = false; /** * @deprecated This will be removed after Struts 1.2. */ public boolean getLocale() { return (locale); } /** * @deprecated This will be removed after Struts 1.2. */ public void setLocale(boolean locale) { this.locale = locale; } /** * Are we rendering an xhtml page? */ protected boolean xhtml = false; /** * Are we rendering a lang attribute? * @since Struts 1.2 */ protected boolean lang = false; public boolean getXhtml() { return this.xhtml; } public void setXhtml(boolean xhtml) { this.xhtml = xhtml; } /** * Returns true if the tag should render a lang attribute. * @since Struts 1.2 */ public boolean getLang() { return this.lang; } /** * Sets whether the tag should render a lang attribute. * @since Struts 1.2 */ public void setLang(boolean lang) { this.lang = lang; } /** * Process the start of this tag. * * @exception JspException if a JSP exception has occurred */ public int doStartTag() throws JspException { TagUtils.getInstance().write(this.pageContext, this.renderHtmlStartElement()); return EVAL_BODY_INCLUDE; } /** * Renders an &lt;html&gt; element with appropriate language attributes. 
* @since Struts 1.2 */ protected String renderHtmlStartElement() { StringBuffer sb = new StringBuffer("<html"); String language = null; String country = ""; if (this.locale) { // provided for 1.1 backward compatibility, remove after 1.2 language = this.getCurrentLocale().getLanguage(); } else { Locale currentLocale = TagUtils.getInstance().getUserLocale(pageContext, Globals.LOCALE_KEY); language = currentLocale.getLanguage(); country = currentLocale.getCountry(); } boolean validLanguage = ((language != null) && (language.length() > 0)); boolean validCountry = country.length() > 0; if (this.xhtml) { this.pageContext.setAttribute( Globals.XHTML_KEY, "true", PageContext.PAGE_SCOPE); sb.append(" xmlns=\"http://www.w3.org/1999/xhtml\""); } if ((this.lang || this.locale || this.xhtml) && validLanguage) { sb.append(" lang=\""); sb.append(language); if (validCountry) { sb.append("-"); sb.append(country); } sb.append("\""); } if (this.xhtml && validLanguage) { sb.append(" xml:lang=\""); sb.append(language); if (validCountry) { sb.append("-"); sb.append(country); } sb.append("\""); } sb.append(">"); return sb.toString(); } /** * Process the end of this tag. * * @exception JspException if a JSP exception has occurred */ public int doEndTag() throws JspException { TagUtils.getInstance().write(pageContext, "</html>"); // Evaluate the remainder of this page return (EVAL_PAGE); } /** * Release any acquired resources. */ public void release() { this.locale = false; this.xhtml = false; this.lang=false; } // ------------------------------------------------------ Protected Methods /** * Return the current Locale for this request. If there is no locale in the session and * the locale attribute is set to "true", this method will create a Locale based on the * client's Accept-Language header or the server's default locale and store it in the * session. This will always return a Locale and never null. * @since Struts 1.1 * @deprecated This will be removed after Struts 1.2. */ protected Locale getCurrentLocale() { Locale userLocale = TagUtils.getInstance().getUserLocale(pageContext, Globals.LOCALE_KEY); // Store a new current Locale, if requested if (this.locale) { HttpSession session = ((HttpServletRequest) this.pageContext.getRequest()).getSession(); session.setAttribute(Globals.LOCALE_KEY, userLocale); } return userLocale; } }
codelibs/cl-struts
src/share/org/apache/struts/taglib/html/HtmlTag.java
Java
apache-2.0
6,309
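A sketch of the start tags the HtmlTag entry above builds in renderHtmlStartElement(), assuming the user's session Locale is en_US. With lang="true" the tag gets a lang attribute; with xhtml="true" it additionally gets the XHTML namespace and xml:lang. This reproduces only the lang-appending step, not the full tag logic.

// Illustrative only: the lang attribute assembly, assuming Locale en_US.
// lang="true"  ->  <html lang="en-US">
// xhtml="true" ->  <html xmlns="http://www.w3.org/1999/xhtml" lang="en-US" xml:lang="en-US">
public class HtmlTagOutputSketch {
    public static void main(String[] args) {
        String language = "en", country = "US"; // from the user's Locale
        StringBuffer sb = new StringBuffer("<html");
        sb.append(" lang=\"").append(language).append("-").append(country).append("\"");
        sb.append(">");
        System.out.println(sb); // <html lang="en-US">
    }
}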
package liquibase.snapshot.jvm; import java.util.stream.Collectors; import liquibase.Scope; import liquibase.database.Database; import liquibase.database.core.*; import liquibase.exception.DatabaseException; import liquibase.exception.UnexpectedLiquibaseException; import liquibase.executor.ExecutorService; import liquibase.snapshot.CachedRow; import liquibase.snapshot.DatabaseSnapshot; import liquibase.snapshot.JdbcDatabaseSnapshot; import liquibase.statement.core.RawSqlStatement; import liquibase.structure.DatabaseObject; import liquibase.structure.core.Catalog; import liquibase.structure.core.Column; import liquibase.structure.core.Index; import liquibase.structure.core.Relation; import liquibase.structure.core.Schema; import liquibase.structure.core.Table; import liquibase.structure.core.UniqueConstraint; import liquibase.util.StringUtil; import java.sql.SQLException; import java.util.ArrayList; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; public class UniqueConstraintSnapshotGenerator extends JdbcSnapshotGenerator { public UniqueConstraintSnapshotGenerator() { super(UniqueConstraint.class, new Class[]{Table.class}); } @Override public int getPriority(Class<? extends DatabaseObject> objectType, Database database) { if (database instanceof SQLiteDatabase) { return PRIORITY_NONE; } return super.getPriority(objectType, database); } @Override protected DatabaseObject snapshotObject(DatabaseObject example, DatabaseSnapshot snapshot) throws DatabaseException { Database database = snapshot.getDatabase(); UniqueConstraint exampleConstraint = (UniqueConstraint) example; Relation table = exampleConstraint.getRelation(); List<Map<String, ?>> metadata = listColumns(exampleConstraint, database, snapshot); if (metadata.isEmpty()) { return null; } UniqueConstraint constraint = new UniqueConstraint(); constraint.setRelation(table); constraint.setName(example.getName()); constraint.setBackingIndex(exampleConstraint.getBackingIndex()); constraint.setInitiallyDeferred(((UniqueConstraint) example).isInitiallyDeferred()); constraint.setDeferrable(((UniqueConstraint) example).isDeferrable()); constraint.setClustered(((UniqueConstraint) example).isClustered()); for (Map<String, ?> col : metadata) { String ascOrDesc = (String) col.get("ASC_OR_DESC"); Boolean descending = "D".equals(ascOrDesc) ? Boolean.TRUE : ("A".equals(ascOrDesc) ? Boolean.FALSE : null); if (database instanceof H2Database) { for (String columnName : StringUtil.splitAndTrim((String) col.get("COLUMN_NAME"), ",")) { constraint.getColumns().add(new Column(columnName).setDescending(descending).setRelation(table)); } } else { constraint.getColumns().add(new Column((String) col.get("COLUMN_NAME")).setDescending(descending).setRelation(table)); } setValidateOptionIfAvailable(database, constraint, col); } return constraint; } /** * Method to map 'validate' option for UC. 
This thing works only for ORACLE * * @param database - DB where UC will be created * @param uniqueConstraint - UC object to persist validate option * @param columnsMetadata - it's a cache-map to get metadata about UC */ private void setValidateOptionIfAvailable(Database database, UniqueConstraint uniqueConstraint, Map<String, ?> columnsMetadata) { if (!(database instanceof OracleDatabase)) { return; } final Object constraintValidate = columnsMetadata.get("CONSTRAINT_VALIDATE"); final String VALIDATE = "VALIDATED"; if (constraintValidate != null && !constraintValidate.toString().trim().isEmpty()) { uniqueConstraint.setShouldValidate(VALIDATE.equals(cleanNameFromDatabase(constraintValidate.toString().trim(), database))); } } @Override protected void addTo(DatabaseObject foundObject, DatabaseSnapshot snapshot) throws DatabaseException { if (!snapshot.getSnapshotControl().shouldInclude(UniqueConstraint.class)) { return; } if (foundObject instanceof Table) { Table table = (Table) foundObject; Database database = snapshot.getDatabase(); Schema schema; schema = table.getSchema(); List<CachedRow> metadata; try { metadata = listConstraints(table, snapshot, schema); } catch (SQLException e) { throw new DatabaseException(e); } Set<String> seenConstraints = new HashSet<>(); for (CachedRow constraint : metadata) { UniqueConstraint uq = new UniqueConstraint() .setName(cleanNameFromDatabase((String) constraint.get("CONSTRAINT_NAME"), database)).setRelation(table); if (constraint.containsColumn("INDEX_NAME")) { uq.setBackingIndex(new Index((String) constraint.get("INDEX_NAME"), (String) constraint.get("INDEX_CATALOG"), (String) constraint.get("INDEX_SCHEMA"), table.getName())); } if ("CLUSTERED".equals(constraint.get("TYPE_DESC"))) { uq.setClustered(true); } if (seenConstraints.add(uq.getName())) { table.getUniqueConstraints().add(uq); } } } } protected List<CachedRow> listConstraints(Table table, DatabaseSnapshot snapshot, Schema schema) throws DatabaseException, SQLException { return ((JdbcDatabaseSnapshot) snapshot).getMetaDataFromCache().getUniqueConstraints(schema.getCatalogName(), schema.getName(), table.getName()); } protected List<Map<String, ?>> listColumns(UniqueConstraint example, Database database, DatabaseSnapshot snapshot) throws DatabaseException { Relation table = example.getRelation(); Schema schema = example.getSchema(); String name = example.getName(); boolean bulkQuery; String sql; String cacheKey = "uniqueConstraints-" + example.getClass().getSimpleName() + "-" + example.getSchema().toCatalogAndSchema().customize(database).toString(); String queryCountKey = "uniqueConstraints-" + example.getClass().getSimpleName() + "-queryCount"; Map<String, List<Map<String, ?>>> columnCache = (Map<String, List<Map<String, ?>>>) snapshot.getScratchData(cacheKey); Integer columnQueryCount = (Integer) snapshot.getScratchData(queryCountKey); if (columnQueryCount == null) { columnQueryCount = 0; } if (columnCache == null) { bulkQuery = false; if (columnQueryCount > 3) { bulkQuery = supportsBulkQuery(database); } snapshot.setScratchData(queryCountKey, columnQueryCount + 1); if ((database instanceof MySQLDatabase) || (database instanceof HsqlDatabase)) { sql = "select const.CONSTRAINT_NAME, const.TABLE_NAME, COLUMN_NAME, const.constraint_schema as CONSTRAINT_CONTAINER " + "from " + database.getSystemSchema() + ".table_constraints const " + "join " + database.getSystemSchema() + ".key_column_usage col " + "on const.constraint_schema=col.constraint_schema " + "and const.table_name=col.table_name " + "and 
const.constraint_name=col.constraint_name " + "where const.constraint_schema='" + database.correctObjectName(schema.getCatalogName(), Catalog.class) + "' "; if (!bulkQuery) { sql += "and const.table_name='" + database.correctObjectName(example.getRelation().getName(), Table.class) + "' " + "and const.constraint_name='" + database.correctObjectName(name, UniqueConstraint.class) + "'"; } sql += "order by ordinal_position"; } else if (database instanceof PostgresDatabase) { List<String> conditions = new ArrayList<>(); sql = "select const.CONSTRAINT_NAME, COLUMN_NAME, const.constraint_schema as CONSTRAINT_CONTAINER " + "from " + database.getSystemSchema() + ".table_constraints const " + "join " + database.getSystemSchema() + ".key_column_usage col " + "on const.constraint_schema=col.constraint_schema " + "and const.table_name=col.table_name " + "and const.constraint_name=col.constraint_name "; if (schema.getCatalogName() != null) { conditions.add("const.constraint_catalog='" + database.correctObjectName(schema.getCatalogName(), Catalog.class) + "'"); } if (database instanceof CockroachDatabase) { conditions.add("(select count(*) from (select indexdef from pg_indexes where schemaname='" + database.correctObjectName(schema.getSchema().getName(), Schema.class) + "' AND indexname='" + database.correctObjectName(name, UniqueConstraint.class) + "' AND (position('DESC,' in indexdef) > 0 OR position('DESC)' in indexdef) > 0))) = 0"); conditions.add("const.constraint_name != 'primary'"); } if (schema.getSchema().getName() != null) { conditions.add("const.constraint_schema='" + database.correctObjectName(schema.getSchema().getName(), Schema.class) + "'"); } if (!bulkQuery) { conditions.add("const.table_name='" + database.correctObjectName(example.getRelation().getName(), Table.class) + "'"); if (name != null) { conditions.add("const.constraint_name='" + database.correctObjectName(name, UniqueConstraint.class) + "' "); } } if (!conditions.isEmpty()) { sql += " WHERE "; sql += conditions.stream().collect(Collectors.joining(" AND ")); } sql += " order by ordinal_position"; } else if (database.getClass().getName().contains("MaxDB")) { //have to check classname as this is currently an extension sql = "select CONSTRAINTNAME as constraint_name, COLUMNNAME as column_name from CONSTRAINTCOLUMNS WHERE CONSTRAINTTYPE = 'UNIQUE_CONST' AND tablename = '" + database.correctObjectName(example.getRelation().getName(), Table.class) + "' AND constraintname = '" + database.correctObjectName(name, UniqueConstraint.class) + "'"; } else if (database instanceof MSSQLDatabase) { sql = "SELECT " + "[kc].[name] AS [CONSTRAINT_NAME], " + "s.name AS constraint_container, " + "[c].[name] AS [COLUMN_NAME], " + "CASE [ic].[is_descending_key] WHEN 0 THEN N'A' WHEN 1 THEN N'D' END AS [ASC_OR_DESC] " + "FROM [sys].[schemas] AS [s] " + "INNER JOIN [sys].[tables] AS [t] " + "ON [t].[schema_id] = [s].[schema_id] " + "INNER JOIN [sys].[key_constraints] AS [kc] " + "ON [kc].[parent_object_id] = [t].[object_id] " + "INNER JOIN [sys].[indexes] AS [i] " + "ON [i].[object_id] = [kc].[parent_object_id] " + "AND [i].[index_id] = [kc].[unique_index_id] " + "INNER JOIN [sys].[index_columns] AS [ic] " + "ON [ic].[object_id] = [i].[object_id] " + "AND [ic].[index_id] = [i].[index_id] " + "INNER JOIN [sys].[columns] AS [c] " + "ON [c].[object_id] = [ic].[object_id] " + "AND [c].[column_id] = [ic].[column_id] " + "WHERE [s].[name] = N'" + database.escapeStringForDatabase(database.correctObjectName(schema.getName(), Schema.class)) + "' "; if
(!bulkQuery) { sql += "AND [t].[name] = N'" + database.escapeStringForDatabase(database.correctObjectName(example.getRelation().getName(), Table.class)) + "' " + "AND [kc].[name] = N'" + database.escapeStringForDatabase(database.correctObjectName(name, UniqueConstraint.class)) + "' "; } sql += "ORDER BY " + "[ic].[key_ordinal]"; } else if (database instanceof OracleDatabase) { sql = "select ucc.owner as constraint_container, ucc.constraint_name as constraint_name, ucc.column_name, f.validated as constraint_validate, ucc.table_name " + "from all_cons_columns ucc " + "INNER JOIN all_constraints f " + "ON ucc.owner = f.owner " + "AND ucc.constraint_name = f.constraint_name " + "where " + (bulkQuery ? "" : "ucc.constraint_name='" + database.correctObjectName(name, UniqueConstraint.class) + "' and ") + "ucc.owner='" + database.correctObjectName(schema.getCatalogName(), Catalog.class) + "' " + "and ucc.table_name not like 'BIN$%' " + "order by ucc.position"; } else if (database instanceof DB2Database) { if (database.getDatabaseProductName().startsWith("DB2 UDB for AS/400")) { sql = "select T1.constraint_name as CONSTRAINT_NAME, T2.COLUMN_NAME as COLUMN_NAME, T1.CONSTRAINT_SCHEMA as CONSTRAINT_CONTAINER from QSYS2.TABLE_CONSTRAINTS T1, QSYS2.SYSCSTCOL T2\n" + "where T1.CONSTRAINT_TYPE='UNIQUE' and T1.CONSTRAINT_NAME=T2.CONSTRAINT_NAME\n" + "and T1.CONSTRAINT_SCHEMA='" + database.correctObjectName(schema.getName(), Schema.class) + "'\n" + "and T2.CONSTRAINT_SCHEMA='" + database.correctObjectName(schema.getName(), Schema.class) + "'\n" //+ "T2.TABLE_NAME='"+ database.correctObjectName(example.getTable().getName(), Table.class) + "'\n" //+ "\n" + "order by T2.COLUMN_NAME\n"; } else { sql = "select k.constname as constraint_name, k.colname as column_name from syscat.keycoluse k, syscat.tabconst t " + "where k.constname = t.constname " + "and k.tabschema = t.tabschema " + "and t.type='U' " + (bulkQuery? 
"" : "and k.constname='" + database.correctObjectName(name, UniqueConstraint.class) + "' ") + "and t.tabschema = '" + database.correctObjectName(schema.getName(), Schema.class) + "' " + "order by colseq"; } } else if (database instanceof Db2zDatabase) { sql = "select k.colname as column_name from SYSIBM.SYSKEYCOLUSE k, SYSIBM.SYSTABCONST t " + "where k.constname = t.constname " + "and k.TBCREATOR = t.TBCREATOR " + "and t.type = 'U'" + "and k.constname='" + database.correctObjectName(name, UniqueConstraint.class) + "' " + "and t.TBCREATOR = '" + database.correctObjectName(schema.getName(), Schema.class) + "' " + "order by colseq"; } else if (database instanceof DerbyDatabase) { //does not support bulkQuery, supportsBulkQuery should return false() sql = "SELECT cg.descriptor as descriptor, t.tablename " + "FROM sys.sysconglomerates cg " + "JOIN sys.syskeys k ON cg.conglomerateid = k.conglomerateid " + "JOIN sys.sysconstraints c ON c.constraintid = k.constraintid " + "JOIN sys.systables t ON c.tableid = t.tableid " + "WHERE c.constraintname='" + database.correctObjectName(name, UniqueConstraint.class) + "'"; List<Map<String, ?>> rows = Scope.getCurrentScope().getSingleton(ExecutorService.class).getExecutor("jdbc", database).queryForList(new RawSqlStatement(sql)); List<Map<String, ?>> returnList = new ArrayList<>(); if (rows.isEmpty()) { return returnList; } else if (rows.size() > 1) { throw new UnexpectedLiquibaseException("Got multiple rows back querying unique constraints"); } else { Map rowData = rows.get(0); String descriptor = rowData.get("DESCRIPTOR").toString(); descriptor = descriptor.replaceFirst(".*\\(", "").replaceFirst("\\).*", ""); for (String columnNumber : StringUtil.splitAndTrim(descriptor, ",")) { String columnName = Scope.getCurrentScope().getSingleton(ExecutorService.class).getExecutor("jdbc", database).queryForObject(new RawSqlStatement( "select c.columnname from sys.syscolumns c " + "join sys.systables t on t.tableid=c.referenceid " + "where t.tablename='" + rowData.get("TABLENAME") + "' and c.columnnumber=" + columnNumber), String.class); Map<String, String> row = new HashMap<>(); row.put("COLUMN_NAME", columnName); returnList.add(row); } return returnList; } } else if (database instanceof FirebirdDatabase) { //does not support bulkQuery, supportsBulkQuery should return false() // Careful! FIELD_NAME and INDEX_NAME in RDB$INDEX_SEGMENTS are CHAR, not VARCHAR columns. 
sql = "SELECT TRIM(RDB$INDEX_SEGMENTS.RDB$FIELD_NAME) AS column_name " + "FROM RDB$INDEX_SEGMENTS " + "LEFT JOIN RDB$INDICES ON RDB$INDICES.RDB$INDEX_NAME = RDB$INDEX_SEGMENTS.RDB$INDEX_NAME " + "WHERE UPPER(TRIM(RDB$INDICES.RDB$INDEX_NAME))='" + database.correctObjectName(name, UniqueConstraint.class) + "' " + "ORDER BY RDB$INDEX_SEGMENTS.RDB$FIELD_POSITION"; } else if (database instanceof SybaseASADatabase) { //does not support bulkQuery, supportsBulkQuery should return false() sql = "select sysconstraint.constraint_name, syscolumn.column_name " + "from sysconstraint, syscolumn, systable " + "where sysconstraint.ref_object_id = syscolumn.object_id " + "and sysconstraint.table_object_id = systable.object_id " + "and sysconstraint.constraint_name = '" + database.correctObjectName(name, UniqueConstraint.class) + "' " + "and systable.table_name = '" + database.correctObjectName(example.getRelation().getName(), Table.class) + "'"; } else if(database instanceof Ingres9Database) { //does not support bulkQuery, supportsBulkQuery should return false() sql = "select constraint_name, column_name " + "from iikeys " + "where constraint_name = '" + database.correctObjectName(name, UniqueConstraint.class) + "' " + "and table_name = '" + database.correctObjectName(example.getTable().getName(), Table.class) + "'"; } else if (database instanceof InformixDatabase) { //does not support bulkQuery, supportsBulkQuery should return false() sql = getUniqueConstraintsSqlInformix((InformixDatabase) database, schema, name); } else if (database instanceof Db2zDatabase) { sql = "select KC.TBCREATOR as CONSTRAINT_CONTAINER, KC.CONSTNAME as CONSTRAINT_NAME, KC.COLNAME as COLUMN_NAME from SYSIBM.SYSKEYCOLUSE KC, SYSIBM.SYSTABCONST TC " + "where KC.CONSTNAME = TC.CONSTNAME " + "and KC.TBCREATOR = TC.TBCREATOR " + "and TC.TYPE='U' " + (bulkQuery ? "" : "and KC.CONSTNAME='" + database.correctObjectName(name, UniqueConstraint.class) + "' ") + "and TC.TBCREATOR = '" + database.correctObjectName(schema.getName(), Schema.class) + "' " + "order by KC.COLSEQ"; } else if (database instanceof H2Database && database.getDatabaseMajorVersion() >= 2) { String catalogName = database.correctObjectName(schema.getCatalogName(), Catalog.class); String schemaName = database.correctObjectName(schema.getName(), Schema.class); String constraintName = database.correctObjectName(name, UniqueConstraint.class); String tableName = database.correctObjectName(table.getName(), Table.class); sql = "select table_constraints.CONSTRAINT_NAME, index_columns.COLUMN_NAME, table_constraints.constraint_schema as CONSTRAINT_CONTAINER " + "from information_schema.table_constraints " + "join information_schema.index_columns on index_columns.index_name=table_constraints.index_name " + "where constraint_type='UNIQUE' "; if (catalogName != null) { sql += "and constraint_catalog='" + catalogName + "' "; } if (schemaName != null) { sql += "and constraint_schema='" + schemaName + "' "; } if (!bulkQuery) { if (tableName != null) { sql += "and table_constraints.table_name='" + tableName + "' "; } if (constraintName != null) { sql += "and constraint_name='" + constraintName + "'"; } } } else { // If we do not have a specific handler for the RDBMS, we assume that the database has an // INFORMATION_SCHEMA we can use. This is a last-resort measure and might fail. 
String catalogName = database.correctObjectName(schema.getCatalogName(), Catalog.class); String schemaName = database.correctObjectName(schema.getName(), Schema.class); String constraintName = database.correctObjectName(name, UniqueConstraint.class); String tableName = database.correctObjectName(table.getName(), Table.class); sql = "select CONSTRAINT_NAME, COLUMN_LIST as COLUMN_NAME, constraint_schema as CONSTRAINT_CONTAINER " + "from " + database.getSystemSchema() + ".constraints " + "where constraint_type='UNIQUE' "; if (catalogName != null) { sql += "and constraint_catalog='" + catalogName + "' "; } if (schemaName != null) { sql += "and constraint_schema='" + schemaName + "' "; } if (!bulkQuery) { if (tableName != null) { sql += "and table_name='" + tableName + "' "; } if (constraintName != null) { sql += "and constraint_name='" + constraintName + "'"; } } } List<Map<String, ?>> rows = Scope.getCurrentScope().getSingleton(ExecutorService.class).getExecutor("jdbc", database).queryForList(new RawSqlStatement(sql)); if (bulkQuery) { columnCache = new HashMap<>(); snapshot.setScratchData(cacheKey, columnCache); for (Map<String, ?> row : rows) { String key = getCacheKey(row, database); List<Map<String, ?>> constraintRows = columnCache.get(key); if (constraintRows == null) { constraintRows = new ArrayList<>(); columnCache.put(key, constraintRows); } constraintRows.add(row); } return listColumns(example, database, snapshot); } else { return rows; } } else { String lookupKey = getCacheKey(example, database); List<Map<String, ?>> rows = columnCache.get(lookupKey); if (rows == null) { rows = new ArrayList<>(); } return rows; } } /** * Should the given database include the table name in the key? * Databases that need to include the table names are ones where unique constraint names do not have to be unique * within the schema. * * Currently only mysql is known to have non-unique constraint names. * * If this returns true, the database-specific query in {@link #listColumns(UniqueConstraint, Database, DatabaseSnapshot)} must include * a TABLE_NAME column in the results for {@link #getCacheKey(Map, Database)} to use. */ protected boolean includeTableNameInCacheKey(Database database) { return database instanceof MySQLDatabase; } /** * Return the cache key for the given UniqueConstraint. Must return the same result as {@link #getCacheKey(Map, Database)}. * Default implementation uses {@link #includeTableNameInCacheKey(Database)} to determine if the table name should be included in the key or not. */ protected String getCacheKey(UniqueConstraint example, Database database) { if (includeTableNameInCacheKey(database)) { return example.getSchema().getName() + "_" + example.getRelation() + "_" + example.getName(); } else { return example.getSchema().getName() + "_" + example.getName(); } } /** * Return the cache key for the given query row. Must return the same result as {@link #getCacheKey(UniqueConstraint, Database)} * Default implementation uses {@link #includeTableNameInCacheKey(Database)} to determine if the table name should be included in the key or not. 
*/ protected String getCacheKey(Map<String, ?> row, Database database) { if (includeTableNameInCacheKey(database)) { return row.get("CONSTRAINT_CONTAINER") + "_" + row.get("TABLE_NAME") + "_" + row.get("CONSTRAINT_NAME"); } else { return row.get("CONSTRAINT_CONTAINER") + "_" + row.get("CONSTRAINT_NAME"); } } /** * To support bulk query, the resultSet must include a CONSTRAINT_CONTAINER column for caching purposes */ protected boolean supportsBulkQuery(Database database) { return !(database instanceof DerbyDatabase) && !(database instanceof FirebirdDatabase) && !(database instanceof SybaseASADatabase) && !(database instanceof Ingres9Database) && !(database instanceof InformixDatabase); } /** * Gets an SQL query that returns the constraint names and columns for all UNIQUE constraints. * * @param database A database object of the InformixDatabase type * @param schema Name of the schema to examine (or null for all) * @param name Name of the constraint to examine (or null for all) * @return A lengthy SQL statement that fetches the constraint names and columns */ private String getUniqueConstraintsSqlInformix(InformixDatabase database, Schema schema, String name) { StringBuilder sqlBuf = new StringBuilder(); sqlBuf.append("SELECT * FROM (\n"); // Yes, I am serious about this. It appears there are neither CTE/WITH clauses nor PIVOT/UNPIVOT operators // in Informix SQL. for (int i = 1; i <= 16; i++) { if (i > 1) sqlBuf.append("UNION ALL\n"); sqlBuf.append( String.format(" SELECT\n" + " CONS.owner,\n" + " CONS.constrname AS constraint_name,\n" + " COL.colname AS column_name,\n" + " CONS.constrtype,\n" + " %d AS column_index\n" + " FROM informix.sysconstraints CONS\n" + " JOIN informix.sysindexes IDX ON CONS.idxname = IDX.idxname\n" + " JOIN informix.syscolumns COL ON COL.tabid = CONS.tabid AND COL.colno = IDX.part%d\n", i, i ) ); } // Finish the subquery and filter on the U(NIQUE) constraint type sqlBuf.append( " ) SUBQ\n" + "WHERE constrtype='U' \n"); String catalogName = database.correctObjectName(schema.getCatalogName(), Catalog.class); String constraintName = database.correctObjectName(name, UniqueConstraint.class); // If possible, filter for catalog name and/or constraint name if (catalogName != null) { sqlBuf.append("AND owner='").append(catalogName).append("'\n"); } if (constraintName != null) { sqlBuf.append("AND constraint_name='").append(constraintName).append("'"); } // For correct processing, it is important that we get all columns in order. sqlBuf.append("ORDER BY owner, constraint_name, column_index"); // Return the query return sqlBuf.toString(); } }
liquibase/liquibase
liquibase-core/src/main/java/liquibase/snapshot/jvm/UniqueConstraintSnapshotGenerator.java
Java
apache-2.0
30,661
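The bulk-query path in the liquibase entry above caches all constraint columns in snapshot scratch data, keyed per constraint. A small sketch of how those cache keys are composed, following the includeTableNameInCacheKey(...) contract: MySQL keys include the table name because its unique-constraint names need not be unique within a schema, while other databases omit it. The names below are hypothetical.

// Illustrative only: cache-key composition per getCacheKey(...) above.
public class CacheKeySketch {
    public static void main(String[] args) {
        String schema = "APP", table = "USERS", constraint = "UQ_EMAIL"; // hypothetical names
        String mysqlKey   = schema + "_" + table + "_" + constraint; // APP_USERS_UQ_EMAIL
        String defaultKey = schema + "_" + constraint;               // APP_UQ_EMAIL
        System.out.println(mysqlKey + " / " + defaultKey);
    }
}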
/* * Copyright (c) 2015 Complexible Inc. <http://complexible.com> * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.complexible.basecrm; import com.google.common.base.MoreObjects; import com.google.common.base.Objects; /** * <p></p> * * @author Michael Grove * @since 0.1 * @version 0.1 */ public class Deal extends BaseObject { private String mName; private String mStageName; private boolean mHot; private boolean mIsNew; private Contact mMainContact; private Contact mCompany; public Contact getCompany() { return mCompany; } public void setCompany(final Contact theCompany) { mCompany = theCompany; } public boolean isHot() { return mHot; } public void setHot(final boolean theHot) { mHot = theHot; } public boolean isNew() { return mIsNew; } public void setNew(final boolean theIsNew) { mIsNew = theIsNew; } public Contact getMainContact() { return mMainContact; } public void setMainContact(final Contact theMainContact) { mMainContact = theMainContact; } public String getName() { return mName; } public void setName(final String theName) { mName = theName; } public String getStageName() { return mStageName; } public void setStageName(final String theStageName) { mStageName = theStageName; } /** * {@inheritDoc} */ @Override public int hashCode() { return Objects.hashCode(mId, mName); } /** * {@inheritDoc} */ @Override public boolean equals(final Object theObj) { if (theObj == this) { return true; } else if (theObj instanceof Deal) { Deal aDeal = (Deal) theObj; return Objects.equal(mName, aDeal.mName) && Objects.equal(mId, aDeal.mId); } else { return false; } } /** * {@inheritDoc} */ @Override public String toString() { return MoreObjects.toStringHelper("Deal") .add("name", mName) .add("stage", mStageName) .add("company", mCompany) .toString(); } }
Complexible/basecrm
main/src/com/complexible/basecrm/Deal.java
Java
apache-2.0
2,438
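A minimal usage sketch for the Deal entry above, assuming the Deal and Contact classes (and the BaseObject base class that supplies the mId field used by equals/hashCode) are on the classpath; the field values are made up.

// Sketch only: exercising the Deal bean and its Guava-based toString().
public class DealSketch {
    public static void main(String[] args) {
        Deal deal = new Deal();
        deal.setName("Annual license");
        deal.setStageName("Negotiation");
        deal.setHot(true);
        // toString() reports name, stage and company via MoreObjects.toStringHelper
        System.out.println(deal); // Deal{name=Annual license, stage=Negotiation, company=null}
    }
}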
<?php /** * 2013-11-25 12:08:30 AM * @author x.li * @abstract */ class Product_Model_Series extends Application_Model_Db { /** * Table name and primary key. */ protected $_name = 'product_catalog_series'; protected $_primary = 'id'; public function getData() { $sql = $this->select() ->setIntegrityCheck(false) ->from(array('t1' => $this->_name)) ->joinLeft(array('t2' => $this->_dbprefix.'user'), "t2.id = t1.create_user", array()) ->joinLeft(array('t3' => $this->_dbprefix.'employee'), "t3.id = t2.employee_id", array('creater' => 'cname')) ->joinLeft(array('t4' => $this->_dbprefix.'user'), "t4.id = t1.update_user", array()) ->joinLeft(array('t5' => $this->_dbprefix.'employee'), "t5.id = t4.employee_id", array('updater' => 'cname')) ->order(array('t1.name')); $data = $this->fetchAll($sql)->toArray(); for($i = 0; $i < count($data); $i++){ $data[$i]['create_time'] = strtotime($data[$i]['create_time']); $data[$i]['update_time'] = strtotime($data[$i]['update_time']); $data[$i]['active'] = $data[$i]['active'] == 1 ? true : false; } return $data; } }
eoasoft/evolve
application/modules/product/models/Series.php
PHP
apache-2.0
1,314
/* * Copyright (C) 2015 Orange * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.orange.ngsi.model; /** * Created by pborscia on 05/06/2015. */ public enum UpdateAction { UPDATE("UPDATE"),APPEND("APPEND"),DELETE("DELETE"); private String label; UpdateAction(String label) { this.label=label; } public String getLabel() { return label; } public void setLabel(String label) { this.label = label; } public Boolean isDelete() { return label.equals("DELETE"); } public Boolean isUpdate() { return label.equals("UPDATE"); } public Boolean isAppend() { return label.equals("APPEND"); } }
Orange-OpenSource/fiware-ngsi-api
ngsi-client/src/main/java/com/orange/ngsi/model/UpdateAction.java
Java
apache-2.0
1,228
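A short usage sketch for the UpdateAction enum above, showing how the label-based predicates are meant to drive update logic; the surrounding class is hypothetical scaffolding.

// Sketch only: branching on an UpdateAction value.
public class UpdateActionSketch {
    public static void main(String[] args) {
        UpdateAction action = UpdateAction.APPEND;
        if (action.isAppend()) {
            // getLabel() returns the string passed to the enum constructor
            System.out.println("append entity attributes: " + action.getLabel()); // APPEND
        }
    }
}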
package com.github.florent37.camerafragment.internal.timer; import com.github.florent37.camerafragment.internal.utils.DateTimeUtils; /* * Created by florentchampigny on 13/01/2017. */ public class TimerTask extends TimerTaskBase implements Runnable { public TimerTask(TimerTaskBase.Callback callback) { super(callback); } @Override public void run() { recordingTimeSeconds++; if (recordingTimeSeconds == 60) { recordingTimeSeconds = 0; recordingTimeMinutes++; } if(callback != null) { callback.setText( String.format("%02d:%02d", recordingTimeMinutes, recordingTimeSeconds)); } if (alive) handler.postDelayed(this, DateTimeUtils.SECOND); } public void start() { alive = true; recordingTimeMinutes = 0; recordingTimeSeconds = 0; if(callback != null) { callback.setText( String.format("%02d:%02d", recordingTimeMinutes, recordingTimeSeconds)); callback.setTextVisible(true); } handler.postDelayed(this, DateTimeUtils.SECOND); } public void stop() { if(callback != null){ callback.setTextVisible(false); } alive = false; } }
mkrtchyanmnatsakan/DemoAppHelloWord
camerafragment/src/main/java/com/github/florent37/camerafragment/internal/timer/TimerTask.java
Java
apache-2.0
1,308
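The TimerTask entry above re-posts itself once per second and rolls seconds over into minutes at 60. A tiny sketch of the mm:ss label it pushes to its callback, with the tick counts chosen arbitrarily for illustration.

// Sketch only: the label after 75 one-second ticks (1 minute, 15 seconds).
public class TimerLabelSketch {
    public static void main(String[] args) {
        int recordingTimeMinutes = 1, recordingTimeSeconds = 15;
        System.out.println(String.format("%02d:%02d",
                recordingTimeMinutes, recordingTimeSeconds)); // 01:15
    }
}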
package fr.openwide.core.commons.util.registry; import java.io.Closeable; import java.io.File; import java.io.InputStream; import java.io.OutputStream; import java.net.URI; import java.util.Objects; import java.util.Set; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.google.common.collect.Sets; import de.schlichtherle.truezip.file.TFile; import de.schlichtherle.truezip.file.TVFS; import de.schlichtherle.truezip.fs.FsSyncException; import de.schlichtherle.truezip.fs.FsSyncOptions; /** * A registry responsible for keeping track of open {@link TFile TFiles} and for cleaning them up upon closing it. * <p>This should generally be used as a factory for creating TFiles (see the <code>create</code> methods), and the * calling of {@link #open()} and {@link #close()} should be done in some generic code (such as a servlet filter). * <p>Please note that this registry is using a {@link ThreadLocal}. Opening and closing the registry must therefore * be done in the same thread, and each TFile-creating thread should use its own registry. */ public final class TFileRegistry { private static final Logger LOGGER = LoggerFactory.getLogger(TFileRegistry.class); private static final ThreadLocal<TFileRegistryImpl> THREAD_LOCAL = new ThreadLocal<TFileRegistryImpl>(); private TFileRegistry() { } /** * Enables the (thread-local) TFileRegistry, so that every call to the createXX() or register() static methods * will result in the relevant TFile to be stored for further cleaning of TrueZip internal resources. * <p>The actual cleaning must be performed by calling the {@link #close()} static method on TFileRegistry. */ public static void open() { TFileRegistryImpl registry = THREAD_LOCAL.get(); if (registry == null) { THREAD_LOCAL.set(new TFileRegistryImpl()); } else { throw new IllegalStateException("TFileRegistry.open() should not be called twice without calling close() in-between."); } } /** * {@link TVFS#sync(de.schlichtherle.truezip.fs.FsMountPoint, de.schlichtherle.truezip.util.BitField) Synchronize} * the TrueZip virtual filesystem for every registered file, and clears the registry. * <p><strong>WARNING :</strong> If some {@link InputStream InputStreams} or {@link OutputStream OutputStreams} * managed by the current thread have not been closed yet, they will be ignored. */ public static void close() { try { TFileRegistryImpl registry = THREAD_LOCAL.get(); if (registry != null) { registry.close(); } else { throw new IllegalStateException("TFileRegistry.close() should not be called if TFileRegistry.open() has not been called before."); } } finally { THREAD_LOCAL.remove(); } } public static TFile create(String path) { TFile tFile = new TFile(path); register(tFile); return tFile; } public static TFile create(File file) { TFile tFile = new TFile(file); register(tFile); return tFile; } public static TFile create(String parent, String member) { TFile tFile = new TFile(parent, member); register(tFile); return tFile; } public static TFile create(File parent, String member) { TFile tFile = new TFile(parent, member); register(tFile); return tFile; } public static TFile create(URI uri) { TFile tFile = new TFile(uri); register(tFile); return tFile; } public static void register(File file) { Objects.requireNonNull(file, "file must not be null"); TFileRegistryImpl registry = THREAD_LOCAL.get(); if (registry != null) { registry.register(file); } else { LOGGER.info("Trying to register file '{}', but the TFileRegistry has not been open (see TFileRegistry.open()). 
Ignoring registration.", file); THREAD_LOCAL.remove(); } } public static void register(Iterable<? extends File> files) { Objects.requireNonNull(files, "files must not be null"); TFileRegistryImpl registry = THREAD_LOCAL.get(); if (registry != null) { registry.register(files); } else { LOGGER.info("Trying to register files '{}', but the TFileRegistry has not been open (see TFileRegistry.open()). Ignoring registration.", files); THREAD_LOCAL.remove(); } } private static final class TFileRegistryImpl implements Closeable { private final Set<TFile> registeredFiles = Sets.newHashSet(); private TFileRegistryImpl() { } public void register(File file) { if (file instanceof TFile) { TFile topLevelArchive = ((TFile)file).getTopLevelArchive(); if (topLevelArchive != null) { registeredFiles.add(topLevelArchive); } } } public void register(Iterable<? extends File> files) { for (File file : files) { register(file); } } @Override public void close() { for (TFile tFile : registeredFiles) { try { TVFS.sync(tFile, FsSyncOptions.SYNC); } catch (RuntimeException | FsSyncException e) { LOGGER.error("Error while trying to sync the truezip filesystem on '" + tFile + "'", e); } } } } }
openwide-java/owsi-core-parent
owsi-core/owsi-core-components/owsi-core-component-commons/src/main/java/fr/openwide/core/commons/util/registry/TFileRegistry.java
Java
apache-2.0
4,967
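A usage sketch of the open()/create()/close() lifecycle that the TFileRegistry javadoc above describes; the archive path is a made-up example. All three calls must happen on the same thread, since the registry is backed by a ThreadLocal.

// Sketch only: typical per-request bracket around TrueZIP file access.
public class TFileRegistrySketch {
    public static void main(String[] args) {
        TFileRegistry.open();
        try {
            // Creating through the registry records the top-level archive for cleanup.
            de.schlichtherle.truezip.file.TFile entry =
                    TFileRegistry.create("reports.zip/2014/summary.txt"); // hypothetical path
            System.out.println(entry);
        } finally {
            TFileRegistry.close(); // syncs TrueZIP's virtual filesystem and clears the registry
        }
    }
}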
/* Copyright 2014 base2Services Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package com.base2.kagura.core.report.configmodel; import com.base2.kagura.core.report.connectors.JDBCDataReportConnector; import com.base2.kagura.core.report.connectors.ReportConnector; /** * JDBC backend for @see #FreeMarkerSQLReport * User: aubels * Date: 24/07/13 * Time: 4:46 PM * */ public class JDBCReportConfig extends FreeMarkerSQLReportConfig { String jdbc; String username; String password; private String classLoaderPath; /** * {@inheritDoc} * @return */ @Override public ReportConnector getReportConnector() { return new JDBCDataReportConnector(this); } /** * JDBC connection string * @return */ public String getJdbc() { return jdbc; } /** * @see #getJdbc() */ public void setJdbc(String jdbc) { this.jdbc = jdbc; } /** * JDBC Password if necessary * @return */ public String getPassword() { return password; } /** * @see #getPassword() */ public void setPassword(String password) { this.password = password; } /** * JDBC username * @return */ public String getUsername() { return username; } /** * @see #getUsername() * @param username */ public void setUsername(String username) { this.username = username; } /** * Class path loader for the Database driver, for instance: com.mysql.jdbc.Driver * @return */ public String getClassLoaderPath() { return classLoaderPath; } /** * @see #getClassLoaderPath() */ public void setClassLoaderPath(String classLoaderPath) { this.classLoaderPath = classLoaderPath; } }
base2Services/kagura
shared/reporting-core/src/main/java/com/base2/kagura/core/report/configmodel/JDBCReportConfig.java
Java
apache-2.0
2,350
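A configuration sketch for the JDBC-backed report config above. The connection string, credentials and driver class are placeholder values, and this assumes constructing the connector is side-effect free until the report actually runs.

// Sketch only: wiring up a JDBCReportConfig and obtaining its connector.
public class JdbcReportConfigSketch {
    public static void main(String[] args) {
        JDBCReportConfig config = new JDBCReportConfig();
        config.setJdbc("jdbc:mysql://localhost:3306/reports"); // placeholder URL
        config.setUsername("report_user");                     // placeholder credentials
        config.setPassword("secret");
        config.setClassLoaderPath("com.mysql.jdbc.Driver");    // driver class, per the javadoc example
        System.out.println(config.getReportConnector());       // a JDBCDataReportConnector
    }
}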
<?php final class ManiphestTaskResultListView extends ManiphestView { private $tasks; private $savedQuery; private $canEditPriority; private $canBatchEdit; private $showBatchControls; public function setSavedQuery(PhabricatorSavedQuery $query) { $this->savedQuery = $query; return $this; } public function setTasks(array $tasks) { $this->tasks = $tasks; return $this; } public function setCanEditPriority($can_edit_priority) { $this->canEditPriority = $can_edit_priority; return $this; } public function setCanBatchEdit($can_batch_edit) { $this->canBatchEdit = $can_batch_edit; return $this; } public function setShowBatchControls($show_batch_controls) { $this->showBatchControls = $show_batch_controls; return $this; } public function render() { $viewer = $this->getUser(); $tasks = $this->tasks; $query = $this->savedQuery; // If we didn't match anything, just pick up the default empty state. if (!$tasks) { return id(new PHUIObjectItemListView()) ->setUser($viewer); } $group_parameter = nonempty($query->getParameter('group'), 'priority'); $order_parameter = nonempty($query->getParameter('order'), 'priority'); $handles = ManiphestTaskListView::loadTaskHandles($viewer, $tasks); $groups = $this->groupTasks( $tasks, $group_parameter, $handles); $can_edit_priority = $this->canEditPriority; $can_drag = ($order_parameter == 'priority') && ($can_edit_priority) && ($group_parameter == 'none' || $group_parameter == 'priority'); if (!$viewer->isLoggedIn()) { // TODO: (T603) Eventually, we conceivably need to make each task // draggable individually, since the user may be able to edit some but // not others. $can_drag = false; } $result = array(); $lists = array(); foreach ($groups as $group => $list) { $task_list = new ManiphestTaskListView(); $task_list->setShowBatchControls($this->showBatchControls); if ($can_drag) { $task_list->setShowSubpriorityControls(true); } $task_list->setUser($viewer); $task_list->setTasks($list); $task_list->setHandles($handles); $header = javelin_tag( 'h1', array( 'class' => 'maniphest-task-group-header', 'sigil' => 'task-group', 'meta' => array( 'priority' => head($list)->getPriority(), ), ), pht('%s (%s)', $group, new PhutilNumber(count($list)))); $lists[] = phutil_tag( 'div', array( 'class' => 'maniphest-task-group' ), array( $header, $task_list, )); } if ($can_drag) { Javelin::initBehavior( 'maniphest-subpriority-editor', array( 'uri' => '/maniphest/subpriority/', )); } return phutil_tag( 'div', array( 'class' => 'maniphest-list-container', ), array( $lists, $this->showBatchControls ? 
$this->renderBatchEditor($query) : null, )); } private function groupTasks(array $tasks, $group, array $handles) { assert_instances_of($tasks, 'ManiphestTask'); assert_instances_of($handles, 'PhabricatorObjectHandle'); $groups = $this->getTaskGrouping($tasks, $group); $results = array(); foreach ($groups as $label_key => $tasks) { $label = $this->getTaskLabelName($group, $label_key, $handles); $results[$label][] = $tasks; } foreach ($results as $label => $task_groups) { $results[$label] = array_mergev($task_groups); } return $results; } private function getTaskGrouping(array $tasks, $group) { switch ($group) { case 'priority': return mgroup($tasks, 'getPriority'); case 'status': return mgroup($tasks, 'getStatus'); case 'assigned': return mgroup($tasks, 'getOwnerPHID'); case 'project': return mgroup($tasks, 'getGroupByProjectPHID'); default: return array(pht('Tasks') => $tasks); } } private function getTaskLabelName($group, $label_key, array $handles) { switch ($group) { case 'priority': return ManiphestTaskPriority::getTaskPriorityName($label_key); case 'status': return ManiphestTaskStatus::getTaskStatusFullName($label_key); case 'assigned': if ($label_key) { return $handles[$label_key]->getFullName(); } else { return pht('(Not Assigned)'); } case 'project': if ($label_key) { return $handles[$label_key]->getFullName(); } else { // This may mean "No Projects", or it may mean the query has project // constraints but the task is only in constrained projects (in this // case, we don't show the group because it would always have all // of the tasks). Since distinguishing between these two cases is // messy and the UI is reasonably clear, label generically. return pht('(Ungrouped)'); } default: return pht('Tasks'); } } private function renderBatchEditor(PhabricatorSavedQuery $saved_query) { $user = $this->getUser(); if (!$this->canBatchEdit) { return null; } if (!$user->isLoggedIn()) { // Don't show the batch editor or excel export for logged-out users. // Technically we //could// let them export, but ehh. return null; } Javelin::initBehavior( 'maniphest-batch-selector', array( 'selectAll' => 'batch-select-all', 'selectNone' => 'batch-select-none', 'submit' => 'batch-select-submit', 'status' => 'batch-select-status-cell', 'idContainer' => 'batch-select-id-container', 'formID' => 'batch-select-form', )); $select_all = javelin_tag( 'a', array( 'href' => '#', 'mustcapture' => true, 'class' => 'grey button', 'id' => 'batch-select-all', ), pht('Select All')); $select_none = javelin_tag( 'a', array( 'href' => '#', 'mustcapture' => true, 'class' => 'grey button', 'id' => 'batch-select-none', ), pht('Clear Selection')); $submit = phutil_tag( 'button', array( 'id' => 'batch-select-submit', 'disabled' => 'disabled', 'class' => 'disabled', ), pht("Batch Edit Selected \xC2\xBB")); $export = javelin_tag( 'a', array( 'href' => '/maniphest/export/'.$saved_query->getQueryKey().'/', 'class' => 'grey button', ), pht('Export to Excel')); $hidden = phutil_tag( 'div', array( 'id' => 'batch-select-id-container', ), ''); $editor = hsprintf( '<div class="maniphest-batch-editor">'. '<div class="batch-editor-header">%s</div>'. '<table class="maniphest-batch-editor-layout">'. '<tr>'. '<td>%s%s</td>'. '<td>%s</td>'. '<td id="batch-select-status-cell">%s</td>'. '<td class="batch-select-submit-cell">%s%s</td>'. '</tr>'. '</table>'. 
'</div>', pht('Batch Task Editor'), $select_all, $select_none, $export, '', $submit, $hidden); $editor = phabricator_form( $user, array( 'method' => 'POST', 'action' => '/maniphest/batch/', 'id' => 'batch-select-form', ), $editor); return $editor; } }
akkakks/phabricator
src/applications/maniphest/view/ManiphestTaskResultListView.php
PHP
apache-2.0
7,755
#region license // Copyright 2014 JetBrains s.r.o. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #endregion using JetBrains.ReSharper.Plugins.AngularJS.Psi.Html; using JetBrains.ReSharper.Psi; using JetBrains.ReSharper.Psi.JavaScript.Tree; using JetBrains.ReSharper.Psi.Resolve; using JetBrains.ReSharper.Psi.Tree; using JetBrains.ReSharper.Psi.Util; using JetBrains.Util; namespace JetBrains.ReSharper.Plugins.AngularJS.Psi.AngularJs.References { [ReferenceProviderFactory] public class AngularJsIncludeFileReferenceProvider : AngularJsReferenceFactoryBase { private readonly IAngularJsHtmlDeclaredElementTypes elementTypes; public AngularJsIncludeFileReferenceProvider(IAngularJsHtmlDeclaredElementTypes elementTypes) { this.elementTypes = elementTypes; } protected override IReference[] GetReferences(ITreeNode element, IReference[] oldReferences) { if (!HasReference(element, null)) return EmptyArray<IReference>.Instance; var stringLiteral = (IJavaScriptLiteralExpression)element; var references = PathReferenceUtil.CreatePathReferences(stringLiteral, stringLiteral, null, GetFolderPathReference, GetFileReference, node => node.GetStringValue(), node => node.GetUnquotedTreeTextRange('"', '\'').StartOffset.Offset); return references; } private static IPathReference GetFileReference(IJavaScriptLiteralExpression literal, IQualifier qualifier, IJavaScriptLiteralExpression token, TreeTextRange range) { return new AngularJsFileLateBoundReference<IJavaScriptLiteralExpression, IJavaScriptLiteralExpression>(literal, qualifier, token, range); } private static IPathReference GetFolderPathReference(IJavaScriptLiteralExpression literal, IQualifier qualifier, IJavaScriptLiteralExpression token, TreeTextRange range) { return new AngularJsFolderLateBoundReference<IJavaScriptLiteralExpression, IJavaScriptLiteralExpression>(literal, qualifier, token, range); } protected override bool HasReference(ITreeNode element, IReferenceNameContainer names) { var stringLiteral = element as IJavaScriptLiteralExpression; if (stringLiteral == null) return false; var file = element.GetContainingFile(); if (file == null) return false; // TODO: We can't use this, due to losing data when we reparse for code completion // When we start code completion, the tree node is reparsed with new text inserted, // so that references have something to attach to. Reparsing works with IChameleon // blocks that allow for resync-ing in-place. Our AngularJs nodes don't have any // chameleon blocks (for JS, it's the Block class - anything with braces) so we end // up re-parsing the file. This creates a new IFile, (in a sandbox that allows access // to the original file's reference provider) but it doesn't copy the user data. We // could theoretically look for a containing sandbox, get the context node and try // and get the user data there, but that just makes it feel like this is the wrong // solution. I think maybe this should be a reference provider for HTML, not AngularJs. 
// It would have the context of the attribute name, but should really work with the // injected AngularJs language, if only to see that it's a string literal //var originalAttributeType = file.UserData.GetData(AngularJsFileData.OriginalAttributeType); //if (originalAttributeType != elementTypes.AngularJsUrlType.Name) // return false; return true; } } }
JetBrains/resharper-angularjs
src/resharper-angularjs/Psi/AngularJs/References/AngularJsIncludeFileReferenceProvider.cs
C#
apache-2.0
4,390
package com.github.windchopper.common.fx.behavior;

import javafx.geometry.*;
import javafx.scene.Cursor;
import javafx.scene.input.MouseEvent;
import javafx.scene.layout.Region;
import javafx.stage.Stage;
import javafx.stage.Window;

import java.util.stream.Stream;

import static java.util.Arrays.stream;

public class WindowMoveResizeBehavior implements Behavior<Region> {

    private static class AngleRange {

        private final double minAngle;
        private final double maxAngle;

        public AngleRange(double minAngle, double maxAngle) {
            this.minAngle = minAngle;
            this.maxAngle = maxAngle;
        }

        public boolean matches(double angle) {
            return angle >= minAngle && angle <= maxAngle;
        }

    }

    private abstract class DragMode {

        protected double savedSceneX;
        protected double savedSceneY;
        protected double savedScreenX;
        protected double savedScreenY;
        protected double savedWidth;
        protected double savedHeight;

        public abstract void apply(MouseEvent event);

        public void store(MouseEvent event) {
            savedSceneX = event.getSceneX();
            savedSceneY = event.getSceneY();
            savedScreenX = event.getScreenX();
            savedScreenY = event.getScreenY();

            Window window = region.getScene().getWindow();
            savedWidth = window.getWidth();
            savedHeight = window.getHeight();
        }

    }

    private class MoveMode extends DragMode {

        private final Cursor normalCursor;
        private final Cursor dragCursor;

        public MoveMode(Cursor normalCursor, Cursor dragCursor) {
            this.normalCursor = normalCursor;
            this.dragCursor = dragCursor;
        }

        @Override public void apply(MouseEvent event) {
            if (event.getEventType() != MouseEvent.MOUSE_DRAGGED) {
                region.setCursor(normalCursor);
            } else {
                region.setCursor(dragCursor);

                Window window = region.getScene().getWindow();
                window.setX(event.getScreenX() - savedSceneX);
                window.setY(event.getScreenY() - savedSceneY);
            }
        }

    }

    private abstract class ResizeMode extends DragMode {

        private final Cursor cursor;
        private final AngleRange[] angleRanges;

        public abstract Bounds calculateBounds(MouseEvent dragEvent, Window window);

        public ResizeMode(Cursor cursor, AngleRange... angleRanges) {
            this.cursor = cursor;
            this.angleRanges = angleRanges;
        }

        public boolean matches(double angle) {
            return stream(angleRanges).anyMatch(range -> range.matches(angle));
        }

        @Override public void apply(MouseEvent event) {
            region.setCursor(cursor);

            if (event.getEventType() == MouseEvent.MOUSE_DRAGGED) {
                var window = region.getScene().getWindow();
                var bounds = calculateBounds(event, window);

                var width = bounds.getWidth();
                var height = bounds.getHeight();

                if (window instanceof Stage) {
                    var stage = (Stage) window;

                    if (stage.isResizable()) {
                        // clamp the requested size to the stage's min/max bounds
                        width = Math.min(Math.max(width, stage.getMinWidth()), stage.getMaxWidth());
                        height = Math.min(Math.max(height, stage.getMinHeight()), stage.getMaxHeight());
                    } else {
                        width = window.getWidth();
                        height = window.getHeight();
                    }
                }

                window.setX(bounds.getMinX());
                window.setY(bounds.getMinY());
                window.setWidth(width);
                window.setHeight(height);
            }
        }

    }

    private class ResizeEastMode extends ResizeMode {

        public ResizeEastMode(AngleRange... angleRanges) {
            super(Cursor.E_RESIZE, angleRanges);
        }

        @Override public Bounds calculateBounds(MouseEvent dragEvent, Window window) {
            return new BoundingBox(window.getX(), window.getY(),
                savedWidth + dragEvent.getScreenX() - savedScreenX, window.getHeight());
        }

    }

    private class ResizeNorthEastMode extends ResizeMode {

        public ResizeNorthEastMode(AngleRange... angleRanges) {
            super(Cursor.NE_RESIZE, angleRanges);
        }

        @Override public Bounds calculateBounds(MouseEvent dragEvent, Window window) {
            return new BoundingBox(window.getX(), dragEvent.getScreenY() - savedSceneY,
                savedWidth + dragEvent.getScreenX() - savedScreenX,
                savedHeight - dragEvent.getScreenY() + savedScreenY);
        }

    }

    private class ResizeNorthMode extends ResizeMode {

        public ResizeNorthMode(AngleRange... angleRanges) {
            super(Cursor.N_RESIZE, angleRanges);
        }

        @Override public Bounds calculateBounds(MouseEvent dragEvent, Window window) {
            return new BoundingBox(window.getX(), dragEvent.getScreenY() - savedSceneY,
                window.getWidth(), savedHeight - dragEvent.getScreenY() + savedScreenY);
        }

    }

    private class ResizeNorthWestMode extends ResizeMode {

        public ResizeNorthWestMode(AngleRange... angleRanges) {
            super(Cursor.NW_RESIZE, angleRanges);
        }

        @Override public Bounds calculateBounds(MouseEvent dragEvent, Window window) {
            return new BoundingBox(dragEvent.getScreenX() - savedSceneX, dragEvent.getScreenY() - savedSceneY,
                savedWidth - dragEvent.getScreenX() + savedScreenX,
                savedHeight - dragEvent.getScreenY() + savedScreenY);
        }

    }

    private class ResizeWestMode extends ResizeMode {

        public ResizeWestMode(AngleRange... angleRanges) {
            super(Cursor.W_RESIZE, angleRanges);
        }

        @Override public Bounds calculateBounds(MouseEvent dragEvent, Window window) {
            return new BoundingBox(dragEvent.getScreenX() - savedSceneX, window.getY(),
                savedWidth - dragEvent.getScreenX() + savedScreenX, window.getHeight());
        }

    }

    private class ResizeSouthWestMode extends ResizeMode {

        public ResizeSouthWestMode(AngleRange... angleRanges) {
            super(Cursor.SW_RESIZE, angleRanges);
        }

        @Override public Bounds calculateBounds(MouseEvent dragEvent, Window window) {
            return new BoundingBox(dragEvent.getScreenX() - savedSceneX, window.getY(),
                savedWidth - dragEvent.getScreenX() + savedScreenX,
                savedHeight + dragEvent.getScreenY() - savedScreenY);
        }

    }

    private class ResizeSouthMode extends ResizeMode {

        public ResizeSouthMode(AngleRange... angleRanges) {
            super(Cursor.S_RESIZE, angleRanges);
        }

        @Override public Bounds calculateBounds(MouseEvent dragEvent, Window window) {
            return new BoundingBox(window.getX(), window.getY(),
                window.getWidth(), savedHeight + dragEvent.getScreenY() - savedScreenY);
        }

    }

    private class ResizeSouthEastMode extends ResizeMode {

        public ResizeSouthEastMode(AngleRange... angleRanges) {
            super(Cursor.SE_RESIZE, angleRanges);
        }

        @Override public Bounds calculateBounds(MouseEvent dragEvent, Window window) {
            return new BoundingBox(window.getX(), window.getY(),
                savedWidth + dragEvent.getScreenX() - savedScreenX,
                savedHeight + dragEvent.getScreenY() - savedScreenY);
        }

    }

    private final Region region;
    private final double resizeBorderSize;

    private final MoveMode move;
    private final ResizeMode resizeEast;
    private final ResizeMode resizeNorthEast;
    private final ResizeMode resizeNorth;
    private final ResizeMode resizeNorthWest;
    private final ResizeMode resizeWest;
    private final ResizeMode resizeSouthWest;
    private final ResizeMode resizeSouth;
    private final ResizeMode resizeSouthEast;

    private DragMode dragMode;

    public WindowMoveResizeBehavior(Region region) {
        this(region, 2, 2);
    }

    public WindowMoveResizeBehavior(Region region, double resizeBorderSize, double diagonalResizeSpotAngularSize) {
        this.region = region;
        this.resizeBorderSize = resizeBorderSize;

        dragMode = move = new MoveMode(Cursor.DEFAULT, Cursor.MOVE);

        // angles are measured counter-clockwise from "east" (the positive X axis)
        resizeEast = new ResizeEastMode(new AngleRange(0, 45 - diagonalResizeSpotAngularSize),
            new AngleRange(315 + diagonalResizeSpotAngularSize, 360));
        resizeNorthEast = new ResizeNorthEastMode(new AngleRange(45 - diagonalResizeSpotAngularSize, 45 + diagonalResizeSpotAngularSize));
        resizeNorth = new ResizeNorthMode(new AngleRange(45 + diagonalResizeSpotAngularSize, 135 - diagonalResizeSpotAngularSize));
        resizeNorthWest = new ResizeNorthWestMode(new AngleRange(135 - diagonalResizeSpotAngularSize, 135 + diagonalResizeSpotAngularSize));
        resizeWest = new ResizeWestMode(new AngleRange(135 + diagonalResizeSpotAngularSize, 225 - diagonalResizeSpotAngularSize));
        resizeSouthWest = new ResizeSouthWestMode(new AngleRange(225 - diagonalResizeSpotAngularSize, 225 + diagonalResizeSpotAngularSize));
        resizeSouth = new ResizeSouthMode(new AngleRange(225 + diagonalResizeSpotAngularSize, 315 - diagonalResizeSpotAngularSize));
        resizeSouthEast = new ResizeSouthEastMode(new AngleRange(315 - diagonalResizeSpotAngularSize, 315 + diagonalResizeSpotAngularSize));
    }

    private double calculateAngle(double x, double y) {
        double angle = Math.toDegrees(Math.atan2(y, x));

        if (angle < 0) {
            angle = 360 + angle;
        }

        if (angle > 360) {
            angle = 360;
        }

        return angle;
    }

    private DragMode detect(MouseEvent event) {
        Bounds bounds = region.getLayoutBounds();
        Bounds outerBounds = new BoundingBox(bounds.getMinX() - resizeBorderSize, bounds.getMinY() - resizeBorderSize,
            bounds.getWidth() + resizeBorderSize * 2, bounds.getHeight() + resizeBorderSize * 2);
        Bounds innerBounds = new BoundingBox(bounds.getMinX() + resizeBorderSize, bounds.getMinY() + resizeBorderSize,
            bounds.getWidth() - resizeBorderSize * 2, bounds.getHeight() - resizeBorderSize * 2);

        Point2D center = new Point2D(innerBounds.getWidth() / 2, innerBounds.getHeight() / 2);
        Point2D ptr = new Point2D(event.getX(), event.getY());

        if (outerBounds.contains(ptr) && !innerBounds.contains(ptr)) {
            // the pointer is inside the resize border: pick the mode whose angular range
            // contains the pointer's angle around the center (Y is flipped and scaled by
            // the aspect ratio before taking the angle)
            double angle = calculateAngle(ptr.getX() - center.getX(),
                (ptr.getY() - center.getY()) * (outerBounds.getWidth() / outerBounds.getHeight()) * -1);
            return Stream.of(resizeEast, resizeNorthEast, resizeNorth, resizeNorthWest,
                    resizeWest, resizeSouthWest, resizeSouth, resizeSouthEast)
                .filter(mode -> mode.matches(angle))
                .findFirst().orElseThrow(AssertionError::new);
        }

        return move;
    }

    private void mouseAny(MouseEvent event) {
        dragMode.apply(event);
    }

    private void mousePress(MouseEvent event) {
        dragMode.store(event);
        mouseAny(event);
    }

    private void mouseRelease(MouseEvent event) {
        mouseAny(event);
    }

    private void mouseMove(MouseEvent event) {
        dragMode = detect(event);
        mouseAny(event);
    }

    private void mouseDrag(MouseEvent event) {
        mouseAny(event);
    }

    /*
     *
     */

    @Override public void apply(Region region) {
        region.setOnMousePressed(this::mousePress);
        region.setOnMouseReleased(this::mouseRelease);
        region.setOnMouseMoved(this::mouseMove);
        region.setOnMouseDragged(this::mouseDrag);
    }

}
windchopper/common
common-fx/src/main/java/com/github/windchopper/common/fx/behavior/WindowMoveResizeBehavior.java
Java
apache-2.0
12,341
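A minimal usage sketch for the class above (hypothetical, not part of the repository; it relies only on the public API shown): attaching the behavior to the root region of an undecorated JavaFX stage, which is the scenario the class is built for. The demo class name is illustrative.

// Hypothetical usage sketch for WindowMoveResizeBehavior (not from the original repo).
import com.github.windchopper.common.fx.behavior.WindowMoveResizeBehavior;
import javafx.application.Application;
import javafx.scene.Scene;
import javafx.scene.layout.StackPane;
import javafx.stage.Stage;
import javafx.stage.StageStyle;

public class UndecoratedWindowDemo extends Application {

    @Override public void start(Stage stage) {
        StackPane root = new StackPane();

        // An undecorated stage has no native title bar or resize border, so the
        // behavior supplies both through mouse handlers on the root region.
        new WindowMoveResizeBehavior(root).apply(root);

        stage.initStyle(StageStyle.UNDECORATED);
        stage.setScene(new Scene(root, 400, 300));
        stage.show();
    }

    public static void main(String[] args) {
        launch(args);
    }

}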
package com.vaadin.book.applications;

import com.vaadin.Application;
import com.vaadin.data.Property;
import com.vaadin.data.Property.ValueChangeEvent;
import com.vaadin.ui.*;
import com.vaadin.ui.UriFragmentUtility.FragmentChangedEvent;
import com.vaadin.ui.UriFragmentUtility.FragmentChangedListener;

// BEGIN-EXAMPLE: advanced.urifragmentutility.uriexample
public class UriFragmentApplication extends Application {
    private static final long serialVersionUID = -128617724108192945L;

    @Override
    public void init() {
        Window main = new Window("URI Fragment Example");
        setMainWindow(main);
        setTheme("book-examples");

        // Create the URI fragment utility
        final UriFragmentUtility urifu = new UriFragmentUtility();
        main.addComponent(urifu);

        // Application state menu
        final ListSelect menu = new ListSelect("Select a URI Fragment");
        menu.addItem("mercury");
        menu.addItem("venus");
        menu.addItem("earth");
        menu.addItem("mars");
        menu.setRows(4);
        menu.setNullSelectionAllowed(false);
        menu.setImmediate(true);
        main.addComponent(menu);

        // Set the URI Fragment when menu selection changes
        menu.addListener(new Property.ValueChangeListener() {
            private static final long serialVersionUID = 6380648224897936536L;

            public void valueChange(ValueChangeEvent event) {
                String itemid = (String) event.getProperty().getValue();
                urifu.setFragment(itemid);
            }
        });

        // When the URI fragment is given, use it to set menu selection
        urifu.addListener(new FragmentChangedListener() {
            private static final long serialVersionUID = -6588416218607827834L;

            public void fragmentChanged(FragmentChangedEvent source) {
                String fragment = source.getUriFragmentUtility().getFragment();
                if (fragment != null)
                    menu.setValue(fragment);
            }
        });
    }
}
// END-EXAMPLE: advanced.urifragmentutility.uriexample
BillHan/book-examples-v6
src/main/java/com/vaadin/book/applications/UriFragmentApplication.java
Java
apache-2.0
2,141
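A small follow-on sketch (hypothetical, not from the book examples; it uses only the UriFragmentUtility.setFragment method already demonstrated above): driving navigation from code by pushing a fragment, which makes the application state bookmarkable.

// Hypothetical helper built on the Vaadin 6 API used in the file above.
import com.vaadin.ui.UriFragmentUtility;

public final class FragmentNavigation {

    private FragmentNavigation() {
    }

    // Pushes the given id into the browser's URI fragment (e.g. "#mars");
    // listeners registered on the utility can then synchronize the UI.
    public static void navigateTo(UriFragmentUtility urifu, String itemId) {
        urifu.setFragment(itemId);
    }

}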
package resource

import (
    "fmt"
    "net/url"
    "strings"

    "github.com/rs/rest-layer/schema"
)

// Index is an interface defining a type able to bind and retrieve resources
// from a resource graph.
type Index interface {
    // Bind a new resource at the "name" endpoint
    Bind(name string, s schema.Schema, h Storer, c Conf) *Resource
    // GetResource retrieves a given resource by its path.
    // For instance, if a resource user has a sub-resource posts,
    // a users.posts path can be used to retrieve the posts resource.
    //
    // If a parent is given and the path starts with a dot, the lookup is started at the
    // parent's location instead of root's.
    GetResource(path string, parent *Resource) (*Resource, bool)
    // GetResources returns first level resources
    GetResources() []*Resource
}

// index is the root of the resource graph
type index struct {
    resources subResources
}

// NewIndex creates a new resource index
func NewIndex() Index {
    return &index{
        resources: subResources{},
    }
}

// Bind a resource at the specified endpoint name
func (r *index) Bind(name string, s schema.Schema, h Storer, c Conf) *Resource {
    assertNotBound(name, r.resources, nil)
    sr := new(name, s, h, c)
    r.resources.add(sr)
    return sr
}

// Compile the resource graph and report any error
func (r *index) Compile() error {
    return compileResourceGraph(r.resources)
}

// GetResource retrieves a given resource by its path.
// For instance, if a resource user has a sub-resource posts,
// a users.posts path can be used to retrieve the posts resource.
//
// If a parent is given and the path starts with a dot, the lookup is started at the
// parent's location instead of root's.
func (r *index) GetResource(path string, parent *Resource) (*Resource, bool) {
    resources := r.resources
    if len(path) > 0 && path[0] == '.' {
        if parent == nil {
            // If the path starts with a dot and no parent is given, fail the lookup
            return nil, false
        }
        path = path[1:]
        resources = parent.resources
    }
    var sr *Resource
    if strings.IndexByte(path, '.') == -1 {
        if sr = resources.get(path); sr != nil {
            resources = sr.resources
        } else {
            return nil, false
        }
    } else {
        for _, comp := range strings.Split(path, ".") {
            if sr = resources.get(comp); sr != nil {
                resources = sr.resources
            } else {
                return nil, false
            }
        }
    }
    return sr, true
}

// GetResources returns first level resources
func (r *index) GetResources() []*Resource {
    return r.resources
}

func compileResourceGraph(resources subResources) error {
    for _, r := range resources {
        if err := r.Compile(); err != nil {
            sep := "."
            if err.Error()[0] == ':' {
                sep = ""
            }
            return fmt.Errorf("%s%s%s", r.name, sep, err)
        }
    }
    return nil
}

// assertNotBound asserts a given resource name is not already bound
func assertNotBound(name string, resources subResources, aliases map[string]url.Values) {
    for _, r := range resources {
        if r.name == name {
            logPanicf(nil, "Cannot bind `%s': already bound as resource", name)
        }
    }
    if _, found := aliases[name]; found {
        logPanicf(nil, "Cannot bind `%s': already bound as alias", name)
    }
}
elmgone/coligui
vendor/github.com/rs/rest-layer/resource/index.go
Go
apache-2.0
3,120
from django.test import TestCase
from django.utils.timezone import utc

from datetime import datetime
import json
import logging
import mock

from dbkeeper.models import Organization, Team, Setting
from piservice.models import PiStation, PiEvent

import scoreboard.views as target


def _mocked_utcNow():
    return datetime(2001, 1, 1, 0, 0, 0).replace(tzinfo=utc)


class ScoreboardStatusDockTestCase(TestCase):
    def _setUpStations(self):
        self.launchStation = PiStation.objects.create(station_type = PiStation.LAUNCH_STATION_TYPE,
                                                      serial_num = self._serialNum)
        self._serialNum += 1
        self.dockStation = PiStation.objects.create(station_type = PiStation.DOCK_STATION_TYPE,
                                                    serial_num = self._serialNum)
        self._serialNum += 1
        self.secureStation = PiStation.objects.create(station_type = PiStation.SECURE_STATION_TYPE,
                                                      serial_num = self._serialNum)
        self._serialNum += 1
        self.returnStation = PiStation.objects.create(station_type = PiStation.RETURN_STATION_TYPE,
                                                      serial_num = self._serialNum)
        self._serialNum += 1
        self.station = self.dockStation

    def _setUpTeams(self):
        org = Organization.objects.create(name = "School 1", type = Organization.SCHOOL_TYPE)
        self.team1Name = "Team 1"
        self.team1 = Team.objects.create(name = self.team1Name, organization = org)

    def _setUpEvents(self):
        # Some tests don't need these events. If not needed for a particular
        # test, use PiEvent.objects.all().delete()
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 0, 0).replace(tzinfo=utc),
                                   type = PiEvent.EVENT_STARTED_MSG_TYPE)

    def _verify(self, expectedScore, expectedDuration_s):
        actual = target._recomputeTeamScore(self.team1Name)
        actualScore = actual['dock_score']
        actualDuration_s = actual['dock_duration_s']
        self.assertEqual(expectedScore, actualScore)
        self.assertEqual(expectedDuration_s, actualDuration_s)

    def setUp(self):
        PiEvent._meta.get_field("time").auto_now_add = False
        self._serialNum = 1
        self._setUpStations()
        self._setUpTeams()
        self._setUpEvents()
        self._watchingTime_s = 45.0
        Setting.objects.create(name = 'DNF_TIME_PENALTY_FACTOR', value = str(2.0))
        Setting.objects.create(name = 'DOCK_SIM_PLAYBACK_TIME_S', value = str(self._watchingTime_s))

    def test_recomputeDockScore_noEvents(self):
        PiEvent.objects.all().delete()

        expectedScore = 0
        expectedDuration_s = 0
        self._verify(expectedScore, expectedDuration_s)

    @mock.patch('scoreboard.views._utcNow', side_effect=_mocked_utcNow)
    def test_recomputeDockScore_noEventStartedEvent(self, mock_utcNow):
        PiEvent.objects.all().delete()
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 50).replace(tzinfo=utc),
                                   type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station)
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 56).replace(tzinfo=utc),
                                   type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station,
                                   status = PiEvent.SUCCESS_STATUS,
                                   data = json.dumps({"candidate_answer": 0, "fail_message": "OUTCOME_SUCCESS"}, separators=(',',':')))

        expectedScore = 0
        expectedDuration_s = 0
        self._verify(expectedScore, expectedDuration_s)

    @mock.patch('scoreboard.views._utcNow', side_effect=_mocked_utcNow)
    def test_recomputeDockScore_eventsBeforeEventStartedEvent(self, mock_utcNow):
        PiEvent.objects.all().delete()
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 50).replace(tzinfo=utc),
                                   type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station)
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 56).replace(tzinfo=utc),
                                   type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station,
                                   status = PiEvent.SUCCESS_STATUS,
                                   data = json.dumps({"candidate_answer": 0, "fail_message": "OUTCOME_SUCCESS"}, separators=(',',':')))
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 59).replace(tzinfo=utc),
                                   type = PiEvent.EVENT_STARTED_MSG_TYPE)

        expectedScore = 0
        expectedDuration_s = 0
        self._verify(expectedScore, expectedDuration_s)

    @mock.patch('scoreboard.views._utcNow', side_effect=_mocked_utcNow)
    def test_recomputeDockScore_noStartChallengeEvents(self, mock_utcNow):
        e = PiEvent.objects.create(time = datetime(2001, 1, 1, 0, 0, 0).replace(tzinfo=utc),
                                   type = PiEvent.REGISTER_MSG_TYPE, team = self.team1, pi = self.station,
                                   status = PiEvent.SUCCESS_STATUS)

        expectedScore = 0
        expectedDuration_s = 0
        self._verify(expectedScore, expectedDuration_s)

    @mock.patch('scoreboard.views._utcNow', side_effect=_mocked_utcNow)
    def test_recomputeDockScore_onlyOneStartChallengeEventSameTimestampNoSuccessFail(self, mock_utcNow):
        e = PiEvent.objects.create(time = datetime(2001, 1, 1, 0, 0, 0).replace(tzinfo=utc),
                                   type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station)

        expectedScore = 1
        expectedDuration_s = 0
        self._verify(expectedScore, expectedDuration_s)

    @mock.patch('scoreboard.views._utcNow', side_effect=_mocked_utcNow)
    def test_recomputeDockScore_onlyOneStartChallengeEventEarlierTimestampNoSuccessFail(self, mock_utcNow):
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 50).replace(tzinfo=utc),
                                   type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station)

        expectedScore = 1
        expectedDuration_s = 10
        self._verify(expectedScore, expectedDuration_s)

    @mock.patch('scoreboard.views._utcNow', side_effect=_mocked_utcNow)
    def test_recomputeDockScore_onlyOneStartChallengeEventEarlierTimestampSuccessNoConclude(self, mock_utcNow):
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 50).replace(tzinfo=utc),
                                   type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station)
        actualTime_s = 100
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 56).replace(tzinfo=utc),
                                   type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station,
                                   status = PiEvent.SUCCESS_STATUS,
                                   data = json.dumps({"candidate_answer": actualTime_s, "fail_message": "OUTCOME_SUCCESS"}, separators=(',',':')))

        expectedScore = 9
        expectedDuration_s = 6 - self._watchingTime_s + actualTime_s
        self._verify(expectedScore, expectedDuration_s)

    @mock.patch('scoreboard.views._utcNow', side_effect=_mocked_utcNow)
    def test_recomputeDockScore_onlyOneStartChallengeEventEarlierTimestampSuccessWithConclude(self, mock_utcNow):
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 50).replace(tzinfo=utc),
                                   type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station)
        actualTime_s = 68
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 56).replace(tzinfo=utc),
                                   type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station,
                                   status = PiEvent.SUCCESS_STATUS,
                                   data = json.dumps({"candidate_answer": actualTime_s, "fail_message": "OUTCOME_SUCCESS"}, separators=(',',':')))
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 58).replace(tzinfo=utc),
                                   type = PiEvent.EVENT_CONCLUDED_MSG_TYPE, team = self.team1, pi = self.station)

        expectedScore = 9
        expectedDuration_s = 6 - self._watchingTime_s + actualTime_s
        self._verify(expectedScore, expectedDuration_s)

    @mock.patch('scoreboard.views._utcNow', side_effect=_mocked_utcNow)
    def test_recomputeDockScore_onlyOneStartChallengeEventEarlierTimestampFailOutcomeDnf2xPenaltyNoConclude(self, mock_utcNow):
        dnfPenalty = 2.0
        Setting.objects.all().delete()
        Setting.objects.create(name = 'DNF_TIME_PENALTY_FACTOR', value = str(dnfPenalty))
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 50).replace(tzinfo=utc),
                                   type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station)
        actualTime_s = 213
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 56).replace(tzinfo=utc),
                                   type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station,
                                   status = PiEvent.FAIL_STATUS,
                                   data = json.dumps({"candidate_answer": actualTime_s, "fail_message": "OUTCOME_DNF"}, separators=(',',':')))

        expectedScore = 1
        expectedDuration_s = 10 - self._watchingTime_s + (actualTime_s * dnfPenalty)
        self._verify(expectedScore, expectedDuration_s)

    @mock.patch('scoreboard.views._utcNow', side_effect=_mocked_utcNow)
    def test_recomputeDockScore_onlyOneStartChallengeEventEarlierTimestampFailOutcomeDnf3xPenaltyNoConclude(self, mock_utcNow):
        dnfPenalty = 3.0
        Setting.objects.all().delete()
        Setting.objects.create(name = 'DNF_TIME_PENALTY_FACTOR', value = str(dnfPenalty))
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 50).replace(tzinfo=utc),
                                   type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station)
        actualTime_s = 47
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 56).replace(tzinfo=utc),
                                   type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station,
                                   status = PiEvent.FAIL_STATUS,
                                   data = json.dumps({"candidate_answer": actualTime_s, "fail_message": "OUTCOME_DNF"}, separators=(',',':')))

        expectedScore = 1
        expectedDuration_s = 10 - self._watchingTime_s + (actualTime_s * dnfPenalty)
        self._verify(expectedScore, expectedDuration_s)

    @mock.patch('scoreboard.views._utcNow', side_effect=_mocked_utcNow)
    def test_recomputeDockScore_onlyOneStartChallengeEventEarlierTimestampFailOutcomeDnf8xPenaltyNoConclude(self, mock_utcNow):
        dnfPenalty = 8.0
        Setting.objects.all().delete()
        Setting.objects.create(name = 'DNF_TIME_PENALTY_FACTOR', value = str(dnfPenalty))
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 50).replace(tzinfo=utc),
                                   type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station)
        actualTime_s = 33
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 56).replace(tzinfo=utc),
                                   type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station,
                                   status = PiEvent.FAIL_STATUS,
                                   data = json.dumps({"candidate_answer": actualTime_s, "fail_message": "OUTCOME_DNF"}, separators=(',',':')))

        expectedScore = 1
        expectedDuration_s = 10 - self._watchingTime_s + (actualTime_s * dnfPenalty)
        self._verify(expectedScore, expectedDuration_s)

    @mock.patch('scoreboard.views._utcNow', side_effect=_mocked_utcNow)
    def test_recomputeDockScore_onlyOneStartChallengeEventEarlierTimestampFailNoConclude(self, mock_utcNow):
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 50).replace(tzinfo=utc),
                                   type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station)
        actualTime_s = 1684
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 56).replace(tzinfo=utc),
                                   type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station,
                                   status = PiEvent.FAIL_STATUS,
                                   data = json.dumps({"candidate_answer": actualTime_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')))

        expectedScore = 1
        expectedDuration_s = 10 - self._watchingTime_s + actualTime_s
        self._verify(expectedScore, expectedDuration_s)

    @mock.patch('scoreboard.views._utcNow', side_effect=_mocked_utcNow)
    def test_recomputeDockScore_onlyOneStartChallengeEventEarlierTimestampFailWithConclude(self, mock_utcNow):
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 50).replace(tzinfo=utc),
                                   type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station)
        actualTime_s = 2000
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 56).replace(tzinfo=utc),
                                   type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station,
                                   status = PiEvent.FAIL_STATUS,
                                   data = json.dumps({"candidate_answer": actualTime_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')))
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 58).replace(tzinfo=utc),
                                   type = PiEvent.EVENT_CONCLUDED_MSG_TYPE, team = self.team1, pi = self.station)

        expectedScore = 1
        expectedDuration_s = 8 - self._watchingTime_s + actualTime_s
        self._verify(expectedScore, expectedDuration_s)

    @mock.patch('scoreboard.views._utcNow', side_effect=_mocked_utcNow)
    def test_recomputeDockScore_twoStartChallengeEventsEarlierTimestampSuccessNoSuccessFail(self, mock_utcNow):
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 50).replace(tzinfo=utc),
                                   type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station)
        actualTime_s = 3000
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 56).replace(tzinfo=utc),
                                   type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station,
                                   status = PiEvent.SUCCESS_STATUS,
                                   data = json.dumps({"candidate_answer": actualTime_s, "fail_message": "OUTCOME_SUCCESS"}, separators=(',',':')))
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 57).replace(tzinfo=utc),
                                   type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station)

        expectedScore = 9
        expectedDuration_s = 6 - self._watchingTime_s + actualTime_s
        self._verify(expectedScore, expectedDuration_s)

    @mock.patch('scoreboard.views._utcNow', side_effect=_mocked_utcNow)
    def test_recomputeDockScore_twoStartChallengeEventsEarlierTimestampSuccessFail(self, mock_utcNow):
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 50).replace(tzinfo=utc),
                                   type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station)
        actualTime1_s = 319
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 56).replace(tzinfo=utc),
                                   type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station,
                                   status = PiEvent.SUCCESS_STATUS,
                                   data = json.dumps({"candidate_answer": actualTime1_s, "fail_message": "OUTCOME_SUCCESS"}, separators=(',',':')))
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 57).replace(tzinfo=utc),
                                   type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station)
        actualTime2_s = 4897
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 58).replace(tzinfo=utc),
                                   type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station,
                                   status = PiEvent.FAIL_STATUS,
                                   data = json.dumps({"candidate_answer": actualTime2_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')))

        expectedScore = 9
        expectedDuration_s = 6 - self._watchingTime_s + actualTime1_s  # ignore actualTime2_s
        self._verify(expectedScore, expectedDuration_s)

    @mock.patch('scoreboard.views._utcNow', side_effect=_mocked_utcNow)
    def test_recomputeDockScore_twoStartChallengeEventsEarlierTimestampSuccessSuccess(self, mock_utcNow):
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 50).replace(tzinfo=utc),
                                   type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station)
        actualTime1_s = 3213
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 56).replace(tzinfo=utc),
                                   type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station,
                                   status = PiEvent.SUCCESS_STATUS,
                                   data = json.dumps({"candidate_answer": actualTime1_s, "fail_message": "OUTCOME_SUCCESS"}, separators=(',',':')))
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 57).replace(tzinfo=utc),
                                   type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station)
        actualTime2_s = 228
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 58).replace(tzinfo=utc),
                                   type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station,
                                   status = PiEvent.SUCCESS_STATUS,
                                   data = json.dumps({"candidate_answer": actualTime2_s, "fail_message": "OUTCOME_SUCCESS"}, separators=(',',':')))

        expectedScore = 9
        expectedDuration_s = 6 - self._watchingTime_s + actualTime1_s  # ignore actualTime2_s
        self._verify(expectedScore, expectedDuration_s)

    @mock.patch('scoreboard.views._utcNow', side_effect=_mocked_utcNow)
    def test_recomputeDockScore_twoStartChallengeEventsEarlierTimestampFailNoSuccessFail(self, mock_utcNow):
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 46).replace(tzinfo=utc),
                                   type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station)
        actualTime_s = 283
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 48).replace(tzinfo=utc),
                                   type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station,
                                   status = PiEvent.FAIL_STATUS,
                                   data = json.dumps({"candidate_answer": actualTime_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')))
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 50).replace(tzinfo=utc),
                                   type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station)

        expectedScore = 1
        expectedDuration_s = 14 - self._watchingTime_s + actualTime_s
        self._verify(expectedScore, expectedDuration_s)

    @mock.patch('scoreboard.views._utcNow', side_effect=_mocked_utcNow)
    def test_recomputeDockScore_twoStartChallengeEventsEarlierTimestampFailSuccessNoConclude(self, mock_utcNow):
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 46).replace(tzinfo=utc),
                                   type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station)
        actualTime1_s = 9385
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 48).replace(tzinfo=utc),
                                   type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station,
                                   status = PiEvent.FAIL_STATUS,
                                   data = json.dumps({"candidate_answer": actualTime1_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')))
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 50).replace(tzinfo=utc),
                                   type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station)
        actualTime2_s = 332
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 52).replace(tzinfo=utc),
                                   type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station,
                                   status = PiEvent.SUCCESS_STATUS,
                                   data = json.dumps({"candidate_answer": actualTime2_s, "fail_message": "OUTCOME_SUCCESS"}, separators=(',',':')))

        expectedScore = 9
        expectedDuration_s = 6 - self._watchingTime_s + actualTime1_s - self._watchingTime_s + actualTime2_s
        self._verify(expectedScore, expectedDuration_s)

    @mock.patch('scoreboard.views._utcNow', side_effect=_mocked_utcNow)
    def test_recomputeDockScore_twoStartChallengeEventsEarlierTimestampFailSuccessWithConclude(self, mock_utcNow):
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 46).replace(tzinfo=utc),
                                   type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station)
        actualTime1_s = 123
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 48).replace(tzinfo=utc),
                                   type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station,
                                   status = PiEvent.FAIL_STATUS,
                                   data = json.dumps({"candidate_answer": actualTime1_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')))
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 50).replace(tzinfo=utc),
                                   type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station)
        actualTime2_s = 456
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 52).replace(tzinfo=utc),
                                   type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station,
                                   status = PiEvent.SUCCESS_STATUS,
                                   data = json.dumps({"candidate_answer": actualTime2_s, "fail_message": "OUTCOME_SUCCESS"}, separators=(',',':')))
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 58).replace(tzinfo=utc),
                                   type = PiEvent.EVENT_CONCLUDED_MSG_TYPE, team = self.team1, pi = self.station)

        expectedScore = 9
        expectedDuration_s = 6 - self._watchingTime_s + actualTime1_s - self._watchingTime_s + actualTime2_s
        self._verify(expectedScore, expectedDuration_s)

    @mock.patch('scoreboard.views._utcNow', side_effect=_mocked_utcNow)
    def test_recomputeDockScore_twoStartChallengeEventsEarlierTimestampFailFailNoConclude(self, mock_utcNow):
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 46).replace(tzinfo=utc),
                                   type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station)
        actualTime1_s = 345
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 48).replace(tzinfo=utc),
                                   type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station,
                                   status = PiEvent.FAIL_STATUS,
                                   data = json.dumps({"candidate_answer": actualTime1_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')))
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 50).replace(tzinfo=utc),
                                   type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station)
        actualTime2_s = 678
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 52).replace(tzinfo=utc),
                                   type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station,
                                   status = PiEvent.FAIL_STATUS,
                                   data = json.dumps({"candidate_answer": actualTime2_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')))
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 54).replace(tzinfo=utc),
                                   type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station)

        expectedScore = 1
        expectedDuration_s = 14 - self._watchingTime_s + actualTime1_s - self._watchingTime_s + actualTime2_s
        self._verify(expectedScore, expectedDuration_s)

    @mock.patch('scoreboard.views._utcNow', side_effect=_mocked_utcNow)
    def test_recomputeDockScore_twoStartChallengeEventsEarlierTimestampFailFailWithConclude(self, mock_utcNow):
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 46).replace(tzinfo=utc),
                                   type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station)
        actualTime1_s = 4567
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 48).replace(tzinfo=utc),
                                   type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station,
                                   status = PiEvent.FAIL_STATUS,
                                   data = json.dumps({"candidate_answer": actualTime1_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')))
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 50).replace(tzinfo=utc),
                                   type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station)
        actualTime2_s = 678
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 52).replace(tzinfo=utc),
                                   type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station,
                                   status = PiEvent.FAIL_STATUS,
                                   data = json.dumps({"candidate_answer": actualTime2_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')))
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 54).replace(tzinfo=utc),
                                   type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station)
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 58).replace(tzinfo=utc),
                                   type = PiEvent.EVENT_CONCLUDED_MSG_TYPE, team = self.team1, pi = self.station)

        expectedScore = 1
        expectedDuration_s = 12 - self._watchingTime_s + actualTime1_s - self._watchingTime_s + actualTime2_s
        self._verify(expectedScore, expectedDuration_s)

    @mock.patch('scoreboard.views._utcNow', side_effect=_mocked_utcNow)
    def test_recomputeDockScore_threeStartChallengeEventsEarlierTimestampFailFailNoSuccessFail(self, mock_utcNow):
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 46).replace(tzinfo=utc),
                                   type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station)
        actualTime1_s = 567
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 48).replace(tzinfo=utc),
                                   type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station,
                                   status = PiEvent.FAIL_STATUS,
                                   data = json.dumps({"candidate_answer": actualTime1_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')))
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 50).replace(tzinfo=utc),
                                   type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station)
        actualTime2_s = 890
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 52).replace(tzinfo=utc),
                                   type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station,
                                   status = PiEvent.FAIL_STATUS,
                                   data = json.dumps({"candidate_answer": actualTime2_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')))
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 54).replace(tzinfo=utc),
                                   type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station)

        expectedScore = 1
        expectedDuration_s = 14 - self._watchingTime_s + actualTime1_s - self._watchingTime_s + actualTime2_s
        self._verify(expectedScore, expectedDuration_s)

    @mock.patch('scoreboard.views._utcNow', side_effect=_mocked_utcNow)
    def test_recomputeDockScore_threeStartChallengeEventsEarlierTimestampFailFailSuccessNoConclude(self, mock_utcNow):
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 46).replace(tzinfo=utc),
                                   type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station)
        actualTime1_s = 678
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 48).replace(tzinfo=utc),
                                   type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station,
                                   status = PiEvent.FAIL_STATUS,
                                   data = json.dumps({"candidate_answer": actualTime1_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')))
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 50).replace(tzinfo=utc),
                                   type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station)
        actualTime2_s = 789
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 52).replace(tzinfo=utc),
                                   type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station,
                                   status = PiEvent.FAIL_STATUS,
                                   data = json.dumps({"candidate_answer": actualTime2_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')))
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 54).replace(tzinfo=utc),
                                   type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station)
        actualTime3_s = 7654
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 56).replace(tzinfo=utc),
                                   type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station,
                                   status = PiEvent.SUCCESS_STATUS,
                                   data = json.dumps({"candidate_answer": actualTime3_s, "fail_message": "OUTCOME_SUCCESS"}, separators=(',',':')))

        expectedScore = 9
        expectedDuration_s = 10 - self._watchingTime_s + actualTime1_s - self._watchingTime_s + actualTime2_s - self._watchingTime_s + actualTime3_s
        self._verify(expectedScore, expectedDuration_s)

    @mock.patch('scoreboard.views._utcNow', side_effect=_mocked_utcNow)
    def test_recomputeDockScore_threeStartChallengeEventsEarlierTimestampFailFailSuccessWithConclude(self, mock_utcNow):
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 46).replace(tzinfo=utc),
                                   type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station)
        actualTime1_s = 321
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 48).replace(tzinfo=utc),
                                   type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station,
                                   status = PiEvent.FAIL_STATUS,
                                   data = json.dumps({"candidate_answer": actualTime1_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')))
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 50).replace(tzinfo=utc),
                                   type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station)
        actualTime2_s = 654
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 52).replace(tzinfo=utc),
                                   type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station,
                                   status = PiEvent.FAIL_STATUS,
                                   data = json.dumps({"candidate_answer": actualTime2_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')))
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 54).replace(tzinfo=utc),
                                   type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station)
        actualTime3_s = 987
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 56).replace(tzinfo=utc),
                                   type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station,
                                   status = PiEvent.SUCCESS_STATUS,
                                   data = json.dumps({"candidate_answer": actualTime3_s, "fail_message": "OUTCOME_SUCCESS"}, separators=(',',':')))
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 58).replace(tzinfo=utc),
                                   type = PiEvent.EVENT_CONCLUDED_MSG_TYPE, team = self.team1, pi = self.station)

        expectedScore = 9
        expectedDuration_s = 10 - self._watchingTime_s + actualTime1_s - self._watchingTime_s + actualTime2_s - self._watchingTime_s + actualTime3_s
        self._verify(expectedScore, expectedDuration_s)

    @mock.patch('scoreboard.views._utcNow', side_effect=_mocked_utcNow)
    def test_recomputeDockScore_threeStartChallengeEventsEarlierTimestampFailFailFailNoConclude(self, mock_utcNow):
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 46).replace(tzinfo=utc),
                                   type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station)
        actualTime1_s = 37  # this is less than 45 sec, so watchingTime will be used instead
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 48).replace(tzinfo=utc),
                                   type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station,
                                   status = PiEvent.FAIL_STATUS,
                                   data = json.dumps({"candidate_answer": actualTime1_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')))
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 50).replace(tzinfo=utc),
                                   type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station)
        actualTime2_s = 54
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 52).replace(tzinfo=utc),
                                   type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station,
                                   status = PiEvent.FAIL_STATUS,
                                   data = json.dumps({"candidate_answer": actualTime2_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')))
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 54).replace(tzinfo=utc),
                                   type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station)
        actualTime3_s = 76
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 56).replace(tzinfo=utc),
                                   type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station,
                                   status = PiEvent.FAIL_STATUS,
                                   data = json.dumps({"candidate_answer": actualTime3_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')))

        expectedScore = 5
        expectedDuration_s = 10 - self._watchingTime_s + self._watchingTime_s - self._watchingTime_s + actualTime2_s - self._watchingTime_s + actualTime3_s
        self._verify(expectedScore, expectedDuration_s)

    @mock.patch('scoreboard.views._utcNow', side_effect=_mocked_utcNow)
    def test_recomputeDockScore_threeStartChallengeEventsEarlierTimestampFailFailFailWithConclude(self, mock_utcNow):
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 46).replace(tzinfo=utc),
                                   type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station)
        actualTime1_s = 23  # use watchTime_s instead since this is less than 45 sec
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 48).replace(tzinfo=utc),
                                   type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station,
                                   status = PiEvent.FAIL_STATUS,
                                   data = json.dumps({"candidate_answer": actualTime1_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')))
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 50).replace(tzinfo=utc),
                                   type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station)
        actualTime2_s = 45
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 52).replace(tzinfo=utc),
                                   type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station,
                                   status = PiEvent.FAIL_STATUS,
                                   data = json.dumps({"candidate_answer": actualTime2_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')))
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 54).replace(tzinfo=utc),
                                   type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station)
        actualTime3_s = 67
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 56).replace(tzinfo=utc),
                                   type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station,
                                   status = PiEvent.FAIL_STATUS,
                                   data = json.dumps({"candidate_answer": actualTime3_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')))
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 58).replace(tzinfo=utc),
                                   type = PiEvent.EVENT_CONCLUDED_MSG_TYPE, team = self.team1, pi = self.station)

        expectedScore = 5
        expectedDuration_s = 10 - self._watchingTime_s + self._watchingTime_s - self._watchingTime_s + actualTime2_s - self._watchingTime_s + actualTime3_s
        self._verify(expectedScore, expectedDuration_s)

    @mock.patch('scoreboard.views._utcNow', side_effect=_mocked_utcNow)
    def test_recomputeDockScore_fourStartChallengeEventsEarlierTimestampFailFailFailNoSuccessFail(self, mock_utcNow):
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 46).replace(tzinfo=utc),
                                   type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station)
        actualTime1_s = 123
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 48).replace(tzinfo=utc),
                                   type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station,
                                   status = PiEvent.FAIL_STATUS,
                                   data = json.dumps({"candidate_answer": actualTime1_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')))
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 50).replace(tzinfo=utc),
                                   type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station)
        actualTime2_s = 45
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 52).replace(tzinfo=utc),
                                   type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station,
                                   status = PiEvent.FAIL_STATUS,
                                   data = json.dumps({"candidate_answer": actualTime2_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')))
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 54).replace(tzinfo=utc),
                                   type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station)
        actualTime3_s = 6789
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 56).replace(tzinfo=utc),
                                   type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station,
                                   status = PiEvent.FAIL_STATUS,
                                   data = json.dumps({"candidate_answer": actualTime3_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')))
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 57).replace(tzinfo=utc),
                                   type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station)

        expectedScore = 5
        expectedDuration_s = 10 - self._watchingTime_s + actualTime1_s - self._watchingTime_s + actualTime2_s - self._watchingTime_s + actualTime3_s
        self._verify(expectedScore, expectedDuration_s)

    @mock.patch('scoreboard.views._utcNow', side_effect=_mocked_utcNow)
    def test_recomputeDockScore_fourStartChallengeEventsEarlierTimestampFailFailFailFailNoConclude(self, mock_utcNow):
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 46).replace(tzinfo=utc),
                                   type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station)
        actualTime1_s = 122
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 48).replace(tzinfo=utc),
                                   type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station,
                                   status = PiEvent.FAIL_STATUS,
                                   data = json.dumps({"candidate_answer": actualTime1_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')))
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 50).replace(tzinfo=utc),
                                   type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station)
        actualTime2_s = 233
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 52).replace(tzinfo=utc),
                                   type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station,
                                   status = PiEvent.FAIL_STATUS,
                                   data = json.dumps({"candidate_answer": actualTime2_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')))
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 54).replace(tzinfo=utc),
                                   type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station)
        actualTime3_s = 344
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 56).replace(tzinfo=utc),
                                   type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station,
                                   status = PiEvent.FAIL_STATUS,
                                   data = json.dumps({"candidate_answer": actualTime3_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')))
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 57).replace(tzinfo=utc),
                                   type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station)
        actualTime4_s = 455
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 58).replace(tzinfo=utc),
                                   type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station,
                                   status = PiEvent.FAIL_STATUS,
                                   data = json.dumps({"candidate_answer": actualTime4_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')))

        expectedScore = 5
        expectedDuration_s = 10 - self._watchingTime_s + actualTime1_s - self._watchingTime_s + actualTime2_s - self._watchingTime_s + actualTime3_s  # ignore actualTime4_s
        self._verify(expectedScore, expectedDuration_s)

    @mock.patch('scoreboard.views._utcNow', side_effect=_mocked_utcNow)
    def test_recomputeDockScore_fourStartChallengeEventsEarlierTimestampFailFailFailSuccessNoConclude(self, mock_utcNow):
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 46).replace(tzinfo=utc),
                                   type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station)
        actualTime1_s = 1223
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 48).replace(tzinfo=utc),
                                   type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station,
                                   status = PiEvent.FAIL_STATUS,
                                   data = json.dumps({"candidate_answer": actualTime1_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')))
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 50).replace(tzinfo=utc),
                                   type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station)
        actualTime2_s = 2334
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 52).replace(tzinfo=utc),
                                   type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station,
                                   status = PiEvent.FAIL_STATUS,
                                   data = json.dumps({"candidate_answer": actualTime2_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')))
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 54).replace(tzinfo=utc),
                                   type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station)
        actualTime3_s = 3445
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 56).replace(tzinfo=utc),
                                   type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station,
                                   status = PiEvent.FAIL_STATUS,
                                   data = json.dumps({"candidate_answer": actualTime3_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')))
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 57).replace(tzinfo=utc),
                                   type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station)
        actualTime4_s = 4556
        e = PiEvent.objects.create(time = datetime(2000, 12, 31, 23, 59, 58).replace(tzinfo=utc),
                                   type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station,
                                   status = PiEvent.SUCCESS_STATUS,
                                   data = json.dumps({"candidate_answer": actualTime4_s, "fail_message": "OUTCOME_SUCCESS"}, separators=(',',':')))

        expectedScore = 5
        expectedDuration_s = 10 - self._watchingTime_s + actualTime1_s - self._watchingTime_s + actualTime2_s - self._watchingTime_s + actualTime3_s  # ignore actualTime4_s
        self._verify(expectedScore, expectedDuration_s)

    @mock.patch('scoreboard.views._utcNow', side_effect=_mocked_utcNow)
    def test_recomputeDockScore_onlyOneStartChallengeEventLaterTimestamp(self, mock_utcNow):
        pass  # Don't worry about later timestamps

#TODO - Remaining items...
#
# Scoreboard
# [x]  1. absent/present Registered indicator
# [x]  2. make title larger and change to "Leaderboard"
# [x]  3. fill width 100%
# [x]  4. make 30 teams fit on the same page with roughly 20-30 chars
# [x]  5. header row multiple lines--all text doesn't show up
# [x]  6. don't need to show page footer; find another place for the attribution
# [ ]  7. put "Harris Design Challenge 2016" along the left-hand side
# [x]  8. ranking
# [x] 11. remove team logo if not implementing this time
# [ ] 12. Page has two jquery <script> tags--one looks WRONG_ARGUMENTS
#
#
# Enhancements
# [ ]  9. Change color (darker) for the ones that are zero (not started)
# [ ] 10. Set color brighter to stand out for the ones that are done
brata-hsdc/brata.masterserver
workspace/ms/scoreboard/tests/test_dock.py
Python
apache-2.0
49,951
var express = require('express');
var path = require('path');
var favicon = require('serve-favicon');
var logger = require('morgan');
var cookieParser = require('cookie-parser');
var bodyParser = require('body-parser');
var stylus = require('stylus');
var nib = require('nib');
var passport = require('passport');
var HttpStrategy = require('passport-http');
var LocalStrategy = require('passport-local').Strategy;
var expressSession = require('express-session');
var md5 = require('md5');
var sql = require('mssql');
var flash = require('connect-flash');

var routes = require('./routes/index');
var templates = require('./routes/templates');
var publicApisRoutes = require('./routes/publicApis');
var authedApisRoutes = require('./routes/authedApis');

var app = express();
var db = require('mongoskin').db('mongodb://192.168.0.56/TestDB', { native_parser: true });

// view engine setup
app.set('views', path.join(__dirname, 'views'));
app.set('view engine', 'jade');

//--------------------------------------------------------------------------------------------------------------
app.use(expressSession({ secret: 'mySecretKey' }));
app.use(passport.initialize());
app.use(passport.session());
app.use(flash());

function compile(str, path) {
    return stylus(str)
        .set('filename', path)
        .use(nib());
}

app.use(stylus.middleware({ src: __dirname + '/public/', compile: compile }));

// uncomment after placing your favicon in /public
//app.use(favicon(path.join(__dirname, 'public', 'favicon.ico')));
app.use(logger('dev'));
app.use(bodyParser.json());
app.use(bodyParser.urlencoded({ extended: false }));
app.use(cookieParser());
app.use(express.static(path.join(__dirname, 'public')));
app.use(express.static(path.join(__dirname, 'bower_components')));

var sqlconfig = {
    user: 'Nodejs',
    password: 'Nodejs',
    server: 'frosh',
    database: 'UM'
};

app.use(function (req, res, next) {
    req.db = db;
    req.sqlconfig = sqlconfig;
    next();
});

passport.serializeUser(function (user, done) {
    done(null, user.UserId);
});

passport.deserializeUser(function (id, done) {
    var sqlconnection = new sql.Connection(sqlconfig, function (err) {
        var request = new sql.Request(sqlconnection);
        // NOTE: concatenating `id` into the SQL text is vulnerable to SQL injection;
        // a parameterized query (request.input, as used for the stored procedure
        // call in the login strategy below) would be safer.
        request.query('SELECT TOP 1 * FROM dbo.Users WHERE UserId = ' + id, function (err, recordset) {
            if (recordset)
                return done(err, recordset[0]);
        });
    });
});

passport.use('local', new LocalStrategy({ passReqToCallback: true }, function (req, username, password, done) {
    var sqlconnection = new sql.Connection(sqlconfig, function (err) {
        var request = new sql.Request(sqlconnection);
        request.query('SELECT TOP 1 * FROM dbo.Users WHERE UserId = ' + username, function (err, recordset) {
            if (recordset.length == 1) {
                var userRec = recordset[0];
                var hashPass = md5(password);
                if (userRec.PasswordText.toLowerCase() == hashPass.toLowerCase()) {
                    // success
                    var request2 = new sql.Request(sqlconnection);
                    request2.input('ID', sql.NVarChar(50), username);
                    request2.execute('dbo.spTest', function (err, records) {
                        req.session.user = recordset[0];
                        req.session.employee = records[0][0];
                        return done(null, recordset[0]);
                    });
                } else
                    return done(null, false, req.flash('loginMessage', 'The password is not correct.'));
            } else
                return done(err, false, req.flash('loginMessage', 'The personnel code is not valid, or the employee is not working at the Saeedabad center.'));
        });
    });
}));

//--------------------------------------------------------------------------------------------------------------
app.use('/tmpl', templates);
app.use('/papi', publicApisRoutes);
app.use('/api', authedApisRoutes(passport));
app.all('/*', routes(passport));

passport.authenticate('local', function (req, res, next) { });

// catch 404 and forward to error handler
app.use(function (req, res, next) {
    var err = new Error('Not Found');
    err.status = 404;
    next(err);
});

// error handlers

// development error handler
// will print stacktrace
if (app.get('env') === 'development') {
    app.use(function (err, req, res, next) {
        res.status(err.status || 500);
        res.render('error', {
            message: err.message,
            error: err
        });
    });
}

// production error handler
// no stacktraces leaked to user
app.use(function (err, req, res, next) {
    res.status(err.status || 500);
    res.render('error', {
        message: err.message,
        error: {}
    });
});

module.exports = app;
Hamcker/PersonalExit
app.js
JavaScript
apache-2.0
4,622
/*
 * Copyright (C) 2016 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */

package com.googlecode.android_scripting.future;

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

/**
 * FutureResult represents an eventual execution result for asynchronous operations.
 *
 * @author Damon Kohler (damonkohler@gmail.com)
 */
public class FutureResult<T> implements Future<T> {

  private final CountDownLatch mLatch = new CountDownLatch(1);
  private volatile T mResult = null;

  public void set(T result) {
    mResult = result;
    mLatch.countDown();
  }

  @Override
  public boolean cancel(boolean mayInterruptIfRunning) {
    return false;
  }

  @Override
  public T get() throws InterruptedException {
    mLatch.await();
    return mResult;
  }

  @Override
  public T get(long timeout, TimeUnit unit) throws InterruptedException {
    mLatch.await(timeout, unit);
    return mResult;
  }

  @Override
  public boolean isCancelled() {
    return false;
  }

  @Override
  public boolean isDone() {
    return mResult != null;
  }
}
kuri65536/sl4a
android/Utils/src/com/googlecode/android_scripting/future/FutureResult.java
Java
apache-2.0
1,701
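The FutureResult above is a write-once latch: one thread publishes a value with set() and any reader blocks in get() until it arrives. A minimal usage sketch using only the class above; the background thread and the string payload are illustrative:

import java.util.concurrent.TimeUnit;

public class FutureResultDemo {
    public static void main(String[] args) throws InterruptedException {
        final FutureResult<String> result = new FutureResult<String>();

        // Producer: fulfills the future from a background thread.
        new Thread(new Runnable() {
            @Override
            public void run() {
                result.set("done"); // wakes every blocked get()
            }
        }).start();

        // Consumer: blocks until set() runs; the timed variant returns
        // null if the timeout elapses first.
        System.out.println(result.get(1, TimeUnit.SECONDS));
    }
}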
package com.datalint.open.server.io;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.Reader;

import com.datalint.open.shared.general.ILineReader;

public class LineReader implements ILineReader {
    private final BufferedReader reader;

    public LineReader(Reader reader) {
        this.reader = new BufferedReader(reader);
    }

    public LineReader(InputStream inputStream) {
        this(new InputStreamReader(inputStream));
    }

    @Override
    public String readLine() throws IOException {
        return reader.readLine();
    }

    @Override
    public void close() throws IOException {
        reader.close();
    }
}
datalint/open
Open/src/main/java/com/datalint/open/server/io/LineReader.java
Java
apache-2.0
669
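LineReader is a thin adapter from InputStream/Reader to the project's ILineReader interface. A usage sketch under the assumption that ILineReader declares exactly the readLine()/close() pair implemented above; the file name is illustrative:

import java.io.FileInputStream;
import java.io.IOException;

public class LineReaderDemo {
    public static void main(String[] args) throws IOException {
        LineReader reader = new LineReader(new FileInputStream("input.txt"));
        try {
            String line;
            // readLine() returns null at end of stream, like BufferedReader.
            while ((line = reader.readLine()) != null) {
                System.out.println(line);
            }
        } finally {
            reader.close();
        }
    }
}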
<?php

declare(strict_types=1);

/*
 * +----------------------------------------------------------------------+
 * | ThinkSNS Plus                                                        |
 * +----------------------------------------------------------------------+
 * | Copyright (c) 2016-Present ZhiYiChuangXiang Technology Co., Ltd.     |
 * +----------------------------------------------------------------------+
 * | This source file is subject to enterprise private license, that is  |
 * | bundled with this package in the file LICENSE, and is available     |
 * | through the world-wide-web at the following url:                    |
 * | https://github.com/slimkit/plus/blob/master/LICENSE                 |
 * +----------------------------------------------------------------------+
 * | Author: Slim Kit Group <master@zhiyicx.com>                          |
 * | Homepage: www.thinksns.com                                           |
 * +----------------------------------------------------------------------+
 */

namespace Zhiyi\Plus\Http\Controllers\APIs\V2;

use Illuminate\Http\Request;
use Illuminate\Support\Arr;
use Zhiyi\Plus\Models\WalletCharge as WalletChargeModel;
use Zhiyi\Plus\Services\Wallet\Charge as ChargeService;
use function Zhiyi\Plus\setting;

class PingPlusPlusChargeWebHooks
{
    public function webhook(Request $request, ChargeService $chargeService)
    {
        if ($request->json('type') !== 'charge.succeeded') {
            return response('不是支持的事件', 422); // "Unsupported event type"
        }

        $settings = setting('wallet', 'ping++', []);
        $signature = $request->headers->get('x-pingplusplus-signature');
        $pingPlusPlusPublicCertificate = $settings['public_key'] ?? null;

        $signed = openssl_verify(
            $request->getContent(),
            base64_decode($signature),
            $pingPlusPlusPublicCertificate,
            OPENSSL_ALGO_SHA256
        );

        // openssl_verify() returns 1 on success, 0 on failure and -1 on error,
        // so compare against 1 explicitly instead of relying on truthiness.
        if ($signed !== 1) {
            return response('加密验证失败', 422); // "Signature verification failed"
        }

        $pingPlusPlusCharge = $request->json('data.object');
        $charge = WalletChargeModel::find($chargeService->unformatChargeId($pingPlusPlusCharge['order_no']));

        if (! $charge) {
            return response('凭据不存在', 404); // "Charge record not found"
        } elseif ($charge->status === 1) {
            return response('订单已提前完成'); // "Order already completed"
        }

        $user = $charge->user;
        $charge->status = 1;
        $charge->transaction_no = $pingPlusPlusCharge['transaction_no'];
        $charge->account = $this->resolveChargeAccount($pingPlusPlusCharge, $charge->account);

        $user->getConnection()->transaction(function () use ($user, $charge) {
            $user->wallet()->increment('balance', $charge->amount);
            $charge->save();
        });

        return response('通知成功'); // "Notification handled successfully"
    }

    /**
     * Resolve the source account of the payment.
     *
     * @param array $charge
     * @param string|null $default
     * @return string|null
     * @author Seven Du <shiweidu@outlook.com>
     */
    protected function resolveChargeAccount($charge, $default = null)
    {
        $channel = Arr::get($charge, 'channel');

        // Alipay channels
        if (in_array($channel, ['alipay', 'alipay_wap', 'alipay_pc_direct', 'alipay_qr'])) {
            return Arr::get($charge, 'extra.buyer_account', $default); // Alipay buyer account

        // WeChat channels
        } elseif (in_array($channel, ['wx', 'wx_pub', 'wx_pub_qr', 'wx_wap', 'wx_lite'])) {
            return Arr::get($charge, 'extra.open_id', $default); // the user's unique open_id
        }

        return $default;
    }
}
slimkit/thinksns-plus
app/Http/Controllers/APIs/V2/PingPlusPlusChargeWebHooks.php
PHP
apache-2.0
3,568
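The controller's core security step is verifying Ping++'s RSA-SHA256 signature over the raw request body. The same check expressed with Java's standard crypto API, as a hedged sketch; parsing the PEM public key into a PublicKey instance is assumed to have happened elsewhere:

import java.nio.charset.StandardCharsets;
import java.security.PublicKey;
import java.security.Signature;
import java.util.Base64;

public class WebhookSignatureCheck {
    // Mirrors openssl_verify($body, base64_decode($signature), $publicKey, OPENSSL_ALGO_SHA256).
    public static boolean verify(String rawBody, String signatureBase64, PublicKey publicKey) throws Exception {
        Signature verifier = Signature.getInstance("SHA256withRSA");
        verifier.initVerify(publicKey);
        verifier.update(rawBody.getBytes(StandardCharsets.UTF_8));
        return verifier.verify(Base64.getDecoder().decode(signatureBase64));
    }
}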
<!DOCTYPE html>
<html>
<head>
    <title>Ferramenta X</title>
</head>
<body>
    <?php require("template.php"); ?>
    <div class="container" align="center">
        <h2>Login</h2>
        <p>Realize o login para acessar a plataforma NERD POWER</p><br>
        <form name="form1" action="proc/proc_login.php" method="POST" id="IdForm1">
            <div class="col-md-4 col-md-offset-4">
                <label for="usr">E-mail:</label>
                <input type="text" class="form-control" id="IdEmail" name="NEmail" required placeholder="Digite seu email..." />
            </div>
            <br><br><br><br>
            <div class="col-md-4 col-md-offset-4">
                <label for="pwd">Senha:</label>
                <input type="password" class="form-control" id="IdSenha" name="NSenha" required placeholder="Sua senha..." />
            </div>
            <br><br><br><br>
            <div class="col-md-4 col-md-offset-4">
                <button type="submit" class="btn btn-primary" name="Enviar">Login</button>
            </div>
        </form>
    </div>
    <br />
</body>
</html>
IsaacR2/FerramentaX
login.php
PHP
apache-2.0
957
// Copyright 2020 the Exposure Notifications Verification Server authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package issueapi

import (
	"context"
	"crypto"
	"fmt"
	"net/http"
	"strings"
	"time"

	"github.com/google/exposure-notifications-server/pkg/logging"
	enobs "github.com/google/exposure-notifications-server/pkg/observability"

	"github.com/google/exposure-notifications-verification-server/pkg/api"
	"github.com/google/exposure-notifications-verification-server/pkg/database"
	"github.com/google/exposure-notifications-verification-server/pkg/signatures"
	"github.com/google/exposure-notifications-verification-server/pkg/sms"
)

// scrubbers is a list of known Twilio error messages that contain the send-to phone number.
var scrubbers = []struct {
	prefix string
	suffix string
}{
	{
		prefix: "phone number: ",
		suffix: ", ",
	},
	{
		prefix: "'To' number ",
		suffix: " is not",
	},
}

// ScrubPhoneNumbers checks for phone numbers in known Twilio error strings that contain
// user phone numbers.
func ScrubPhoneNumbers(s string) string {
	noScrubs := s
	for _, scrub := range scrubbers {
		pi := strings.Index(noScrubs, scrub.prefix)
		si := strings.Index(noScrubs, scrub.suffix)

		// if the prefix is in the string and the suffix is in the string after the prefix
		if pi >= 0 && si > pi+len(scrub.prefix) {
			noScrubs = strings.Join([]string{
				noScrubs[0 : pi+len(scrub.prefix)],
				noScrubs[si:],
			}, "REDACTED")
		}
	}
	return noScrubs
}

// SendSMS sends the SMS message with the given provider and wraps any seen errors into the IssueResult.
func (c *Controller) SendSMS(ctx context.Context, realm *database.Realm, smsProvider sms.Provider, signer crypto.Signer, keyID string, request *api.IssueCodeRequest, result *IssueResult) {
	if request.Phone == "" {
		return
	}

	if err := c.doSend(ctx, realm, smsProvider, signer, keyID, request, result); err != nil {
		result.HTTPCode = http.StatusBadRequest
		if sms.IsSMSQueueFull(err) {
			result.ErrorReturn = api.Errorf("failed to send sms: queue is full: %s", err).WithCode(api.ErrSMSQueueFull)
		} else {
			result.ErrorReturn = api.Errorf("failed to send sms: %s", err).WithCode(api.ErrSMSFailure)
		}
	}
}

// BuildSMS builds and signs (if configured) the SMS message. It returns the
// complete and compiled message.
func (c *Controller) BuildSMS(ctx context.Context, realm *database.Realm, signer crypto.Signer, keyID string, request *api.IssueCodeRequest, vercode *database.VerificationCode) (string, error) {
	now := time.Now()
	logger := logging.FromContext(ctx).Named("issueapi.BuildSMS")

	redirectDomain := c.config.IssueConfig().ENExpressRedirectDomain
	message, err := realm.BuildSMSText(vercode.Code, vercode.LongCode, redirectDomain, request.SMSTemplateLabel)
	if err != nil {
		logger.Errorw("failed to build sms text for realm", "template", request.SMSTemplateLabel, "error", err)
		return "", fmt.Errorf("failed to build sms message: %w", err)
	}

	// A signer will only be provided if the realm has configured and enabled
	// SMS signing.
	if signer == nil {
		return message, nil
	}

	purpose := signatures.SMSPurposeENReport
	if request.TestType == api.TestTypeUserReport {
		purpose = signatures.SMSPurposeUserReport
	}

	message, err = signatures.SignSMS(signer, keyID, now, purpose, request.Phone, message)
	if err != nil {
		logger.Errorw("failed to sign sms", "error", err)
		if c.config.GetAuthenticatedSMSFailClosed() {
			return "", fmt.Errorf("failed to sign sms: %w", err)
		}
	}

	return message, nil
}

func (c *Controller) doSend(ctx context.Context, realm *database.Realm, smsProvider sms.Provider, signer crypto.Signer, keyID string, request *api.IssueCodeRequest, result *IssueResult) error {
	defer enobs.RecordLatency(ctx, time.Now(), mSMSLatencyMs, &result.obsResult)

	logger := logging.FromContext(ctx).Named("issueapi.sendSMS")

	// Build the message
	message, err := c.BuildSMS(ctx, realm, signer, keyID, request, result.VerCode)
	if err != nil {
		logger.Errorw("failed to build sms", "error", err)
		result.obsResult = enobs.ResultError("FAILED_TO_BUILD_SMS")
		return err
	}

	// Send the message
	if err := smsProvider.SendSMS(ctx, request.Phone, message); err != nil {
		// Delete the user report record.
		if result.VerCode.UserReportID != nil {
			// No audit record since this is a recall of an action that can't happen inside the transaction.
			if err := c.db.DeleteUserReport(request.Phone, database.NullActor); err != nil {
				logger.Errorw("failed to delete the user report record", "error", err)
			}
		}

		// Delete the verification code.
		if err := realm.DeleteVerificationCode(c.db, result.VerCode.ID); err != nil {
			logger.Errorw("failed to delete verification code", "error", err)
			// fallthrough to the error
		}

		logger.Infow("failed to send sms", "error", ScrubPhoneNumbers(err.Error()))
		result.obsResult = enobs.ResultError("FAILED_TO_SEND_SMS")
		return err
	}

	return nil
}
google/exposure-notifications-verification-server
pkg/controller/issueapi/send_sms.go
GO
apache-2.0
5,453
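ScrubPhoneNumbers is a generic prefix/suffix splice: everything between a known prefix and a known suffix is replaced with "REDACTED". A hedged Java sketch of the same idea; the sample message and the prefix/suffix pair are illustrative, not Twilio's exact wording:

public class Redactor {
    // Splices "REDACTED" between a known prefix and suffix, as in ScrubPhoneNumbers.
    public static String scrub(String s, String prefix, String suffix) {
        int pi = s.indexOf(prefix);
        int si = s.indexOf(suffix);
        // Redact only when the suffix starts after the prefix ends.
        if (pi >= 0 && si > pi + prefix.length()) {
            return s.substring(0, pi + prefix.length()) + "REDACTED" + s.substring(si);
        }
        return s;
    }

    public static void main(String[] args) {
        System.out.println(scrub("invalid phone number: +15551234567, please retry", "phone number: ", ", "));
        // prints: invalid phone number: REDACTED, please retry
    }
}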
/*!
 * UI development toolkit for HTML5 (OpenUI5)
 * (c) Copyright 2009-2016 SAP SE or an SAP affiliate company.
 * Licensed under the Apache License, Version 2.0 - see LICENSE.txt.
 */

// Provides control sap.m.InputListItem.
sap.ui.define(['jquery.sap.global', './ListItemBase', './library'],
	function(jQuery, ListItemBase, library) {
	"use strict";

	/**
	 * Constructor for a new InputListItem.
	 *
	 * @param {string} [sId] id for the new control, generated automatically if no id is given
	 * @param {object} [mSettings] initial settings for the new control
	 *
	 * @class
	 * List item should be used for a label and an input field.
	 * @extends sap.m.ListItemBase
	 *
	 * @author SAP SE
	 * @version 1.38.7
	 *
	 * @constructor
	 * @public
	 * @alias sap.m.InputListItem
	 * @ui5-metamodel This control/element also will be described in the UI5 (legacy) designtime metamodel
	 */
	var InputListItem = ListItemBase.extend("sap.m.InputListItem", /** @lends sap.m.InputListItem.prototype */ { metadata : {

		library : "sap.m",
		properties : {

			/**
			 * Label of the list item
			 */
			label : {type : "string", group : "Misc", defaultValue : null},

			/**
			 * This property specifies the label text directionality with enumerated options. By default, the label inherits text direction from the DOM.
			 * @since 1.30.0
			 */
			labelTextDirection : {type : "sap.ui.core.TextDirection", group : "Appearance", defaultValue : sap.ui.core.TextDirection.Inherit}
		},
		defaultAggregation : "content",
		aggregations : {

			/**
			 * Content controls can be added
			 */
			content : {type : "sap.ui.core.Control", multiple : true, singularName : "content", bindable : "bindable"}
		},
		designtime : true
	}});

	return InputListItem;

}, /* bExport= */ true);
SuicidePreventionSquad/SeniorProject
resources/sap/m/InputListItem-dbg.js
JavaScript
apache-2.0
1,780
/**
 * Created by Janeluo on 2016/8/13 0013.
 */
package com.janeluo.jfinalplus;
yangyining/JFinal-plus
src/main/java/com/janeluo/jfinalplus/package-info.java
Java
apache-2.0
80
package com.likya.tlossw.utils;

import java.util.ArrayList;
import java.util.Calendar;
import java.util.HashMap;

import org.apache.commons.collections.iterators.ArrayIterator;
import org.apache.log4j.Logger;

import com.likya.tlos.model.xmlbeans.data.JobListDocument.JobList;
import com.likya.tlos.model.xmlbeans.data.JobPropertiesDocument.JobProperties;
import com.likya.tlos.model.xmlbeans.data.ScenarioDocument.Scenario;
import com.likya.tlos.model.xmlbeans.data.TlosProcessDataDocument.TlosProcessData;
import com.likya.tlos.model.xmlbeans.state.LiveStateInfoDocument.LiveStateInfo;
import com.likya.tlos.model.xmlbeans.state.StateNameDocument.StateName;
import com.likya.tlos.model.xmlbeans.state.StatusNameDocument.StatusName;
import com.likya.tlos.model.xmlbeans.state.SubstateNameDocument.SubstateName;
import com.likya.tlossw.TlosSpaceWide;
import com.likya.tlossw.core.cpc.CpcBase;
import com.likya.tlossw.core.cpc.model.SpcInfoType;
import com.likya.tlossw.core.spc.Spc;
import com.likya.tlossw.core.spc.helpers.JobQueueOperations;
import com.likya.tlossw.core.spc.model.JobRuntimeProperties;
import com.likya.tlossw.exceptions.TlosFatalException;
import com.likya.tlossw.model.SpcLookupTable;
import com.likya.tlossw.model.engine.EngineeConstants;
import com.likya.tlossw.model.path.BasePathType;
import com.likya.tlossw.model.path.TlosSWPathType;
import com.likya.tlossw.utils.validation.XMLValidations;

public class CpcUtils {

    public static Scenario getScenario(TlosProcessData tlosProcessData) {
        Scenario scenario = Scenario.Factory.newInstance();
        scenario.setJobList(tlosProcessData.getJobList());
        scenario.setScenarioArray(tlosProcessData.getScenarioArray());
        scenario.setBaseScenarioInfos(tlosProcessData.getBaseScenarioInfos());
        scenario.setDependencyList(tlosProcessData.getDependencyList());
        scenario.setScenarioStatusList(tlosProcessData.getScenarioStatusList());
        scenario.setAlarmPreference(tlosProcessData.getAlarmPreference());
        scenario.setManagement(tlosProcessData.getManagement());
        scenario.setAdvancedScenarioInfos(tlosProcessData.getAdvancedScenarioInfos());
        scenario.setLocalParameters(tlosProcessData.getLocalParameters());
        return scenario;
    }

    public static Scenario getScenario(TlosProcessData tlosProcessData, String runId) {
        Scenario scenario = CpcUtils.getScenario(tlosProcessData);
        scenario.getManagement().getConcurrencyManagement().setRunningId(runId);
        return scenario;
    }

    // public static Scenario getScenarioOrj(TlosProcessData tlosProcessData, String planId) {
    //
    // Scenario scenario = Scenario.Factory.newInstance();
    // scenario.setJobList(tlosProcessData.getJobList());
    //
    // scenario.setScenarioArray(tlosProcessData.getScenarioArray());
    //
    // tlosProcessData.getConcurrencyManagement().setPlanId(planId);
    //
    // scenario.setBaseScenarioInfos(tlosProcessData.getBaseScenarioInfos());
    // scenario.setDependencyList(tlosProcessData.getDependencyList());
    // scenario.setScenarioStatusList(tlosProcessData.getScenarioStatusList());
    // scenario.setAlarmPreference(tlosProcessData.getAlarmPreference());
    // scenario.setTimeManagement(tlosProcessData.getTimeManagement());
    // scenario.setAdvancedScenarioInfos(tlosProcessData.getAdvancedScenarioInfos());
    // scenario.setConcurrencyManagement(tlosProcessData.getConcurrencyManagement());
    // scenario.setLocalParameters(tlosProcessData.getLocalParameters());
    //
    // return scenario;
    // }

    public static Scenario getScenario(Spc spc) {
        Scenario scenario = Scenario.Factory.newInstance();
        scenario.setBaseScenarioInfos(spc.getBaseScenarioInfos());
        scenario.setDependencyList(spc.getDependencyList());
        scenario.setScenarioStatusList(spc.getScenarioStatusList());
        scenario.setAlarmPreference(spc.getAlarmPreference());
        scenario.setManagement(spc.getManagement());
        scenario.setAdvancedScenarioInfos(spc.getAdvancedScenarioInfos());
        scenario.setLocalParameters(spc.getLocalParameters());
        return scenario;
    }

    public static Scenario getScenario(Scenario tmpScenario) {
        Scenario scenario = Scenario.Factory.newInstance();
        scenario.setBaseScenarioInfos(tmpScenario.getBaseScenarioInfos());
        scenario.setDependencyList(tmpScenario.getDependencyList());
        scenario.setScenarioStatusList(tmpScenario.getScenarioStatusList());
        scenario.setAlarmPreference(tmpScenario.getAlarmPreference());
        scenario.setManagement(tmpScenario.getManagement());
        scenario.setAdvancedScenarioInfos(tmpScenario.getAdvancedScenarioInfos());
        scenario.setLocalParameters(tmpScenario.getLocalParameters());
        return scenario;
    }

    public static SpcInfoType getSpcInfo(Spc spc, String userId, String runId, Scenario tmpScenario) {
        LiveStateInfo myLiveStateInfo = LiveStateInfo.Factory.newInstance();
        myLiveStateInfo.setStateName(StateName.PENDING);
        myLiveStateInfo.setSubstateName(SubstateName.IDLED);
        myLiveStateInfo.setStatusName(StatusName.BYTIME);

        spc.setLiveStateInfo(myLiveStateInfo);

        Thread thread = new Thread(spc);
        spc.setExecuterThread(thread);
        spc.setJsName(tmpScenario.getBaseScenarioInfos().getJsName());
        spc.setConcurrent(tmpScenario.getManagement().getConcurrencyManagement().getConcurrent());
        spc.setComment(tmpScenario.getBaseScenarioInfos().getComment());
        spc.setUserId(userId);

        tmpScenario.getManagement().getConcurrencyManagement().setRunningId(runId);

        spc.setBaseScenarioInfos(tmpScenario.getBaseScenarioInfos());
        spc.setDependencyList(tmpScenario.getDependencyList());
        spc.setScenarioStatusList(tmpScenario.getScenarioStatusList());
        spc.setAlarmPreference(tmpScenario.getAlarmPreference());
        spc.setManagement(tmpScenario.getManagement());
        spc.setAdvancedScenarioInfos(tmpScenario.getAdvancedScenarioInfos());
        spc.setLocalParameters(tmpScenario.getLocalParameters());

        SpcInfoType spcInfoType = new SpcInfoType();
        spcInfoType.setJsName(spc.getBaseScenarioInfos().getJsName());
        spcInfoType.setConcurrent(spc.getManagement().getConcurrencyManagement().getConcurrent());
        spcInfoType.setComment(spc.getBaseScenarioInfos().getComment());
        spcInfoType.setUserId(userId);

        Scenario scenario = CpcUtils.getScenario(spc);
        spcInfoType.setScenario(scenario);
        spcInfoType.setSpcReferance(spc);

        return spcInfoType;
    }

    public static SpcInfoType getSpcInfo(String userId, String runId, Scenario tmpScenario) {
        SpcInfoType spcInfoType = new SpcInfoType();
        spcInfoType.setJsName(tmpScenario.getBaseScenarioInfos().getJsName());
        spcInfoType.setConcurrent(tmpScenario.getManagement().getConcurrencyManagement().getConcurrent());
        spcInfoType.setComment(tmpScenario.getBaseScenarioInfos().getComment());
        spcInfoType.setUserId(userId);

        Scenario scenario = CpcUtils.getScenario(tmpScenario);
        spcInfoType.setScenario(scenario);
        spcInfoType.setSpcReferance(null);

        return spcInfoType;
    }

    public static ArrayList<JobRuntimeProperties> transformJobList(JobList jobList, Logger myLogger) {
        myLogger.debug("start:transformJobList");
        ArrayList<JobRuntimeProperties> transformTable = new ArrayList<JobRuntimeProperties>();
        ArrayIterator jobListIterator = new ArrayIterator(jobList.getJobPropertiesArray());
        while (jobListIterator.hasNext()) {
            JobProperties jobProperties = (JobProperties) (jobListIterator.next());
            JobRuntimeProperties jobRuntimeProperties = new JobRuntimeProperties();
            /* add the IDLED state */
            LiveStateInfoUtils.insertNewLiveStateInfo(jobProperties, StateName.INT_PENDING, SubstateName.INT_IDLED, StatusName.INT_BYTIME);
            jobRuntimeProperties.setJobProperties(jobProperties);
            // TODO notify the infoBusInfo manager.
            transformTable.add(jobRuntimeProperties);
        }
        myLogger.debug("end:transformJobList");
        return transformTable;
    }

    public static boolean validateJobList(JobList jobList, Logger myLogger) {
        XMLValidations.validateWithCode(jobList, myLogger);
        return true;
    }

    public static SpcInfoType prepareScenario(String runId, TlosSWPathType tlosSWPathType, Scenario myScenario, Logger myLogger) throws TlosFatalException {
        myLogger.info("");
        myLogger.info(" > Senaryo ismi : " + tlosSWPathType.getFullPath());

        JobList jobList = myScenario.getJobList();

        if (!validateJobList(jobList, myLogger)) {
            // TODO how should this be moved to WAITING?
            myLogger.info(" > is listesi validasyonunda problem oldugundan WAITING e alinarak problemin giderilmesi beklenmektedir.");
            myLogger.error("Cpc Scenario jobs validation failed, process state changed to WAITING !");
            return null;
            // 08.07.2013 Serkan
            // throw new TlosException("Cpc Job List validation failed, process state changed to WAITING !");
        }

        if (jobList.getJobPropertiesArray().length == 0 && myScenario.getScenarioArray().length == 0) {
            myLogger.error(tlosSWPathType.getFullPath() + " isimli senaryo bilgileri yüklenemedi ya da iş listesi bos geldi !");
            myLogger.error(tlosSWPathType.getFullPath() + " isimli senaryo için spc başlatılmıyor !");
            return null;
        }

        SpcInfoType spcInfoType = null;

        // TODO not set yet!
        String userId = null;

        if (jobList.getJobPropertiesArray().length == 0) {
            spcInfoType = CpcUtils.getSpcInfo(userId, runId, myScenario);
            spcInfoType.setSpcId(tlosSWPathType);
        } else {
            Spc spc = new Spc(tlosSWPathType.getRunId(), tlosSWPathType.getAbsolutePath(), TlosSpaceWide.getSpaceWideRegistry(), transformJobList(jobList, myLogger));
            spcInfoType = CpcUtils.getSpcInfo(spc, userId, runId, myScenario);
            spcInfoType.setSpcId(tlosSWPathType);

            if (!TlosSpaceWide.getSpaceWideRegistry().getServerConfig().getServerParams().getIsPersistent().getUse() || !JobQueueOperations.recoverJobQueue(spcInfoType.getSpcReferance().getSpcAbsolutePath(), spc.getJobQueue(), spc.getJobQueueIndex())) {
                if (!spc.initScenarioInfo()) {
                    myLogger.warn(tlosSWPathType.getFullPath() + " isimli senaryo bilgileri yüklenemedi ya da iş listesi boş geldi !");
                    Logger.getLogger(CpcBase.class).warn(" WARNING : " + tlosSWPathType.getFullPath() + " isimli senaryo bilgileri yüklenemedi ya da iş listesi boş geldi !");
                    System.exit(-1);
                }
            }
        }

        return spcInfoType;
    }

    public static String getRunId(TlosProcessData tlosProcessData, boolean isTest, Logger myLogger) {
        String runId = null;
        if (isTest) {
            String userId = "" + tlosProcessData.getBaseScenarioInfos().getUserId();
            if (userId == null || userId.equals("")) {
                userId = "" + Calendar.getInstance().getTimeInMillis();
            }
            myLogger.info(" > InstanceID = " + userId + " olarak belirlenmistir.");
            runId = userId;
        } else {
            runId = tlosProcessData.getRunId();
            if (runId == null) {
                runId = "" + Calendar.getInstance().getTimeInMillis();
            }
            myLogger.info(" > InstanceID = " + runId + " olarak belirlenmiştir.");
        }
        return runId;
    }

    public static void startSpc(TlosSWPathType tlosSWPathType, Logger myLogger) {
        SpcLookupTable spcLookupTable = TlosSpaceWide.getSpaceWideRegistry().getRunLookupTable().get(tlosSWPathType.getRunId()).getSpcLookupTable();
        HashMap<String, SpcInfoType> table = spcLookupTable.getTable();
        SpcInfoType spcInfoType = table.get(tlosSWPathType.getFullPath());
        startSpc(spcInfoType, myLogger);
    }

    public static void startSpc(SpcInfoType spcInfoType, Logger myLogger) {
        /**
         * Has this thread been run before? If not, we can start it!
         **/
        Spc mySpc = spcInfoType.getSpcReferance();
        if (spcInfoType.isVirgin() && !mySpc.getExecuterThread().isAlive()) {
            spcInfoType.setVirgin(false); /* it is now started */
            /** set the statuses **/
            mySpc.getLiveStateInfo().setStateName(StateName.RUNNING);
            mySpc.getLiveStateInfo().setSubstateName(SubstateName.STAGE_IN);
            myLogger.info(" > Senaryo " + mySpc.getSpcFullPath() + " aktive edildi !");
            mySpc.getExecuterThread().setName(mySpc.getCommonName());
            /** this is where the scenario is actually started on its own thread **/
            mySpc.getExecuterThread().start();
            myLogger.info(" > OK");
        }
    }

    public static String getRootScenarioPath(String runId) {
        return getInstancePath(runId) + "." + EngineeConstants.LONELY_JOBS;
    }

    public static String getInstancePath(String runId) {
        return BasePathType.getRootPath() + "." + runId;
    }
}
likyateknoloji/TlosSWGroup
TlosSW_V1.0/src/com/likya/tlossw/utils/CpcUtils.java
Java
apache-2.0
12,211
package org.ovirt.engine.core.common.businessentities;

import java.io.Serializable;
import java.util.ArrayList;
import java.util.List;

import javax.validation.constraints.Max;
import javax.validation.constraints.Min;
import javax.validation.constraints.Size;

import org.ovirt.engine.core.compat.Guid;
import org.ovirt.engine.core.compat.INotifyPropertyChanged;

/**
 * Quota business entity which reflects the <code>Quota</code> limitations for a storage pool. <BR/>
 * The limitations are separated into two different types:
 * <ul>
 * <li>General Limitation - Indicates the general limitation of the quota across the whole storage pool</li>
 * <li>Specific Limitation - Indicates a specific limitation of the quota for a specific storage or vds group</li>
 * </ul>
 * <BR/>
 * The Quota entity encapsulates the specific limitations of the storage pool in lists; general limitations are
 * configured in the field members.<BR/>
 * <BR/>
 * Note that there cannot be a general limitation and a specific limitation on the same resource type.
 */
public class Quota extends IVdcQueryable implements INotifyPropertyChanged, Serializable, QuotaVdsGroupProperties, QuotaStorageProperties {

    /**
     * Automatically generated serial version ID.
     */
    private static final long serialVersionUID = 6637198348072059199L;

    /**
     * The quota id.
     */
    private Guid id = new Guid();

    /**
     * The storage pool id the quota is enforced on.
     */
    private Guid storagePoolId;

    /**
     * The storage pool name the quota is enforced on, for GUI use.
     */
    private transient String storagePoolName;

    /**
     * The quota name.
     */
    @Size(min = 1, max = BusinessEntitiesDefinitions.QUOTA_NAME_SIZE)
    private String quotaName;

    /**
     * The quota description.
     */
    @Size(min = 1, max = BusinessEntitiesDefinitions.QUOTA_DESCRIPTION_SIZE)
    private String description;

    /**
     * The threshold of vds group in percentages.
     */
    @Min(0)
    @Max(100)
    private int thresholdVdsGroupPercentage;

    /**
     * The threshold of storage in percentages.
     */
    @Min(0)
    @Max(100)
    private int thresholdStoragePercentage;

    /**
     * The grace of vds group in percentages.
     */
    @Min(0)
    @Max(100)
    private int graceVdsGroupPercentage;

    /**
     * The grace of storage in percentages.
     */
    @Min(0)
    @Max(100)
    private int graceStoragePercentage;

    /**
     * The global storage limit in gigabytes.
     */
    @Min(-1)
    private Long storageSizeGB;

    /**
     * The global storage usage in gigabytes for the quota.
     */
    private transient Double storageSizeGBUsage;

    /**
     * The global virtual CPU limitations.
     */
    @Min(-1)
    private Integer virtualCpu;

    /**
     * The global virtual CPU usage for the quota.
     */
    private transient Integer virtualCpuUsage;

    /**
     * The global virtual memory limitations for the quota.
     */
    @Min(-1)
    private Long memSizeMB;

    /**
     * The global virtual memory usage for the quota.
     */
    private transient Long memSizeMBUsage;

    /**
     * List of all the specific VdsGroup limitations.
     */
    private List<QuotaVdsGroup> quotaVdsGroupList;

    /**
     * List of all the specific storage limitations.
     */
    private List<QuotaStorage> quotaStorageList;

    /**
     * Default constructor of Quota, which initializes empty lists for specific limitations, and no user assigned.
     */
    public Quota() {
        setQuotaStorages(new ArrayList<QuotaStorage>());
        setQuotaVdsGroups(new ArrayList<QuotaVdsGroup>());
    }

    /**
     * @return the quota id.
     */
    public Guid getId() {
        return id;
    }

    /**
     * @param id
     *            the quota Id to set.
     */
    public void setId(Guid id) {
        this.id = id;
    }

    /**
     * @return the thresholdVdsGroupPercentage
     */
    public int getThresholdVdsGroupPercentage() {
        return thresholdVdsGroupPercentage;
    }

    /**
     * @param thresholdVdsGroupPercentage
     *            the thresholdVdsGroupPercentage to set
     */
    public void setThresholdVdsGroupPercentage(int thresholdVdsGroupPercentage) {
        this.thresholdVdsGroupPercentage = thresholdVdsGroupPercentage;
    }

    /**
     * @return the thresholdStoragePercentage
     */
    public int getThresholdStoragePercentage() {
        return thresholdStoragePercentage;
    }

    /**
     * @param thresholdStoragePercentage
     *            the thresholdStoragePercentage to set
     */
    public void setThresholdStoragePercentage(int thresholdStoragePercentage) {
        this.thresholdStoragePercentage = thresholdStoragePercentage;
    }

    /**
     * @return the graceVdsGroupPercentage
     */
    public int getGraceVdsGroupPercentage() {
        return graceVdsGroupPercentage;
    }

    /**
     * @param graceVdsGroupPercentage
     *            the graceVdsGroupPercentage to set
     */
    public void setGraceVdsGroupPercentage(int graceVdsGroupPercentage) {
        this.graceVdsGroupPercentage = graceVdsGroupPercentage;
    }

    /**
     * @return the graceStoragePercentage
     */
    public int getGraceStoragePercentage() {
        return graceStoragePercentage;
    }

    /**
     * @param graceStoragePercentage
     *            the graceStoragePercentage to set
     */
    public void setGraceStoragePercentage(int graceStoragePercentage) {
        this.graceStoragePercentage = graceStoragePercentage;
    }

    /**
     * @return the description
     */
    public String getDescription() {
        return description;
    }

    /**
     * @param description
     *            the description to set
     */
    public void setDescription(String description) {
        this.description = description;
    }

    /**
     * @return the quotaName
     */
    public String getQuotaName() {
        return quotaName;
    }

    /**
     * @param quotaName
     *            the quotaName to set
     */
    public void setQuotaName(String quotaName) {
        this.quotaName = quotaName;
    }

    /**
     * @return the storagePoolId
     */
    public Guid getStoragePoolId() {
        return storagePoolId;
    }

    /**
     * @param storagePoolId
     *            the storagePoolId to set
     */
    public void setStoragePoolId(Guid storagePoolId) {
        this.storagePoolId = storagePoolId;
    }

    /**
     * @return the storagePoolName
     */
    public String getStoragePoolName() {
        return storagePoolName;
    }

    /**
     * @param storagePoolName
     *            the storagePoolName to set
     */
    public void setStoragePoolName(String storagePoolName) {
        this.storagePoolName = storagePoolName;
    }

    /**
     * @return the quotaStorageList
     */
    public List<QuotaStorage> getQuotaStorages() {
        return quotaStorageList;
    }

    /**
     * @param quotaStorages
     *            the quotaStorages to set
     */
    public void setQuotaStorages(List<QuotaStorage> quotaStorages) {
        this.quotaStorageList = quotaStorages;
    }

    /**
     * @return the quotaVdsGroups
     */
    public List<QuotaVdsGroup> getQuotaVdsGroups() {
        return quotaVdsGroupList;
    }

    /**
     * @param quotaVdsGroups
     *            the quotaVdsGroups to set
     */
    public void setQuotaVdsGroups(List<QuotaVdsGroup> quotaVdsGroups) {
        this.quotaVdsGroupList = quotaVdsGroups;
    }

    /**
     * @return the memSizeMBUsage
     */
    public Long getMemSizeMBUsage() {
        return memSizeMBUsage;
    }

    /**
     * @param memSizeMBUsage
     *            the memSizeMBUsage to set
     */
    public void setMemSizeMBUsage(Long memSizeMBUsage) {
        this.memSizeMBUsage = memSizeMBUsage;
    }

    /**
     * @return the memSizeMB
     */
    public Long getMemSizeMB() {
        return memSizeMB;
    }

    /**
     * @param memSizeMB
     *            the memSizeMB to set
     */
    public void setMemSizeMB(Long memSizeMB) {
        this.memSizeMB = memSizeMB;
    }

    /**
     * @return the virtualCpuUsage
     */
    public Integer getVirtualCpuUsage() {
        return virtualCpuUsage;
    }

    /**
     * @param virtualCpuUsage
     *            the virtualCpuUsage to set
     */
    public void setVirtualCpuUsage(Integer virtualCpuUsage) {
        this.virtualCpuUsage = virtualCpuUsage;
    }

    /**
     * @return the virtualCpu
     */
    public Integer getVirtualCpu() {
        return virtualCpu;
    }

    /**
     * @param virtualCpu
     *            the virtualCpu to set
     */
    public void setVirtualCpu(Integer virtualCpu) {
        this.virtualCpu = virtualCpu;
    }

    /**
     * @return the storageSizeGBUsage
     */
    public Double getStorageSizeGBUsage() {
        return storageSizeGBUsage;
    }

    /**
     * @param storageSizeGBUsage
     *            the storageSizeGBUsage to set
     */
    public void setStorageSizeGBUsage(Double storageSizeGBUsage) {
        this.storageSizeGBUsage = storageSizeGBUsage;
    }

    /**
     * @return the storageSizeGB
     */
    public Long getStorageSizeGB() {
        return storageSizeGB;
    }

    /**
     * @param storageSizeGB
     *            the storageSizeGB to set
     */
    public void setStorageSizeGB(Long storageSizeGB) {
        this.storageSizeGB = storageSizeGB;
    }

    @Override
    public int hashCode() {
        final int prime = 31;
        int result = 1;
        result = prime * result + ((description == null) ? 0 : description.hashCode());
        result = prime * result + graceStoragePercentage;
        result = prime * result + graceVdsGroupPercentage;
        result = prime * result + ((id == null) ? 0 : id.hashCode());
        result = prime * result + ((quotaName == null) ? 0 : quotaName.hashCode());
        result = prime * result + ((quotaStorageList == null) ? 0 : quotaStorageList.hashCode());
        result = prime * result + ((quotaVdsGroupList == null) ? 0 : quotaVdsGroupList.hashCode());
        result = prime * result + ((storageSizeGB == null) ? 0 : storageSizeGB.hashCode());
        result = prime * result + ((storagePoolId == null) ? 0 : storagePoolId.hashCode());
        result = prime * result + thresholdStoragePercentage;
        result = prime * result + thresholdVdsGroupPercentage;
        result = prime * result + ((virtualCpu == null) ? 0 : virtualCpu.hashCode());
        result = prime * result + ((memSizeMB == null) ? 0 : memSizeMB.hashCode());
        return result;
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj)
            return true;
        if (obj == null)
            return false;
        if (getClass() != obj.getClass())
            return false;
        Quota other = (Quota) obj;
        if (description == null) {
            if (other.description != null)
                return false;
        } else if (!description.equals(other.description))
            return false;
        if (graceStoragePercentage != other.graceStoragePercentage)
            return false;
        if (graceVdsGroupPercentage != other.graceVdsGroupPercentage)
            return false;
        if (id == null) {
            if (other.id != null)
                return false;
        } else if (!id.equals(other.id))
            return false;
        if (quotaName == null) {
            if (other.quotaName != null)
                return false;
        } else if (!quotaName.equals(other.quotaName))
            return false;
        if (quotaStorageList == null) {
            if (other.quotaStorageList != null)
                return false;
        } else if (!quotaStorageList.equals(other.quotaStorageList))
            return false;
        if (quotaVdsGroupList == null) {
            if (other.quotaVdsGroupList != null)
                return false;
        } else if (!quotaVdsGroupList.equals(other.quotaVdsGroupList))
            return false;
        if (storageSizeGB == null) {
            if (other.storageSizeGB != null)
                return false;
        } else if (!storageSizeGB.equals(other.storageSizeGB))
            return false;
        if (storagePoolId == null) {
            if (other.storagePoolId != null)
                return false;
        } else if (!storagePoolId.equals(other.storagePoolId))
            return false;
        if (thresholdStoragePercentage != other.thresholdStoragePercentage)
            return false;
        if (thresholdVdsGroupPercentage != other.thresholdVdsGroupPercentage)
            return false;
        if (virtualCpu == null) {
            if (other.virtualCpu != null)
                return false;
        } else if (!virtualCpu.equals(other.virtualCpu))
            return false;
        if (memSizeMB == null) {
            if (other.memSizeMB != null)
                return false;
        } else if (!memSizeMB.equals(other.memSizeMB))
            return false;
        return true;
    }
}
raksha-rao/gluster-ovirt
backend/manager/modules/common/src/main/java/org/ovirt/engine/core/common/businessentities/Quota.java
Java
apache-2.0
12,992
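Quota is a plain bean: general limits live in scalar fields, specific limits in the two lists. A sketch of populating the general limits, assuming the surrounding oVirt types are on the classpath; all values are illustrative (per the @Min(-1) annotations, -1 is the "unlimited" sentinel):

public class QuotaExample {
    static Quota newDevQuota() {
        Quota quota = new Quota(); // starts with empty storage/vds-group lists
        quota.setQuotaName("dev-quota");
        quota.setDescription("General limits for the dev storage pool");
        quota.setStorageSizeGB(500L);
        quota.setVirtualCpu(16);
        quota.setMemSizeMB(32768L);
        quota.setThresholdStoragePercentage(80); // warn at 80% usage
        quota.setGraceStoragePercentage(20);     // allow a 20% overdraft
        return quota;
    }
}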
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.pulsar.broker.resources;

import com.fasterxml.jackson.core.type.TypeReference;
import java.util.Map;
import java.util.Optional;
import lombok.Getter;
import org.apache.pulsar.common.partition.PartitionedTopicMetadata;
import org.apache.pulsar.common.policies.data.NamespaceIsolationData;
import org.apache.pulsar.common.policies.data.Policies;
import org.apache.pulsar.common.policies.impl.NamespaceIsolationPolicies;
import org.apache.pulsar.metadata.api.MetadataStoreException;
import org.apache.pulsar.metadata.api.extended.MetadataStoreExtended;

@Getter
public class NamespaceResources extends BaseResources<Policies> {
    private IsolationPolicyResources isolationPolicies;
    private PartitionedTopicResources partitionedTopicResources;
    private MetadataStoreExtended configurationStore;

    public NamespaceResources(MetadataStoreExtended configurationStore, int operationTimeoutSec) {
        super(configurationStore, Policies.class, operationTimeoutSec);
        this.configurationStore = configurationStore;
        isolationPolicies = new IsolationPolicyResources(configurationStore, operationTimeoutSec);
        partitionedTopicResources = new PartitionedTopicResources(configurationStore, operationTimeoutSec);
    }

    public static class IsolationPolicyResources extends BaseResources<Map<String, NamespaceIsolationData>> {
        public IsolationPolicyResources(MetadataStoreExtended store, int operationTimeoutSec) {
            super(store, new TypeReference<Map<String, NamespaceIsolationData>>() {
            }, operationTimeoutSec);
        }

        public Optional<NamespaceIsolationPolicies> getPolicies(String path) throws MetadataStoreException {
            Optional<Map<String, NamespaceIsolationData>> data = super.get(path);
            return data.isPresent() ? Optional.of(new NamespaceIsolationPolicies(data.get())) : Optional.empty();
        }
    }

    public static class PartitionedTopicResources extends BaseResources<PartitionedTopicMetadata> {
        public PartitionedTopicResources(MetadataStoreExtended configurationStore, int operationTimeoutSec) {
            super(configurationStore, PartitionedTopicMetadata.class, operationTimeoutSec);
        }
    }
}
yahoo/pulsar
pulsar-broker-common/src/main/java/org/apache/pulsar/broker/resources/NamespaceResources.java
Java
apache-2.0
3,049
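Construction is just a store plus a timeout, and the nested resources reuse both. A hedged wiring sketch, assuming Lombok's @Getter generates getIsolationPolicies() as written; the metadata path and the 30-second timeout are illustrative:

import org.apache.pulsar.metadata.api.MetadataStoreException;
import org.apache.pulsar.metadata.api.extended.MetadataStoreExtended;

public class NamespaceResourcesWiring {
    static void printPolicies(MetadataStoreExtended configurationStore) throws MetadataStoreException {
        NamespaceResources resources = new NamespaceResources(configurationStore, 30);
        // getPolicies returns Optional; the path below is illustrative.
        resources.getIsolationPolicies()
                .getPolicies("/admin/clusters/standalone/namespaceIsolationPolicies")
                .ifPresent(policies -> System.out.println("loaded isolation policies"));
    }
}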
/*
 * Created on Nov 29, 2007
 *
 * Copyright (c), 2007 Don Branson. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.moneybender.proxy.channels.decorators;

import com.moneybender.proxy.channels.IReadBytes;

public class ThrottleDecorator extends ReadDelayDecorator {

    private final int microsecondsPerByte;

    public ThrottleDecorator(IReadBytes decoratedWriter, int bandwidthLimit) {
        super(decoratedWriter);
        // bandwidthLimit * 1000 treats the limit as kilobytes per second.
        double bytesPerSecond = bandwidthLimit * 1000;
        this.microsecondsPerByte = (int) ((1 / bytesPerSecond) * 1000 * 1000);
        saveEarliestNextIO(0);
    }

    @Override
    protected void saveEarliestNextIO(int bytesRead) {
        // Push the next allowed I/O out proportionally to the bytes just read.
        setEarliestNextIO(System.currentTimeMillis() + ((bytesRead * microsecondsPerByte) / 1000));
    }
}
DonBranson/DonsProxy
src/main/com/moneybender/proxy/channels/decorators/ThrottleDecorator.java
Java
apache-2.0
1,299
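The throttle math is easiest to see with numbers: a limit of 50 (interpreted as KB/s, given the *1000) is 50,000 bytes/s, i.e. 20 µs per byte, so a 1,500-byte read pushes the next allowed I/O 30 ms into the future. The same arithmetic, spelled out with illustrative values:

public class ThrottleMath {
    public static void main(String[] args) {
        int bandwidthLimit = 50;                       // illustrative limit, treated as KB/s
        double bytesPerSecond = bandwidthLimit * 1000; // 50,000 bytes/s
        int microsecondsPerByte = (int) ((1 / bytesPerSecond) * 1000 * 1000); // 20 us/byte
        int bytesRead = 1500;
        long delayMillis = (bytesRead * microsecondsPerByte) / 1000; // 30 ms until the next I/O
        System.out.println(microsecondsPerByte + " us/byte -> " + delayMillis + " ms delay");
    }
}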
<?php echo e(Form::open(['url' => route('lotes.store'), 'class' => 'form-horizontal', 'role' => "form"])); ?>

<div class="modal-header">
    <button type="button" class="close" data-dismiss="modal" aria-hidden="true">&times;</button>
    <h4 class="modal-title">Criar Lote</h4>
</div>

<div class="modal-body">
    <div class="form-group">
        <label for="inputDescricao" class="col-sm-2 control-label">Nome:</label>
        <div class="col-sm-10">
            <input type="text" name="descricao" id="inputDescricao" class="form-control" value="" required="required" tabindex="1" autofocus>
        </div>
    </div>
    <div class="form-group">
        <label for="inputDataPCP" class="col-sm-2 control-label">PCP:</label>
        <div class="col-sm-4">
            <input type="date" name="dataprev_pcp" id="inputDataPCP" class="form-control" value="<?php echo e(date('Y-m-d')); ?>" tabindex="2" required="required">
        </div>
        <label for="inputDataPintura" class="col-sm-2 control-label">Pintura:</label>
        <div class="col-sm-4">
            <input type="date" name="dataprev_pintura" id="inputDataPintura" class="form-control" value="" tabindex="6">
        </div>
    </div>
    <div class="form-group">
        <label for="inputDataPreparacao" class="col-sm-2 control-label">Preparacao:</label>
        <div class="col-sm-4">
            <input type="date" name="dataprev_preparacao" id="inputDataPreparacao" class="form-control" value="" tabindex="3">
        </div>
        <label for="inputDataExpedicao" class="col-sm-2 control-label">Expedição:</label>
        <div class="col-sm-4">
            <input type="date" name="dataprev_expedicao" id="inputDataExpedicao" class="form-control" value="" tabindex="7">
        </div>
    </div>
    <div class="form-group">
        <label for="inputDataGabarito" class="col-sm-2 control-label">Gabarito:</label>
        <div class="col-sm-4">
            <input type="date" name="dataprev_gabarito" id="inputDataGabarito" class="form-control" value="" tabindex="4">
        </div>
        <label for="inputDataMontagem" class="col-sm-2 control-label">Montagem:</label>
        <div class="col-sm-4">
            <input type="date" name="dataprev_montagem" id="inputDataMontagem" class="form-control" value="" tabindex="8">
        </div>
    </div>
    <div class="form-group">
        <label for="inputDataSolda" class="col-sm-2 control-label">Solda:</label>
        <div class="col-sm-4">
            <input type="date" name="dataprev_solda" id="inputDataSolda" class="form-control" value="" tabindex="5">
        </div>
        <label for="inputDataEntregaFinal" class="col-sm-2 control-label">Entrega Final:</label>
        <div class="col-sm-4">
            <input type="date" name="dataprev_entrega" id="inputDataEntregaFinal" class="form-control" value="" tabindex="9">
        </div>
    </div>

    <!-- HIDDEN IDs -->
    <input type="hidden" name="obra_id" value="<?php echo e($obra_id); ?>">
    <input type="hidden" name="etapa_id" value="<?php echo e($etapa_id); ?>">
    <input type="hidden" name="grouped" value="<?php echo e($grouped); ?>">
    <?php foreach($handles_ids as $handle_id): ?>
        <input type="hidden" name="handles_ids[]" id="inputHandleIds" class="form-control" value="<?php echo e($handle_id); ?>">
    <?php endforeach; ?>
</div>

<div class="modal-footer">
    <button type="button" class="btn btn-default" data-dismiss="modal">Cancelar</button>
    <button type="submit" class="btn btn-primary">Salvar</button>
</div>

<?php echo e(Form::close()); ?>
System3D/gestor-de-lotes
storage/framework/views/96fd683b3799ea6bb61223346be7d37255bde268.php
PHP
apache-2.0
3,354
// Copyright 2010-2014, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package org.mozc.android.inputmethod.japanese.testing;

import org.mozc.android.inputmethod.japanese.protobuf.ProtoCandidates.CandidateWord;
import org.mozc.android.inputmethod.japanese.ui.CandidateLayout;
import org.mozc.android.inputmethod.japanese.ui.CandidateLayout.Row;
import org.mozc.android.inputmethod.japanese.ui.CandidateLayout.Span;

import com.google.common.base.Optional;

import org.easymock.EasyMockSupport;
import org.easymock.IMockBuilder;

import java.util.Collections;
import java.util.List;

/**
 * Methods commonly used for MechaMozc's testing.
 */
public class MozcLayoutUtil {

  // Disallow instantiation.
  private MozcLayoutUtil() {
  }

  public static Row createRow(int top, int height, int width, Span... spans) {
    Row result = new Row();
    result.setTop(top);
    result.setHeight(height);
    result.setWidth(width);
    for (Span span : spans) {
      result.addSpan(span);
    }
    return result;
  }

  public static Span createSpan(int id, int left, int right) {
    Span span = new Span(
        Optional.of(CandidateWord.newBuilder().setId(id).build()), 0, 0,
        Collections.<String>emptyList());
    span.setLeft(left);
    span.setRight(right);
    return span;
  }

  public static Span createEmptySpan() {
    return new Span(
        Optional.of(CandidateWord.getDefaultInstance()), 0, 0, Collections.<String>emptyList());
  }

  public static CandidateLayout createCandidateLayoutMock(EasyMockSupport easyMockSupport) {
    return createCandidateLayoutMockBuilder(easyMockSupport).createMock();
  }

  public static CandidateLayout createNiceCandidateLayoutMock(EasyMockSupport easyMockSupport) {
    return createCandidateLayoutMockBuilder(easyMockSupport).createNiceMock();
  }

  public static IMockBuilder<CandidateLayout> createCandidateLayoutMockBuilder(
      EasyMockSupport easyMockSupport) {
    return easyMockSupport.createMockBuilder(CandidateLayout.class)
        .withConstructor(List.class, float.class, float.class)
        .withArgs(Collections.emptyList(), 0f, 0f);
  }
}
kishikawakatsumi/Mozc-for-iOS
src/android/tests/src/com/google/android/inputmethod/japanese/testing/MozcLayoutUtil.java
Java
apache-2.0
3,609
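createRow and createSpan make hand-building candidate layouts in tests terse. A small sketch using only the helpers above; the candidate ids and pixel geometry are illustrative:

import org.mozc.android.inputmethod.japanese.ui.CandidateLayout.Row;
import org.mozc.android.inputmethod.japanese.ui.CandidateLayout.Span;

public class MozcLayoutUtilDemo {
    static Row twoCandidateRow() {
        // Two spans side by side in a 320px-wide, 48px-high row at the top.
        Span left = MozcLayoutUtil.createSpan(1, 0, 160);
        Span right = MozcLayoutUtil.createSpan(2, 160, 320);
        return MozcLayoutUtil.createRow(0, 48, 320, left, right);
    }
}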
/*
 * Copyright 2021 ThoughtWorks, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.thoughtworks.go.spark;

import org.apache.commons.io.IOUtils;

import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;

import static java.lang.String.valueOf;

public abstract class HtmlErrorPage {
    public static String errorPage(int code, String message) {
        return Holder.INSTANCE.replaceAll(buildRegex("status_code"), valueOf(code))
                .replaceAll(buildRegex("error_message"), message);
    }

    private static String buildRegex(final String value) {
        return "\\{\\{" + value + "\\}\\}";
    }

    private static class Holder {
        private static final String INSTANCE = fileContents();

        private static String fileContents() {
            try (InputStream in = Holder.class.getResourceAsStream("/error.html")) {
                return IOUtils.toString(in, StandardCharsets.UTF_8);
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        }
    }
}
marques-work/gocd
spark/spark-base/src/main/java/com/thoughtworks/go/spark/HtmlErrorPage.java
Java
apache-2.0
1,606
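errorPage substitutes the two {{...}} placeholders in the bundled /error.html, so usage is a single static call; the sketch below assumes that template is on the classpath. One caveat worth knowing: because the message goes through String.replaceAll, a message containing $ or \ would need Matcher.quoteReplacement first (a property of Java's regex replacement syntax, not of this class):

public class HtmlErrorPageDemo {
    public static void main(String[] args) {
        // Renders the packaged template with {{status_code}} and {{error_message}} filled in.
        String html = HtmlErrorPage.errorPage(404, "The requested resource was not found");
        System.out.println(html);
    }
}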
<?php
/**
 * THE CODE IN THIS FILE WAS GENERATED FROM THE EBAY WSDL USING THE PROJECT:
 *
 * https://github.com/davidtsadler/ebay-api-sdk-php
 *
 * Copyright 2014 David T. Sadler
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

namespace DTS\eBaySDK\Trading\Types;

/**
 * @property boolean $BestOfferEnabled
 * @property boolean $AutoPayEnabled
 * @property boolean $B2BVATEnabled
 * @property boolean $CatalogEnabled
 * @property string $CategoryID
 * @property integer $CategoryLevel
 * @property string $CategoryName
 * @property string[] $CategoryParentID
 * @property string[] $CategoryParentName
 * @property boolean $Expired
 * @property boolean $IntlAutosFixedCat
 * @property boolean $LeafCategory
 * @property boolean $Virtual
 * @property boolean $ORPA
 * @property boolean $ORRA
 * @property boolean $LSD
 */
class CategoryType extends \DTS\eBaySDK\Types\BaseType
{
    /**
     * @var array Properties belonging to objects of this class.
     */
    private static $propertyTypes = array(
        'BestOfferEnabled' => array('type' => 'boolean', 'unbound' => false, 'attribute' => false, 'elementName' => 'BestOfferEnabled'),
        'AutoPayEnabled' => array('type' => 'boolean', 'unbound' => false, 'attribute' => false, 'elementName' => 'AutoPayEnabled'),
        'B2BVATEnabled' => array('type' => 'boolean', 'unbound' => false, 'attribute' => false, 'elementName' => 'B2BVATEnabled'),
        'CatalogEnabled' => array('type' => 'boolean', 'unbound' => false, 'attribute' => false, 'elementName' => 'CatalogEnabled'),
        'CategoryID' => array('type' => 'string', 'unbound' => false, 'attribute' => false, 'elementName' => 'CategoryID'),
        'CategoryLevel' => array('type' => 'integer', 'unbound' => false, 'attribute' => false, 'elementName' => 'CategoryLevel'),
        'CategoryName' => array('type' => 'string', 'unbound' => false, 'attribute' => false, 'elementName' => 'CategoryName'),
        'CategoryParentID' => array('type' => 'string', 'unbound' => true, 'attribute' => false, 'elementName' => 'CategoryParentID'),
        'CategoryParentName' => array('type' => 'string', 'unbound' => true, 'attribute' => false, 'elementName' => 'CategoryParentName'),
        'Expired' => array('type' => 'boolean', 'unbound' => false, 'attribute' => false, 'elementName' => 'Expired'),
        'IntlAutosFixedCat' => array('type' => 'boolean', 'unbound' => false, 'attribute' => false, 'elementName' => 'IntlAutosFixedCat'),
        'LeafCategory' => array('type' => 'boolean', 'unbound' => false, 'attribute' => false, 'elementName' => 'LeafCategory'),
        'Virtual' => array('type' => 'boolean', 'unbound' => false, 'attribute' => false, 'elementName' => 'Virtual'),
        'ORPA' => array('type' => 'boolean', 'unbound' => false, 'attribute' => false, 'elementName' => 'ORPA'),
        'ORRA' => array('type' => 'boolean', 'unbound' => false, 'attribute' => false, 'elementName' => 'ORRA'),
        'LSD' => array('type' => 'boolean', 'unbound' => false, 'attribute' => false, 'elementName' => 'LSD')
    );

    /**
     * @param array $values Optional properties and values to assign to the object.
     */
    public function __construct(array $values = array())
    {
        list($parentValues, $childValues) = self::getParentValues(self::$propertyTypes, $values);

        parent::__construct($parentValues);

        if (!array_key_exists(__CLASS__, self::$properties)) {
            self::$properties[__CLASS__] = array_merge(self::$properties[get_parent_class()], self::$propertyTypes);
        }

        if (!array_key_exists(__CLASS__, self::$xmlNamespaces)) {
            self::$xmlNamespaces[__CLASS__] = 'urn:ebay:apis:eBLBaseComponents';
        }

        $this->setValues(__CLASS__, $childValues);
    }
}
emullaraj/ebay-sdk-php
src/DTS/eBaySDK/Trading/Types/CategoryType.php
PHP
apache-2.0
5,200
export interface CommentJson {
  body: string;
  color?: string;
  size?: number;
  duration?: number;
  easing?: string;
}

export interface StampJson {
  path?: string;
  url?: string;
  duration?: number;
  easing?: string;
}

export interface Setting {
  key: string;
  value: string;
}

export interface Stamp {
  id: number;
  label: string;
  path: string;
  contentType: string;
}
chimerast/niconico-speenya
messages/index.ts
TypeScript
apache-2.0
389
/*
 * Copyright 2012 - 2015 Manuel Laggner
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.tinymediamanager.core.movie;

import java.util.ArrayList;
import java.util.List;
import java.util.ListIterator;

import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlElementWrapper;
import javax.xml.bind.annotation.XmlRootElement;

import org.jdesktop.observablecollections.ObservableCollections;
import org.tinymediamanager.core.AbstractModelObject;
import org.tinymediamanager.core.movie.connector.MovieConnectors;
import org.tinymediamanager.scraper.CountryCode;
import org.tinymediamanager.scraper.MediaArtwork.FanartSizes;
import org.tinymediamanager.scraper.MediaArtwork.PosterSizes;
import org.tinymediamanager.scraper.MediaLanguages;

/**
 * The Class MovieSettings.
 */
@XmlRootElement(name = "MovieSettings")
public class MovieSettings extends AbstractModelObject {
  private final static String PATH = "path";
  private final static String FILENAME = "filename";
  private final static String MOVIE_DATA_SOURCE = "movieDataSource";
  private final static String IMAGE_POSTER_SIZE = "imagePosterSize";
  private final static String IMAGE_FANART_SIZE = "imageFanartSize";
  private final static String IMAGE_EXTRATHUMBS = "imageExtraThumbs";
  private final static String IMAGE_EXTRATHUMBS_RESIZE = "imageExtraThumbsResize";
  private final static String IMAGE_EXTRATHUMBS_SIZE = "imageExtraThumbsSize";
  private final static String IMAGE_EXTRATHUMBS_COUNT = "imageExtraThumbsCount";
  private final static String IMAGE_EXTRAFANART = "imageExtraFanart";
  private final static String IMAGE_EXTRAFANART_COUNT = "imageExtraFanartCount";
  private final static String ENABLE_MOVIESET_ARTWORK_MOVIE_FOLDER = "enableMovieSetArtworkMovieFolder";
  private final static String ENABLE_MOVIESET_ARTWORK_FOLDER = "enableMovieSetArtworkFolder";
  private final static String MOVIESET_ARTWORK_FOLDER = "movieSetArtworkFolder";
  private final static String MOVIE_CONNECTOR = "movieConnector";
  private final static String MOVIE_NFO_FILENAME = "movieNfoFilename";
  private final static String MOVIE_POSTER_FILENAME = "moviePosterFilename";
  private final static String MOVIE_FANART_FILENAME = "movieFanartFilename";
  private final static String MOVIE_RENAMER_PATHNAME = "movieRenamerPathname";
  private final static String MOVIE_RENAMER_FILENAME = "movieRenamerFilename";
  private final static String MOVIE_RENAMER_SPACE_SUBSTITUTION = "movieRenamerSpaceSubstitution";
  private final static String MOVIE_RENAMER_SPACE_REPLACEMENT = "movieRenamerSpaceReplacement";
  private final static String MOVIE_RENAMER_NFO_CLEANUP = "movieRenamerNfoCleanup";
  private final static String MOVIE_RENAMER_MOVIESET_SINGLE_MOVIE = "movieRenamerMoviesetSingleMovie";
  private final static String MOVIE_SCRAPER = "movieScraper";
  private final static String SCRAPE_BEST_IMAGE = "scrapeBestImage";
  private final static String IMAGE_SCRAPER_TMDB = "imageScraperTmdb";
  private final static String IMAGE_SCRAPER_FANART_TV = "imageScraperFanartTv";
  private final static String TRAILER_SCRAPER_TMDB = "trailerScraperTmdb";
  private final static String TRAILER_SCRAPER_HD_TRAILERS = "trailerScraperHdTrailers";
  private final static String TRAILER_SCRAPER_OFDB = "trailerScraperOfdb";
  private final static String WRITE_ACTOR_IMAGES = "writeActorImages";
  private final static String IMDB_SCRAPE_FOREIGN_LANGU = "imdbScrapeForeignLanguage";
  private final static String SCRAPER_LANGU = "scraperLanguage";
  private final static String CERTIFICATION_COUNTRY = "certificationCountry";
  private final static String SCRAPER_THRESHOLD = "scraperThreshold";
  private final static String DETECT_MOVIE_MULTI_DIR = "detectMovieMultiDir";
  private final static String BUILD_IMAGE_CACHE_ON_IMPORT = "buildImageCacheOnImport";
  private final static String BAD_WORDS = "badWords";
  private final static String ENTRY = "entry";
  private final static String RUNTIME_FROM_MI = "runtimeFromMediaInfo";
  private final static String ASCII_REPLACEMENT = "asciiReplacement";
  private final static String YEAR_COLUMN_VISIBLE = "yearColumnVisible";
  private final static String NFO_COLUMN_VISIBLE = "nfoColumnVisible";
  private final static String IMAGE_COLUMN_VISIBLE = "imageColumnVisible";
  private final static String TRAILER_COLUMN_VISIBLE = "trailerColumnVisible";
  private final static String SUBTITLE_COLUMN_VISIBLE = "subtitleColumnVisible";
  private final static String WATCHED_COLUMN_VISIBLE = "watchedColumnVisible";
  private final static String SCRAPER_FALLBACK = "scraperFallback";

  @XmlElementWrapper(name = MOVIE_DATA_SOURCE)
  @XmlElement(name = PATH)
  private final List<String> movieDataSources = ObservableCollections.observableList(new ArrayList<String>());

  @XmlElementWrapper(name = MOVIE_NFO_FILENAME)
  @XmlElement(name = FILENAME)
  private final List<MovieNfoNaming> movieNfoFilenames = new ArrayList<MovieNfoNaming>();

  @XmlElementWrapper(name = MOVIE_POSTER_FILENAME)
  @XmlElement(name = FILENAME)
  private final List<MoviePosterNaming> moviePosterFilenames = new ArrayList<MoviePosterNaming>();

  @XmlElementWrapper(name = MOVIE_FANART_FILENAME)
  @XmlElement(name = FILENAME)
  private final List<MovieFanartNaming> movieFanartFilenames = new ArrayList<MovieFanartNaming>();

  @XmlElementWrapper(name = BAD_WORDS)
  @XmlElement(name = ENTRY)
  private final List<String> badWords = ObservableCollections.observableList(new ArrayList<String>());

  private MovieConnectors movieConnector = MovieConnectors.XBMC;
  private String movieRenamerPathname = "$T ($Y)";
  private String movieRenamerFilename = "$T ($Y) $V $A";
  private boolean movieRenamerSpaceSubstitution = false;
  private String movieRenamerSpaceReplacement = "_";
  private boolean movieRenamerNfoCleanup = false;
  private boolean imdbScrapeForeignLanguage = false;
  private MovieScrapers movieScraper = MovieScrapers.TMDB;
  private PosterSizes imagePosterSize = PosterSizes.BIG;
  private boolean imageScraperTmdb = true;
  private boolean imageScraperFanartTv = true;
  private FanartSizes imageFanartSize = FanartSizes.LARGE;
  private boolean imageExtraThumbs = false;
  private boolean imageExtraThumbsResize = true;
  private int imageExtraThumbsSize = 300;
  private int imageExtraThumbsCount = 5;
  private boolean imageExtraFanart = false;
  private int imageExtraFanartCount = 5;
  private boolean enableMovieSetArtworkMovieFolder = true;
  private boolean enableMovieSetArtworkFolder = false;
  private String movieSetArtworkFolder = "MoviesetArtwork";
  private boolean scrapeBestImage = true;
  private boolean imageLanguagePriority = true;
  private boolean imageLogo = false;
  private boolean imageBanner = false;
  private boolean imageClearart = false;
  private boolean imageDiscart = false;
  private boolean imageThumb = false;
  private boolean trailerScraperTmdb = true;
  private boolean trailerScraperHdTrailers = true;
  private boolean trailerScraperOfdb = true;
  private boolean writeActorImages = false;
  private MediaLanguages scraperLanguage = MediaLanguages.en;
  private CountryCode certificationCountry = CountryCode.US;
  private double scraperThreshold = 0.75;
  private boolean detectMovieMultiDir = false;
  private boolean buildImageCacheOnImport = false;
  private boolean movieRenamerCreateMoviesetForSingleMovie = false;
  private boolean runtimeFromMediaInfo = false;
  private boolean asciiReplacement = false;
  private boolean yearColumnVisible = true;
  private boolean ratingColumnVisible = true;
  private boolean nfoColumnVisible = true;
  private boolean imageColumnVisible = true;
  private boolean trailerColumnVisible = true;
  private boolean subtitleColumnVisible = true;
  private boolean watchedColumnVisible = true;
  private boolean scraperFallback = false;
  private boolean useTrailerPreference = false;
  private MovieTrailerQuality trailerQuality = MovieTrailerQuality.HD_720;
  private MovieTrailerSources trailerSource = MovieTrailerSources.YOUTUBE;
  private boolean syncTrakt = false;

  public MovieSettings() {
  }

  public void addMovieDataSources(String path) {
    if (!movieDataSources.contains(path)) {
      movieDataSources.add(path);
      firePropertyChange(MOVIE_DATA_SOURCE, null, movieDataSources);
    }
  }

  public void removeMovieDataSources(String path) {
    MovieList movieList = MovieList.getInstance();
    movieList.removeDatasource(path);
    movieDataSources.remove(path);
    firePropertyChange(MOVIE_DATA_SOURCE, null, movieDataSources);
  }

  public List<String> getMovieDataSource() {
    return movieDataSources;
  }

  public void addMovieNfoFilename(MovieNfoNaming filename) {
    if (!movieNfoFilenames.contains(filename)) {
      movieNfoFilenames.add(filename);
      firePropertyChange(MOVIE_NFO_FILENAME, null, movieNfoFilenames);
    }
  }

  public void removeMovieNfoFilename(MovieNfoNaming filename) {
    if (movieNfoFilenames.contains(filename)) {
      movieNfoFilenames.remove(filename);
      firePropertyChange(MOVIE_NFO_FILENAME, null, movieNfoFilenames);
    }
  }

  public void clearMovieNfoFilenames() {
    movieNfoFilenames.clear();
    firePropertyChange(MOVIE_NFO_FILENAME, null, movieNfoFilenames);
  }

  public List<MovieNfoNaming> getMovieNfoFilenames() {
    return new ArrayList<MovieNfoNaming>(this.movieNfoFilenames);
  }

  public void addMoviePosterFilename(MoviePosterNaming filename) {
    if (!moviePosterFilenames.contains(filename)) {
      moviePosterFilenames.add(filename);
      firePropertyChange(MOVIE_POSTER_FILENAME, null, moviePosterFilenames);
    }
  }

  public void removeMoviePosterFilename(MoviePosterNaming filename) {
    if (moviePosterFilenames.contains(filename)) {
      moviePosterFilenames.remove(filename);
      firePropertyChange(MOVIE_POSTER_FILENAME, null, moviePosterFilenames);
    }
  }

  public void clearMoviePosterFilenames() {
    moviePosterFilenames.clear();
    firePropertyChange(MOVIE_POSTER_FILENAME, null, moviePosterFilenames);
  }

  public List<MoviePosterNaming> getMoviePosterFilenames() {
    return new ArrayList<MoviePosterNaming>(this.moviePosterFilenames);
  }

  public void addMovieFanartFilename(MovieFanartNaming filename) {
    if (!movieFanartFilenames.contains(filename)) {
      movieFanartFilenames.add(filename);
      firePropertyChange(MOVIE_FANART_FILENAME, null, movieFanartFilenames);
    }
  }

  public void removeMovieFanartFilename(MovieFanartNaming filename) {
    if (movieFanartFilenames.contains(filename)) {
      movieFanartFilenames.remove(filename);
      firePropertyChange(MOVIE_FANART_FILENAME, null, movieFanartFilenames);
    }
  }

  public void clearMovieFanartFilenames() {
    movieFanartFilenames.clear();
    firePropertyChange(MOVIE_FANART_FILENAME, null, movieFanartFilenames);
  }

  public List<MovieFanartNaming> getMovieFanartFilenames() {
    return new ArrayList<MovieFanartNaming>(this.movieFanartFilenames);
  }

  @XmlElement(name = IMAGE_POSTER_SIZE)
  public PosterSizes getImagePosterSize() {
    return imagePosterSize;
  }

  public void setImagePosterSize(PosterSizes newValue) {
    PosterSizes oldValue = this.imagePosterSize;
    this.imagePosterSize = newValue;
    firePropertyChange(IMAGE_POSTER_SIZE, oldValue, newValue);
  }

  @XmlElement(name = IMAGE_FANART_SIZE)
  public FanartSizes getImageFanartSize() {
    return imageFanartSize;
  }

  public void setImageFanartSize(FanartSizes newValue) {
    FanartSizes oldValue = this.imageFanartSize;
    this.imageFanartSize = newValue;
    firePropertyChange(IMAGE_FANART_SIZE, oldValue, newValue);
  }

  public boolean isImageExtraThumbs() {
    return imageExtraThumbs;
  }

  public boolean isImageExtraThumbsResize() {
    return imageExtraThumbsResize;
  }

  public int getImageExtraThumbsSize() {
    return imageExtraThumbsSize;
  }

  public void setImageExtraThumbsResize(boolean newValue) {
    boolean oldValue = this.imageExtraThumbsResize;
    this.imageExtraThumbsResize = newValue;
    firePropertyChange(IMAGE_EXTRATHUMBS_RESIZE, oldValue, newValue);
  }

  public void setImageExtraThumbsSize(int newValue) {
    int oldValue = this.imageExtraThumbsSize;
    this.imageExtraThumbsSize = newValue;
    firePropertyChange(IMAGE_EXTRATHUMBS_SIZE, oldValue, newValue);
  }

  public int getImageExtraThumbsCount() {
    return imageExtraThumbsCount;
  }

  public void setImageExtraThumbsCount(int newValue) {
    int oldValue = this.imageExtraThumbsCount;
    this.imageExtraThumbsCount = newValue;
    firePropertyChange(IMAGE_EXTRATHUMBS_COUNT, oldValue, newValue);
  }

  public int getImageExtraFanartCount() {
    return imageExtraFanartCount;
  }

  public void setImageExtraFanartCount(int newValue) {
    int oldValue = this.imageExtraFanartCount;
    this.imageExtraFanartCount = newValue;
    firePropertyChange(IMAGE_EXTRAFANART_COUNT, oldValue, newValue);
  }

  public boolean isImageExtraFanart() {
    return imageExtraFanart;
  }

  public void setImageExtraThumbs(boolean newValue) {
    boolean oldValue = this.imageExtraThumbs;
    this.imageExtraThumbs = newValue;
    firePropertyChange(IMAGE_EXTRATHUMBS, oldValue, newValue);
  }

  public void setImageExtraFanart(boolean newValue) {
    boolean oldValue = this.imageExtraFanart;
    this.imageExtraFanart = newValue;
    firePropertyChange(IMAGE_EXTRAFANART, oldValue, newValue);
  }

  public boolean isEnableMovieSetArtworkMovieFolder() {
    return enableMovieSetArtworkMovieFolder;
  }

  public void setEnableMovieSetArtworkMovieFolder(boolean newValue) {
    boolean oldValue = this.enableMovieSetArtworkMovieFolder;
    this.enableMovieSetArtworkMovieFolder = newValue;
    firePropertyChange(ENABLE_MOVIESET_ARTWORK_MOVIE_FOLDER, oldValue, newValue);
  }

  public boolean isEnableMovieSetArtworkFolder() {
    return enableMovieSetArtworkFolder;
  }

  public void setEnableMovieSetArtworkFolder(boolean newValue) {
    boolean oldValue = this.enableMovieSetArtworkFolder;
    this.enableMovieSetArtworkFolder = newValue;
    firePropertyChange(ENABLE_MOVIESET_ARTWORK_FOLDER, oldValue, newValue);
  }

  public String getMovieSetArtworkFolder() {
    return movieSetArtworkFolder;
  }

  public void setMovieSetArtworkFolder(String newValue) {
    String oldValue = this.movieSetArtworkFolder;
    this.movieSetArtworkFolder = newValue;
    firePropertyChange(MOVIESET_ARTWORK_FOLDER, oldValue, newValue);
  }

  @XmlElement(name = MOVIE_CONNECTOR)
  public MovieConnectors getMovieConnector() {
return movieConnector; } public void setMovieConnector(MovieConnectors newValue) { MovieConnectors oldValue = this.movieConnector; this.movieConnector = newValue; firePropertyChange(MOVIE_CONNECTOR, oldValue, newValue); } @XmlElement(name = MOVIE_RENAMER_PATHNAME) public String getMovieRenamerPathname() { return movieRenamerPathname; } public void setMovieRenamerPathname(String newValue) { String oldValue = this.movieRenamerPathname; this.movieRenamerPathname = newValue; firePropertyChange(MOVIE_RENAMER_PATHNAME, oldValue, newValue); } @XmlElement(name = MOVIE_RENAMER_FILENAME) public String getMovieRenamerFilename() { return movieRenamerFilename; } public void setMovieRenamerFilename(String newValue) { String oldValue = this.movieRenamerFilename; this.movieRenamerFilename = newValue; firePropertyChange(MOVIE_RENAMER_FILENAME, oldValue, newValue); } @XmlElement(name = MOVIE_RENAMER_SPACE_SUBSTITUTION) public boolean isMovieRenamerSpaceSubstitution() { return movieRenamerSpaceSubstitution; } public void setMovieRenamerSpaceSubstitution(boolean movieRenamerSpaceSubstitution) { this.movieRenamerSpaceSubstitution = movieRenamerSpaceSubstitution; } @XmlElement(name = MOVIE_RENAMER_SPACE_REPLACEMENT) public String getMovieRenamerSpaceReplacement() { return movieRenamerSpaceReplacement; } public void setMovieRenamerSpaceReplacement(String movieRenamerSpaceReplacement) { this.movieRenamerSpaceReplacement = movieRenamerSpaceReplacement; } public MovieScrapers getMovieScraper() { if (movieScraper == null) { return MovieScrapers.TMDB; } return movieScraper; } public void setMovieScraper(MovieScrapers newValue) { MovieScrapers oldValue = this.movieScraper; this.movieScraper = newValue; firePropertyChange(MOVIE_SCRAPER, oldValue, newValue); } public boolean isImdbScrapeForeignLanguage() { return imdbScrapeForeignLanguage; } public void setImdbScrapeForeignLanguage(boolean newValue) { boolean oldValue = this.imdbScrapeForeignLanguage; this.imdbScrapeForeignLanguage = newValue; firePropertyChange(IMDB_SCRAPE_FOREIGN_LANGU, oldValue, newValue); } public boolean isImageScraperTmdb() { return imageScraperTmdb; } public boolean isImageScraperFanartTv() { return imageScraperFanartTv; } public void setImageScraperTmdb(boolean newValue) { boolean oldValue = this.imageScraperTmdb; this.imageScraperTmdb = newValue; firePropertyChange(IMAGE_SCRAPER_TMDB, oldValue, newValue); } public void setImageScraperFanartTv(boolean newValue) { boolean oldValue = this.imageScraperFanartTv; this.imageScraperFanartTv = newValue; firePropertyChange(IMAGE_SCRAPER_FANART_TV, oldValue, newValue); } public boolean isScrapeBestImage() { return scrapeBestImage; } public void setScrapeBestImage(boolean newValue) { boolean oldValue = this.scrapeBestImage; this.scrapeBestImage = newValue; firePropertyChange(SCRAPE_BEST_IMAGE, oldValue, newValue); } public boolean isTrailerScraperTmdb() { return trailerScraperTmdb; } public boolean isTrailerScraperHdTrailers() { return trailerScraperHdTrailers; } public void setTrailerScraperTmdb(boolean newValue) { boolean oldValue = this.trailerScraperTmdb; this.trailerScraperTmdb = newValue; firePropertyChange(TRAILER_SCRAPER_TMDB, oldValue, newValue); } public void setTrailerScraperHdTrailers(boolean newValue) { boolean oldValue = this.trailerScraperHdTrailers; this.trailerScraperHdTrailers = newValue; firePropertyChange(TRAILER_SCRAPER_HD_TRAILERS, oldValue, newValue); } public boolean isTrailerScraperOfdb() { return trailerScraperOfdb; } public void setTrailerScraperOfdb(boolean newValue) { boolean 
oldValue = this.trailerScraperOfdb; this.trailerScraperOfdb = newValue; firePropertyChange(TRAILER_SCRAPER_OFDB, oldValue, newValue); } public boolean isWriteActorImages() { return writeActorImages; } public void setWriteActorImages(boolean newValue) { boolean oldValue = this.writeActorImages; this.writeActorImages = newValue; firePropertyChange(WRITE_ACTOR_IMAGES, oldValue, newValue); } @XmlElement(name = SCRAPER_LANGU) public MediaLanguages getScraperLanguage() { return scraperLanguage; } public void setScraperLanguage(MediaLanguages newValue) { MediaLanguages oldValue = this.scraperLanguage; this.scraperLanguage = newValue; firePropertyChange(SCRAPER_LANGU, oldValue, newValue); } @XmlElement(name = CERTIFICATION_COUNTRY) public CountryCode getCertificationCountry() { return certificationCountry; } public void setCertificationCountry(CountryCode newValue) { CountryCode oldValue = this.certificationCountry; certificationCountry = newValue; firePropertyChange(CERTIFICATION_COUNTRY, oldValue, newValue); } @XmlElement(name = SCRAPER_THRESHOLD) public double getScraperThreshold() { return scraperThreshold; } public void setScraperThreshold(double newValue) { double oldValue = this.scraperThreshold; scraperThreshold = newValue; firePropertyChange(SCRAPER_THRESHOLD, oldValue, newValue); } @XmlElement(name = MOVIE_RENAMER_NFO_CLEANUP) public boolean isMovieRenamerNfoCleanup() { return movieRenamerNfoCleanup; } public void setMovieRenamerNfoCleanup(boolean movieRenamerNfoCleanup) { this.movieRenamerNfoCleanup = movieRenamerNfoCleanup; } /** * Should we detect (and create) movies from directories containing more than one movie? * * @return true/false */ public boolean isDetectMovieMultiDir() { return detectMovieMultiDir; } /** * Should we detect (and create) movies from directories containing more than one movie? 
* * @param newValue * true/false */ public void setDetectMovieMultiDir(boolean newValue) { boolean oldValue = this.detectMovieMultiDir; this.detectMovieMultiDir = newValue; firePropertyChange(DETECT_MOVIE_MULTI_DIR, oldValue, newValue); } public boolean isBuildImageCacheOnImport() { return buildImageCacheOnImport; } public void setBuildImageCacheOnImport(boolean newValue) { boolean oldValue = this.buildImageCacheOnImport; this.buildImageCacheOnImport = newValue; firePropertyChange(BUILD_IMAGE_CACHE_ON_IMPORT, oldValue, newValue); } public boolean isMovieRenamerCreateMoviesetForSingleMovie() { return movieRenamerCreateMoviesetForSingleMovie; } public void setMovieRenamerCreateMoviesetForSingleMovie(boolean newValue) { boolean oldValue = this.movieRenamerCreateMoviesetForSingleMovie; this.movieRenamerCreateMoviesetForSingleMovie = newValue; firePropertyChange(MOVIE_RENAMER_MOVIESET_SINGLE_MOVIE, oldValue, newValue); } public boolean isRuntimeFromMediaInfo() { return runtimeFromMediaInfo; } public void setRuntimeFromMediaInfo(boolean newValue) { boolean oldValue = this.runtimeFromMediaInfo; this.runtimeFromMediaInfo = newValue; firePropertyChange(RUNTIME_FROM_MI, oldValue, newValue); } public boolean isAsciiReplacement() { return asciiReplacement; } public void setAsciiReplacement(boolean newValue) { boolean oldValue = this.asciiReplacement; this.asciiReplacement = newValue; firePropertyChange(ASCII_REPLACEMENT, oldValue, newValue); } public void addBadWord(String badWord) { if (!badWords.contains(badWord.toLowerCase())) { badWords.add(badWord.toLowerCase()); firePropertyChange(BAD_WORDS, null, badWords); } } public void removeBadWord(String badWord) { badWords.remove(badWord.toLowerCase()); firePropertyChange(BAD_WORDS, null, badWords); } public List<String> getBadWords() { // convert to lowercase for easy contains checking ListIterator<String> iterator = badWords.listIterator(); while (iterator.hasNext()) { iterator.set(iterator.next().toLowerCase()); } return badWords; } public boolean isYearColumnVisible() { return yearColumnVisible; } public void setYearColumnVisible(boolean newValue) { boolean oldValue = this.yearColumnVisible; this.yearColumnVisible = newValue; firePropertyChange(YEAR_COLUMN_VISIBLE, oldValue, newValue); } public boolean isRatingColumnVisible() { return ratingColumnVisible; } public void setRatingColumnVisible(boolean newValue) { boolean oldValue = this.ratingColumnVisible; this.ratingColumnVisible = newValue; firePropertyChange("ratingColumnVisible", oldValue, newValue); } public boolean isNfoColumnVisible() { return nfoColumnVisible; } public void setNfoColumnVisible(boolean newValue) { boolean oldValue = this.nfoColumnVisible; this.nfoColumnVisible = newValue; firePropertyChange(NFO_COLUMN_VISIBLE, oldValue, newValue); } public boolean isImageColumnVisible() { return imageColumnVisible; } public void setImageColumnVisible(boolean newValue) { boolean oldValue = this.imageColumnVisible; this.imageColumnVisible = newValue; firePropertyChange(IMAGE_COLUMN_VISIBLE, oldValue, newValue); } public boolean isTrailerColumnVisible() { return trailerColumnVisible; } public void setTrailerColumnVisible(boolean newValue) { boolean oldValue = this.trailerColumnVisible; this.trailerColumnVisible = newValue; firePropertyChange(TRAILER_COLUMN_VISIBLE, oldValue, newValue); } public boolean isSubtitleColumnVisible() { return subtitleColumnVisible; } public void setSubtitleColumnVisible(boolean newValue) { boolean oldValue = this.subtitleColumnVisible; this.subtitleColumnVisible = 
newValue; firePropertyChange(SUBTITLE_COLUMN_VISIBLE, oldValue, newValue); } public boolean isWatchedColumnVisible() { return watchedColumnVisible; } public void setWatchedColumnVisible(boolean newValue) { boolean oldValue = this.watchedColumnVisible; this.watchedColumnVisible = newValue; firePropertyChange(WATCHED_COLUMN_VISIBLE, oldValue, newValue); } public boolean isScraperFallback() { return scraperFallback; } public void setScraperFallback(boolean newValue) { boolean oldValue = this.scraperFallback; this.scraperFallback = newValue; firePropertyChange(SCRAPER_FALLBACK, oldValue, newValue); } public boolean isImageLogo() { return imageLogo; } public boolean isImageBanner() { return imageBanner; } public boolean isImageClearart() { return imageClearart; } public boolean isImageDiscart() { return imageDiscart; } public boolean isImageThumb() { return imageThumb; } public void setImageLogo(boolean newValue) { boolean oldValue = this.imageLogo; this.imageLogo = newValue; firePropertyChange("imageLogo", oldValue, newValue); } public void setImageBanner(boolean newValue) { boolean oldValue = this.imageBanner; this.imageBanner = newValue; firePropertyChange("imageBanner", oldValue, newValue); } public void setImageClearart(boolean newValue) { boolean oldValue = this.imageClearart; this.imageClearart = newValue; firePropertyChange("imageClearart", oldValue, newValue); } public void setImageDiscart(boolean newValue) { boolean oldValue = this.imageDiscart; this.imageDiscart = newValue; firePropertyChange("imageDiscart", oldValue, newValue); } public void setImageThumb(boolean newValue) { boolean oldValue = this.imageThumb; this.imageThumb = newValue; firePropertyChange("imageThumb", oldValue, newValue); } public boolean isUseTrailerPreference() { return useTrailerPreference; } public void setUseTrailerPreference(boolean newValue) { boolean oldValue = this.useTrailerPreference; this.useTrailerPreference = newValue; firePropertyChange("useTrailerPreference", oldValue, newValue); } public MovieTrailerQuality getTrailerQuality() { return trailerQuality; } public void setTrailerQuality(MovieTrailerQuality newValue) { MovieTrailerQuality oldValue = this.trailerQuality; this.trailerQuality = newValue; firePropertyChange("trailerQuality", oldValue, newValue); } public MovieTrailerSources getTrailerSource() { return trailerSource; } public void setTrailerSource(MovieTrailerSources newValue) { MovieTrailerSources oldValue = this.trailerSource; this.trailerSource = newValue; firePropertyChange("trailerSource", oldValue, newValue); } public void setSyncTrakt(boolean newValue) { boolean oldValue = this.syncTrakt; this.syncTrakt = newValue; firePropertyChange("syncTrakt", oldValue, newValue); } public boolean getSyncTrakt() { return syncTrakt; } public boolean isImageLanguagePriority() { return imageLanguagePriority; } public void setImageLanguagePriority(boolean newValue) { boolean oldValue = this.imageLanguagePriority; this.imageLanguagePriority = newValue; firePropertyChange("imageLanguagePriority", oldValue, newValue); } }
mlaggner/tinyMediaManager
src/org/tinymediamanager/core/movie/MovieSettings.java
Java
apache-2.0
32,049
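The MovieSettings class above follows the JavaBeans bound-property pattern: every setter fires a PropertyChangeEvent (via firePropertyChange) so that UI bindings built on ObservableCollections can react to changes. Below is a minimal sketch of consuming those events; it assumes MovieSettings inherits an addPropertyChangeListener method from its AbstractModelObject-style superclass (not shown here), and the data-source path is illustrative.

import java.beans.PropertyChangeEvent;
import java.beans.PropertyChangeListener;

// Assumes this sketch lives in the same package as MovieSettings.
public class MovieSettingsObserverSketch {
    public static void main(String[] args) {
        MovieSettings settings = new MovieSettings();
        // Assumed inherited registration method; reacts to any fired property,
        // e.g. the "movieDataSource" events fired by addMovieDataSources above.
        settings.addPropertyChangeListener(new PropertyChangeListener() {
            @Override
            public void propertyChange(PropertyChangeEvent evt) {
                System.out.println(evt.getPropertyName() + " -> " + evt.getNewValue());
            }
        });
        settings.addMovieDataSources("/media/movies"); // fires MOVIE_DATA_SOURCE
    }
}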
/** * @license Apache-2.0 * * Copyright (c) 2018 The Stdlib Authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ 'use strict'; // MODULES // var factory = require( './factory.js' ); // MAIN // /** * Tests whether a collection contains at least `n` elements which pass a test implemented by a predicate function. * * ## Notes * * - If a predicate function calls the provided callback with a truthy error argument, the function suspends execution and immediately calls the `done` callback for subsequent error handling. * - This function does **not** guarantee that execution is asynchronous. To do so, wrap the `done` callback in a function which either executes at the end of the current stack (e.g., `nextTick`) or during a subsequent turn of the event loop (e.g., `setImmediate`, `setTimeout`). * * * @param {Collection} collection - input collection * @param {PositiveInteger} n - number of elements * @param {Options} [options] - function options * @param {*} [options.thisArg] - execution context * @param {PositiveInteger} [options.limit] - maximum number of pending invocations at any one time * @param {boolean} [options.series=false] - boolean indicating whether to wait for a previous invocation to complete before invoking a provided function for the next element in a collection * @param {Function} predicate - predicate function to invoke for each element in a collection * @param {Callback} done - function to invoke upon completion * @throws {TypeError} first argument must be a collection * @throws {TypeError} second argument must be a positive integer * @throws {TypeError} options argument must be an object * @throws {TypeError} must provide valid options * @throws {TypeError} second-to-last argument must be a function * @throws {TypeError} last argument must be a function * @returns {void} * * @example * var readFile = require( '@stdlib/fs/read-file' ); * * function done( error, bool ) { * if ( error ) { * throw error; * } * if ( bool ) { * console.log( 'Successfully read some files.' ); * } else { * console.log( 'Unable to read some files.' ); * } * } * * function predicate( file, next ) { * var opts = { * 'encoding': 'utf8' * }; * readFile( file, opts, onFile ); * * function onFile( error ) { * if ( error ) { * return next( null, false ); * } * next( null, true ); * } * } * * var files = [ * './beep.js', * './boop.js' * ]; * * someByAsync( files, 2, predicate, done ); */ function someByAsync( collection, n, options, predicate, done ) { if ( arguments.length < 5 ) { return factory( options )( collection, n, predicate ); } factory( options, predicate )( collection, n, done ); } // EXPORTS // module.exports = someByAsync;
stdlib-js/stdlib
lib/node_modules/@stdlib/utils/async/some-by/lib/some_by.js
JavaScript
apache-2.0
3,300
/* * Copyright 2014 Josselin Pujo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package fr.assoba.open.sel.jetbrains; import com.intellij.psi.tree.IElementType; import org.jetbrains.annotations.NonNls; import org.jetbrains.annotations.NotNull; public class SelElementType extends IElementType { public SelElementType(@NotNull @NonNls String debugName) { super(debugName, SelLanguage.INSTANCE); } }
neuneu2k/SEL
JetbrainsPlugin/src/fr/assoba/open/sel/jetbrains/SelElementType.java
Java
apache-2.0
923
package com.hxtech.offer.fragments; import android.os.Bundle; import android.view.LayoutInflater; import android.view.View; import android.view.ViewGroup; import com.hxtech.offer.R; public class CanonFragment extends BaseFragment { public static CanonFragment newInstance() { CanonFragment fragment = new CanonFragment(); Bundle args = new Bundle(); fragment.setArguments(args); return fragment; } public CanonFragment() { // Required empty public constructor } @Override public void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); } @Override public View onCreateView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) { return inflater.inflate(R.layout.fragment_canon, container, false); } }
offerHere/offer
app/src/main/java/com/hxtech/offer/fragments/CanonFragment.java
Java
apache-2.0
877
/* Copyright (c) 2015, Google Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <CNIOBoringSSL_ssl.h> #include <assert.h> #include <limits.h> #include <stdlib.h> #include <string.h> #include <CNIOBoringSSL_bio.h> #include <CNIOBoringSSL_err.h> #include <CNIOBoringSSL_mem.h> #include "../crypto/internal.h" #include "internal.h" BSSL_NAMESPACE_BEGIN // BIO uses int instead of size_t. No lengths will exceed uint16_t, so this will // not overflow. static_assert(0xffff <= INT_MAX, "uint16_t does not fit in int"); static_assert((SSL3_ALIGN_PAYLOAD & (SSL3_ALIGN_PAYLOAD - 1)) == 0, "SSL3_ALIGN_PAYLOAD must be a power of 2"); void SSLBuffer::Clear() { if (buf_allocated_) { free(buf_); // Allocated with malloc(). } buf_ = nullptr; buf_allocated_ = false; offset_ = 0; size_ = 0; cap_ = 0; } bool SSLBuffer::EnsureCap(size_t header_len, size_t new_cap) { if (new_cap > 0xffff) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return false; } if (cap_ >= new_cap) { return true; } uint8_t *new_buf; bool new_buf_allocated; size_t new_offset; if (new_cap <= sizeof(inline_buf_)) { // This function is called twice per TLS record, first for the five-byte // header. To avoid allocating twice, use an inline buffer for short inputs. new_buf = inline_buf_; new_buf_allocated = false; new_offset = 0; } else { // Add up to |SSL3_ALIGN_PAYLOAD| - 1 bytes of slack for alignment. // // Since this buffer gets allocated quite frequently and doesn't contain any // sensitive data, we allocate with malloc rather than |OPENSSL_malloc| and // avoid zeroing on free. new_buf = (uint8_t *)malloc(new_cap + SSL3_ALIGN_PAYLOAD - 1); if (new_buf == NULL) { OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); return false; } new_buf_allocated = true; // Offset the buffer such that the record body is aligned. new_offset = (0 - header_len - (uintptr_t)new_buf) & (SSL3_ALIGN_PAYLOAD - 1); } // Note that if both the old and new buffers are inline, the source and destination // may alias. OPENSSL_memmove(new_buf + new_offset, buf_ + offset_, size_); if (buf_allocated_) { free(buf_); // Allocated with malloc(). } buf_ = new_buf; buf_allocated_ = new_buf_allocated; offset_ = new_offset; cap_ = new_cap; return true; } void SSLBuffer::DidWrite(size_t new_size) { if (new_size > cap() - size()) { abort(); } size_ += new_size; } void SSLBuffer::Consume(size_t len) { if (len > size_) { abort(); } offset_ += (uint16_t)len; size_ -= (uint16_t)len; cap_ -= (uint16_t)len; } void SSLBuffer::DiscardConsumed() { if (size_ == 0) { Clear(); } } static int dtls_read_buffer_next_packet(SSL *ssl) { SSLBuffer *buf = &ssl->s3->read_buffer; if (!buf->empty()) { // It is an error to call |dtls_read_buffer_extend| when the read buffer is // not empty. OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return -1; } // Read a single packet from |ssl->rbio|. |buf->cap()| must fit in an int. 
int ret = BIO_read(ssl->rbio.get(), buf->data(), static_cast<int>(buf->cap())); if (ret <= 0) { ssl->s3->rwstate = SSL_ERROR_WANT_READ; return ret; } buf->DidWrite(static_cast<size_t>(ret)); return 1; } static int tls_read_buffer_extend_to(SSL *ssl, size_t len) { SSLBuffer *buf = &ssl->s3->read_buffer; if (len > buf->cap()) { OPENSSL_PUT_ERROR(SSL, SSL_R_BUFFER_TOO_SMALL); return -1; } // Read until the target length is reached. while (buf->size() < len) { // The amount of data to read is bounded by |buf->cap|, which must fit in an // int. int ret = BIO_read(ssl->rbio.get(), buf->data() + buf->size(), static_cast<int>(len - buf->size())); if (ret <= 0) { ssl->s3->rwstate = SSL_ERROR_WANT_READ; return ret; } buf->DidWrite(static_cast<size_t>(ret)); } return 1; } int ssl_read_buffer_extend_to(SSL *ssl, size_t len) { // |ssl_read_buffer_extend_to| implicitly discards any consumed data. ssl->s3->read_buffer.DiscardConsumed(); if (SSL_is_dtls(ssl)) { static_assert( DTLS1_RT_HEADER_LENGTH + SSL3_RT_MAX_ENCRYPTED_LENGTH <= 0xffff, "DTLS read buffer is too large"); // The |len| parameter is ignored in DTLS. len = DTLS1_RT_HEADER_LENGTH + SSL3_RT_MAX_ENCRYPTED_LENGTH; } if (!ssl->s3->read_buffer.EnsureCap(ssl_record_prefix_len(ssl), len)) { return -1; } if (ssl->rbio == nullptr) { OPENSSL_PUT_ERROR(SSL, SSL_R_BIO_NOT_SET); return -1; } int ret; if (SSL_is_dtls(ssl)) { // |len| is ignored for a datagram transport. ret = dtls_read_buffer_next_packet(ssl); } else { ret = tls_read_buffer_extend_to(ssl, len); } if (ret <= 0) { // If the buffer was empty originally and remained empty after attempting to // extend it, release the buffer until the next attempt. ssl->s3->read_buffer.DiscardConsumed(); } return ret; } int ssl_handle_open_record(SSL *ssl, bool *out_retry, ssl_open_record_t ret, size_t consumed, uint8_t alert) { *out_retry = false; if (ret != ssl_open_record_partial) { ssl->s3->read_buffer.Consume(consumed); } if (ret != ssl_open_record_success) { // Nothing was returned to the caller, so discard anything marked consumed. ssl->s3->read_buffer.DiscardConsumed(); } switch (ret) { case ssl_open_record_success: return 1; case ssl_open_record_partial: { int read_ret = ssl_read_buffer_extend_to(ssl, consumed); if (read_ret <= 0) { return read_ret; } *out_retry = true; return 1; } case ssl_open_record_discard: *out_retry = true; return 1; case ssl_open_record_close_notify: return 0; case ssl_open_record_error: if (alert != 0) { ssl_send_alert(ssl, SSL3_AL_FATAL, alert); } return -1; } assert(0); return -1; } static_assert(SSL3_RT_HEADER_LENGTH * 2 + SSL3_RT_SEND_MAX_ENCRYPTED_OVERHEAD * 2 + SSL3_RT_MAX_PLAIN_LENGTH <= 0xffff, "maximum TLS write buffer is too large"); static_assert(DTLS1_RT_HEADER_LENGTH + SSL3_RT_SEND_MAX_ENCRYPTED_OVERHEAD + SSL3_RT_MAX_PLAIN_LENGTH <= 0xffff, "maximum DTLS write buffer is too large"); static int tls_write_buffer_flush(SSL *ssl) { SSLBuffer *buf = &ssl->s3->write_buffer; while (!buf->empty()) { int ret = BIO_write(ssl->wbio.get(), buf->data(), buf->size()); if (ret <= 0) { ssl->s3->rwstate = SSL_ERROR_WANT_WRITE; return ret; } buf->Consume(static_cast<size_t>(ret)); } buf->Clear(); return 1; } static int dtls_write_buffer_flush(SSL *ssl) { SSLBuffer *buf = &ssl->s3->write_buffer; if (buf->empty()) { return 1; } int ret = BIO_write(ssl->wbio.get(), buf->data(), buf->size()); if (ret <= 0) { ssl->s3->rwstate = SSL_ERROR_WANT_WRITE; // If the write failed, drop the write buffer anyway. 
Datagram transports // can't write half a packet, so the caller is expected to retry from the // top. buf->Clear(); return ret; } buf->Clear(); return 1; } int ssl_write_buffer_flush(SSL *ssl) { if (ssl->wbio == nullptr) { OPENSSL_PUT_ERROR(SSL, SSL_R_BIO_NOT_SET); return -1; } if (SSL_is_dtls(ssl)) { return dtls_write_buffer_flush(ssl); } else { return tls_write_buffer_flush(ssl); } } BSSL_NAMESPACE_END
apple/swift-nio-ssl
Sources/CNIOBoringSSL/ssl/ssl_buffer.cc
C++
apache-2.0
8,307
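One non-obvious detail in SSLBuffer::EnsureCap above is the alignment trick: the buffer is over-allocated by SSL3_ALIGN_PAYLOAD - 1 bytes of slack, and the offset is chosen as (0 - header_len - (uintptr_t)new_buf) & (SSL3_ALIGN_PAYLOAD - 1), so that the record body (the bytes after the header) lands on an aligned address. Here is a small self-contained Java sketch of that modular arithmetic, with addresses modeled as plain longs and ALIGN standing in for SSL3_ALIGN_PAYLOAD (a power of two, per the static_assert):

public class AlignOffsetSketch {
    static final long ALIGN = 8; // stand-in for SSL3_ALIGN_PAYLOAD; must be a power of two

    static long alignedOffset(long bufAddress, long headerLen) {
        // Same expression as the C++ code: (0 - header_len - bufAddress) & (ALIGN - 1).
        // Two's-complement wraparound makes this the smallest offset such that
        // bufAddress + offset + headerLen is a multiple of ALIGN.
        return (-headerLen - bufAddress) & (ALIGN - 1);
    }

    public static void main(String[] args) {
        long addr = 1001;   // hypothetical malloc result
        long header = 5;    // TLS record header length
        long off = alignedOffset(addr, header); // here: 2
        // The record body starts at addr + off + header, which is ALIGN-aligned,
        // and off never exceeds ALIGN - 1 (the slack malloc'd above).
        System.out.println((addr + off + header) % ALIGN); // prints 0
    }
}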
package de.kevinschie.SimulatorListener; import java.io.BufferedReader; import java.io.InputStreamReader; import java.io.ObjectInputStream; import java.io.PrintWriter; import java.net.ServerSocket; import java.net.Socket; import java.net.URL; import java.net.URLConnection; public class SimulationListener { ObjectInputStream ois; Socket socket; public void listenToSimulator() { try{ new Thread("Device Listener") { public void run() { try ( Socket echoSocket = new Socket("0.0.0.0", 50001); BufferedReader in = new BufferedReader( new InputStreamReader(echoSocket.getInputStream())); ) { System.out.println("TEST"); String line; while ((line = in.readLine()) != null) { System.out.println("echo: " + line); } } catch(Exception ex) { ex.printStackTrace(); } }; }.start(); } catch (Exception ex) { ex.printStackTrace(); } /*try{ System.out.println("Listener läuft........."); URL oracle = new URL("http://0.0.0.0:50001/"); URLConnection yc = oracle.openConnection(); BufferedReader in = new BufferedReader(new InputStreamReader( yc.getInputStream())); String inputLine; while ((inputLine = in.readLine()) != null) System.out.println(inputLine); in.close(); } catch(Exception ex) { ex.printStackTrace(); }*/ /*try { final ServerSocket serverSocket = new ServerSocket(50001); new Thread("Device Listener") { public void run() { try ( ServerSocket serverSocket = new ServerSocket(50001); Socket clientSocket = serverSocket.accept(); PrintWriter out = new PrintWriter(clientSocket.getOutputStream(), true); BufferedReader in = new BufferedReader( new InputStreamReader(clientSocket.getInputStream())); ) { System.out.println(in.readLine()); } catch (Exception e) { e.printStackTrace(); } }; }.start(); } catch (Exception ex) { ex.printStackTrace(); }*/ } }
kevinschie/carGateway
src/de/kevinschie/SimulatorListener/SimulationListener.java
Java
apache-2.0
2,431
/* ### * IP: GHIDRA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ghidra.feature.vt.api.db; import static ghidra.feature.vt.api.db.VTMatchMarkupItemTableDBAdapter.MarkupTableDescriptor.ADDRESS_SOURCE_COL; import static ghidra.feature.vt.api.db.VTMatchMarkupItemTableDBAdapter.MarkupTableDescriptor.ASSOCIATION_KEY_COL; import static ghidra.feature.vt.api.db.VTMatchMarkupItemTableDBAdapter.MarkupTableDescriptor.DESTINATION_ADDRESS_COL; import static ghidra.feature.vt.api.db.VTMatchMarkupItemTableDBAdapter.MarkupTableDescriptor.MARKUP_TYPE_COL; import static ghidra.feature.vt.api.db.VTMatchMarkupItemTableDBAdapter.MarkupTableDescriptor.ORIGINAL_DESTINATION_VALUE_COL; import static ghidra.feature.vt.api.db.VTMatchMarkupItemTableDBAdapter.MarkupTableDescriptor.SOURCE_ADDRESS_COL; import static ghidra.feature.vt.api.db.VTMatchMarkupItemTableDBAdapter.MarkupTableDescriptor.SOURCE_VALUE_COL; import static ghidra.feature.vt.api.db.VTMatchMarkupItemTableDBAdapter.MarkupTableDescriptor.STATUS_COL; import ghidra.feature.vt.api.impl.MarkupItemStorage; import ghidra.feature.vt.api.main.VTSession; import ghidra.feature.vt.api.markuptype.VTMarkupTypeFactory; import ghidra.feature.vt.api.util.Stringable; import ghidra.program.database.map.AddressMap; import ghidra.program.model.address.Address; import ghidra.program.model.listing.Program; import ghidra.util.exception.VersionException; import ghidra.util.task.TaskMonitor; import java.io.IOException; import db.*; public class VTMatchMarkupItemTableDBAdapterV0 extends VTMatchMarkupItemTableDBAdapter { private Table table; public VTMatchMarkupItemTableDBAdapterV0(DBHandle dbHandle) throws IOException { table = dbHandle.createTable(TABLE_NAME, TABLE_SCHEMA, INDEXED_COLUMNS); } public VTMatchMarkupItemTableDBAdapterV0(DBHandle dbHandle, OpenMode openMode, TaskMonitor monitor) throws VersionException { table = dbHandle.getTable(TABLE_NAME); if (table == null) { throw new VersionException("Missing Table: " + TABLE_NAME); } else if (table.getSchema().getVersion() != 0) { throw new VersionException("Expected version 0 for table " + TABLE_NAME + " but got " + table.getSchema().getVersion()); } } @Override public DBRecord createMarkupItemRecord(MarkupItemStorage markupItem) throws IOException { DBRecord record = TABLE_SCHEMA.createRecord(table.getKey()); VTAssociationDB association = (VTAssociationDB) markupItem.getAssociation(); VTSession manager = association.getSession(); Program sourceProgram = manager.getSourceProgram(); Program destinationProgram = manager.getDestinationProgram(); record.setLongValue(ASSOCIATION_KEY_COL.column(), association.getKey()); record.setString(ADDRESS_SOURCE_COL.column(), markupItem.getDestinationAddressSource()); record.setLongValue(SOURCE_ADDRESS_COL.column(), getAddressID(sourceProgram, markupItem.getSourceAddress())); Address destinationAddress = markupItem.getDestinationAddress(); if (destinationAddress != null) { record.setLongValue(DESTINATION_ADDRESS_COL.column(), getAddressID(destinationProgram, 
markupItem.getDestinationAddress())); } record.setShortValue(MARKUP_TYPE_COL.column(), (short) VTMarkupTypeFactory.getID(markupItem.getMarkupType())); record.setString(SOURCE_VALUE_COL.column(), Stringable.getString( markupItem.getSourceValue(), sourceProgram)); record.setString(ORIGINAL_DESTINATION_VALUE_COL.column(), Stringable.getString( markupItem.getDestinationValue(), destinationProgram)); record.setByteValue(STATUS_COL.column(), (byte) markupItem.getStatus().ordinal()); table.putRecord(record); return record; } private long getAddressID(Program program, Address address) { AddressMap addressMap = program.getAddressMap(); return addressMap.getKey(address, false); } @Override public void removeMatchMarkupItemRecord(long key) throws IOException { table.deleteRecord(key); } @Override public RecordIterator getRecords() throws IOException { return table.iterator(); } @Override public RecordIterator getRecords(long associationKey) throws IOException { LongField longField = new LongField(associationKey); return table.indexIterator(ASSOCIATION_KEY_COL.column(), longField, longField, true); } @Override public DBRecord getRecord(long key) throws IOException { return table.getRecord(key); } @Override void updateRecord(DBRecord record) throws IOException { table.putRecord(record); } @Override public int getRecordCount() { return table.getRecordCount(); } }
NationalSecurityAgency/ghidra
Ghidra/Features/VersionTracking/src/main/java/ghidra/feature/vt/api/db/VTMatchMarkupItemTableDBAdapterV0.java
Java
apache-2.0
5,056
package liquibase.datatype.core; import liquibase.database.Database; import liquibase.database.core.DB2Database; import liquibase.database.core.DerbyDatabase; import liquibase.database.core.FirebirdDatabase; import liquibase.database.core.HsqlDatabase; import liquibase.database.core.InformixDatabase; import liquibase.database.core.MSSQLDatabase; import liquibase.database.core.MySQLDatabase; import liquibase.database.core.OracleDatabase; import liquibase.database.core.SQLiteDatabase; import liquibase.database.core.SybaseASADatabase; import liquibase.database.core.SybaseDatabase; import liquibase.datatype.DataTypeInfo; import liquibase.datatype.DatabaseDataType; import liquibase.datatype.LiquibaseDataType; import liquibase.exception.UnexpectedLiquibaseException; import liquibase.statement.DatabaseFunction; @DataTypeInfo(name = "boolean", aliases = {"java.sql.Types.BOOLEAN", "java.lang.Boolean", "bit"}, minParameters = 0, maxParameters = 0, priority = LiquibaseDataType.PRIORITY_DEFAULT) public class BooleanType extends LiquibaseDataType { @Override public DatabaseDataType toDatabaseDataType(Database database) { if (database instanceof DB2Database || database instanceof FirebirdDatabase) { return new DatabaseDataType("SMALLINT"); } else if (database instanceof MSSQLDatabase) { return new DatabaseDataType("BIT"); } else if (database instanceof MySQLDatabase) { if (getRawDefinition().toLowerCase().startsWith("bit")) { return new DatabaseDataType("BIT", getParameters()); } return new DatabaseDataType("BIT", 1); } else if (database instanceof OracleDatabase) { return new DatabaseDataType("NUMBER", 1); } else if (database instanceof SybaseASADatabase || database instanceof SybaseDatabase) { return new DatabaseDataType("BIT"); } else if (database instanceof DerbyDatabase) { if (((DerbyDatabase) database).supportsBooleanDataType()) { return new DatabaseDataType("BOOLEAN"); } else { return new DatabaseDataType("SMALLINT"); } } else if (database instanceof HsqlDatabase) { return new DatabaseDataType("BOOLEAN"); } return super.toDatabaseDataType(database); } @Override public String objectToSql(Object value, Database database) { if (value == null || value.toString().equalsIgnoreCase("null")) { return null; } String returnValue; if (value instanceof String) { if (((String) value).equalsIgnoreCase("true") || value.equals("1") || value.equals("t") || ((String) value).equalsIgnoreCase(this.getTrueBooleanValue(database))) { returnValue = this.getTrueBooleanValue(database); } else if (((String) value).equalsIgnoreCase("false") || value.equals("0") || value.equals("f") || ((String) value).equalsIgnoreCase(this.getFalseBooleanValue(database))) { returnValue = this.getFalseBooleanValue(database); } else { throw new UnexpectedLiquibaseException("Unknown boolean value: " + value); } } else if (value instanceof Long) { if (Long.valueOf(1).equals(value)) { returnValue = this.getTrueBooleanValue(database); } else { returnValue = this.getFalseBooleanValue(database); } } else if (value instanceof Number) { if (value.equals(1)) { returnValue = this.getTrueBooleanValue(database); } else { returnValue = this.getFalseBooleanValue(database); } } else if (value instanceof DatabaseFunction) { return value.toString(); } else if (value instanceof Boolean) { if (((Boolean) value)) { returnValue = this.getTrueBooleanValue(database); } else { returnValue = this.getFalseBooleanValue(database); } } else { throw new UnexpectedLiquibaseException("Cannot convert type "+value.getClass()+" to a boolean value"); } return returnValue; } 
protected boolean isNumericBoolean(Database database) { if (database instanceof DerbyDatabase) { return !((DerbyDatabase) database).supportsBooleanDataType(); } return database instanceof DB2Database || database instanceof FirebirdDatabase || database instanceof MSSQLDatabase || database instanceof MySQLDatabase || database instanceof OracleDatabase || database instanceof SQLiteDatabase || database instanceof SybaseASADatabase || database instanceof SybaseDatabase; } /** * The database-specific value to use for "false" "boolean" columns. */ public String getFalseBooleanValue(Database database) { if (isNumericBoolean(database)) { return "0"; } if (database instanceof InformixDatabase) { return "'f'"; } return "FALSE"; } /** * The database-specific value to use for "true" "boolean" columns. */ public String getTrueBooleanValue(Database database) { if (isNumericBoolean(database)) { return "1"; } if (database instanceof InformixDatabase) { return "'t'"; } return "TRUE"; } // sqlite // } else if (columnTypeString.toLowerCase(Locale.ENGLISH).contains("boolean") || // columnTypeString.toLowerCase(Locale.ENGLISH).contains("binary")) { // type = new BooleanType("BOOLEAN"); }
adriens/liquibase
liquibase-core/src/main/java/liquibase/datatype/core/BooleanType.java
Java
apache-2.0
5,817
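The objectToSql logic above normalizes many input shapes ("true"/"1"/"t", Long, Number, Boolean, DatabaseFunction) to a dialect-specific literal, and getTrueBooleanValue/getFalseBooleanValue pick "1"/"0" for numeric-boolean databases, 't'/'f' for Informix, and TRUE/FALSE otherwise. A hypothetical usage sketch follows; the no-arg database constructors are assumed from Liquibase's core package:

import liquibase.database.core.InformixDatabase;
import liquibase.database.core.MySQLDatabase;
import liquibase.datatype.core.BooleanType;

public class BooleanTypeSketch {
    public static void main(String[] args) {
        BooleanType type = new BooleanType();
        // MySQL is numeric-boolean per isNumericBoolean above, so "true" becomes "1"
        System.out.println(type.objectToSql("true", new MySQLDatabase()));
        // Informix uses quoted characters, so Boolean.TRUE becomes "'t'"
        System.out.println(type.objectToSql(Boolean.TRUE, new InformixDatabase()));
    }
}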
# Copyright 2021 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utilities for generating BigQuery data querying scripts.""" from google.cloud import aiplatform as vertex_ai def _get_source_query(bq_dataset_name, bq_table_name, ml_use, limit=None): query = f""" SELECT IF(trip_month IS NULL, -1, trip_month) trip_month, IF(trip_day IS NULL, -1, trip_day) trip_day, IF(trip_day_of_week IS NULL, -1, trip_day_of_week) trip_day_of_week, IF(trip_hour IS NULL, -1, trip_hour) trip_hour, IF(trip_seconds IS NULL, -1, trip_seconds) trip_seconds, IF(trip_miles IS NULL, -1, trip_miles) trip_miles, IF(payment_type IS NULL, 'NA', payment_type) payment_type, IF(pickup_grid IS NULL, 'NA', pickup_grid) pickup_grid, IF(dropoff_grid IS NULL, 'NA', dropoff_grid) dropoff_grid, IF(euclidean IS NULL, -1, euclidean) euclidean, IF(loc_cross IS NULL, 'NA', loc_cross) loc_cross""" if ml_use: query += f""", tip_bin FROM {bq_dataset_name}.{bq_table_name} WHERE ML_use = '{ml_use}' """ else: query += f""" FROM {bq_dataset_name}.{bq_table_name} """ if limit: query += f"LIMIT {limit}" return query def get_training_source_query( project, region, dataset_display_name, ml_use, limit=None ): vertex_ai.init(project=project, location=region) dataset = vertex_ai.TabularDataset.list( filter=f"display_name={dataset_display_name}", order_by="update_time" )[-1] bq_source_uri = dataset.gca_resource.metadata["inputConfig"]["bigquerySource"][ "uri" ] _, bq_dataset_name, bq_table_name = bq_source_uri.replace("bq://", "").split(".") return _get_source_query(bq_dataset_name, bq_table_name, ml_use, limit) def get_serving_source_query(bq_dataset_name, bq_table_name, limit=None): return _get_source_query(bq_dataset_name, bq_table_name, ml_use=None, limit=limit)
GoogleCloudPlatform/mlops-with-vertex-ai
src/common/datasource_utils.py
Python
apache-2.0
2,483
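The _get_source_query helper above builds a SELECT in which every nullable numeric column falls back to -1 and every nullable string column to 'NA', with optional ML_use filtering and a LIMIT cap. The following rough sketch mirrors that construction in Java (kept in Java for consistency with the other examples here); the table and column names are illustrative:

import java.util.LinkedHashMap;
import java.util.Map;

public class SourceQuerySketch {
    // Build "SELECT IF(col IS NULL, default, col) col, ... FROM table [WHERE ...] [LIMIT n]"
    static String buildQuery(String table, Map<String, String> defaults, String mlUse, Integer limit) {
        StringBuilder q = new StringBuilder("SELECT ");
        int i = 0;
        for (Map.Entry<String, String> e : defaults.entrySet()) {
            if (i++ > 0) q.append(", ");
            q.append("IF(").append(e.getKey()).append(" IS NULL, ")
             .append(e.getValue()).append(", ").append(e.getKey()).append(") ").append(e.getKey());
        }
        q.append(" FROM ").append(table);
        if (mlUse != null) q.append(" WHERE ML_use = '").append(mlUse).append("'");
        if (limit != null) q.append(" LIMIT ").append(limit);
        return q.toString();
    }

    public static void main(String[] args) {
        Map<String, String> defaults = new LinkedHashMap<>();
        defaults.put("trip_miles", "-1");     // numeric fallback
        defaults.put("payment_type", "'NA'"); // string fallback
        System.out.println(buildQuery("taxi.training_data", defaults, "TRAIN", 100));
    }
}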
<?php if(isset($_SESSION["bird"])) { $bird_info = $_SESSION["bird"]; $observers_list = $db->get_observers($bird_info["id_kentish_plover"]); $observers_array = array(); while($observers = $observers_list->fetch()) { array_push($observers_array, array( "date" => $observers["date"], "town" => $observers["town"], "name" => $observers["last_name"] . ' ' . $observers["first_name"] )); } $to_replace = array("\"", "'"); $replace_by = array("£dqot;", "£sqot;"); $bird_info_json = str_replace($to_replace, $replace_by, json_encode($bird_info)); $observers_list_json = str_replace($to_replace, $replace_by, json_encode($observers_array)); ?> <h2>Résultat de la requête</h2> <form method="post" action="core/pdf_creator.php"> <input type="hidden" name="bird_infos" value='<?php echo $bird_info_json ?>'> <input type="hidden" name="observers_list" value='<?php echo $observers_list_json ?>'> <button type="submit" class="btn btn-warning">Obtenir une version PDF</button> </form> <div class="row"> <div class="col-sm-4 padding-content"> <img src="statics/pictures/gonm_logo.jpg" class="img-responsive"> </div> <div class="col-sm-4"> <h2> Historique des observations d'un Gravelot à Collier interrompu bagué couleur <i>Charadrius alexandrinus</i> </h2> </div> <div class="col-sm-4 padding-content"> <img src="statics/pictures/plover_2.jpg" class="img-responsive"> </div> </div> <div class="row padding-content"> <div class="col-sm-5"> <div class="col-sm-12"> <table class="table"> <tbody> <tr> <td class="strong">Bague acier</td> <td><?php echo $bird_info["metal_ring"]; ?></td> </tr> <tr> <td class="strong">Numéro de la bague</td> <td><?php echo $bird_info["number"]; ?></td> </tr> <tr> <td class="strong">Couleur de la bague</td> <td><?php echo $bird_info["color"]; ?></td> </tr> <tr> <td class="strong">Date du baguage</td> <td><?php echo $bird_info["date"]; ?></td> </tr> <tr> <td class="strong">Age</td> <td><?php echo $bird_info["age"]; ?></td> </tr> <tr> <td class="strong">Sexe</td> <td><?php echo $bird_info["sex"]; ?></td> </tr> <tr> <td class="strong">Lieu de baguage</td> <td><?php echo $bird_info["town"]; ?></td> </tr> <tr> <td class="strong">Bagueur</td> <td><?php echo $bird_info["observer"]; ?></td> </tr> </tbody> </table> </div> <div class="col-sm-12"> <img src="statics/pictures/mnhn.jpg" class="img-responsive"> </div> <div class="col-sm-12"> <img src="statics/pictures/logo_warning.jpg" class="img-responsive"> </div> </div> <div class="col-sm-7"> <table class="table table-striped"> <thead> <tr> <th>Date</th> <th>Lieu d'observation</th> <th>Observateur</th> </tr> </thead> <tbody> <?php $observers_list = $db->get_observers($bird_info["id_kentish_plover"]); while($observers = $observers_list->fetch()) { ?> <tr> <td> <?php $mysql_date = strtotime($observers["date"]); echo date('d-m-Y', $mysql_date); ?> </td> <td><?php echo $observers["town"]; ?></td> <td><?php echo $observers["last_name"] . " " . $observers["first_name"]; ?></td> </tr> <?php } ?> </tbody> </table> </div> </div> <?php unset($_SESSION["bird"]); } else { header("Location: index.php?url=form"); } ?>
Carmain/Banding-tracking
content/obs_sheet.php
PHP
apache-2.0
3,621
"""Insteon base entity.""" import functools import logging from pyinsteon import devices from homeassistant.core import callback from homeassistant.helpers.dispatcher import ( async_dispatcher_connect, async_dispatcher_send, ) from homeassistant.helpers.entity import DeviceInfo, Entity from .const import ( DOMAIN, SIGNAL_ADD_DEFAULT_LINKS, SIGNAL_LOAD_ALDB, SIGNAL_PRINT_ALDB, SIGNAL_REMOVE_ENTITY, SIGNAL_SAVE_DEVICES, STATE_NAME_LABEL_MAP, ) from .utils import print_aldb_to_log _LOGGER = logging.getLogger(__name__) class InsteonEntity(Entity): """INSTEON abstract base entity.""" def __init__(self, device, group): """Initialize the INSTEON binary sensor.""" self._insteon_device_group = device.groups[group] self._insteon_device = device def __hash__(self): """Return the hash of the Insteon Entity.""" return hash(self._insteon_device) @property def should_poll(self): """No polling needed.""" return False @property def address(self): """Return the address of the node.""" return str(self._insteon_device.address) @property def group(self): """Return the INSTEON group that the entity responds to.""" return self._insteon_device_group.group @property def unique_id(self) -> str: """Return a unique ID.""" if self._insteon_device_group.group == 0x01: uid = self._insteon_device.id else: uid = f"{self._insteon_device.id}_{self._insteon_device_group.group}" return uid @property def name(self): """Return the name of the node (used for Entity_ID).""" # Set a base description if (description := self._insteon_device.description) is None: description = "Unknown Device" # Get an extension label if there is one extension = self._get_label() if extension: extension = f" {extension}" return f"{description} {self._insteon_device.address}{extension}" @property def extra_state_attributes(self): """Provide attributes for display on device card.""" return {"insteon_address": self.address, "insteon_group": self.group} @property def device_info(self) -> DeviceInfo: """Return device information.""" return DeviceInfo( identifiers={(DOMAIN, str(self._insteon_device.address))}, manufacturer="Smart Home", model=f"{self._insteon_device.model} ({self._insteon_device.cat!r}, 0x{self._insteon_device.subcat:02x})", name=f"{self._insteon_device.description} {self._insteon_device.address}", sw_version=f"{self._insteon_device.firmware:02x} Engine Version: {self._insteon_device.engine_version}", via_device=(DOMAIN, str(devices.modem.address)), ) @callback def async_entity_update(self, name, address, value, group): """Receive notification from transport that new data exists.""" _LOGGER.debug( "Received update for device %s group %d value %s", address, group, value, ) self.async_write_ha_state() async def async_added_to_hass(self): """Register INSTEON update events.""" _LOGGER.debug( "Tracking updates for device %s group %d name %s", self.address, self.group, self._insteon_device_group.name, ) self._insteon_device_group.subscribe(self.async_entity_update) load_signal = f"{self.entity_id}_{SIGNAL_LOAD_ALDB}" self.async_on_remove( async_dispatcher_connect(self.hass, load_signal, self._async_read_aldb) ) print_signal = f"{self.entity_id}_{SIGNAL_PRINT_ALDB}" async_dispatcher_connect(self.hass, print_signal, self._print_aldb) default_links_signal = f"{self.entity_id}_{SIGNAL_ADD_DEFAULT_LINKS}" async_dispatcher_connect( self.hass, default_links_signal, self._async_add_default_links ) remove_signal = f"{self._insteon_device.address.id}_{SIGNAL_REMOVE_ENTITY}" self.async_on_remove( async_dispatcher_connect( self.hass, remove_signal, 
functools.partial(self.async_remove, force_remove=True), ) ) async def async_will_remove_from_hass(self): """Unsubscribe to INSTEON update events.""" _LOGGER.debug( "Remove tracking updates for device %s group %d name %s", self.address, self.group, self._insteon_device_group.name, ) self._insteon_device_group.unsubscribe(self.async_entity_update) async def _async_read_aldb(self, reload): """Call device load process and print to log.""" await self._insteon_device.aldb.async_load(refresh=reload) self._print_aldb() async_dispatcher_send(self.hass, SIGNAL_SAVE_DEVICES) def _print_aldb(self): """Print the device ALDB to the log file.""" print_aldb_to_log(self._insteon_device.aldb) def _get_label(self): """Get the device label for grouped devices.""" label = "" if len(self._insteon_device.groups) > 1: if self._insteon_device_group.name in STATE_NAME_LABEL_MAP: label = STATE_NAME_LABEL_MAP[self._insteon_device_group.name] else: label = f"Group {self.group:d}" return label async def _async_add_default_links(self): """Add default links between the device and the modem.""" await self._insteon_device.async_add_default_links()
aronsky/home-assistant
homeassistant/components/insteon/insteon_entity.py
Python
apache-2.0
5,749
package com.monkeyk.os.service.dto; import com.monkeyk.os.domain.oauth.ClientDetails; import java.io.Serializable; import java.util.ArrayList; import java.util.List; /** * 2016/6/8 * * @author Shengzhao Li */ public class ClientDetailsListDto implements Serializable { private static final long serialVersionUID = -6327364441565670231L; private String clientId; private List<ClientDetailsDto> clientDetailsDtos = new ArrayList<>(); public ClientDetailsListDto() { } public ClientDetailsListDto(String clientId, List<ClientDetails> list) { this.clientId = clientId; this.clientDetailsDtos = ClientDetailsDto.toDtos(list); } public int getSize() { return clientDetailsDtos.size(); } public String getClientId() { return clientId; } public void setClientId(String clientId) { this.clientId = clientId; } public List<ClientDetailsDto> getClientDetailsDtos() { return clientDetailsDtos; } public void setClientDetailsDtos(List<ClientDetailsDto> clientDetailsDtos) { this.clientDetailsDtos = clientDetailsDtos; } }
monkeyk/oauth2-shiro
authz/src/main/java/com/monkeyk/os/service/dto/ClientDetailsListDto.java
Java
apache-2.0
1,151
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ package org.nd4j.api.loader; import java.io.Serializable; /** * A factory interface for getting {@link Source} objects given a String path * @author Alex Black */ public interface SourceFactory extends Serializable { Source getSource(String path); }
RobAltena/deeplearning4j
nd4j/nd4j-common/src/main/java/org/nd4j/api/loader/SourceFactory.java
Java
apache-2.0
1,022
package ch.sourcepond.utils.fileobserver.impl; import static org.mockito.Mockito.mock; import ch.sourcepond.utils.fileobserver.impl.dispatcher.DefaultEventDispatcher; import ch.sourcepond.utils.fileobserver.impl.replay.DefaultEventReplayFactory; /** * @author rolandhauser * */ public class DefaultWorkspaceTest { private final WorkspaceDirectory dir = mock(WorkspaceDirectory.class); private final DefaultEventReplayFactory evenrplFactory = mock(DefaultEventReplayFactory.class); private final DefaultEventDispatcher dispatcher = mock(DefaultEventDispatcher.class); }
SourcePond/fileobserver-impl
src/test/java/ch/sourcepond/utils/fileobserver/impl/DefaultWorkspaceTest.java
Java
apache-2.0
578
//MongoDB script to update the Wing Directors list. //The list includes Wing Directors only; assistants are excluded (see the 07Mar21 history entry). // //History: // 15Nov21 MEG Clean spaces from email addresses. // 07Mar21 MEG Exclude assistants // 27Jan21 MEG Created. var DEBUG = false; var db = db.getSiblingDB( 'NHWG'); // Google Group of interest var baseGroupName = 'nh-wing-directors'; var googleGroup = baseGroupName + '@nhwg.cap.gov'; // Mongo collection that holds all wing groups var groupsCollection = 'GoogleGroups'; // Aggregation pipeline to find all wing directors var memberPipeline = [ // Stage 1 - find ALL directors (assistants excluded via Asst: 0) { $match: { Duty:/director/i, Asst: 0, } }, // Stage 2 { $lookup: // join Google account record { from: "Google", localField: "CAPID", foreignField: "customSchemas.Member.CAPID", as: "member" } }, // Stage 3 { // flatten array $unwind: { path : "$member", preserveNullAndEmptyArrays : false // optional } }, // Stage 4 { $project: { CAPID:1, Duty:1, Asst:1, Director: "$member.name.fullName", Email: "$member.primaryEmail", } }, ]; // Aggregate a list of all emails for the Google group of interest // Excluding MANAGER & OWNER roles, no group aristocrats var groupMemberPipeline = [ { "$match" : { "group" : googleGroup, "role" : 'MEMBER', } }, { "$project" : { "email" : "$email" } } ]; // pipeline options var options = { "allowDiskUse" : false }; function isActiveMember( capid ) { // Check to see if the member is active. // This function needs to be changed for each group depending // on what constitutes "active". var m = db.getCollection( "Member").findOne( { "CAPID": capid, "MbrStatus": "ACTIVE" } ); if ( m == null ) { return false; } return true; } function isGroupMember( group, email ) { // Check if the email is already in the group var email = email.toLowerCase(); var rx = new RegExp( email, 'i' ); return db.getCollection( groupsCollection ).findOne( { 'group': group, 'email': rx } ); } function addMembers( collection, pipeline, options, group ) { // Scan for active members; // if a member is not currently on the mailing list, generate a gam command to add them. // Returns the list of members qualified to be on the list, regardless of current inclusion. var list = []; // the set of possible group members // Get the list of all qualified potential members for the list var cursor = db.getCollection( collection ).aggregate( pipeline, options ); while ( cursor.hasNext() ) { var m = cursor.next(); var email = m.Email.toLowerCase().replace( / /g, "" ); if ( ! isActiveMember( m.CAPID ) ) { continue; } if ( ! list.includes( email ) ) { list.push( email ); } if ( isGroupMember( googleGroup, m.Email ) ) { continue; } // Print gam command to add the new member print("gam update group", googleGroup, "add member", email ); } return list; } function removeMembers( collection, pipeline, options, group, authMembers ) { // Compare each member of the group against the authList; // if the member is not on it (e.g., no longer active), generate a gam command to remove them. 
// collection - name of collection holding all Google Group info // pipeline - array containing the pipeline to extract members of the target group // options - options for aggregations pipeline var m = db.getCollection( collection ).aggregate( pipeline, options ); while ( m.hasNext() ) { var e = m.next().email.toLowerCase().replace( / /g, "" ); DEBUG && print("DEBUG::removeMembers::email",e); var rgx = new RegExp( e, "i" ); if ( authMembers.includes( e ) ) { continue; } var r = db.getCollection( 'MbrContact' ).findOne( { Type: 'EMAIL', Priority: 'PRIMARY', Contact: rgx } ); if ( r ) { var a = db.getCollection( 'Member' ).findOne( { CAPID: r.CAPID } ); DEBUG && print("DEBUG::removeMembers::Member.CAPID",a.CAPID,"NameLast:",a.NameLast,"NameFirst:",a.NameFirst); if ( a ) { print( '#INFO:', a.CAPID, a.NameLast, a.NameFirst, a.NameSuffix ); print( 'gam update group', googleGroup, 'delete member', e ); } } } } // Main here print("# Update group:", googleGroup ); print("# Add new members"); var theAuthList = addMembers( "DutyPosition", memberPipeline, options, googleGroup ); DEBUG == true && print("DEBUG::theAuthList:", theAuthList); print( "# Remove inactive members") ; removeMembers( "GoogleGroups", groupMemberPipeline, options, googleGroup, theAuthList );
ifrguy/NHWG-MIMS
src/Groups/wing_directors.js
JavaScript
apache-2.0
4,806
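The gam commands above are generated from the mongo shell, but the same memberPipeline could be run from application code. Below is a minimal sketch using the MongoDB Java sync driver; the driver choice, class name, and connection string are assumptions, while the pipeline stages, database, and collection names come from the script.

import com.mongodb.client.MongoClient;
import com.mongodb.client.MongoClients;
import com.mongodb.client.MongoDatabase;
import org.bson.Document;

import java.util.Arrays;
import java.util.List;
import java.util.regex.Pattern;

public class WingDirectorsQuery {
    public static void main(String[] args) {
        // Connection string is an assumption; adjust for the real deployment.
        try (MongoClient client = MongoClients.create("mongodb://localhost:27017")) {
            MongoDatabase db = client.getDatabase("NHWG");
            // Same stages as memberPipeline in wing_directors.js.
            List<Document> pipeline = Arrays.asList(
                new Document("$match", new Document("Duty", Pattern.compile("director", Pattern.CASE_INSENSITIVE))
                    .append("Asst", 0)),
                new Document("$lookup", new Document("from", "Google")
                    .append("localField", "CAPID")
                    .append("foreignField", "customSchemas.Member.CAPID")
                    .append("as", "member")),
                new Document("$unwind", "$member"),
                new Document("$project", new Document("CAPID", 1)
                    .append("Duty", 1)
                    .append("Director", "$member.name.fullName")
                    .append("Email", "$member.primaryEmail")));
            for (Document d : db.getCollection("DutyPosition").aggregate(pipeline)) {
                System.out.println(d.toJson());
            }
        }
    }
}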
package ec.master.assignment1.selection.impl;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Random;

import ec.master.assignment1.model.Individual;
import ec.master.assignment1.selection.Selector;

/**
 * @ClassName: FPSelection
 * @Description: implementation of fitness-proportional (roulette-wheel) selection
 * @date 17/08/2015 11:15:33 pm
 */
public class FPSelection implements Selector {

    /**
     * Performs fitness-proportional selection.
     *
     * @param individuals candidate population (shuffled in place)
     * @param groupSize   unused by this selector
     * @param resultSize  number of individuals to select
     * @return selected list
     */
    public List<Individual> doSelection(List<Individual> individuals, int groupSize, int resultSize) {
        Collections.shuffle(individuals);
        ArrayList<Individual> selectedIndividuals = new ArrayList<Individual>(resultSize);
        // Sum the fitness of every individual.
        double sum = 0;
        for (int i = 0; i < individuals.size(); i++) {
            sum += individuals.get(i).getFitness();
        }
        Random random = new Random();
        // Spin the wheel resultSize times.
        for (int p = 0; p < resultSize; p++) {
            double compare = random.nextDouble() * sum;
            for (int i = 0; i < individuals.size(); i++) {
                compare -= individuals.get(i).getFitness();
                // The individual whose fitness interval contains the spin wins.
                if (compare <= 0) {
                    selectedIndividuals.add(individuals.get(i));
                    break;
                }
            }
            // Guard against floating-point round-off leaving compare > 0 after the loop.
            if (selectedIndividuals.size() == p) {
                selectedIndividuals.add(individuals.get(individuals.size() - 1));
            }
        }
        return selectedIndividuals;
    }
}
nettree/EC
EC_Assignment1/src/ec/master/assignment1/selection/impl/FPSelection.java
Java
apache-2.0
1,373
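The inner loop above is a classic roulette-wheel spin: each individual owns a slice of [0, sum) proportional to its fitness, and the spin lands in exactly one slice. Below is a self-contained sketch of the same arithmetic on a plain fitness array; the class name and sample fitness values are invented for illustration.

import java.util.Random;

public class RouletteWheelDemo {
    // One spin of the wheel: walk the fitness array subtracting each slice
    // until the random draw is exhausted, exactly as in FPSelection.
    static int spin(double[] fitness, Random rnd) {
        double sum = 0;
        for (double f : fitness) {
            sum += f;
        }
        double compare = rnd.nextDouble() * sum;
        for (int i = 0; i < fitness.length; i++) {
            compare -= fitness[i];
            if (compare <= 0) {
                return i;
            }
        }
        return fitness.length - 1; // guard against floating-point round-off
    }

    public static void main(String[] args) {
        double[] fitness = { 1.0, 2.0, 7.0 }; // index 2 should win ~70% of spins
        Random rnd = new Random(42);
        int[] hits = new int[fitness.length];
        for (int n = 0; n < 10000; n++) {
            hits[spin(fitness, rnd)]++;
        }
        for (int i = 0; i < hits.length; i++) {
            System.out.println("index " + i + ": " + hits[i]);
        }
    }
}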
/*
 * Copyright 2017 Nobuki HIRAMINE
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.hiramine.modelfileloader;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.util.StringTokenizer;

import android.util.Log;

public class StlFileLoader {
	public static MFLModel load( String strPath, OnProgressListener onProgressListener ) {
		File file = new File( strPath );
		if( 0 == file.length() ) {
			return null;
		}

		// First pass (count the elements)
		int[] aiCountTriangle = new int[1];
		int[] aiCountLine = new int[1];
		if( !parse_first( strPath, aiCountTriangle, aiCountLine ) ) {
			return null;
		}

		// Allocate storage
		if( 0 == aiCountTriangle[0] ) {
			return null;
		}
		float[] af3Vertex = new float[aiCountTriangle[0] * 3 * 3]; // af3Vertex: 3 values per vertex, 3 vertices per triangle

		// Second pass (fill in the values)
		if( !parse_second( strPath, af3Vertex, aiCountLine[0], onProgressListener ) ) {
			return null;
		}

		// Allocate storage and build the data structures
		MFLModel model = new MFLModel();
		model.iCountVertex = af3Vertex.length / 3;
		model.af3Vertex = af3Vertex;
		model.iCountTriangle = model.iCountVertex / 3;
		model.aIndexedTriangle = new MFLIndexedTriangle[model.iCountTriangle];
		for( int iIndexTriangle = 0; iIndexTriangle < model.iCountTriangle; ++iIndexTriangle ) {
			model.aIndexedTriangle[iIndexTriangle] = new MFLIndexedTriangle();
			model.aIndexedTriangle[iIndexTriangle].i3IndexVertex[0] = (short)( iIndexTriangle * 3 + 0 );
			model.aIndexedTriangle[iIndexTriangle].i3IndexVertex[1] = (short)( iIndexTriangle * 3 + 1 );
			model.aIndexedTriangle[iIndexTriangle].i3IndexVertex[2] = (short)( iIndexTriangle * 3 + 2 );
		}
		model.iCountNormal = 0;
		model.af3Normal = null;
		model.aMaterial = null;
		model.groupRoot = new MFLGroup( "" );
		model.groupRoot.iCountTriangle = model.iCountTriangle;
		model.groupRoot.aiIndexTriangle = new int[model.groupRoot.iCountTriangle];
		for( int iIndexTriangle = 0; iIndexTriangle < model.groupRoot.iCountTriangle; ++iIndexTriangle ) {
			model.groupRoot.aiIndexTriangle[iIndexTriangle] = iIndexTriangle;
		}
		return model;
	}

	private static boolean parse_first( String strPath, int[] aiCountTriangle, int[] aiCountLine ) {
		// Validate the inputs
		if( null == aiCountTriangle || null == aiCountLine ) {
			return false;
		}
		// Initialize the outputs
		aiCountTriangle[0] = 0;
		aiCountLine[0] = 0;

		try {
			// Read the file
			BufferedReader br = new BufferedReader( new FileReader( strPath ) );
			int iIndexTriangle = 0;
			int iIndexLine = 0;
			while( true ) {
				++iIndexLine;
				String strReadString = br.readLine();
				if( null == strReadString ) {
					break;
				}
				StringTokenizer stReadString = new StringTokenizer( strReadString, ", \t\r\n" );
				if( !stReadString.hasMoreTokens() ) {
					continue;
				}
				String token = stReadString.nextToken();
				if( token.equalsIgnoreCase( "endfacet" ) ) {
					++iIndexTriangle;
					continue;
				}
			}
			br.close();
			aiCountTriangle[0] = iIndexTriangle;
			aiCountLine[0] = iIndexLine;
			return true;
		} catch( Exception e ) {
			Log.e( "StlFileLoader", "parse_first error : " + e );
			return false;
		}
	}

	private static boolean parse_second( String strPath, float[] af3Vertex, int iCountLine, OnProgressListener onProgressListener ) {
		// Validate the inputs
		if( null == af3Vertex ) {
			return false;
		}

		try {
			// Read the file
			BufferedReader br = new BufferedReader( new FileReader( strPath ) );
			int iIndexTriangle = 0;
			int iIndexLine = 0;
			int iIndex3 = 0;
			while( true ) {
				if( null != onProgressListener && 0 == iIndexLine % 100 ) {
					if( !onProgressListener.updateProgress( iIndexLine, iCountLine ) ) {
						// Processing cancelled by the user
						Log.d( "LoaderStlFile", "Cancelled" );
						return false;
					}
				}
				++iIndexLine;
				String strReadString = br.readLine();
				if( null == strReadString ) {
					break;
				}
				StringTokenizer stReadString = new StringTokenizer( strReadString, ", \t\r\n" );
				if( !stReadString.hasMoreTokens() ) {
					continue;
				}
				String token = stReadString.nextToken();
				if( token.equalsIgnoreCase( "vertex" ) ) {
					if( 3 <= iIndex3 ) {
						continue;
					}
					af3Vertex[iIndexTriangle * 9 + iIndex3 * 3 + 0] = Float.valueOf( stReadString.nextToken() );
					af3Vertex[iIndexTriangle * 9 + iIndex3 * 3 + 1] = Float.valueOf( stReadString.nextToken() );
					af3Vertex[iIndexTriangle * 9 + iIndex3 * 3 + 2] = Float.valueOf( stReadString.nextToken() );
					++iIndex3;
					continue;
				} else if( token.equalsIgnoreCase( "facet" ) ) {
					// Face normal vector; reset the per-facet vertex counter
					iIndex3 = 0;
					continue;
				} else if( token.equalsIgnoreCase( "endfacet" ) ) {
					++iIndexTriangle;
					continue;
				} else if( token.equalsIgnoreCase( "solid" ) ) {
					// Solid name
					continue;
				}
			}
			br.close();
			return true;
		} catch( Exception e ) {
			Log.e( "StlFileLoader", "parse_second error : " + e );
			return false;
		}
	}
}
nobukihiramine/ModelViewerTutorial
app/src/main/java/com/hiramine/modelfileloader/StlFileLoader.java
Java
apache-2.0
6,007
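The loader's two-pass strategy (count endfacet lines first, then size the vertex array exactly) can be exercised in isolation. Below is a minimal sketch of the first pass over an ASCII STL file; the class name and command-line entry point are invented for illustration.

import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;

public class StlTriangleCounter {
    // First-pass idea from StlFileLoader: each "endfacet" line closes one
    // triangle, so counting them tells us how large the vertex array must be.
    public static int countTriangles(String path) throws IOException {
        int triangles = 0;
        try (BufferedReader br = new BufferedReader(new FileReader(path))) {
            String line;
            while ((line = br.readLine()) != null) {
                if (line.trim().toLowerCase().startsWith("endfacet")) {
                    triangles++;
                }
            }
        }
        return triangles;
    }

    public static void main(String[] args) throws IOException {
        System.out.println(countTriangles(args[0]) + " triangles");
    }
}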