forked from archiver-appliance/epicsarchiverap
Commit
Merge pull request archiver-appliance#285 from jacomago/default-config-files
Add default config to the default build
Showing 3 changed files with 296 additions and 0 deletions.
src/sitespecific/default/classpathfiles/archappl.properties (150 additions, 0 deletions)
@@ -0,0 +1,150 @@
# Specify arbitrary name value pairs here; names and values are strings...

# The PlainPB plugin replaces the characters specified in this list with the "/" character to generate path names.
# This has the effect of distributing the files containing data into multiple folders.
# Getting a good distribution is important for good performance; most file systems do not perform well if they have 10000's of files in a folder.
# This is passed into Java's String.replaceAll; so watch out for regex issues.
org.epics.archiverappliance.config.ConvertPVNameToKey.siteNameSpaceSeparators = [\\:\\-]
# To avoid substring issues, each pathname is terminated with a character that is guaranteed not to be in the path name (because of the replacement above).
org.epics.archiverappliance.config.ConvertPVNameToKey.siteNameSpaceTerminator = :
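# For example, with the settings above, a hypothetical PV name "ABC:DEF-GHI" maps to the key "ABC/DEF/GHI:"
# (every ":" and "-" is replaced with "/", and then the ":" terminator is appended).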

# We enforce a site specific minimum sampling period (read: maximum rate) using this value.
org.epics.archiverappliance.mgmt.bpl.ArchivePVAction.minimumSamplingPeriod = 0.1

# We have a system wide buffer size (specified in seconds) for buffering the data in engine memory.
# This is a compromise between various factors including garbage collection, IOPS of the short term store and memory availability.
org.epics.archiverappliance.config.PVTypeInfo.secondsToBuffer = 10

# Per FRIB/PSI, we have a configuration knob to increase/decrease the sample buffer size used by the engine for all PVs.
# This is a double - by default 1.0, which means we leave the buffer size computation as is.
# If you want to increase the buffer size globally to 150% of what is normally computed, set this to 1.5.
org.epics.archiverappliance.config.PVTypeInfo.sampleBufferCapacityAdjustment = 1.0
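# For example (illustrative numbers): a 10 Hz PV with secondsToBuffer = 10 would normally buffer about 100 samples;
# with sampleBufferCapacityAdjustment = 1.5, the engine would size that buffer for about 150 samples instead.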


# The getData retrieval calls support a usereduced parameter which is the entry point into sparsification.
# It is possible for sites to configure their default sparsification post processor using this parameter.
# For example, at SLAC we will probably divide the request into two parts: data less than two weeks old is returned raw, while data older than two weeks is sparsified using FirstSamplePP.
org.epics.archiverappliance.retrieval.DefaultUseReducedPostProcessor=org.epics.archiverappliance.retrieval.postprocessors.TwoWeekRaw


# If a PV used to be archived by the appliance but has since been retired, data does not get returned for the request even though data might exist in the
# datastores. Setting this option to 'true' means that if a request is received for a PV that is not currently being archived (i.e. the PVTypeInfo is null),
# the appliance will automatically run a search in the datastores to see if there is any data available for this PV within the timeframe and return it.
# org.epics.archiverappliance.retrieval.SearchStoreForRetiredPvs=true


# This property has been deprecated as it can easily lead to data loss.
# The maximum number of datastores/stages/lifetimeids in this installation.
# Specifically, this returns the maximum length of the datastores element across all PVTypeInfo's in this installation.
# For example, in a typical installation with a short term store, a medium term store and a long term store, this should return 3.
# Various optimizations are based on the maximum number of stages data goes through in the archiver appliance.
# We create one ETL thread per lifetime transition.
# org.epics.archiverappliance.config.PVTypeInfo.maximumNumberOfLifetimesInInstallation=5


# Use this property to control whether you want to use CAJ or the JNI implementation in JCA.
org.epics.archiverappliance.engine.epics.JCAConfigGen.useCAJ=true
# This controls the dispatcher used to dispatch ChannelAccess events.
org.epics.archiverappliance.engine.epics.JCAConfigGen.dispatcher=org.epics.archiverappliance.engine.epics.JCAEventDispatcherBasedOnPVName
#org.epics.archiverappliance.engine.epics.JCAConfigGen.dispatcher=gov.aps.jca.event.QueuedEventDispatcher
# For faster reconnect times, we may want to use more than one JCAContext/CAJContext. This controls the number of JCACommandThreads and thus the number of JCAContext/CAJContext.
# Each JCACommandThread launches approximately 4 threads in all in CAJ - one CAJ search thread (UDP), a couple of TCP threads and the JCACommandThread that controls them.
# Routing all PVs through fewer contexts seems to result in larger reconnect times.
org.epics.archiverappliance.engine.epics.commandThreadCount=10

# Maximum amount of clock drift (in seconds) between appliance and IOC.
# Samples more than this many seconds in the future are discarded for data quality reasons.
# Samples from the second sample onwards (after a PV connect) more than this many seconds in the past are discarded for data quality reasons.
# org.epics.archiverappliance.engine.epics.server_ioc_drift_seconds=1800

# The SCAN sampling method establishes camonitors and skips samples that arrive closer together than the sampling period.
# However, the IOC itself could have some jitter, and this would cause a SCAN'ed PV to miss samples that arrive a few milliseconds early.
# Use this to control how much jitter you want to accommodate.
# This is a multiplier; so if your sampling period is 1.0 seconds and the jitter factor is 0.95, then the effective sampling period is 950 milliseconds.
# Alternatively, to accommodate a jitter of 5%, use 0.95; to accommodate a jitter of 10%, use 0.9, etc.
org.epics.archiverappliance.engine.epics.scanJitterFactor=0.95

# We use a ScheduledThreadPoolExecutor for implementing SCAN PVs.
# If you have a lot of PVs under SCAN and some of these take time, it is possible to miss some SCAN samples because we just don't get to the PV in time.
# In this case, you can increase the number of SCAN threads used; the need for this is probably pretty rare.
org.epics.archiverappliance.engine.epics.scanThreadCount=1


# How ETL should handle out-of-space situations.
# See the javadoc of org.epics.archiverappliance.etl.common.OutOfSpaceHandling for some options.
org.epics.archiverappliance.etl.common.OutOfSpaceHandling=DELETE_SRC_STREAMS_IF_FIRST_DEST_WHEN_OUT_OF_SPACE


# A list of fields for PVs that are monitored and maintained in the engine.
# These are used when displaying the PV in visualization tools like the ArchiveViewer as additional information for the PV.
# Some of these could be archived along with the PV but need not be.
# In this case, the engine simply maintains the latest copy in memory, and this is served up when data from the engine is included in the stream.
# This is a comma separated list of fields.
# For example, if you have a field for the owner of a PV, you could add that here.
# We add the DESC in addition to the typical limits.
org.epics.archiverappliance.config.RuntimeKeys=DESC

# On CA disconnects, occasionally, we do not reconnect back to the PV in time.
# This timeout governs the delay between the disconnect and when we do a pause/resume to convert reconnects into CA searches.
# If you want to turn off this functionality, simply set this value to 0.
org.epics.archiverappliance.engine.util.EngineContext.disconnectCheckTimeoutInMinutes = 0

# Configure how fast the engine starts up PVs.
# To prevent broadcast storms, we pause for pausePerGroupPauseTimeInSeconds seconds for every pausePerGroupPVCount PVs.
# org.epics.archiverappliance.engine.archivePVSonStartup.pausePerGroupPVCount = 2000
# org.epics.archiverappliance.engine.archivePVSonStartup.pausePerGroupPauseTimeInSeconds = 2

# This is a more complex one.
# Ideally, we'd look in the datastores for the last known sample and use that as a boundary condition for future samples.
# But this can be expensive in systems with large numbers of PVs and can delay the startup of PVs significantly.
# In such cases, we sacrifice the samples of PVs with ancient timestamps and only record samples from "now" onwards; this may be a more suitable tradeoff for such installations.
# If you set this to false, the boundary condition is the server's current timestamp when the PV is being started.
org.epics.archiverappliance.engine.archivePVSonStartup.determineLastKnownEventFromStores = false

# One can define a set of named flags (booleans) that can be used to control various processes in the system.
# For example, you can control the ETL process in a PlainPBStoragePlugin using a named flag to accomplish a gated ETL.
# Named flags are not persistent; each time the server starts up, all the named flags are set to false.
# If a named flag is not defined, it defaults to false.
# You can optionally load values for named flags by specifying the full path to a java properties file here.
# org.epics.archiverappliance.config.NamedFlags.readFromFile=/nfs/fullpathto/namedFlags.properties

# We pick up named client configuration JSON files for the archive viewer from here.
# To turn off this feature, simply comment out this property.
#org.epics.archiverappliance.retrieval.bpl.GetClientConfiguration.DocumentRoot=/nfs/slac/g/lcls/tools/ArchiveBrowser/config

# We impose a throttle on pending archive requests. This is an attempt to conserve resources on the engine and also to control CA search broadcast storms.
# What this means is that if you have more than this many invalid PVs (PVs that will never connect) in the archive workflow, the ones later in the queue will never get fulfilled unless the archive request queue is cleaned up.
# See abortNeverConnectedPV for an example of how to do this.
# Use this property to increase this batch size.
# Note that there is a limit on how high this can go. We sample the PV for a minute to determine storage rates etc.; if this is set very high (> 10000), there may not be enough time to perform the sampling for the archive workflow.
# org.epics.archiverappliance.mgmt.MgmtRuntimeState.archivePVWorkflowBatchSize = 1000

# For larger archivePVWorkflowBatchSize, you may need a longer time between the workflow state machine ticks.
# Set this to 60 seconds or more if you are using archivePVWorkflowBatchSize's of 5000 or greater.
# The workflow may take a little longer per PV but overall may be much faster.
# org.epics.archiverappliance.mgmt.MgmtRuntimeState.archivePVWorkflowTickSeconds = 10

# Abort PVs in the archive PV workflow after this many minutes if the archiver is not able to connect to the PV.
# The workflow can take a few minutes; so this should be set to a reasonable value (for example, 1 minute would mean that no PV would complete the workflow).
# By default, this is set to a day. So, if the archiver cannot connect to the PV in a day, it will give up and abort.
# To turn this off, set this to -1.
# org.epics.archiverappliance.mgmt.MgmtRuntimeState.abortArchiveRequestInMins = 1440

# The initial delay after startup before starting processing of archiving requests, in seconds.
# As we now wait for the entire cluster to load up before starting the archive workflows, set this value if you need to delay the start of archiving workflows for other reasons.
# org.epics.archiverappliance.mgmt.MgmtRuntimeState.initialDelayBeforeStartingArchiveRequests = 10

# See org.epics.archiverappliance.engine.pv.EngineContext for these two entries.
# org.epics.archiverappliance.engine.maximum_disconnected_channel_percentage_before_starting_metachannels = 5.0
# org.epics.archiverappliance.engine.metachannels_to_start_at_a_time = 10000

# Choose whether to use pvAccess or Channel Access by default.
# Options are CA for Channel Access or PVA for pvAccess.
# Default is CA.
# org.epics.archiverappliance.mgmt.bpl.ArchivePVAction.defaultAccessProtocol = PVA

# For single PV requests, use an HTTP redirect to the appliance containing the PV instead of proxying.
# This may be faster in some situations.
# However, this may not work in cases where the appliances are accessed behind a load balancer.
org.epics.archiverappliance.retrieval.DataRetrievalServlet.proxyRetrievalRequest=false
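As a quick illustration of the retrieval endpoint these properties configure, the following is a minimal client sketch; the hostname is hypothetical, the PV name is borrowed from the policies file below, and 17668 is the stock single-appliance retrieval port:

    import json
    import urllib.request

    # Hypothetical appliance hostname; pv/from/to are the standard getData parameters.
    url = ("http://archiver.example.org:17668/retrieval/data/getData.json"
           "?pv=mshankar:arch:sine"
           "&from=2012-02-01T00:00:00.000Z"
           "&to=2012-02-02T00:00:00.000Z")
    with urllib.request.urlopen(url) as resp:
        payload = json.load(resp)

    # The response is a list with one entry per PV; each sample carries secs/nanos and val.
    for sample in payload[0]["data"][:5]:
        print(sample["secs"], sample["val"])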
@@ -0,0 +1,13 @@
<Configuration>
  <Appenders>
    <Console name="STDOUT" target="SYSTEM_OUT">
      <PatternLayout pattern="%d %-5p [%t] %C{2} (%F:%L) - %m%n"/>
    </Console>
  </Appenders>
  <Loggers>
    <Logger name="org.apache.log4j.xml" level="info"/>
    <Root level="info">
      <AppenderRef ref="STDOUT"/>
    </Root>
  </Loggers>
</Configuration>
@@ -0,0 +1,133 @@
#!/usr/bin/python

# policies.py
#
# Author: M. Shankar, Jan 31, 2012
# Modification History
#   Jan 31, 2012, Shankar: Initial version of policies.py with comments.
#   May 14, 2012, Li, Shankar: Added support for archiving extra fields into policy file.
#
# This is the policies.py used to enforce policies for archiving PVs.
# At a very high level, when users request PVs to be archived, the mgmt web app samples the PV to determine the event rate and other parameters.
# In addition, various fields of the PV like .NAME, .ADEL, .MDEL, .RTYP etc. are also obtained.
# These are passed to this python script as a dictionary argument to a method called determinePolicy.
# The variable name in the python environment for this information is 'pvInfo'; use other names for your own variables.
# The method is expected to use this information to make decisions on various archiving parameters.
# The result is expected to be another dictionary that is placed into the variable called 'pvPolicy'.
# Optionally, fields in addition to the VAL field that are to be archived with the PV are passed in as a property of pvPolicy called 'archiveFields'.
# If the user overrides the policy, this is communicated in the pvInfo as a property called 'policyName'.
#
# In addition, this script must communicate the list of available policies to the JVM as another method called getPolicyList which takes no arguments.
# The result of this method is placed into a variable called 'pvPolicies'.
# The dictionary is a name-to-description mapping; the description is used in the UI, and the name is what is communicated to determinePolicy as a user override.
#
# In addition, this script must communicate the list of fields that are to be archived as part of the stream in a method called getFieldsArchivedAsPartOfStream.
# The result of this method is placed into a list variable called 'pvStandardFields'.
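# As an illustration (these values are made up), the pvInfo dictionary for a PV might look like:
#   {'pvName': 'mshankar:arch:sine', 'eventRate': 10.0, 'storageRate': 700.0, 'RTYP': 'ai'}
# where eventRate is in events/second and storageRate is in bytes/second.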

import sys
import os

# Generate a list of policy names. This is used to feed the dropdown in the UI.
def getPolicyList():
    pvPoliciesDict = {}
    pvPoliciesDict['Default'] = 'The default policy'
    pvPoliciesDict['BPMS'] = 'BPMS that generate more than 1GB a year'
    pvPoliciesDict['2HzPVs'] = 'PVs with an event rate more than 2Hz'
    pvPoliciesDict['3DaysMTSOnly'] = 'Store data for 3 days up to the MTS only.'
    return pvPoliciesDict

# Define a list of fields that will be archived as part of every PV.
# The data for these fields will be included in the stream for the PV.
# We also make the assumption that the data type for these fields is the same as that of the .VAL field.
def getFieldsArchivedAsPartOfStream():
    return ['HIHI','HIGH','LOW','LOLO','LOPR','HOPR','DRVH','DRVL']


# We use the environment variables ARCHAPPL_SHORT_TERM_FOLDER, ARCHAPPL_MEDIUM_TERM_FOLDER and ARCHAPPL_LONG_TERM_FOLDER to determine the locations of the STS, the MTS and the LTS in the appliance.
shorttermstore_plugin_url = 'pb://localhost?name=STS&rootFolder=${ARCHAPPL_SHORT_TERM_FOLDER}&partitionGranularity=PARTITION_HOUR&consolidateOnShutdown=true'
mediumtermstore_plugin_url = 'pb://localhost?name=MTS&rootFolder=${ARCHAPPL_MEDIUM_TERM_FOLDER}&partitionGranularity=PARTITION_DAY&hold=2&gather=1'
longtermstore_plugin_url = 'pb://localhost?name=LTS&rootFolder=${ARCHAPPL_LONG_TERM_FOLDER}&partitionGranularity=PARTITION_YEAR'
#longtermstore_plugin_url = 'blackhole://localhost'
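# A note on the hold/gather parameters above (per the PlainPB plugin's ETL settings): "hold" is the number of
# partitions kept in a store before ETL moves data out, and "gather" is how many partitions are moved at a time;
# so hold=2&gather=1 on a day-partitioned MTS keeps roughly one full day beyond the current partition.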


def determinePolicy(pvInfoDict):
    pvPolicyDict = {}

    userPolicyOverride = ''
    if 'policyName' in pvInfoDict:
        userPolicyOverride = pvInfoDict['policyName']

    if userPolicyOverride == '2HzPVs' or (userPolicyOverride == '' and pvInfoDict['eventRate'] > 2.0):
        pvPolicyDict['samplingPeriod'] = 1.0
        pvPolicyDict['samplingMethod'] = 'MONITOR'
        pvPolicyDict['dataStores'] = [
            shorttermstore_plugin_url,
            mediumtermstore_plugin_url,
            longtermstore_plugin_url
        ]
        pvPolicyDict['policyName'] = '2HzPVs'
    elif userPolicyOverride == 'Default' or (userPolicyOverride == '' and pvInfoDict['pvName'] == 'mshankar:arch:sine'):
        pvPolicyDict['samplingPeriod'] = 1.0
        pvPolicyDict['samplingMethod'] = 'MONITOR'
        pvPolicyDict['dataStores'] = [
            shorttermstore_plugin_url,
            mediumtermstore_plugin_url,
            longtermstore_plugin_url + "&pp=firstSample&pp=firstSample_3600"
        ]
        pvPolicyDict['policyName'] = 'Default'
    elif userPolicyOverride == 'BPMS' or (userPolicyOverride == '' and pvInfoDict['pvName'].startswith('BPMS') and pvInfoDict['storageRate'] > 35):
        # We limit BPM PVs to roughly 1GB/year (about 34 bytes/sec; the computation below rounds this to 35).
        # We reduce the sampling rate (and hence increase the sampling period) to cater to this.
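        # For example (illustrative numbers): storageRate = 700 bytes/sec at eventRate = 10 Hz is 70 bytes per sample;
        # capping storage at 35 bytes/sec gives samplingPeriod = 700 / (35 * 10) = 2.0 seconds.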
        pvPolicyDict['samplingPeriod'] = pvInfoDict['storageRate'] / (35 * pvInfoDict['eventRate'])
        pvPolicyDict['samplingMethod'] = 'MONITOR'
        pvPolicyDict['dataStores'] = [
            shorttermstore_plugin_url,
            mediumtermstore_plugin_url,
            longtermstore_plugin_url
        ]
        pvPolicyDict['policyName'] = 'BPMS'
    elif userPolicyOverride == '3DaysMTSOnly':
        pvPolicyDict['samplingPeriod'] = 1.0
        pvPolicyDict['samplingMethod'] = 'MONITOR'
        pvPolicyDict['dataStores'] = [
            shorttermstore_plugin_url,
            # We want to store 3 days worth of data in the MTS.
            'pb://localhost?name=MTS&rootFolder=${ARCHAPPL_MEDIUM_TERM_FOLDER}&partitionGranularity=PARTITION_DAY&hold=4&gather=1',
            'blackhole://localhost?name=LTS'
        ]
        pvPolicyDict['policyName'] = '3DaysMTSOnly'
    else:
        pvPolicyDict['samplingPeriod'] = 1.0
        pvPolicyDict['samplingMethod'] = 'MONITOR'
        pvPolicyDict['dataStores'] = [
            shorttermstore_plugin_url,
            mediumtermstore_plugin_url,
            longtermstore_plugin_url
        ]
        pvPolicyDict['policyName'] = 'Default'

    archiveFields = []

    if "RTYP" not in pvInfoDict:
        pvPolicyDict["archiveFields"] = archiveFields
    else:
        pvRTYP = pvInfoDict["RTYP"]
        if pvRTYP == "ai":
            archiveFields = ['HIHI','HIGH','LOW','LOLO','LOPR','HOPR']
        elif pvRTYP == "ao":
            archiveFields = ['HIHI','HIGH','LOW','LOLO','LOPR','HOPR','DRVH','DRVL']
        elif pvRTYP == "calc":
            archiveFields = ['HIHI','HIGH','LOW','LOLO','LOPR','HOPR']
        elif pvRTYP == "calcout":
            archiveFields = ['HIHI','HIGH','LOW','LOLO','LOPR','HOPR']
        elif pvRTYP == "longin":
            archiveFields = ['HIHI','HIGH','LOW','LOLO','LOPR','HOPR']
        elif pvRTYP == "longout":
            archiveFields = ['HIHI','HIGH','LOW','LOLO','LOPR','HOPR','DRVH','DRVL']
        elif pvRTYP == "dfanout":
            archiveFields = ['HIHI','HIGH','LOW','LOLO','LOPR','HOPR']
        elif pvRTYP == "sub":
            archiveFields = ['HIHI','HIGH','LOW','LOLO','LOPR','HOPR']
        pvPolicyDict["archiveFields"] = archiveFields

    return pvPolicyDict
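
To sanity-check edits to this policy file, a minimal standalone harness like the following can exercise determinePolicy; this is a sketch, not part of the commit, and the pvInfo values are made up:

    if __name__ == '__main__':
        # Illustrative pvInfo; a real one is supplied by the mgmt web app after sampling the PV.
        samplePvInfo = {
            'pvName': 'mshankar:arch:sine',
            'eventRate': 10.0,      # events/second
            'storageRate': 700.0,   # bytes/second
            'RTYP': 'ai',
        }
        policy = determinePolicy(samplePvInfo)
        # eventRate > 2.0 with no user override, so this should select the '2HzPVs' policy.
        print(policy['policyName'], policy['samplingPeriod'])
        print(policy['archiveFields'])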