# Basic application settings:
LOD_AS_DESC=Getty Activity Stream (Example)
DATABASE=postgresql://postgres:postgres@postgres/postgres
PGDATA=/var/lib/postgresql/data
POSTGRES_PASSWORD=postgres
POSTGRES_USER=postgres
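## Note: the DATABASE URL follows the standard postgresql://user:password@host/dbname
## form; the credentials and host above should match the POSTGRES_* values and the
## name of the database service (eg 'postgres' in a docker-compose setup).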
FLASK_GZIP_COMPRESSION=True
FLASK_APP=/app/flaskapp
FLASK_ENV=production
FLASK_RUN_PORT=5100
# Whether to run the flask db upgrade command on startup:
DB_UPGRADE_ON_START=true
# Workers -> typically 2-4 per CPU core
WEB_WORKERS=1
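## Illustrative sizing only: with the 2-4 per core guideline, a 2-core host would
## suggest roughly WEB_WORKERS=4 to WEB_WORKERS=8; the single worker above is a
## minimal starting point.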
# Deployed cloud-based services commonly use timeout values anywhere from 90 to 900
# seconds, far in excess of the usual defaults; 600 is used here as a default.
WEB_TIMEOUT=600
# Can be deployed using a worker/threads model (sync/gthread) or gevent
# threads -> 2-4, depending on the typical request and DB load
# WORKER_CLASS=gthread
# WEB_THREADS=2
# gevent
WORKER_CLASS=gevent
WORKER_CONNECTIONS=100
# Verbosity levels are CRITICAL, ERROR, WARNING, INFO, DEBUG
DEBUG_LEVEL=INFO
# JSON-formatted logging? Set to false for standard log formatting.
JSON_LOGGING=false
# Also format the access logs as JSON? Set to false to leave them in the standard format.
ACCESS_JSON_LOGGING=false
# Auth Token setup:
AUTHORIZATION_TOKEN=AuthToken
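## Protected requests (such as ingest) present this value in the Authorization header.
## Illustrative example only, assuming the Bearer scheme and an /ingest endpoint under
## the application namespace ('record.json' is a placeholder file name):
## curl -X POST -H "Authorization: Bearer AuthToken" -d @record.json http://localhost:5100/museum/collection/ingest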
# HOST and Namespace (subpath) configuration:
BASE_URL=http://localhost:5100
APPLICATION_NAMESPACE=museum/collection
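## With the values above, resources are served beneath
## http://localhost:5100/museum/collection/ (illustratively, a record with the
## relative id 'object/1' would resolve to .../museum/collection/object/1).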
# RDF related configuration:
RDF_NAMESPACE=museum/collection
PROCESS_RDF=True
SPARQL_QUERY_ENDPOINT=http://fuseki:3030/ds/sparql
SPARQL_UPDATE_ENDPOINT=http://fuseki:3030/ds/update
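## Illustrative check only (standard SPARQL 1.1 protocol): verify the query endpoint
## is reachable with a trivial ASK query, eg:
## curl -G http://fuseki:3030/ds/sparql --data-urlencode 'query=ASK {}'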
# Use PyLD for parsing/reformatting JSON-LD?
# Set to False to use RDFLib instead.
USE_PYLD_REFORMAT=True
# Base graph functionality:
## Name of basegraph object:
#RDF_BASE_GRAPH=
## FOR TESTING ONLY! Reload the basegraph if ingested (will only affect the worker that handles the ingest!)
## Base graph changes should be handled by updating the resource and then reloading the service fully
TESTMODE_BASEGRAPH=False
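## Illustrative only: setting eg RDF_BASE_GRAPH=basegraph would treat the record with
## that name as the base graph; 'basegraph' is a placeholder name, not a value required
## by the service.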
# Versioning:
KEEP_LAST_VERSION=True
# Keep old versions even after the record is deleted? Requests for times when it was deleted return 404.
KEEP_VERSIONS_AFTER_DELETION=False
# Do users need to authenticate to see old versions?
VERSIONING_AUTHENTICATION=True
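## With the settings above: each update keeps the previous state retrievable as a prior
## version, deleting a record does not retain its versions, and requests for old
## versions must supply the Authorization token.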
# Default Memento TimeMap format (application/json or application/link-format):
MEMENTO_PREFERRED_FORMAT=application/link-format
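## Clients can typically request the other format per call via content negotiation
## (eg an Accept: application/json header); this default applies when no preference
## is expressed.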
# JSON output options:
JSON_SORT_KEYS=False
JSON_AS_ASCII=False
# Prefix search page size:
ITEMS_PER_PAGE=100
# Sub-addressing resolving:
SUBADDRESSING=True
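## Illustrative example: with sub-addressing enabled, a nested node whose id is
## 'object/1/name' inside the record 'object/1' may be resolved directly at
## .../museum/collection/object/1/name rather than only via its parent record.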
# Add the server prefix to relative IDs in resources.
# The GET param '?relativeid=true' can be used to skip prefixing for a request.
PREFIX_RECORD_IDS=RECURSIVE
# ID prefixing will not prefix RDF shorthand URIs (eg 'rdfs:label') in the `id`/`@id` property.
# It does this by resolving the context, if present, and gathering the list of prefixes to ignore.
# NB this only affects prefixes used with a colon (eg 'rdf:type' will be skipped if 'rdf' is in the context,
# but 'rdf/type' will be prefixed with the server's host and subpath as normal).
# The prefixes for a given context will be cached for a number of seconds (default 12 hours as below):
CONTEXTPREFIX_TTL=43200
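## Illustrative example of the behaviour described above: with these settings,
## {"id": "object/1"} is rewritten to
## {"id": "http://localhost:5100/museum/collection/object/1"}, while
## {"id": "rdfs:label"} is left untouched when 'rdfs' is defined in the context.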
# RDF Context Cache
## The LOD Gateway runs a cache for context documents; each context is cached for 30 minutes by default
RDF_CONTEXT_CACHE_EXPIRES=30
## This can be prefilled by RDF_CONTEXT_CACHE (in JSON encoding). See 'document_loader' notes in the base_graph_utils.py module for details.
# eg:
# RDF_CONTEXT_CACHE={"https://linked.art/ns/v1/linked-art.json": {"expires": null, "contextUrl": null, "documentUrl": null, "document": {"@context": {"@version": 1.1, "crm": "http://www.cidoc-crm.org/cidoc-crm/", "sci": "http://www.ics.forth.gr/isl/CRMsci/" .....