-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy path.env.example
More file actions
399 lines (343 loc) · 18.7 KB
/
.env.example
File metadata and controls
399 lines (343 loc) · 18.7 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
# The name of the ZFS storage pool for HAF to use
ZPOOL="haf-pool"
# The name of the dataset on $ZPOOL where HAF will store its data
# HAF won't read/write anything outside of $ZPOOL/$TOP_LEVEL_DATASET,
# so you can have, e.g., multiple HAF installations on the same
# pool by changing TOP_LEVEL_DATASET
TOP_LEVEL_DATASET="haf-datadir"
# these defaults usually don't need changing
ZPOOL_MOUNT_POINT="/${ZPOOL}"
TOP_LEVEL_DATASET_MOUNTPOINT="${ZPOOL_MOUNT_POINT}/${TOP_LEVEL_DATASET}"
# COMPOSE_PROFILES are the list of HAF services you want to control when
# you run `docker compose up` etc. It's a comma-separated list of profiles
# taken from:
# - core: the minimal HAF system of a database and hived
# - admin: useful tools for administrating HAF: pgadmin, pghero
# - apps: core HAF apps: hivemind, hafah, hafbe (balance-tracker is a subapp)
# - servers: services for routing/caching API calls: haproxy, jussi (JSON caching), varnish (REST caching)
# - monitoring: services for Prometheus, Grafana, Loki, Cadvisor, Nodeexporter, Promtail, Postgresexporter, Blackboxexporter...
# - ui: deploy denser and haf block explorer UIs at /blog, /wallet, and /explorer
# COMPOSE_PROFILES="core,admin,hafah,hivemind,servers"
# COMPOSE_PROFILES="core,admin,hafah,hafbe,hivemind,servers,monitoring"
COMPOSE_PROFILES="core,admin,servers,apps"
# The registry where Hive docker images are pulled from. Normally, you
# should set this to the default, `registry.hive.blog` or Docker Hub,
# where stable images will be published. If you want to run pre-release
# images, change this to `registry.gitlab.syncad.com/hive`, where CI
# builds are automatically pushed.
# HIVE_API_NODE_REGISTRY=registry.hive.blog
# HIVE_API_NODE_REGISTRY=hiveio
# HIVE_API_NODE_REGISTRY=registry.gitlab.syncad.com/hive
# To use the same tagged version of all the Hive API node images,
# set it here. You can override the tags for individual images
# below
HIVE_API_NODE_VERSION=1.28.5
# Grab the version from https://hub.docker.com/r/mahdiyari/hafsql/tags
# The last stable version should work fine with the last stable HAF version
HAFSQL_VERSION=2.4.5
# Global settings
# override the HAF core image's version and registry image here:
# HAF_IMAGE=${HIVE_API_NODE_REGISTRY}/haf
# HAF_VERSION=${HIVE_API_NODE_VERSION}
# HAF_DATA_DIRECTORY="${TOP_LEVEL_DATASET_MOUNTPOINT}"
# HAF_LOG_DIRECTORY="${TOP_LEVEL_DATASET_MOUNTPOINT}/logs"
# HAF_WAL_DIRECTORY="${TOP_LEVEL_DATASET_MOUNTPOINT}/shared_memory/haf_wal"
# Directory configuration for shared memory and RocksDB storage
# These variables are optional - if not set, they default to subdirectories under TOP_LEVEL_DATASET_MOUNTPOINT
# If you need to massive-sync HAF (i.e., you are not using a ZFS snapshot),
# then you can sync faster by temporarily using an in-memory shared_memory.bin.
# To do this, uncomment the HAF_SHM_DIRECTORY line below, and mount an appropriate
# tmpfs filesystem there.
# After the sync has finished, do `docker compose down` then move the shared_memory.bin
# file to the shared_memory directory, edit this file and comment the HAF_SHM_DIRECTORY
# out again to make it assume its default value, then `docker compose up -d` to restart HAF.
# HAF_SHM_DIRECTORY="/mnt/haf_shared_mem"
# Shared memory size for HAF PostgreSQL instance
# Increase for heavy workloads or large replay operations
# HAF_SHM_SIZE=32gb
# The docker compose project name, gets prefixed onto each container name
PROJECT_NAME=haf
# The docker network name, if you run two HAF instances on the same server,
# give them different network names to keep them isolated. Otherwise
# unimportant.
NETWORK_NAME=haf
# List of arguments for the HAF service
# ARGUMENTS=""
ARGUMENTS="--replay-blockchain"
# ARGUMENTS="--dump-snapshot=20230821"
# ARGUMENTS="--skip-hived"
# ARGUMENTS="--block-log-split=-1"
#
# Example how to use monitoring services
#
# ARGUMENTS="--replay-blockchain --stop-at-block 5000000 --block-stats-report-output=NOTIFY --block-stats-report-type=FULL --notifications-endpoint=hived-pme:9185"
#
# Mandatory options are:
# --block-stats-report-output=NOTIFY --block-stats-report-type=FULL --notifications-endpoint=hived-pme:9185
# For CI environments or when replaying historical blocks, set EXPECTED_BLOCK_TIME
# to the timestamp of the last block being replayed. This allows healthchecks to
# properly validate service synchronization when working with old blockchain data.
# Format: YYYY-MM-DDTHH:MM:SS (e.g., "2016-03-24T16:05:21" for block 10000)
# EXPECTED_BLOCK_TIME=
# Control whether HAF apps track reversible data. Not all apps support this, some
# only track data once it has been made irreversible by being confirmed by a supermajority
# of witnesses.
# Can be overridden per-app with {BALANCE_TRACKER|REPUTATION_TRACKER|HAF_BLOCK_EXPLORER}_IS_FORKING
# IS_FORKING=TRUE
# Enables the endpoint notification for the hived-pme service (a log converter from hived to Prometheus metrics)
#
# By default, 5 dashboards are available:
#
# - Blockstats (available after the replay phase; shows live block state, times, delays, etc.)
# - cAdvisor Docker Container - status of containers in the stack: CPU, memory, I/O, network...
# - Node Exporter Full - full state of the host on which haf-api-node is running (full overview of available metrics)
# - Monitor Services - status of containers included in the monitoring system
# - PostgreSQL Databases - databases stats from postgresexporter
#
# Additional logs are collected from all containers in the stack via Loki and Promtail
# Default login and password for Grafana is admin/admin - remember to change it after first login
# Statistics provided by Grafana are available at the host address on port 3000 (http(s)://hostname:3000)
# The default setup will run the recommended version of HAfAH,
# you can run a custom version by un-commenting and modifying the
# values below
# HAFAH_IMAGE=${HIVE_API_NODE_REGISTRY}/hafah
# HAFAH_VERSION=${HIVE_API_NODE_VERSION}
# The default setup will run the recommended version of Hivemind using the values
# below. You can override them here to run a custom version of Hivemind
# HIVEMIND_IMAGE=${HIVE_API_NODE_REGISTRY}/hivemind
# HIVEMIND_VERSION=${HIVE_API_NODE_VERSION}
# HIVEMIND_REWRITER_IMAGE=${HIVE_API_NODE_REGISTRY}/hivemind/postgrest-rewriter
# Additional Hivemind sync arguments
# HIVEMIND_SYNC_ARGS=
# Timeout for Hivemind API calls, any queries that take longer than
# this time will be aborted. 0 for unlimited, or give a nonzero
# time limit in milliseconds
# HIVEMIND_STATEMENT_TIMEOUT=5000
# The default setup will run the recommended version of balance tracker,
# you can run a custom version by un-commenting and modifying the
# values below
# BALANCE_TRACKER_IMAGE=${HIVE_API_NODE_REGISTRY}/balance_tracker
# BALANCE_TRACKER_VERSION=${HIVE_API_NODE_VERSION}
# BALANCE_TRACKER_IS_FORKING=TRUE
# REPUTATION_TRACKER_ADDON
# REPUTATION_TRACKER_IMAGE=${HIVE_API_NODE_REGISTRY}/reputation_tracker
# REPUTATION_TRACKER_VERSION=${HIVE_API_NODE_VERSION}
# REPUTATION_TRACKER_IS_FORKING=TRUE
# There are two ways of running Balance Tracker: as a standalone app, or
# integrated with HAF Block Explorer. While you can technically run both,
# there's no good reason to do so--you'll just waste disk space and processing
# power maintaining two copies of the data.
# Regardless of which way you decide to run Balance Tracker, you will need
# to run a single API server, and it needs to know which schema the data is
# stored in. It will be in "hafbe_bal" if you're running HAF Block Explorer,
# and "btracker_app" if you're running Balance Tracker standalone.
# The default behavior is to serve data from the HAF Block Explorer, but
# if you're only running the standalone Balance Tracker, uncomment the next
# line:
# BTRACKER_SCHEMA="btracker_app"
# The default setup will run the recommended version of HAF block explorer,
# you can run a custom version by un-commenting and modifying the
# values below
# HAF_BLOCK_EXPLORER_IMAGE=${HIVE_API_NODE_REGISTRY}/haf_block_explorer
# HAF_BLOCK_EXPLORER_VERSION=${HIVE_API_NODE_VERSION}
# HAF_BLOCK_EXPLORER_IS_FORKING=TRUE
# Hivesense
# to compute embeddings yourself, change this to 'local':
# HIVESENSE_CONFIGURATION=sync
#
# HIVESENSE_IMAGE=${HIVE_API_NODE_REGISTRY}/hivesense
# HIVESENSE_VERSION=${HIVE_API_NODE_VERSION}
# HIVESENSE_REWRITER_IMAGE=${HIVE_API_NODE_REGISTRY}/hivesense/postgrest-rewriter
# HIVESENSE_SYNC_ARGS=--stop-at-block=1000000
# HIVESENSE_START_BLOCK=1
# HIVESENSE_WORKERS=16
# HIVESENSE_EMBEDDING_BATCH_SIZE=100
# HIVESENSE_OLLAMA=http://hivesense-ollama:11434
# when building the hivesense indexes, we try to increase (temporarily) maintenance_work_mem to
# this value for the duration of index creation.
# if you're using the default settings (128-bit reduced embeddings, min 75 tokens), this setting
# doesn't matter much -- indexes take about an hour and a half even with only 4GB.
# But getting this right can save days of index creation time if not using reduced embeddings or
# are otherwise indexing more data
# HIVESENSE_MAINTENANCE_WORK_MEM=28
# HIVESENSE_STORE_HALFVEC_EMBEDDINGS=TRUE
# HIVESENSE_USE_HALFVEC_INDEX=TRUE
# HIVESENSE_HNSW_M=16
# HIVESENSE_HNSW_EF_CONSTRUCTION=200
#
# Dimension reduction configuration:
# HIVESENSE_USE_REDUCED_EMBEDDINGS=false
# Option 1: Download from URL (will be cached after first download)
# HIVESENSE_MATRIX_SOURCE=https://gitlab.syncad.com/hive/hivesense/-/wikis/uploads/0eebabf30d7cceb5776cb62c41abb2c9/projection_matrix_768_128_20250722.json.gz
# Option 2: Use local file (for users generating their own matrices). Should be placed in datadir's hivesense/config directory. May be .json or .json.gz
# HIVESENSE_MATRIX_SOURCE=projection_matrix.json
# HIVESENSE_REDUCED_DIMENSION=128
# HIVESENSE_MIN_TOKEN_THRESHOLD=75
# HIVESENSE_MIN_TOKEN_SEARCH_THRESHOLD=0
# HIVESENSE_SYNC_API=https://api.syncad.com/hivesense-api
# if you need to gpu accelerate your ollama, you should probably use the stock
# image instead of our smaller version. To do so, uncomment this:
# OLLAMA_IMAGE=ollama/ollama
# OLLAMA_VERSION=0.6.5
# Default settings:
# HIVESENSE_MODEL=yxchia/multilingual-e5-base:F16
# HIVESENSE_VECTOR_SIZE=768
# HIVESENSE_TOKENS_PER_CHUNK=512
# HIVESENSE_OVERLAP_AMOUNT=0.15
# HIVESENSE_TOKENIZER_MODEL_NAME=e5-base # used by intfloat/multilingual-e5-base
# HIVESENSE_DOCUMENT_PREFIX="passage: "
# HIVESENSE_QUERY_PREFIX="query: "
# or, for a better-performing but slower model that is still fast enough that CPUs can embed query strings:
# HIVESENSE_MODEL=jeffh/intfloat-multilingual-e5-large:f16
# HIVESENSE_VECTOR_SIZE=1024
## or, for a significantly bigger and slower model:
## HIVESENSE_MODEL=rjmalagon/gte-qwen2-1.5b-instruct-embed-f16
## HIVESENSE_VECTOR_SIZE=1536
## HIVESENSE_TOKENS_PER_CHUNK=32768
## HIVESENSE_OVERLAP_AMOUNT=0.05
## HIVESENSE_TOKENIZER_MODEL_NAME=Qwen/Qwen2-1.5B
## HIVESENSE_DOCUMENT_PREFIX=""
## HIVESENSE_QUERY_PREFIX="Instruct: Given a web search query, retrieve relevant passages that answer the query\nQuery: "
## Override the version of NFT tracker deployed on this node
# NFT_TRACKER_VERSION=${HIVE_API_NODE_VERSION}
## Override the version of block_explorer_ui deployed on this node (requires the 'ui' profile)
# BLOCK_EXPLORER_UI_VERSION=${HIVE_API_NODE_VERSION}
## Override the version of denser deployed on this node (requires the 'ui' profile)
# DENSER_VERSION=${HIVE_API_NODE_VERSION}
# override the version of hived used for API docs
# note that this doesn't change the version of hived actually running, you must use
# HAF_VERSION to change that. It's rare that you will want to override this.
# HIVE_VERSION=${HIVE_API_NODE_VERSION}
# The default setup uses "Drone" as the API reverse proxy & cache for the old JSON-RPC-style
# calls. There is the older alternate reverse proxy, "Jussi", that you can choose to use instead.
# For more info about drone/jussi, see:
# https://hive.blog/hive-139531/@deathwing/announcing-drone-or-leveling-up-hive-api-nodes-and-user-experience
# To replace Drone with Jussi, uncomment the next line:
# JSONRPC_API_SERVER_NAME=jussi
# The default setup will run the recommended version of Jussi
# you can run a custom version by un-commenting and modifying the
# values below
# JUSSI_IMAGE=${HIVE_API_NODE_REGISTRY}/jussi
# JUSSI_VERSION=latest
# JUSSI_REDIS_MAX_MEMORY=8G
# If you have chosen to run Drone instead of Jussi, it will run
# this version by default. You can run a custom version by un-commenting
# and modifying the values below
# DRONE_IMAGE=${HIVE_API_NODE_REGISTRY}/drone
# DRONE_VERSION=latest
# DRONE_LOG_LEVEL=warn,access_log=info
# In the default configuration, synchronous broadcast_transaction calls are not handled by
# your local stack, but instead are sent to a dedicated hived instance on api.hive.blog.
# (asynchronous broadcast_transaction calls and all other hived calls are always handled by
# your local instance of hived).
# Synchronous calls can easily tie up your hived node and cause performance problems for
# all hived API calls. For that reason, synchronous broadcast calls are deprecated. On
# public API servers, we typically run a separate hived instance for synchronous calls,
# so if they cause performance problems, it only impacts other users making synchronous
# calls.
# To avoid forcing every haf_api_node operator to run a second hived server, the default
# config forwards these disruptive calls to a public server dedicated to the purpose.
# If you want to handle these calls using your local hived node, or you want to forward
# these calls to a different server, override these variables:
# the values below will cause synchronous broadcasts to be handled by your own hived
# SYNC_BROADCAST_BACKEND_SERVER=haf
# SYNC_BROADCAST_BACKEND_PORT=8091
# SYNC_BROADCAST_BACKEND_SSL=no-ssl
# For running a full stack:
# The hostname you'll be running this server on. This should be a single hostname, the public
# hostname your server will be accessible from. This is used by the Swagger-UI REST API
# explorer for generating URLs pointing at your server. If this isn't a public server,
# this can be a local domain name.
PUBLIC_HOSTNAME="your.hostname.com"
# Public PostgreSQL Access Configuration (optional, disabled by default)
# To enable direct PostgreSQL access for external users:
# 1. Add postgres-public/compose.postgres-public.yml to COMPOSE_FILE:
# COMPOSE_FILE=compose.yml:postgres-public/compose.postgres-public.yml
# (or append to existing COMPOSE_FILE if you have one)
# 2. Set the credentials below for a read-only PostgreSQL user
# 3. Two connection methods are available:
# - Port 5432 (TLS): postgresql://user:pass@hostname:5432/haf_block_log?sslmode=require&sslnegotiation=direct
# (Requires PostgreSQL 17+ client with libpq 17+)
# - Port 5433 (Plain TCP): postgresql://user:pass@hostname:5433/haf_block_log
# (Works with all PostgreSQL versions, but NOT encrypted)
# HAFSQL_PUBLIC_CONNECTION_LIMIT=5 # Maximum concurrent connections for public users
# HafSQL
# To change the default public HafSQL user and password
# This user has read-only access to the haf and other apps
# HAFSQL_PUBLIC_USERNAME=hafsql_public
# HAFSQL_PUBLIC_PASSWORD=hafsql_public
# Email Alerts Configuration (optional)
# To enable email alerts when HAProxy services go down:
# 1. Set the email configuration variables below
# 2. Add "email" to your COMPOSE_PROFILES (e.g., "core,admin,servers,apps,email")
#
# SMTP server configuration for sending alerts:
# SMTP_HOST="smtp.gmail.com:587"
# SMTP_USER="your-email@gmail.com"
# SMTP_PASS="your-app-password"
# SMTP_AUTH_TYPE="plain" # or "login" for some servers
#
# HAProxy email alert settings:
# HAPROXY_EMAIL_TO="alerts@example.com" # Required to enable alerts
# HAPROXY_EMAIL_FROM="noreply@${PUBLIC_HOSTNAME}" # Sender address (optional)
# HAPROXY_EMAIL_LEVEL="notice" # Alert level: emerg, alert, crit, err, warning, notice, info, debug (optional)
# PostgREST server log levels
# https://docs.postgrest.org/en/latest/references/configuration.html#log-level
#BALANCE_TRACKER_SERVER_LOG_LEVEL=error
#BLOCK_EXPLORER_SERVER_LOG_LEVEL=error
#HAFAH_SERVER_LOG_LEVEL=error
#HIVEMIND_SERVER_LOG_LEVEL=error
#REPUTATION_TRACKER_SERVER_LOG_LEVEL=error
# There are several ways you can configure serving HTTP/HTTPS. Some examples:
# - to serve API using HTTPS with automatic redirect from HTTP -> HTTPS (the default),
# just give the hostname:
# CADDY_SITES="your.hostname.com"
# In the normal case, where you want to serve HTTP/HTTPS from the hostname you set in
# PUBLIC_HOSTNAME above, you don't need to set this variable, it will automatically take
# the value of PUBLIC_HOSTNAME
# - to serve using only HTTP (if you have nginx or something else handling SSL termination),
# you can use:
# CADDY_SITES="http://your.hostname.com"
# or even:
# CADDY_SITES="http://"
# if you want to respond on any hostname
# - to serve on either HTTP or HTTPS (i.e., respond to HTTP requests in the clear, instead of
# issuing a redirect):
# CADDY_SITES="http://your.hostname.com, https://your.hostname.com"
# - to serve on multiple hostnames, separate them with a comma and space:
# CADDY_SITES="your.hostname.com, your.other-hostname.net"
# CADDY_SITES="your.hostname.com"
# By default, we're configured to use a self-signed SSL certificate (by including the
# file below, which tells Caddy to generate a self-signed certificate). To obtain a real
# certificate from LetsEncrypt or otherwise, you can prevent the self-signed config
# from acting by mounting /dev/null in its place, then adding your own config
# files in the caddy/snippets directory
# WARNING: if you disable the self-signed certificate, Caddy will attempt to get a
# real certificate for PUBLIC_HOSTNAME from LetsEncrypt. If this server is
# behind a firewall or NAT, or PUBLIC_HOSTNAME is misconfigured, it will fail
# to get a certificate, and that will count against LetsEncrypt's rate limits.
TLS_SELF_SIGNED_SNIPPET=caddy/self-signed.snippet
# TLS_SELF_SIGNED_SNIPPET=/dev/null
# By default, we restrict access to the /admin URLs to localhost. You can allow
# connections by switching the following variable to /dev/null. First, though,
# you should protect the admin endpoint by a password or to a local network.
# Read caddy/snippets/README.md for how
LOCAL_ADMIN_ONLY_SNIPPET=caddy/local-admin-only.snippet
# LOCAL_ADMIN_ONLY_SNIPPET=/dev/null
# Caddy will only accept requests on the /admin/ endpoints over https by default.
# This is so that you can password-protect them with HTTP basicauth.
# However, if you've configured your server to only serve http, and something
# upstream is providing SSL, you can change this to allow access to the
# admin endpoints.
# ADMIN_ENDPOINT_PROTOCOL=http
# Monitoring env variables
#
# PROMETHEUS_VERSION=v2.49.1
# NODE_EXPORTER_VERSION=v1.7.0
# CADVISOR_VERSION=v0.47.2
# GRAFANA_VERSION=10.3.3
# LOKI_VERSION=2.9.4
# PROMTAIL_VERSION=2.9.4
# HIVED_PME_VERSION=49a7312d
# BLACKBOX_VERSION=v0.24.0
# DATA_SOURCE="postgresql://postgres@haf:5432/postgres?sslmode=disable"