How do I run a Dremio reflection refresh in the Asia/Singapore time zone?
If I schedule the reflection refresh for 02:05 AM, Dremio interprets that time as UTC+0.
I need the refresh to run at 02:05 AM in UTC+8 (Asia/Singapore) instead.
How can I achieve that? I have attached my docker-compose.yml file below;
it already sets TZ=Asia/Singapore on each container:

services:
  # Nessie Catalog Server Using In-Memory Store
  nessie:
    image: projectnessie/nessie:latest
    container_name: nessie
    environment:
      - QUARKUS_PROFILE=prod
      - QUARKUS_HTTP_PORT=19120
      # Brackets restored: the pasted file had them markdown-escaped (\[ \]),
      # which would end up literally in the log pattern.
      - QUARKUS_LOG_CONSOLE_FORMAT=%d{yyyy-MM-dd HH:mm:ss} %-5p [%c{1.}] (%t) %s%e%n
      - QUARKUS_LOG_LEVEL=INFO
      - JAVA_OPTS=-XX:+UseParallelGC -XX:ParallelGCThreads=8 -Xms1G -Xmx4G
      # TZ only affects log timestamps / JVM default zone inside this container.
      - TZ=Asia/Singapore
    volumes:
      - ./nessie-data:/nessie/data
      - nessie_logs:/nessie/logs
    ports:
      # Quoted to avoid YAML 1.1 sexagesimal parsing of digit:digit values.
      - "19120:19120"
    networks:
      intro-network:
    deploy:
      resources:
        limits:
          cpus: '8'
          memory: 4G
        reservations:
          cpus: '2'
          memory: 1G
# Minio Storage Server
  minio:
    image: minio/minio
    container_name: minio
    environment:
      - MINIO_ROOT_USER=admin
      - MINIO_ROOT_PASSWORD=password
      - MINIO_DOMAIN=minio
      - MINIO_REGION_NAME=us-east-1
      - MINIO_REGION=us-east-1
      - TZ=Asia/Singapore
    ports:
      - "9000:9000"
      - "9001:9001"
    healthcheck:
      # Exec-form array restored: the pasted file had the brackets
      # markdown-escaped (\[ \]), which is invalid YAML.
      test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
      interval: 30s
      timeout: 20s
      retries: 3
    volumes:
      - ./minio-data:/minio-data
      - /data/minio:/data
    command: server /data --console-address ':9001'
    networks:
      intro-network:
    deploy:
      resources:
        limits:
          cpus: '8'
          memory: 8G
        reservations:
          cpus: '2'
          memory: 2G
# MinIO Client for bucket setup
  minio-client:
    image: minio/mc
    container_name: minio-client
    depends_on:
      - minio
    # Folded scalar (>) joins these lines with spaces into one shell command.
    entrypoint: >
      /bin/sh -c "
      sleep 10;
      /usr/bin/mc config host add myminio http://minio:9000 admin password;
      /usr/bin/mc mb myminio/datalake;
      /usr/bin/mc mb myminio/datalakehouse;
      /usr/bin/mc mb myminio/warehouse;
      /usr/bin/mc mb myminio/seed;
      /usr/bin/mc cp /minio-data/* myminio/seed/ || echo 'Warning: Some files may not have copied';
      exit 0
      "
    volumes:
      - ./minio-data:/minio-data
    networks:
      intro-network:
# Spark
  spark:
    platform: linux/x86_64
    image: alexmerced/spark35nb:latest
    ports:
      # Quoted to avoid YAML 1.1 implicit typing of digit:digit values.
      - "8080:8080"
      - "7077:7077"
      - "8081:8081"
      - "4040-4045:4040-4045"
      - "18080:18080"
      - "8888:8888"
    environment:
      - AWS_REGION=us-east-1
      - AWS_ACCESS_KEY_ID=admin
      - AWS_SECRET_ACCESS_KEY=password
      - SPARK_MASTER_HOST=spark
      - SPARK_MASTER_PORT=7077
      - SPARK_MASTER_WEBUI_PORT=8080
      - SPARK_WORKER_WEBUI_PORT=8081
      - SPARK_WORKER_CORES=20
      - SPARK_WORKER_MEMORY=35G
      - SPARK_DRIVER_MEMORY=10G
      - SPARK_EXECUTOR_MEMORY=20G
      - SPARK_EXECUTOR_CORES=10
      - SPARK_MASTER_OPTS=-XX:+UseG1GC -XX:G1HeapRegionSize=32M
      - SPARK_WORKER_OPTS=-XX:+UseG1GC -XX:G1HeapRegionSize=32M
      - SPARK_DAEMON_MEMORY=2G
      - SPARK_HISTORY_OPTS=-Dspark.history.fs.logDirectory=/tmp/spark-events -Dspark.history.ui.port=18080 -XX:+UseG1GC
      - SPARK_HOME=/opt/spark
      - TZ=Asia/Singapore
    volumes:
      - ./notebook-seed:/workspace/seed-data
      - spark_notebooks:/workspace/notebooks
      - spark_logs:/tmp/spark-events
      - spark_data:/workspace/data
    container_name: spark
    # Folded scalar (>) already joins lines with single spaces, so shell
    # backslash continuations are unnecessary (and would leak a literal
    # backslash into the command) — removed.
    entrypoint: >
      /bin/bash -c "
      /opt/spark/sbin/start-master.sh &&
      /opt/spark/sbin/start-worker.sh spark://$(hostname):7077 &&
      mkdir -p /tmp/spark-events &&
      start-history-server.sh &&
      jupyter lab --ip=0.0.0.0 --port=8888 --no-browser --allow-root --NotebookApp.token='' --NotebookApp.password='' &&
      tail -f /dev/null
      "
    networks:
      intro-network:
    deploy:
      resources:
        limits:
          cpus: '20'
          memory: 40G
        reservations:
          cpus: '10'
          memory: 20G
# Dremio service configuration (partial)
  dremio:
    platform: linux/x86_64
    image: dremio/dremio-oss:latest
    user: "1000:1000"
    # SIMPLIFIED ENTRYPOINT - Skip upgrade check
    # Exec-form array restored (the paste had markdown-escaped \[ \]).
    entrypoint: ["/opt/dremio/bin/dremio", "start-fg"]
    ports:
      - "9047:9047"
      - "31010:31010"
      - "32010:32010"
      - "45678:45678"
    container_name: dremio
    environment:
      # Optimized JVM settings for large datasets
      - DREMIO_JAVA_SERVER_EXTRA_OPTS=-Dpaths.dist=file:///opt/dremio/data/dist -Xms8G -Xmx24G -XX:+UseG1GC -XX:G1HeapRegionSize=32M -XX:MaxGCPauseMillis=500 -XX:+UseGCOverheadLimit -XX:+ExplicitGCInvokesConcurrent -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/opt/dremio/log/heap_dump.hprof
      # Increased direct memory for large query processing
      - DREMIO_MAX_DIRECT_MEMORY_SIZE_MB=32768
      - DREMIO_MAX_HEAP_MEMORY_SIZE_MB=24576
      # Additional performance tuning
      - DREMIO_QUERY_MAX_MEMORY_PER_NODE_MB=16384
      - DREMIO_EXECUTOR_MEMORY_LIMIT_MB=32768
      # NOTE(review): TZ changes the container/JVM default zone; confirm
      # whether your Dremio version schedules reflections off this or off UTC.
      - TZ=Asia/Singapore
    volumes:
      - /data/dremio/dremio_data:/opt/dremio/data
      - /data/dremio/dremio_logs:/opt/dremio/log
      - dremio_conf:/opt/dremio/conf
    networks:
      intro-network:
    # Health check to auto-restart if Dremio becomes unresponsive
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:9047/"]
      interval: 30s
      timeout: 10s
      retries: 5
      start_period: 3600s
    restart: unless-stopped
    deploy:
      resources:
        limits:
          cpus: '12'
          memory: 60G
        reservations:
          cpus: '6'
          memory: 40G
    # Add logging configuration
    logging:
      driver: "json-file"
      options:
        max-size: "100m"
        max-file: "10"
# Dremio Proxy
  dremio-proxy:
    build:
      context: /home/ganbaatar/dremio_datalakehouse
      dockerfile: Dockerfile.middleware
    container_name: dremio-proxy
    ports:
      - "31011:31011"
    volumes:
      - /home/ganbaatar/dremio_datalakehouse/dremio_middleware.py:/app/dremio_middleware.py
      - /home/ganbaatar/dremio_datalakehouse/logs:/app/logs
    environment:
      - DREMIO_HOST=dremio
      - DREMIO_PORT=31010
      - DEBUG=false
      - ALLOW_ALL=false
      - TZ=Asia/Singapore
    networks:
      intro-network:
    restart: unless-stopped
    deploy:
      resources:
        limits:
          cpus: '2'
          memory: 1G
        reservations:
          cpus: '0.5'
          memory: 256M
# Seeder Service
  seeder:
    build:
      context: .
      dockerfile: Dockerfile.seeder
    container_name: ondo-seeder
    depends_on:
      - nessie
      - minio
      - spark
      - dremio
    # Duplicate `networks` key removed: the original declared it twice
    # (sequence form and mapping form); most parsers silently keep only
    # the last one. One declaration is sufficient.
    networks:
      - intro-network
    volumes:
      - ./OndoSeeder.py:/app/OndoSeeder.py
    environment:
      - PYTHONUNBUFFERED=1
      - TZ=Asia/Singapore
    restart: "no"
# Define named volumes
volumes:
  nessie_logs:
  minio_storage:
  spark_notebooks:
  spark_logs:
  spark_data:
  dremio_data:
  dremio_conf:
  dremio_logs:

# Define the network every service attaches to. The services reference
# `intro-network`, but the file never declared it at the top level —
# `docker compose` rejects undefined custom networks.
networks:
  intro-network:
