From time to time I struggle with issues with the dependencies I run in Docker. I suspect it is, as usual, all related to networking and DNS. I have scoured the whole forum, and I want to share my configuration with you all; maybe someone has the right answer for me.
I'm using Docker Desktop version 4.24.1.
The Elasticsearch container logs some errors ("org.elasticsearch.action.search.SearchPhaseExecutionException: all shards failed"), but that has never been an issue before.
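For reference, this is how I check the cluster state when that error shows up (assuming the default 9200:9200 port mapping from the compose file below):
# Overall cluster health; a red status is the usual reason for "all shards failed"
curl "http://localhost:9200/_cluster/health?pretty"
# List the indices together with their health and shard counts
curl "http://localhost:9200/_cat/indices?v"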
My docker-compose.yml
# Docker compose below sets up the containers needed to run Litium locally:
version: '3'
services:
  dnsresolver:
    # https://github.com/cytopia/docker-bind
    image: cytopia/bind:stable-0.28
    container_name: Test-dnsresolver
    ports:
      - "53:53/tcp"
      - "53:53/udp"
    environment:
      - DNS_CNAME=*.localtest.me=host.docker.internal
      - DNS_FORWARDER=192.168.65.7
    dns: 192.168.65.7
    restart: unless-stopped
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.6.2
    container_name: Test-elasticsearch
    depends_on:
      - dnsresolver
    dns: 192.168.65.254
    restart: unless-stopped
    ports:
      - "9200:9200"
    environment:
      - discovery.type=single-node
      # Allocate 2GB RAM instead of the default 512MB:
      # uncomment the line below for additional memory allocation
      # - "ES_JAVA_OPTS=-Xms2g -Xmx2g"
    user: root
    volumes:
      - ./volumes/elasticsearch/data:/usr/share/elasticsearch/data
    entrypoint:
      - /bin/sh
      - -c
      # The accelerator implementation of Elasticsearch requires the analysis-dynamic-synonym plugin.
      # The plugin refreshes the list of synonyms in Elasticsearch every minute, allowing synonyms
      # to be added/modified in the Litium backoffice and updated in Elasticsearch without downtime.
      - "./bin/elasticsearch-plugin list | grep -q analysis-dynamic-synonym || ./bin/elasticsearch-plugin install -b https://github.com/Tasteful/elasticsearch-analysis-dynamic-synonym/releases/download/v7.6.2/elasticsearch-analysis-dynamic-synonym.zip; /usr/local/bin/docker-entrypoint.sh"
  synonymserver:
    # Synonym server to provide elasticsearch with synonyms.
    image: registry.litium.cloud/apps/synonym-server:1.2.0
    container_name: Test-synonymserver
    restart: unless-stopped
    ports:
      - "9210:80"
    environment:
      - DataFolder=/app_data
    volumes:
      - ./volumes/synonymserver/data:/app_data
  kibana:
    # The Kibana image tries, by default, to connect to a host/container called elasticsearch.
    image: docker.elastic.co/kibana/kibana:7.6.2
    container_name: Test-kibana
    depends_on:
      - elasticsearch
    restart: unless-stopped
    ports:
      - "5601:5601"
  redis:
    image: redis:5.0.5-alpine
    container_name: Test-redis
    restart: unless-stopped
    ports:
      - "6379:6379"
  sqlserver:
    image: mcr.microsoft.com/mssql/server:2019-latest
    container_name: Test-sqlserver
    environment:
      - SA_PASSWORD=Pass@word
      - ACCEPT_EULA=Y
    restart: unless-stopped
    ports:
      # Expose the SQL container on port 5434 to avoid conflicts with a previously installed local SQL instance.
      # If you do not have SQL Server installed you can use 1433:1433 as the mapping and skip the port number in the connection strings.
      - "5434:1433"
    user: root
    volumes:
      # Map [local directory:container directory] so that db/log files are stored on the "host"
      # (your local computer, outside the container) and thereby persisted across container restarts.
      # A local path starting with "." is relative to the current folder, so the database files
      # end up on your computer in the same directory as this docker-compose.yml file.
      - ./data/mssql/data:/var/opt/mssql/data
      - ./data/mssql/log:/var/opt/mssql/log
    entrypoint:
      # Due to an issue with the sqlserver image, permissions on the db files may be lost on container restart.
      # Using the permissions_check entrypoint ensures that permissions are set on every restart.
      - /bin/sh
      - -c
      - "/opt/mssql/bin/permissions_check.sh && /opt/mssql/bin/sqlservr"
  direct-payment:
    image: registry.litium.cloud/apps/direct-payment:1.2.0
    container_name: Test-direct-payment
    dns:
      - 192.168.65.254
    restart: unless-stopped
    ports:
      - "10010:80"
      - "10111:443"
    environment:
      # Enable HTTPS binding
      - ASPNETCORE_URLS=https://+;http://+
      - ASPNETCORE_HTTPS_PORT=10111
      # Configuration for HTTPS inside the container, exported dotnet dev-certs with corresponding password
      - ASPNETCORE_Kestrel__Certificates__Default__Password=SuperSecretPassword
      - ASPNETCORE_Kestrel__Certificates__Default__Path=/https/localhost.pfx
      # Folder for the configuration, this is volume-mapped
      - CONFIG_PATH=/app_config
      # Folder where log files should be placed, this is volume-mapped
      - APP_LOG_PATH=/logs
      # Don't validate certificates
      - AppConfiguration__ValidateCertificate=false
      # URL to this app
      - AppMetadata__AppUrl=https://host.docker.internal:10111
      # URL to the Litium installation
      - LitiumApi__ApiUrl=https://test.localtest.me:5001
    volumes:
      - ./data/direct-payment/config:/app_config
      - ./data/direct-payment/data:/app_data
      - ./data/direct-payment/logs:/logs
      - ./data/direct-payment/DataProtection-Keys:/root/.aspnet/DataProtection-Keys
      - ./data/https:/https:ro
  direct-shipment:
    image: registry.litium.cloud/apps/direct-shipment:1.2.0
    container_name: Test-direct-shipment
    dns:
      - 192.168.65.254
    restart: unless-stopped
    ports:
      - "10020:80"
      - "10121:443"
    environment:
      # Enable HTTPS binding
      - ASPNETCORE_URLS=https://+;http://+
      - ASPNETCORE_HTTPS_PORT=10121
      # Configuration for HTTPS inside the container, exported dotnet dev-certs with corresponding password
      - ASPNETCORE_Kestrel__Certificates__Default__Password=SuperSecretPassword
      - ASPNETCORE_Kestrel__Certificates__Default__Path=/https/localhost.pfx
      # Folder for the configuration, this is volume-mapped
      - CONFIG_PATH=/app_config
      # Folder where log files should be placed, this is volume-mapped
      - APP_LOG_PATH=/logs
      # Don't validate certificates
      - AppConfiguration__ValidateCertificate=false
      # URL to this app
      - AppMetadata__AppUrl=https://host.docker.internal:10121
      # URL to the Litium installation
      - LitiumApi__ApiUrl=https://test.localtest.me:5001
    volumes:
      - ./data/direct-shipment/config:/app_config
      - ./data/direct-shipment/data:/app_data
      - ./data/direct-shipment/logs:/logs
      - ./data/direct-shipment/DataProtection-Keys:/root/.aspnet/DataProtection-Keys
      - ./data/https:/https:ro
  mailhog:
    image: mailhog/mailhog:latest
    container_name: Test-mailhog
    restart: unless-stopped
    logging:
      driver: 'none'
    ports:
      - "1025:1025" # SMTP server
      - "8025:8025" # Web UI
My appsettings.Development.json
{
  "Litium": {
    "Accelerator": {
      "Smtp": {
        "Host": "host.docker.internal",
        "Port": 1025,
        "Password": "",
        "Username": "",
        "EnableSecureCommunication": false
      }
    },
    "Data": {
      "ConnectionString": "Pooling=true;User Id=sa;Password=Pass@word;Database=Test;Server=127.0.0.1,5434",
      "EnableSensitiveDataLogging": false
    },
    "Folder": {
      "Local": "../files/local",
      "Shared": "../files/shared"
    },
    "Elasticsearch": {
      "ConnectionString": "http://localhost:9200",
      "Username": null,
      "Password": null,
      "Prefix": "Test_7",
      "Synonym": {
        "Server": "http://host.docker.internal:9210",
        "ApiKey": null
      }
    },
    "Redis": {
      "Prefix": "Test_7",
      "Cache": {
        "ConnectionString": "localhost:6379",
        "Password": null
      },
      "DistributedLock": {
        "ConnectionString": "localhost:6379",
        "Password": null
      },
      "ServiceBus": {
        "ConnectionString": "localhost:6379",
        "Password": null
      }
    }
  }
}
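To rule out the settings above, I also query Elasticsearch directly with the same prefix; the test_7* pattern is only my guess at the index names, since Elasticsearch lowercases them:
# Do the Litium indices exist, and do they contain documents?
curl "http://localhost:9200/_cat/indices/test_7*?v"
curl "http://localhost:9200/test_7*/_count?pretty"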
My Hosts file
# Copyright (c) 1993-2009 Microsoft Corp.
#
# This is a sample HOSTS file used by Microsoft TCP/IP for Windows.
#
# This file contains the mappings of IP addresses to host names. Each
# entry should be kept on an individual line. The IP address should
# be placed in the first column followed by the corresponding host name.
# The IP address and the host name should be separated by at least one
# space.
#
# Additionally, comments (such as these) may be inserted on individual
# lines or following the machine name denoted by a '#' symbol.
#
# For example:
#
# 102.54.94.97 rhino.acme.com # source server
# 38.25.63.10 x.acme.com # x client host
# localhost name resolution is handled within DNS itself.
# 127.0.0.1 localhost
# ::1 localhost
# Added by Docker Desktop
192.168.65.254 host.docker.internal
192.168.65.254 gateway.docker.internal
# To allow the same kube context to work on the host and the container:
127.0.0.1 kubernetes.docker.internal
# End of section
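A quick sanity check that the Docker Desktop entries above actually resolve on the host:
ping -n 1 host.docker.internal
ping -n 1 gateway.docker.internal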
When everything is running and I create new Elasticsearch indices, they are created and I can see them in Kibana, but I don't get anything back in the Litium application. The communication with Elasticsearch seems to work only one way. Another colleague has the same setup and it works for him.
Litium version: 8.12.0
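To see whether the data is searchable at all outside the application, I run a direct query from the host that the Litium site runs on (again assuming the test_7* index pattern based on my prefix):
# If this returns hits while the application gets nothing, the problem sits between
# the application and Elasticsearch rather than inside the container
curl "http://localhost:9200/test_7*/_search?size=1&pretty"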