I have a Microsoft SQL Server database which I wish to (continuously) push into Elasticsearch.
In my base directory I have a docker-compose.yml file:
version: "3.7"
services:
        backend:
                build: ./flask
                image: backend
                container_name: backend
                restart: always
                volumes:
                        - ./flask:/usr/src/app
                ports:
                        - 5000:5000
                command: python manage.py run -h 0.0.0.0
                env_file:
                        - .env
                depends_on:
                        - mssql
                networks:
                        - microsoftSQL
        mssql:
                image: mcr.microsoft.com/mssql/server:2019-GA-ubuntu-16.04
                container_name: sql1
                restart: always
                ports:
                        - 1433:1433
                environment:
                        - ACCEPT_EULA=Y
                        - SA_PASSWORD=${SQL_PASSWORD}
                networks:
                        - microsoftSQL
                
        elasticsearch:
                image: elasticsearch:7.5.1
                container_name: elasticsearch
                environment:
                        - cluster.name=docker-cluster
                        - bootstrap.memory_lock=true
                        - discovery.type=single-node
                        - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
                ulimits:
                        memlock:
                                soft: -1
                                hard: -1
                volumes:
                        - esdata1:/usr/share/elasticsearch/data
                restart: always
                ports:
                        - 9200:9200
                networks:
                        - microsoftSQL
               
        logstash:
                build: ./logstash
                environment:
                        LS_JAVA_OPTS: "-Xmx256m -Xms256m"
                        DEBUG: "1"
                env_file:
                        - .env
                ports:
                        - '5001:5001'
                container_name: logstash
                restart: always
                networks:
                        - microsoftSQL
                links:
                        - elasticsearch
                depends_on:
                        - elasticsearch
        kibana:
                image: docker.elastic.co/kibana/kibana:7.5.1
                container_name: kibana
                restart: always
                ports:
                        - 5601:5601
                environment:
                        - ELASTICSEARCH_HOSTS=http://elasticsearch:9200
                networks:
                        - microsoftSQL
                links:
                        - elasticsearch
networks:
        microsoftSQL:
                driver: bridge
volumes:
        db-data:
        esdata1:
                driver: local
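(As an aside, the db-data volume declared at the bottom is not mounted anywhere yet; if I want the SQL Server data to survive container restarts, I assume the mssql service needs something like this added, since /var/opt/mssql is where SQL Server on Linux keeps its data:)
        mssql:
                volumes:
                        - db-data:/var/opt/mssql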
The relevant files in my directory are:
logstash/
├── Dockerfile
└── config
    └── logstash.conf
where the Dockerfile is:
FROM docker.elastic.co/logstash/logstash:7.5.1
RUN rm -f /usr/share/logstash/pipeline/logstash.conf
ADD config/ /usr/share/logstash/config
USER root 
#CMD bin/logstash-plugin install logstash-input-jdbc
#COPY mssql-jdbc-*.jre9.jar /opt/
# Add logstash plugins setup here:
RUN logstash-plugin install logstash-input-jdbc
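(I assume that for the jdbc_driver_library path referenced in logstash.conf below to resolve inside the image, the driver jar has to be copied in, i.e. roughly the line I currently have commented out:)
COPY mssql-jdbc-7.5.1.jre9.jar /opt/mssql-jdbc-7.5.1.jre9.jar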
and logstash.conf is:
input {
  jdbc {
    jdbc_driver_library => "/opt/mssql-jdbc-7.5.1.jre9.jar"
    jdbc_driver_class => "com.microsoft.sqlserver.jdbc.SQLServerDriver"
    jdbc_connection_string => "jdbc:sqlserver://sql1:1433;databaseName=TestDB"
    jdbc_user => "******"
    jdbc_password => "******"
    statement => "SELECT * FROM dbo.Inventory"
    type => "db-logs-access"
  }
}
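At the moment there is no output section in this file; my understanding is that for continuous (rather than one-shot) indexing I also need a schedule on the jdbc input, plus an output pointing at Elasticsearch, along the lines of this sketch (the index name is just my placeholder):
input {
  jdbc {
    # ... same settings as above ...
    schedule => "* * * * *"   # cron syntax: run the query every minute
  }
}
output {
  elasticsearch {
    hosts => ["http://elasticsearch:9200"]
    index => "inventory"      # placeholder index name
  }
}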
Given all of this, I expected MS SQL data to be indexed into Elasticsearch, but what I actually see is Logstash terminating:
[2019-12-20T10:08:40,557][INFO ][logstash.outputs.elasticsearch] New Elasticsearch output {:class=>"LogStash::Outputs::Elasticsearch", :hosts=>["http://elasticsearch:9200"]}
[2019-12-20T10:08:40,686][INFO ][logstash.javapipeline ] Starting pipeline {:pipeline_id=>".monitoring-logstash", "pipeline.workers"=>1, "pipeline.batch.size"=>2, "pipeline.batch.delay"=>50, "pipeline.max_inflight"=>2, "pipeline.sources"=>["monitoring pipeline"], :thread=>"#<Thread:0x63c4280a run>"}
[2019-12-20T10:08:40,737][INFO ][logstash.javapipeline ] Pipeline started {"pipeline.id"=>".monitoring-logstash"}
[2019-12-20T10:08:40,831][INFO ][logstash.agent ] Pipelines running {:count=>1, :running_pipelines=>[:".monitoring-logstash"], :non_running_pipelines=>[]}
[2019-12-20T10:08:41,171][INFO ][logstash.agent ] Successfully started Logstash API endpoint {:port=>9600}
[2019-12-20T10:08:41,926][INFO ][logstash.javapipeline ] Pipeline terminated {"pipeline.id"=>".monitoring-logstash"}
[2019-12-20T10:08:42,630][INFO ][logstash.runner ] Logstash shut down.
So the pipeline appears to shut down almost immediately after starting. Could it be that I haven't actually overridden the default pipelines?
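For reference, my understanding is that the stock Logstash image ships a default config/pipelines.yml along these lines, which is what I assumed would pick up my config:
- pipeline.id: main
  path.config: "/usr/share/logstash/pipeline"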