Client request timeout - Sync Elasticsearch with MySQL

Hello,

I am getting below error while executing the get query on kibana console:

Also, if I am executing for another index that one is also taking around 30s.

Why it's behaving like this ?

docker.yml file:

elasticsearch:
        image: docker.elastic.co/elasticsearch/elasticsearch:8.13.2
        container_name: elasticsearch
        # restart: on-failure
        environment:
        - discovery.type=single-node
        - bootstrap.memory_lock=true
        - xpack.security.enabled=false
        - "ES_JAVA_OPTS=-Xms1g -Xmx1g"
        ulimits:
            memlock:
                soft: -1
                hard: -1
        volumes:
        - ./volumes/elasticsearch:/usr/share/elasticsearch/data
        logging:
            driver: "json-file"
            options:
                max-size: "10k"
                max-file: "10"
        networks:
            - backend


    logstash:
        build:
            context: .
            dockerfile: Dockerfile-logstash
        container_name: logstash
        # restart: on-failure
        depends_on:
        - laravel_db
        - elasticsearch
        volumes:
        - ./volumes/logstash/pipeline/:/usr/share/logstash/pipeline/
        - ./volumes/logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml
        - ./volumes/logstash/config/pipelines.yml:/usr/share/logstash/config/pipelines.yml
        - ./volumes/logstash/config/queries/:/usr/share/logstash/config/queries/
        logging:
            driver: "json-file"
            options:
                max-size: "10k"
                max-file: "10"
        networks:
            - backend


    kibana:
        image: docker.elastic.co/kibana/kibana:8.13.2
        container_name: kibana
        environment:
            - ELASTICSEARCH_HOSTS=http://elasticsearch:9200
        ports:
            - 5601:5601
        depends_on:
            - elasticsearch
        networks:
            - backend

volumes:
    laravel_db:

networks:
    backend:
        name: backend
        driver: bridge

Logstash configuration file for detecting any changes:

input {
  jdbc {
    jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar"
    jdbc_driver_class => "com.mysql.cj.jdbc.Driver"
    jdbc_connection_string => "jdbc:mysql://mysql_8:3306/pcs_accounts_db"
    jdbc_user => "pcs_db_user"
    jdbc_password => "laravel_db"
    type => "txn"
    use_column_value => true
    tracking_column => 'transaction_dump_id'
    last_run_metadata_path => "/usr/share/logstash/.logstash_jdbc_last_run_a'"
    sql_log_level => "debug"  
    schedule => "*/5 * * * * *"  
    statement => "
                  SELECT * 
                  FROM ac_transaction_dump 
                  WHERE (created_at > :sql_last_value)
                  OR (updated_at > :sql_last_value);
                "
  }

  jdbc {
    jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar"
    jdbc_driver_class => "com.mysql.cj.jdbc.Driver"
    jdbc_connection_string => "jdbc:mysql://mysql_8:3306/pcs_accounts_db"
    jdbc_user => "pcs_db_user"
    jdbc_password => "laravel_db"
    type => "trial"
    use_column_value => true
    tracking_column => 'daily_trial_balance_id'
    last_run_metadata_path => "/usr/share/logstash/.logstash_jdbc_last_run_b"
    sql_log_level => "debug"  
    schedule => "*/5 * * * * *"  
    statement => "
                  SELECT * 
                  FROM ac_daily_trial_balance 
                  WHERE (created_at > :sql_last_value)
                  OR (updated_at > :sql_last_value);
                "
  }
}

filter {
    if [deleted_at] {
    mutate { 
        add_field => { "[@metadata][action]" => "delete" }
        }
    }
    mutate {
    remove_field => ["@version", "@timestamp"]
    }
}

#   stdout { codec => rubydebug { metadata => true } }
output {
    if [type] == "txn" {
        if [@metadata][action] == "delete" {
            elasticsearch {
            hosts => ["http://elasticsearch:9200"]
            index => "ac_transaction_dump"
            action => "delete"
            document_id => "%{transaction_dump_id}"
            }
        }
        else {
            elasticsearch {
            hosts => ["http://elasticsearch:9200"]
            index => "ac_transaction_dump"
            document_id => "%{transaction_dump_id}"
            }
        }
    }
    
    if [type] == "trial" {
        if [@metadata][action] == "delete" {
            elasticsearch {
            hosts => ["http://elasticsearch:9200"]
            index => "ac_daily_trial_balance"
            action => "delete"
            document_id => "%{daily_trial_balance_id}"
            }
        }
        else {
            elasticsearch {
            hosts => ["http://elasticsearch:9200"]
            index => "ac_daily_trial_balance"
            document_id => "%{daily_trial_balance_id}"
            }
        }
    }
}

pipelines.yml:

# This file is where you define your pipelines. You can define multiple.
# For more information on multiple pipelines, see the documentation:
#   https://www.elastic.co/guide/en/logstash/current/multiple-pipelines.html

# - pipeline.id: base-pipeline
#   path.config: "/usr/share/logstash/pipeline/base.conf"

- pipeline.id: incremental-pipeline
  path.config: "/usr/share/logstash/pipeline/change.conf"

Note that till now I didn't change anything in the database. Both tables have a fix number of records. But still it's taking so much time for query execution.