
Installed ES 8.3.2 using Docker on my local dev machine. Before this installation I had ES 6.3.2 running, and I had deleted all my test indices in 6.3.2 before installing 8.3.2 so that I would avoid any v6 issues with v8 on my local dev machine. When I run docker-compose up --build I get this error, right before my ES container exits:

es_usamm-db          | org.elasticsearch.ElasticsearchException: failed to bind service
es_usamm-db          | Likely root cause: org.elasticsearch.gateway.CorruptStateException: Format version is not supported. Upgrading to [8.3.2] is only supported from version [7.17.0].

How do I completely wipe out any Elasticsearch Docker data so that I can do a clean install of 8.3.2? I thought deleting all indices in 6.3.2 would have allowed a clean install of 8.3.2, but there must still be some 6.3.2 data persisting somewhere.

Here is my docker-compose.yml:

services:
  elasticsearch:
    container_name: es_usamm-db
    image: docker.elastic.co/elasticsearch/elasticsearch:8.3.2
    environment:
      - node.name=es01
      - cluster.name=es-docker-cluster
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
      - xpack.security.enabled=false
      - discovery.type=single-node
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - data01:/usr/share/elasticsearch/data
      - ./server/elastic/scripts:/server/elastic/scripts
    ports:
      - 9200:9200
    networks:
      - default
  kibana:
    container_name: kibana_usamm-db
    image: docker.elastic.co/kibana/kibana:8.3.2
    ports:
      - 5601:5601
    networks:
      - default

2 Answers


  1. Well in that case, I would recommend deleting all your Docker images, containers, volumes, etc.
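
    For example, to wipe the stale Elasticsearch data from the question's setup (a sketch; the exact volume name depends on your Compose project name, so check with docker volume ls first):

    # Stop the stack and remove its named volumes in one step
    docker-compose down -v

    # If the old 6.3.2 volume was created under a different project name,
    # find it and remove it by hand (data01 is the volume name in the question's file)
    docker volume ls | grep data01
    docker volume rm <project>_data01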

    Single Node Cluster

    To get started with a single-node cluster, you can refer to the blog below:

    https://ashish.one/blogs/elastic-docker-compose/

    Here are the steps from the blog:

    This cluster will include:

    • Elasticsearch
    • Kibana

    For other stack components such as APM, Beats, etc., please refer to the link above.

    1. Make a directory

    mkdir docker-elastic
    cd docker-elastic
    

    2. Create a .env file

    # Password for the 'elastic' user (at least 6 characters).
    ELASTIC_PASSWORD=yourpass
    
    # Password for the 'kibana_system' user (at least 6 characters)
    KIBANA_PASSWORD=yourpass
    
    # Version of Elastic products
    STACK_VERSION=8.2.2
    
    # Set the cluster name
    CLUSTER_NAME=docker-cluster
    
    # Set to 'basic' or 'trial' to automatically start the 30-day trial
    LICENSE=basic
    #LICENSE=trial
    
    # Port to expose Elasticsearch HTTP API to the host
    ES_PORT=9200
    #ES_PORT=127.0.0.1:9200
    
    # Port to expose Kibana to the host
    KIBANA_PORT=5601
    #KIBANA_PORT=80
    
    # Port to expose APM Server to the host
    APM_PORT=8200
    
    # Increase or decrease based on the available host memory (in bytes; 1073741824 = 1 GiB)
    MEM_LIMIT=1073741824
    
    # Project namespace (defaults to the current folder name if not set)
    #COMPOSE_PROJECT_NAME=myproject
    

    Save and close.

    3. Create the docker-compose.yml file

    version: "2.2"
    
    services:
      setup:
        image: docker.elastic.co/elasticsearch/elasticsearch:${STACK_VERSION}
        volumes:
          - certs:/usr/share/elasticsearch/config/certs
        user: "0"
        command: >
          bash -c '
            if [ x${ELASTIC_PASSWORD} == x ]; then
              echo "Set the ELASTIC_PASSWORD environment variable in the .env file";
              exit 1;
            elif [ x${KIBANA_PASSWORD} == x ]; then
              echo "Set the KIBANA_PASSWORD environment variable in the .env file";
              exit 1;
            fi;
            if [ ! -f config/certs/ca.zip ]; then
              echo "Creating CA";
              bin/elasticsearch-certutil ca --silent --pem -out config/certs/ca.zip;
              unzip config/certs/ca.zip -d config/certs;
            fi;
            if [ ! -f config/certs/certs.zip ]; then
              echo "Creating certs";
              echo -ne \
              "instances:\n"\
              "  - name: es01\n"\
              "    dns:\n"\
              "      - es01\n"\
              "      - localhost\n"\
              "    ip:\n"\
              "      - 127.0.0.1\n"\
              > config/certs/instances.yml;
              bin/elasticsearch-certutil cert --silent --pem -out config/certs/certs.zip --in config/certs/instances.yml --ca-cert config/certs/ca/ca.crt --ca-key config/certs/ca/ca.key;
              unzip config/certs/certs.zip -d config/certs;
            fi;
            echo "Setting file permissions"
            chown -R root:root config/certs;
            find . -type d -exec chmod 750 {} ;;
            find . -type f -exec chmod 640 {} ;;
            echo "Waiting for Elasticsearch availability";
            until curl -s --cacert config/certs/ca/ca.crt https://es01:9200 | grep -q "missing authentication credentials"; do sleep 30; done;
            echo "Setting kibana_system password";
            until curl -s -X POST --cacert config/certs/ca/ca.crt -u "elastic:${ELASTIC_PASSWORD}" -H "Content-Type: application/json" https://es01:9200/_security/user/kibana_system/_password -d "{\"password\":\"${KIBANA_PASSWORD}\"}" | grep -q "^{}"; do sleep 10; done;
            echo "All done!";
          '
        healthcheck:
          test: ["CMD-SHELL", "[ -f config/certs/es01/es01.crt ]"]
          interval: 1s
          timeout: 5s
          retries: 120
    
      es01:
        depends_on:
          setup:
            condition: service_healthy
        image: docker.elastic.co/elasticsearch/elasticsearch:${STACK_VERSION}
        volumes:
          - certs:/usr/share/elasticsearch/config/certs
          - esdata01:/usr/share/elasticsearch/data
        ports:
          - ${ES_PORT}:9200
        environment:
          - node.name=es01
          - cluster.name=${CLUSTER_NAME}
          - cluster.initial_master_nodes=es01
          - discovery.seed_hosts=
          - ELASTIC_PASSWORD=${ELASTIC_PASSWORD}
          - bootstrap.memory_lock=true
          - xpack.security.enabled=true
          - xpack.security.http.ssl.enabled=true
          - xpack.security.http.ssl.key=certs/es01/es01.key
          - xpack.security.http.ssl.certificate=certs/es01/es01.crt
          - xpack.security.http.ssl.certificate_authorities=certs/ca/ca.crt
          - xpack.security.http.ssl.verification_mode=certificate
          - xpack.security.transport.ssl.enabled=true
          - xpack.security.transport.ssl.key=certs/es01/es01.key
          - xpack.security.transport.ssl.certificate=certs/es01/es01.crt
          - xpack.security.transport.ssl.certificate_authorities=certs/ca/ca.crt
          - xpack.security.transport.ssl.verification_mode=certificate
          - xpack.license.self_generated.type=${LICENSE}
        mem_limit: ${MEM_LIMIT}
        ulimits:
          memlock:
            soft: -1
            hard: -1
        healthcheck:
          test:
            [
              "CMD-SHELL",
              "curl -s --cacert config/certs/ca/ca.crt https://localhost:9200 | grep -q 'missing authentication credentials'",
            ]
          interval: 10s
          timeout: 10s
          retries: 120
    
      kibana:
        depends_on:
          es01:
            condition: service_healthy
        image: docker.elastic.co/kibana/kibana:${STACK_VERSION}
        volumes:
          - certs:/usr/share/kibana/config/certs
          - kibanadata:/usr/share/kibana/data
        ports:
          - ${KIBANA_PORT}:5601
        environment:
          - SERVERNAME=kibana
          - ELASTICSEARCH_HOSTS=https://es01:9200
          - ELASTICSEARCH_USERNAME=kibana_system
          - ELASTICSEARCH_PASSWORD=${KIBANA_PASSWORD}
          - ELASTICSEARCH_SSL_CERTIFICATEAUTHORITIES=config/certs/ca/ca.crt
        mem_limit: ${MEM_LIMIT}
        healthcheck:
          test:
            [
              "CMD-SHELL",
              "curl -s -I http://localhost:5601 | grep -q 'HTTP/1.1 302 Found'",
            ]
          interval: 10s
          timeout: 10s
          retries: 120
    
          
    volumes:
      certs:
        driver: local
      esdata01:
        driver: local
      kibanadata:
        driver: local
    

    Save and close.
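
    Before starting, you can optionally have Compose render the final configuration to confirm the values from the .env file in step 2 are picked up (a quick sanity check, not part of the original blog steps):

    # Print the fully interpolated file; ${STACK_VERSION} and friends
    # should appear replaced by the values from .env
    docker-compose config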

    4. Start

    docker-compose up -d
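
    Once the containers report healthy, you can check that Elasticsearch answers over TLS. A minimal check, assuming the service name and certificate paths from the compose file above (password from your .env):

    # Run curl inside the es01 container, trusting the generated CA
    docker-compose exec es01 curl -s --cacert config/certs/ca/ca.crt -u elastic:yourpass https://localhost:9200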
    

    5. Stop

    docker-compose down
    

    6. Stop and delete the network, containers, and volumes

    docker-compose down -v 
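
    Afterwards, to confirm the named volumes are really gone (volume names assume the compose file above, prefixed with your Compose project name):

    docker volume ls | grep -E 'certs|esdata01|kibanadata'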
    

    Multi Node Cluster

    Please refer to the official doc below to spin up a multi-node cluster.

    https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html#docker-compose-file

  2. I answered this on your forum post: https://discuss.elastic.co/t/elasticsearch-8-3-2-docker-org-elasticsearch-gateway-corruptstateexception/310765

    You need to use a new volume instead of reusing the old one, or delete all the files in the volume directory from your underlying OS.

    Even if you delete all the indices from the old node/cluster, you still have things like cluster state stored on disk, and those are only compatible if you step through the supported upgrade path (6.x to 7.17, then 7.17 to 8.x, per the error message).
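
    If you want to see what the old node left behind before deleting it, you can list the data volume's contents from a throwaway container. A sketch; substitute the real volume name from docker volume ls:

    # Cluster state survives under nodes/0/_state even after every index is deleted
    docker run --rm -v <project>_data01:/data alpine ls -R /data/nodes

    # Once you are sure, remove the stale volume so 8.3.2 can start fresh
    docker volume rm <project>_data01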
