skip to Main Content

I’m trying to launch Rabbitmq with docker-compose alongside DRF and Celery.
Here’s my docker-compose file. Everything else works fine, except for rabbitmq:

version: '3.7'

services:
  drf:
    build: ./drf
    entrypoint: ["/bin/sh","-c"]
    command:
      - |
        python manage.py migrate
        python manage.py runserver 0.0.0.0:8000
    volumes:
      - ./drf/:/usr/src/drf/
    ports:
      - 8000:8000
    env_file:
      - ./.env.dev
    depends_on:
      - db

  db:
    image: postgres:12.0-alpine
    volumes:
      - postgres_data:/var/lib/postgresql/data/
    environment:
      - POSTGRES_USER=postgres
      - POSTGRES_PASSWORD=postgres
      - POSTGRES_DB=base_test

  redis:
    image: redis:alpine
    volumes:
      - redis:/data
    ports:
      - "6379:6379"
    depends_on:
      - drf

  rabbitmq:
    image: rabbitmq:3-management-alpine
    container_name: 'rabbitmq'
    ports:
      - 5672:5672
      - 15672:15672
    volumes:
      - ~/.docker-conf/rabbitmq/data/:/var/lib/rabbitmq/
      - ~/.docker-conf/rabbitmq/log/:/var/log/rabbitmq
    networks:
      - net_1

  celery_worker:
    command: sh -c "wait-for redis:3000 && wait-for drf:8000 -- celery -A base-test worker -l info"
    depends_on:
      - drf
      - db
      - redis
    deploy:
      replicas: 2
      restart_policy:
        condition: on-failure
      resources:
        limits:
          cpus: '0.50'
          memory: 50M
        reservations:
          cpus: '0.25'
          memory: 20M
    hostname: celery_worker
    image: app-image
    networks:
      - net_1
    restart: on-failure

  celery_beat:
    command: sh -c "wait-for redis:3000 && wait-for drf:8000 -- celery -A mysite beat -l info --scheduler django_celery_beat.schedulers:DatabaseScheduler"
    depends_on:
      - drf
      - db
      - redis
    hostname: celery_beat
    image: app-image
    networks:
      - net_1
    restart: on-failure

networks:
  net_1:
    driver: bridge

volumes:
  postgres_data:
  redis:

And here’s what happens when I launch it. Can someone please help me find the problem? I can’t even follow the instruction and read the generated dump file because rabbitmq container exits after the error.

rabbitmq         |   Starting broker...2021-04-05 16:49:58.330 [info] <0.273.0>
rabbitmq         |  node           : rabbit@0e652f57b1b3
rabbitmq         |  home dir       : /var/lib/rabbitmq
rabbitmq         |  config file(s) : /etc/rabbitmq/rabbitmq.conf
rabbitmq         |  cookie hash    : ZPam/SOKy2dEd/3yt0OlaA==
rabbitmq         |  log(s)         : <stdout>
rabbitmq         |  database dir   : /var/lib/rabbitmq/mnesia/rabbit@0e652f57b1b3
rabbitmq         | 2021-04-05 16:50:09.542 [info] <0.273.0> Feature flags: list of feature flags found:
rabbitmq         | 2021-04-05 16:50:09.542 [info] <0.273.0> Feature flags:   [x] drop_unroutable_metric
rabbitmq         | 2021-04-05 16:50:09.542 [info] <0.273.0> Feature flags:   [x] empty_basic_get_metric
rabbitmq         | 2021-04-05 16:50:09.542 [info] <0.273.0> Feature flags:   [x] implicit_default_bindings
rabbitmq         | 2021-04-05 16:50:09.542 [info] <0.273.0> Feature flags:   [x] maintenance_mode_status
rabbitmq         | 2021-04-05 16:50:09.542 [info] <0.273.0> Feature flags:   [ ] quorum_queue
rabbitmq         | 2021-04-05 16:50:09.543 [info] <0.273.0> Feature flags:   [ ] user_limits
rabbitmq         | 2021-04-05 16:50:09.545 [info] <0.273.0> Feature flags:   [ ] virtual_host_metadata
rabbitmq         | 2021-04-05 16:50:09.546 [info] <0.273.0> Feature flags: feature flag states written to disk: yes
rabbitmq         | 2021-04-05 16:50:10.844 [info] <0.273.0> Running boot step pre_boot defined by app rabbit
rabbitmq         | 2021-04-05 16:50:10.845 [info] <0.273.0> Running boot step rabbit_core_metrics defined by app rabbit
rabbitmq         | 2021-04-05 16:50:10.846 [info] <0.273.0> Running boot step rabbit_alarm defined by app rabbit
rabbitmq         | 2021-04-05 16:50:10.854 [info] <0.414.0> Memory high watermark set to 2509 MiB (2631391641 bytes) of 6273 MiB (6578479104 bytes) total
rabbitmq         | 2021-04-05 16:50:10.864 [info] <0.416.0> Enabling free disk space monitoring
rabbitmq         | 2021-04-05 16:50:10.864 [info] <0.416.0> Disk free limit set to 50MB
rabbitmq         | 2021-04-05 16:50:10.872 [info] <0.273.0> Running boot step code_server_cache defined by app rabbit
rabbitmq         | 2021-04-05 16:50:10.872 [info] <0.273.0> Running boot step file_handle_cache defined by app rabbit
rabbitmq         | 2021-04-05 16:50:10.872 [info] <0.419.0> Limiting to approx 1048479 file handles (943629 sockets)
rabbitmq         | 2021-04-05 16:50:10.873 [info] <0.420.0> FHC read buffering:  OFF
rabbitmq         | 2021-04-05 16:50:10.873 [info] <0.420.0> FHC write buffering: ON
rabbitmq         | 2021-04-05 16:50:10.874 [info] <0.273.0> Running boot step worker_pool defined by app rabbit
rabbitmq         | 2021-04-05 16:50:10.874 [info] <0.372.0> Will use 4 processes for default worker pool
rabbitmq         | 2021-04-05 16:50:10.874 [info] <0.372.0> Starting worker pool 'worker_pool' with 4 processes in it
rabbitmq         | 2021-04-05 16:50:10.876 [info] <0.273.0> Running boot step database defined by app rabbit
rabbitmq         | 2021-04-05 16:50:10.899 [info] <0.273.0> Waiting for Mnesia tables for 30000 ms, 9 retries left
rabbitmq         | 2021-04-05 16:50:10.900 [info] <0.273.0> Successfully synced tables from a peer
rabbitmq         | 2021-04-05 16:50:10.908 [info] <0.44.0> Application mnesia exited with reason: stopped
rabbitmq         |
rabbitmq         | 2021-04-05 16:50:10.908 [info] <0.44.0> Application mnesia exited with reason: stopped
rabbitmq         | 2021-04-05 16:50:10.908 [error] <0.273.0>
rabbitmq         | 2021-04-05 16:50:10.908 [error] <0.273.0> BOOT FAILED
rabbitmq         | BOOT FAILED
rabbitmq         | ===========
rabbitmq         | Error during startup: {error,
rabbitmq         | 2021-04-05 16:50:10.909 [error] <0.273.0> ===========
rabbitmq         | 2021-04-05 16:50:10.909 [error] <0.273.0> Error during startup: {error,
rabbitmq         | 2021-04-05 16:50:10.909 [error] <0.273.0>                           {schema_integrity_check_failed,
rabbitmq         |                           {schema_integrity_check_failed,
rabbitmq         |                               [{table_attributes_mismatch,rabbit_queue,
rabbitmq         | 2021-04-05 16:50:10.910 [error] <0.273.0>                               [{table_attributes_mismatch,rabbit_queue,
rabbitmq         | 2021-04-05 16:50:10.910 [error] <0.273.0>                                    [name,durable,auto_delete,exclusive_owner,
rabbitmq         | 2021-04-05 16:50:10.911 [error] <0.273.0>                                     arguments,pid,slave_pids,sync_slave_pids,
rabbitmq         | 2021-04-05 16:50:10.911 [error] <0.273.0>                                     recoverable_slaves,policy,operator_policy,
rabbitmq         |                                    [name,durable,auto_delete,exclusive_owner,
rabbitmq         |                                     arguments,pid,slave_pids,sync_slave_pids,
rabbitmq         | 2021-04-05 16:50:10.911 [error] <0.273.0>                                     gm_pids,decorators,state,policy_version,
rabbitmq         | 2021-04-05 16:50:10.911 [error] <0.273.0>                                     slave_pids_pending_shutdown,vhost,options],
rabbitmq         | 2021-04-05 16:50:10.912 [error] <0.273.0>                                    [name,durable,auto_delete,exclusive_owner,
rabbitmq         | 2021-04-05 16:50:10.912 [error] <0.273.0>                                     arguments,pid,slave_pids,sync_slave_pids,
rabbitmq         | 2021-04-05 16:50:10.913 [error] <0.273.0>                                     recoverable_slaves,policy,operator_policy,
rabbitmq         | 2021-04-05 16:50:10.913 [error] <0.273.0>                                     gm_pids,decorators,state,policy_version,
rabbitmq         | 2021-04-05 16:50:10.913 [error] <0.273.0>                                     slave_pids_pending_shutdown,vhost,options,
rabbitmq         |                                     recoverable_slaves,policy,operator_policy,
rabbitmq         |                                     gm_pids,decorators,state,policy_version,
rabbitmq         |                                     slave_pids_pending_shutdown,vhost,options],
rabbitmq         |                                    [name,durable,auto_delete,exclusive_owner,
rabbitmq         |                                     arguments,pid,slave_pids,sync_slave_pids,
rabbitmq         |                                     recoverable_slaves,policy,operator_policy,
rabbitmq         |                                     gm_pids,decorators,state,policy_version,
rabbitmq         |                                     slave_pids_pending_shutdown,vhost,options,
rabbitmq         |                                     type,type_state]}]}}
rabbitmq         | 2021-04-05 16:50:10.914 [error] <0.273.0>                                     type,type_state]}]}}
rabbitmq         | 2021-04-05 16:50:10.916 [error] <0.273.0>
rabbitmq         |
rabbitmq         | 2021-04-05 16:50:11.924 [info] <0.272.0> [{initial_call,{application_master,init,['Argument__1','Argument__2','Argument__3','Argument__4']}},{pid,<0.272.0>},{registered_name,[]},{error_info
,{exit,{{schema_integrity_check_failed,[{table_attributes_mismatch,rabbit_queue,[name,durable,auto_delete,exclusive_owner,arguments,pid,slave_pids,sync_slave_pids,recoverable_slaves,policy,operator_policy,gm_
pids,decorators,state,policy_version,slave_pids_pending_shutdown,vhost,options],[name,durable,auto_delete,exclusive_owner,arguments,pid,slave_pids,sync_slave_pids,recoverable_slaves,policy,operator_policy,gm_
pids,decorators,state,policy_version,slave_pids_pending_shutdown,vhost,options,type,type_state]}]},{rabbit,start,[normal,[]]}},[{application_master,init,4,[{file,"application_master.erl"},{line,138}]},{proc_l
ib,init_p_do_apply,3,[{file,"proc_lib.erl"},{line,226}]}]}},{ancestors,[<0.271.0>]},{message_queue_len,1},{messages,[{'EXIT',<0.273.0>,normal}]},{links,[<0.271.0>,<0.44.0>]},{dictionary,[]},{trap_exit,true},{
status,running},{heap_size,610},{stack_size,28},{reductions,534}], []
rabbitmq         | 2021-04-05 16:50:11.924 [error] <0.272.0> CRASH REPORT Process <0.272.0> with 0 neighbours exited with reason: {{schema_integrity_check_failed,[{table_attributes_mismatch,rabbit_queue,[name
,durable,auto_delete,exclusive_owner,arguments,pid,slave_pids,sync_slave_pids,recoverable_slaves,policy,operator_policy,gm_pids,decorators,state,policy_version,slave_pids_pending_shutdown,vhost,options],[name
,durable,auto_delete,exclusive_owner,arguments,pid,slave_pids,sync_slave_pids,recoverable_slaves,policy,operator_policy,gm_pids,decorators,state,policy_version,slave_pids_pending_shutdown,vhost,options,type,t
ype_state]}]},...} in application_master:init/4 line 138
rabbitmq         | 2021-04-05 16:50:11.924 [info] <0.44.0> Application rabbit exited with reason: {{schema_integrity_check_failed,[{table_attributes_mismatch,rabbit_queue,[name,durable,auto_delete,exclusive_o
wner,arguments,pid,slave_pids,sync_slave_pids,recoverable_slaves,policy,operator_policy,gm_pids,decorators,state,policy_version,slave_pids_pending_shutdown,vhost,options],[name,durable,auto_delete,exclusive_o
wner,arguments,pid,slave_pids,sync_slave_pids,recoverable_slaves,policy,operator_policy,gm_pids,decorators,state,policy_version,slave_pids_pending_shutdown,vhost,options,type,type_state]}]},...}
rabbitmq         | 2021-04-05 16:50:11.925 [info] <0.44.0> Application rabbit exited with reason: {{schema_integrity_check_failed,[{table_attributes_mismatch,rabbit_queue,[name,durable,auto_delete,exclusive_o
wner,arguments,pid,slave_pids,sync_slave_pids,recoverable_slaves,policy,operator_policy,gm_pids,decorators,state,policy_version,slave_pids_pending_shutdown,vhost,options],[name,durable,auto_delete,exclusive_o
wner,arguments,pid,slave_pids,sync_slave_pids,recoverable_slaves,policy,operator_policy,gm_pids,decorators,state,policy_version,slave_pids_pending_shutdown,vhost,options,type,type_state]}]},...}
rabbitmq         | {"Kernel pid terminated",application_controller,"{application_start_failure,rabbit,{{schema_integrity_check_failed,[{table_attributes_mismatch,rabbit_queue,[name,durable,auto_delete,exclusi
ve_owner,arguments,pid,slave_pids,sync_slave_pids,recoverable_slaves,policy,operator_policy,gm_pids,decorators,state,policy_version,slave_pids_pending_shutdown,vhost,options],[name,durable,auto_delete,exclusi
ve_owner,arguments,pid,slave_pids,sync_slave_pids,recoverable_slaves,policy,operator_policy,gm_pids,decorators,state,policy_version,slave_pids_pending_shutdown,vhost,options,type,type_state]}]},{rabbit,start,
[normal,[]]}}}"}
rabbitmq         | Kernel pid terminated (application_controller) ({application_start_failure,rabbit,{{schema_integrity_check_failed,[{table_attributes_mismatch,rabbit_queue,[name,durable,auto_delete,exclusiv
e_owner,arg
rabbitmq         |
rabbitmq         | Crash dump is being written to: /var/log/rabbitmq/erl_crash.dump...done
rabbitmq exited with code 0

2

Answers


  1. Chosen as BEST ANSWER

    I've managed to make it work by removing container_name and volumes from rabbitmq section of docker-compose file. Still would be nice to have an explanation of this behavior.


  2. Btw, the same error (Application mnesia exited with reason: stopped) appears when you have syntax errors in your definitions.json.

    Login or Signup to reply.
Please signup or login to give your own answer.
Back To Top
Search