Set up OpenSearch, Logstash, and Kafka locally with Docker Compose (Windows)

Directory tree


D:/
  opensearch/
    kafka/
      docker-compose.yaml
    logstash/
      pipeline/
        input.conf
      docker-compose.yaml
    .env
    docker-compose.yaml
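The layout can be created from a PowerShell prompt, for example (a minimal sketch; adjust the drive and folder names to your setup):

mkdir D:\opensearch\kafka                  # creates D:\opensearch and the kafka subfolder
mkdir D:\opensearch\logstash\pipeline      # creates logstash and its pipeline subfolder
New-Item D:\opensearch\.env, D:\opensearch\docker-compose.yaml
New-Item D:\opensearch\kafka\docker-compose.yaml
New-Item D:\opensearch\logstash\docker-compose.yaml
New-Item D:\opensearch\logstash\pipeline\input.conf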

opensearch

Add a docker-compose.yaml file in the opensearch directory with the following content:

version: '3'
services:
  opensearch-node1: # This is also the hostname of the container within the Docker network (i.e. https://opensearch-node1/)
    image: opensearchproject/opensearch:latest # Specifying the latest available image - modify if you want a specific version
    container_name: opensearch-node1
    environment:
      - cluster.name=opensearch-cluster # Name the cluster
      - node.name=opensearch-node1 # Name the node that will run in this container
      - discovery.seed_hosts=opensearch-node1,opensearch-node2 # Nodes to look for when discovering the cluster
      - cluster.initial_cluster_manager_nodes=opensearch-node1,opensearch-node2 # Nodes eligible to serve as cluster manager
      - bootstrap.memory_lock=true # Disable JVM heap memory swapping
      - "OPENSEARCH_JAVA_OPTS=-Xms512m -Xmx512m" # Set min and max JVM heap sizes to at least 50% of system RAM
      - OPENSEARCH_INITIAL_ADMIN_PASSWORD=${OPENSEARCH_INITIAL_ADMIN_PASSWORD}    # Sets the demo admin user password when using demo configuration, required for OpenSearch 2.12 and later
    ulimits:
      memlock:
        soft: -1 # Set memlock to unlimited (no soft or hard limit)
        hard: -1
      nofile:
        soft: 65536 # Maximum number of open files for the opensearch user - set to at least 65536
        hard: 65536
    volumes:
      - opensearch-data1:/usr/share/opensearch/data # Creates volume called opensearch-data1 and mounts it to the container
    ports:
      - 9200:9200 # REST API
      - 9600:9600 # Performance Analyzer
    networks:
      - opensearch-net # All of the containers will join the same Docker bridge network
  opensearch-node2:
    image: opensearchproject/opensearch:latest # This should be the same image used for opensearch-node1 to avoid issues
    container_name: opensearch-node2
    environment:
      - cluster.name=opensearch-cluster
      - node.name=opensearch-node2
      - discovery.seed_hosts=opensearch-node1,opensearch-node2
      - cluster.initial_cluster_manager_nodes=opensearch-node1,opensearch-node2
      - bootstrap.memory_lock=true
      - "OPENSEARCH_JAVA_OPTS=-Xms512m -Xmx512m"
      - OPENSEARCH_INITIAL_ADMIN_PASSWORD=${OPENSEARCH_INITIAL_ADMIN_PASSWORD}
    ulimits:
      memlock:
        soft: -1
        hard: -1
      nofile:
        soft: 65536
        hard: 65536
    volumes:
      - opensearch-data2:/usr/share/opensearch/data
    networks:
      - opensearch-net
  opensearch-dashboards:
    image: opensearchproject/opensearch-dashboards:latest # Make sure the version of opensearch-dashboards matches the version of opensearch installed on other nodes
    container_name: opensearch-dashboards
    ports:
      - 5601:5601 # Map host port 5601 to container port 5601
    expose:
      - "5601" # Expose port 5601 for web access to OpenSearch Dashboards
    environment:
      OPENSEARCH_HOSTS: '["https://opensearch-node1:9200","https://opensearch-node2:9200"]' # Define the OpenSearch nodes that OpenSearch Dashboards will query
    networks:
      - opensearch-net
volumes:
  opensearch-data1:
  opensearch-data2:
networks:
  opensearch-net:

Then add a .env file containing the admin password:
OPENSEARCH_INITIAL_ADMIN_PASSWORD=mypassword1@2
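With the .env file in place, the cluster can be started and checked from D:\opensearch (a minimal sketch; curl.exe ships with recent Windows 10/11, and -k skips verification of the demo self-signed certificate):

docker compose up -d
curl.exe -k -u admin:mypassword1@2 "https://localhost:9200"                          # basic node info
curl.exe -k -u admin:mypassword1@2 "https://localhost:9200/_cluster/health?pretty"   # should report 2 nodes

OpenSearch Dashboards should then be reachable at http://localhost:5601 with the same admin credentials.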

kafka

Add a docker-compose.yaml file in the kafka directory with the following content:

version: '3'

services:
  zookeeper:
    container_name: zookeeper-cntr
    image: confluentinc/cp-zookeeper:7.2.0
    environment:
      ZOOKEEPER_CLIENT_PORT: 2181
      ZOOKEEPER_TICK_TIME: 2000
    networks:
      - opensearch-net
  kafka:
    container_name: kafka-cntr
    image: confluentinc/cp-kafka:7.2.0
    depends_on:
      - zookeeper
    ports:
      - 29092:29092
    environment:
      KAFKA_BROKER_ID: 1
      KAFKA_ZOOKEEPER_CONNECT: zookeeper-cntr:2181
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka-cntr:9092,PLAINTEXT_HOST://localhost:29092
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
      KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
      KAFKA_ADVERTISED_HOST_NAME: localhost
    networks:
      - opensearch-net
    healthcheck:
      test: nc -vz kafka-cntr 9092 || exit -1
      # start_period: 15s
      interval: 5s
      timeout: 10s
      retries: 10
networks:
  opensearch-net:
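Once this stack is up (docker compose up -d from D:\opensearch\kafka), a test message can be pushed onto the topic with the CLI tools bundled in the cp-kafka image (a minimal sketch; the topic name matches the Logstash pipeline below, and cp-kafka auto-creates topics by default, so the explicit create is optional):

docker exec -it kafka-cntr kafka-topics --bootstrap-server kafka-cntr:9092 --create --topic ServiceCentrallog --partitions 1 --replication-factor 1
docker exec -it kafka-cntr kafka-console-producer --bootstrap-server kafka-cntr:9092 --topic ServiceCentrallog

Type a JSON line such as {"service":"demo","message":"hello"} into the producer, press Enter, then Ctrl+C to exit; kafka-console-consumer can be used the same way to confirm the message landed.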

logstash

Add a docker-compose.yaml file in the logstash directory with the following content:

version: '3'

services:
  logstash:
    image: opensearchproject/logstash-oss-with-opensearch-output-plugin:8.4.0
    ports:
      - "5044:5044"
    expose:
      - "5044"
    volumes:
      - ./pipeline:/usr/share/logstash/pipeline
    networks:
      - opensearch_opensearch-net  
      - kafka_opensearch-net
networks:
  opensearch_opensearch-net:
    external: true
  kafka_opensearch-net:
    external: true
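Both networks here are external, so this stack can only start after the opensearch and kafka stacks have created them (Docker Compose prefixes the network name with the project, i.e. the parent folder name, which is why they appear as opensearch_opensearch-net and kafka_opensearch-net). To start it from D:\opensearch\logstash and confirm the pipeline loads (a minimal sketch):

docker compose up -d
docker compose logs -f logstash    # watch for the pipeline to start without configuration errors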

pipeline

Add an input.conf file in the pipeline directory with the following content:

input {
  kafka {
    bootstrap_servers => "kafka-cntr:9092"
    topics => ["ServiceCentrallog"]
    codec => json
  }
}

filter {
  mutate {
    remove_field => ["event"]
  }
}
output {
  opensearch {
    hosts       => ["https://opensearch-node1:9200"]
    user        => "admin"
    password    => "mypassword1@2"
    index       => "service-centrallog-%{+YYYY.MM.dd}"
    ssl_certificate_verification => false
    ecs_compatibility => disabled
  }
}
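Once messages flow through this pipeline, the daily index should show up in OpenSearch and can be queried from the host (a minimal sketch, reusing the admin password from .env):

curl.exe -k -u admin:mypassword1@2 "https://localhost:9200/_cat/indices/service-centrallog-*?v"
curl.exe -k -u admin:mypassword1@2 "https://localhost:9200/service-centrallog-*/_search?pretty"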

Step 1: Run the opensearch docker-compose.
Step 2: Run the kafka docker-compose.
Step 3: Run the logstash docker-compose after Kafka has finished starting.

A local application can send logs to Kafka via 127.0.0.1:29092.

If OpenSearch fails to start with a "max virtual memory areas vm.max_map_count [65530] is too low" error, see: https://stackoverflow.com/questions/51445846/elasticsearch-max-virtual-memory-areas-vm-max-map-count-65530-is-too-low-inc
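On Docker Desktop for Windows with the WSL 2 backend, the fix from that link can usually be applied like this (a sketch, assuming the default docker-desktop distro; the value resets when WSL restarts, so it may need to be re-applied):

wsl -d docker-desktop sysctl -w vm.max_map_count=262144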
