
    Steps to Deploy an ELK Log Analysis System on CentOS 7.6

    This post records the steps for deploying an ELK log analysis system on CentOS 7.6; hopefully it will be useful.

    Download Elasticsearch

    Create the elk user and grant ownership
    useradd elk
    chown -R elk:elk /home/elk/elasticsearch
    chown -R elk:elk /home/elk/elasticsearch1
    chown -R elk:elk /home/elk/elasticsearch2
    mkdir -p /home/eladata
    mkdir -p /var/log/elk
    chown -R elk:elk /home/eladata
    chown -R elk:elk /var/log/elk
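
    Note: Elasticsearch's bootstrap checks fail with CentOS defaults; the usual prerequisites are raising vm.max_map_count and the open-file limit (the values below are the commonly recommended minimums, not from the original post):
    # raise the mmap count limit required by Elasticsearch
    echo "vm.max_map_count=262144" >> /etc/sysctl.conf
    sysctl -p
    # raise the open-file limit for the elk user
    echo "elk soft nofile 65536" >> /etc/security/limits.conf
    echo "elk hard nofile 65536" >> /etc/security/limits.conf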

    Master node

    Extract Elasticsearch, then edit the configuration file
    /home/elk/elasticsearch/config
    [root@localhost config]# grep -v "^#" elasticsearch.yml
    cluster.name: my-application
    node.name: node0
    node.master: true
    node.attr.rack: r1
    node.max_local_storage_nodes: 3
    path.data: /home/eladata
    path.logs: /var/log/elk
    http.cors.enabled: true
    http.cors.allow-origin: "*"
    network.host: 192.168.1.70
    http.port: 9200
    transport.tcp.port: 9301
    discovery.zen.minimum_master_nodes: 1
    cluster.initial_master_nodes: ["node0"]

    Manual start command
    su elk -l -c '/home/elk/elasticsearch/bin/elasticsearch -d'
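
    To verify the master node came up (HTTP port as configured above):
    curl http://192.168.1.70:9200/_cluster/health?pretty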

    Startup unit file elasticsearch.service
    [root@localhost system]# pwd
    /lib/systemd/system
    [root@localhost system]# cat elasticsearch.service
    [Unit]
    Description=Elasticsearch
    Documentation=http://www.elastic.co
    Wants=network-online.target
    After=network-online.target
    [Service]
    RuntimeDirectory=elasticsearch
    PrivateTmp=true
    Environment=ES_HOME=/home/elk/elasticsearch
    Environment=ES_PATH_CONF=/home/elk/elasticsearch/config
    Environment=PID_DIR=/var/run/elasticsearch
    EnvironmentFile=-/etc/sysconfig/elasticsearch
    WorkingDirectory=/home/elk/elasticsearch
    User=elk
    Group=elk
    ExecStart=/home/elk/elasticsearch/bin/elasticsearch -p ${PID_DIR}/elasticsearch.pid --quiet
    StandardOutput=journal
    StandardError=inherit
    LimitNOFILE=65536
    LimitNPROC=4096
    LimitAS=infinity
    LimitFSIZE=infinity
    TimeoutStopSec=0
    KillSignal=SIGTERM
    KillMode=process
    SendSIGKILL=no
    SuccessExitStatus=143
    [Install]
    WantedBy=multi-user.target

    [root@localhost system]#
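
    After writing the unit file, reload systemd and enable the service (standard systemd commands, not in the original post):
    systemctl daemon-reload
    systemctl enable elasticsearch.service
    systemctl start elasticsearch.service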

    Node1
    /home/elk/elasticsearch1/config
    [root@localhost config]# grep -v "^#" elasticsearch.yml
    cluster.name: my-application
    node.name: node1
    node.master: false
    node.attr.rack: r1
    node.max_local_storage_nodes: 3
    path.data: /home/eladata
    path.logs: /var/log/elk
    http.cors.enabled: true
    http.cors.allow-origin: "*"
    network.host: 192.168.1.70
    transport.tcp.port: 9303
    http.port: 9302
    discovery.zen.ping.unicast.hosts: ["192.168.1.70:9301"]
    [root@localhost config]#

    Manual start command
    su elk -l -c '/home/elk/elasticsearch1/bin/elasticsearch -d'

    Startup unit file elasticsearch1.service
    [root@localhost system]# pwd
    /lib/systemd/system
    [root@localhost system]# cat elasticsearch1.service
    [Unit]
    Description=Elasticsearch
    Documentation=http://www.elastic.co
    Wants=network-online.target
    After=network-online.target
    [Service]
    RuntimeDirectory=elasticsearch1
    PrivateTmp=true
    Environment=ES_HOME=/home/elk/elasticsearch1
    Environment=ES_PATH_CONF=/home/elk/elasticsearch1/config
    Environment=PID_DIR=/var/run/elasticsearch
    EnvironmentFile=-/etc/sysconfig/elasticsearch
WorkingDirectory=/home/elk/elasticsearch1
    User=elk
    Group=elk
    ExecStart=/home/elk/elasticsearch1/bin/elasticsearch -p ${PID_DIR}/elasticsearch.pid --quiet
    StandardOutput=journal
    StandardError=inherit
    LimitNOFILE=65536
    LimitNPROC=4096
    LimitAS=infinity
    LimitFSIZE=infinity
    TimeoutStopSec=0
    KillSignal=SIGTERM
    KillMode=process
    SendSIGKILL=no
    SuccessExitStatus=143
    [Install]
    WantedBy=multi-user.target

    [root@localhost system]#

    Node2
    /home/elk/elasticsearch2/config
    [root@localhost config]# grep -v "^#" elasticsearch.yml
    cluster.name: my-application
    node.name: node2
    node.attr.rack: r1
    node.master: false
    node.max_local_storage_nodes: 3
    path.data: /home/eladata
    path.logs: /var/log/elk
    http.cors.enabled: true
    http.cors.allow-origin: "*"
    network.host: 192.168.1.70
    http.port: 9203
    transport.tcp.port: 9304
    discovery.zen.ping.unicast.hosts: ["192.168.1.70:9301"]
    discovery.zen.minimum_master_nodes: 1
    [root@localhost config]#

    Manual start command
    su elk -l -c '/home/elk/elasticsearch2/bin/elasticsearch -d'

    Startup unit file elasticsearch2.service
    [root@localhost system]# pwd
    /lib/systemd/system
    [root@localhost system]# cat elasticsearch2.service
    [Unit]
    Description=Elasticsearch
    Documentation=http://www.elastic.co
    Wants=network-online.target
    After=network-online.target
    [Service]
    RuntimeDirectory=elasticsearch2
    PrivateTmp=true
    Environment=ES_HOME=/home/elk/elasticsearch2
    Environment=ES_PATH_CONF=/home/elk/elasticsearch2/config
    Environment=PID_DIR=/var/run/elasticsearch
    EnvironmentFile=-/etc/sysconfig/elasticsearch
    WorkingDirectory=/home/elk/elasticsearch2
    User=elk
    Group=elk
    ExecStart=/home/elk/elasticsearch2/bin/elasticsearch -p ${PID_DIR}/elasticsearch.pid --quiet
    StandardOutput=journal
    StandardError=inherit
    LimitNOFILE=65536
    LimitNPROC=4096
    LimitAS=infinity
    LimitFSIZE=infinity
    TimeoutStopSec=0
    KillSignal=SIGTERM
    KillMode=process
    SendSIGKILL=no
    SuccessExitStatus=143
    [Install]
    WantedBy=multi-user.target

    [root@localhost system]#
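
    With node0, node1, and node2 all running, you can confirm the three nodes joined one cluster (query against the master's HTTP port):
    curl http://192.168.1.70:9200/_cat/nodes?v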

    Download Logstash

    The directory layout is as follows; the default configuration is fine
    [root@localhost logstash]# pwd
    /home/elk/logstash
    [root@localhost logstash]#

    Manual start commands
    ./logstash -f ../dev.conf
    nohup ./logstash -f ../dev.conf &
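
    Before backgrounding it, you can have Logstash validate the pipeline file and exit (the dev.conf used here is shown in full further below):
    ./logstash -f ../dev.conf --config.test_and_exit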

    Download Kibana

    The configuration file is as follows
    [root@localhost config]# pwd
    /home/elk/kibana/config
    [root@localhost config]# grep -v "^#" kibana.yml
    server.host: "192.168.1.70"
    elasticsearch.hosts: ["http://192.168.1.70:9200"]
    kibana.index: ".kibana"
    i18n.locale: "zh-CN"

    Manual start commands
    ./kibana
    nohup ./kibana &
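
    A quick sanity check once it is up (Kibana's status API; endpoint assumed for this version line):
    curl http://192.168.1.70:5601/api/status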

    Kibana startup unit file
    [root@localhost system]# pwd
    /lib/systemd/system
    [root@localhost system]# cat kibana.service
    [Unit]
    Description=Kibana Server Manager
    [Service]
    ExecStart=/home/elk/kibana/bin/kibana
    [Install]
    WantedBy=multi-user.target
    [root@localhost system]#

    Kibana listens on port 5601; visit 192.168.1.70:5601 to access it.

    Install elasticsearch-head
    yum install git npm
    git clone https://github.com/mobz/elasticsearch-head.git
    [root@localhost elasticsearch-head]# pwd
    /home/elk/elasticsearch-head
    [root@localhost elasticsearch-head]#

    Start it
    npm install
    npm run start
    nohup npm run start &

    curl -XPUT '192.168.2.67:9100/book'

    Then open 192.168.2.67:9100 in a browser.
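
    You can also confirm the test index from Elasticsearch itself (cluster address from earlier in this post):
    curl http://192.168.1.70:9200/_cat/indices?v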

    Download Kafka

    Edit the configuration file as follows
    [root@localhost config]# pwd
    /home/elk/kafka/config
    [root@localhost config]# grep -v "^#" server.properties
    broker.id=0
    listeners=PLAINTEXT://192.168.1.70:9092
    num.network.threads=3
    num.io.threads=8
    socket.send.buffer.bytes=102400
    socket.receive.buffer.bytes=102400
    socket.request.max.bytes=104857600
    log.dirs=/var/log/kafka-logs
    num.partitions=1
    num.recovery.threads.per.data.dir=1
    offsets.topic.replication.factor=1
    transaction.state.log.replication.factor=1
    transaction.state.log.min.isr=1
    log.retention.hours=168
    log.segment.bytes=1073741824
    log.retention.check.interval.ms=300000
    zookeeper.connect=localhost:2181
    zookeeper.connection.timeout.ms=6000
    group.initial.rebalance.delay.ms=0
    delete.topic.enable=true
    [root@localhost config]#

    Start the ZooKeeper bundled with Kafka

    Manual start
    [root@localhost bin]# pwd
    /home/elk/kafka/bin
    [root@localhost bin]#
    ./zookeeper-server-start.sh ../config/zookeeper.properties
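
    To verify ZooKeeper is answering, the shell bundled with Kafka works as a smoke test (it should list the root znodes):
    ./zookeeper-shell.sh 192.168.1.70:2181 ls /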

    Starting ZooKeeper via systemctl
    [root@localhost system]# pwd
    /lib/systemd/system
    [root@localhost system]# cat zookeeper.service
    [Service]
    Type=forking
    SyslogIdentifier=zookeeper
    Restart=always
    RestartSec=0s
    ExecStart=/home/elk/kafka/bin/zookeeper-server-start.sh -daemon /home/elk/kafka/config/zookeeper.properties
    ExecStop=/home/elk/kafka/bin/zookeeper-server-stop.sh
    [root@localhost system]#

    Start the Kafka service

    Manual start
    ./kafka-server-start.sh ../config/server.properties

    Starting Kafka via systemctl
    [root@localhost system]# pwd
    /lib/systemd/system
    [root@localhost system]# cat kafka.service
    [Unit]
    Description=Apache kafka
    After=network.target
    [Service]
    Type=simple
    Restart=always
    RestartSec=0s
    ExecStart=/home/elk/kafka/bin/kafka-server-start.sh  /home/elk/kafka/config/server.properties
    ExecStop=/home/elk/kafka/bin/kafka-server-stop.sh
    [root@localhost system]#

    Test Kafka

    Create a topic named test
    ./kafka-topics.sh --create --zookeeper 192.168.1.70:2181 --replication-factor 1 --partitions 1 --topic test

    List the topics in Kafka
    ./kafka-topics.sh --list --zookeeper 192.168.1.70:2181

    Produce messages to the test topic
    ./kafka-console-producer.sh --broker-list 192.168.1.70:9092 --topic test

    Consume messages from the test topic
    bin/kafka-console-consumer.sh --bootstrap-server 192.168.1.70:9092 --topic test --from-beginning

    If the consumer receives what the producer sends, Kafka is working.
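
    You can also inspect the topic's partition and replica layout (same bin directory):
    ./kafka-topics.sh --describe --zookeeper 192.168.1.70:2181 --topic test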

    Install Filebeat on the target machines

    Version 6.5 is used here
    [root@localhost filebeat]# pwd
    /usr/local/filebeat
    [root@localhost filebeat]# cat filebeat.yml
    filebeat.prospectors:
    - type: log
      paths:
        - /opt/logs/workphone-tcp/catalina.out
      fields:
        tag: 54_tcp_catalina_out
    - type: log
      paths:
        - /opt/logs/workphone-webservice/catalina.out
      fields:
        tag: 54_web_catalina_out
    name: 192.168.1.54
    filebeat.config.modules:
      path: ${path.config}/modules.d/*.yml
      reload.enabled: false
    setup.template.settings:
      index.number_of_shards: 3
    output.kafka:
      hosts: ["192.168.1.70:9092"]
      topic: "filebeat-log"
      partition.hash:
        reachable_only: true
      compression: gzip
      max_message_bytes: 1000000
      required_acks: 1

    [root@localhost filebeat]#
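
    Before starting Filebeat for real, the 6.x CLI can check both the config file and the Kafka output connectivity:
    ./filebeat test config -c filebeat.yml
    ./filebeat test output -c filebeat.yml
    nohup ./filebeat -c filebeat.yml &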

    After installation, go back to Logstash and edit its configuration files.

    Logstash configuration
    [root@localhost logstash]# pwd
    /home/elk/logstash
    [root@localhost logstash]# cat dev.conf
    input {
      kafka {
        bootstrap_servers => "192.168.1.70:9092"
        topics => ["filebeat-log"]
        codec => "json"
      }
    }
    filter {
        if [fields][tag] == "jpwebmap" {
            json {
                source => "message"
                remove_field => "message"
            }
            geoip {
                source => "client"
                target => "geoip"
                add_field => [ "[geoip][coordinates]", "%{[geoip][longitude]}" ]
                add_field => [ "[geoip][coordinates]", "%{[geoip][latitude]}" ]
            }
            mutate {
                convert => [ "[geoip][coordinates]", "float" ]
            }
        }
        if [fields][tag] == "54_tcp_catalina_out" {
            grok {
                match => ["message", "%{TIMESTAMP_ISO8601:logdate}"]
            }
            date {
                match => ["logdate", "ISO8601"]
            }
            mutate {
                remove_field => [ "logdate" ]
            }
        }
        if [fields][tag] == "54_web_catalina_out" {
            grok {
                match => ["message", "%{TIMESTAMP_ISO8601:logdate}"]
            }
            date {
                match => ["logdate", "ISO8601"]
            }
            mutate {
                remove_field => [ "logdate" ]
            }
        }
        if [fields][tag] == "55_tcp_catalina_out" {
            grok {
                match => ["message", "%{TIMESTAMP_ISO8601:logdate}"]
            }
            date {
                match => ["logdate", "ISO8601"]
            }
            mutate {
                remove_field => [ "logdate" ]
            }
        }
        if [fields][tag] == "55_web_catalina_out" {
            grok {
                match => ["message", "%{TIMESTAMP_ISO8601:logdate}"]
            }
            date {
                match => ["logdate", "ISO8601"]
            }
            mutate {
                remove_field => [ "logdate" ]
            }
        }
        if [fields][tag] == "51_nginx80_access_log" {
            mutate {
                add_field => { "spstr" => "%{[log][file][path]}" }
            }
            mutate {
                split => ["spstr", "/"]
                # save the last element of the array as the api_method.
                add_field => ["src", "%{[spstr][-1]}"]
            }
            mutate {
                remove_field => [ "friends", "ecs", "agent", "spstr" ]
            }
            grok {
                match => { "message" => "%{IPORHOST:remote_addr} - %{DATA:remote_user} \[%{HTTPDATE:time}\] \"%{WORD:method} %{DATA:url} HTTP/%{NUMBER:http_version}\" %{NUMBER:response_code} %{NUMBER:body_sent:bytes} \"%{DATA:referrer}\" \"%{DATA:agent}\" \"%{DATA:x_forwarded_for}\" \"%{NUMBER:request_time}\" \"%{DATA:upstream_addr}\" \"%{DATA:upstream_status}\"" }
                remove_field => "message"
            }
            date {
                match => ["time", "dd/MMM/yyyy:HH:mm:ss Z"]
                target => "@timestamp"
            }
            geoip {
                source => "x_forwarded_for"
                target => "geoip"
                database => "/home/elk/logstash/GeoLite2-City.mmdb"
                add_field => [ "[geoip][coordinates]", "%{[geoip][longitude]}" ]
                add_field => [ "[geoip][coordinates]", "%{[geoip][latitude]}" ]
            }
            mutate {
                convert => [ "[geoip][coordinates]", "float" ]
            }
        }
    }
    output {
        if [fields][tag] == "wori" {
            elasticsearch {
                hosts => ["192.168.1.70:9200"]
                index => "zabbix"
            }
        }
        if [fields][tag] == "54_tcp_catalina_out" {
            elasticsearch {
                hosts => ["192.168.1.70:9200"]
                index => "54_tcp_catalina_out"
            }
        }
        if [fields][tag] == "54_web_catalina_out" {
            elasticsearch {
                hosts => ["192.168.1.70:9200"]
                index => "54_web_catalina_out"
            }
        }
        if [fields][tag] == "55_tcp_catalina_out" {
            elasticsearch {
                hosts => ["192.168.1.70:9200"]
                index => "55_tcp_catalina_out"
            }
        }
        if [fields][tag] == "55_web_catalina_out" {
            elasticsearch {
                hosts => ["192.168.1.70:9200"]
                index => "55_web_catalina_out"
            }
        }
        if [fields][tag] == "51_nginx80_access_log" {
            stdout {}
            elasticsearch {
                hosts => ["192.168.1.70:9200"]
                index => "51_nginx80_access_log"
            }
        }
    }
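
    With Filebeat, Kafka, and this pipeline all running, the flow can be verified end to end: consume the topic to confirm Filebeat is shipping, then list the indices Logstash has written (addresses as configured above):
    ./kafka-console-consumer.sh --bootstrap-server 192.168.1.70:9092 --topic filebeat-log --from-beginning
    curl http://192.168.1.70:9200/_cat/indices?v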

    Other configuration files

    index.conf
    filter {
        mutate {
            add_field => { "spstr" => "%{[log][file][path]}" }
        }
        mutate {
            split => ["spstr", "/"]
            # save the last element of the array as the api_method.
            add_field => ["src", "%{[spstr][-1]}"]
        }
        mutate {
            remove_field => [ "friends", "ecs", "agent", "spstr" ]
        }
    }

    java.conf
    filter {
        if [fields][tag] == "java" {
            grok {
                match => ["message", "%{TIMESTAMP_ISO8601:logdate}"]
            }
            date {
                match => ["logdate", "ISO8601"]
            }
            mutate {
                remove_field => [ "logdate" ]
            }
        } # end if
    }

    kafkainput.conf
    input {
      kafka {
        bootstrap_servers => "172.16.11.68:9092"
        #topics => ["ql-prod-tomcat"]
        topics => ["ql-prod-dubbo", "ql-prod-nginx", "ql-prod-tomcat"]
        codec => "json"
        consumer_threads => 5
        decorate_events => true
        #auto_offset_reset => "latest"
        group_id => "logstash"
        #client_id => ""
        ############################# HELK Optimizing Latency #############################
        fetch_min_bytes => "1"
        request_timeout_ms => "305000"
        ############################# HELK Optimizing Availability #############################
        session_timeout_ms => "10000"
        max_poll_records => "550"
        max_poll_interval_ms => "300000"
      }
    }
    #input {
    #  kafka {
    #    bootstrap_servers => "172.16.11.68:9092"
    #    topics => ["ql-prod-java-dubbo", "ql-prod", "ql-prod-java"]
    #    codec => "json"
    #    consumer_threads => 15
    #    decorate_events => true
    #    auto_offset_reset => "latest"
    #    group_id => "logstash-1"
    #    ############################# HELK Optimizing Latency #############################
    #    fetch_min_bytes => "1"
    #    request_timeout_ms => "305000"
    #    ############################# HELK Optimizing Availability #############################
    #    session_timeout_ms => "10000"
    #    max_poll_records => "550"
    #    max_poll_interval_ms => "300000"
    #  }
    #}

    nginx.conf
    filter {
        if [fields][tag] == "nginx-access" {
            mutate {
                add_field => { "spstr" => "%{[log][file][path]}" }
            }
            mutate {
                split => ["spstr", "/"]
                # save the last element of the array as the api_method.
                add_field => ["src", "%{[spstr][-1]}"]
            }
            mutate {
                remove_field => [ "friends", "ecs", "agent", "spstr" ]
            }
            grok {
                match => { "message" => "%{IPORHOST:remote_addr} - %{DATA:remote_user} \[%{HTTPDATE:time}\] \"%{WORD:method} %{DATA:url} HTTP/%{NUMBER:http_version}\" %{NUMBER:response_code} %{NUMBER:body_sent:bytes} \"%{DATA:referrer}\" \"%{DATA:agent}\" \"%{DATA:x_forwarded_for}\" \"%{NUMBER:request_time}\" \"%{DATA:upstream_addr}\" \"%{DATA:upstream_status}\"" }
                remove_field => "message"
            }
            date {
                match => ["time", "dd/MMM/yyyy:HH:mm:ss Z"]
                target => "@timestamp"
            }
            geoip {
                source => "x_forwarded_for"
                target => "geoip"
                database => "/opt/logstash-6.2.4/GeoLite2-City.mmdb"
                add_field => [ "[geoip][coordinates]", "%{[geoip][longitude]}" ]
                add_field => [ "[geoip][coordinates]", "%{[geoip][latitude]}" ]
            }
            mutate {
                convert => [ "[geoip][coordinates]", "float" ]
            }
        } # end if
    }

    output.conf
    output {
        if [fields][tag] == "nginx-access" {
            stdout {}
            elasticsearch {
                user => elastic
                password => WR141bp2sveJuGFaD4oR
                hosts => ["172.16.11.67:9200"]
                index => "logstash-%{[fields][proname]}-%{+YYYY.MM.dd}"
            }
        }
        #stdout {}
        if [fields][tag] == "java" {
            elasticsearch {
                user => elastic
                password => WR141bp2sveJuGFaD4oR
                hosts => ["172.16.11.66:9200", "172.16.11.68:9200"]
                index => "%{[host][name]}-%{[src]}"
            }
        }
    }
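
    These per-concern files (kafkainput.conf, nginx.conf, java.conf, index.conf, output.conf) can be loaded together by pointing Logstash at the directory that holds them; -f accepts a directory and concatenates the files in it (the conf.d path here is illustrative):
    ./bin/logstash -f /home/elk/logstash/conf.d/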
