-
NGINX(192.168.132.7)参考
59. 创建nginx的topic ./bin/kafka-topics.sh --create --zookeeper 192.168.132.24:2181 --replication-factor 2 --partitions 3 --topic nginx
60. yum -y install epel-release
61. yum -y install nginx
62. systemctl start nginx
63. tailf /var/log/nginx/access.log
AB压测工具(yum -y install httpd-tools,192.168.132.17)
64. ab -n 100 -c 100 http://192.168.132.7/index.html
配置filebeat文件(192.168.132.7)
- vim /etc/filebeat/filebeat.yml
-
filebeat.inputs:
# Ship system messages to the "msg" topic.
- type: log
  enabled: true
  paths:
    - /var/log/messages
  fields:
    log_topics: msg
# Ship nginx access logs to the "nginx" topic.
- type: log
  enabled: true
  paths:
    - /var/log/nginx/access.log
  fields:
    log_topics: nginx

output.kafka:
  enabled: true
  # Array of hosts to connect to.
  # NOTE(review): original text mixed ports 9202 and 9092; Kafka's default
  # broker port is 9092 and the Logstash config below uses 9092 — normalized.
  hosts: ["192.168.141.130:9092","192.168.141.146:9092","192.168.141.147:9092"]
  # topic: msg
  # Route each event to the topic named in its custom field.
  topic: '%{[fields][log_topics]}'
67. systemctl restart filebeat
68. tailf /var/log/filebeat/filebeat
69.安装包查看
70. ctrl c
71. ./filebeat -c filebeat.yml -e
消费(192.168.132.17)
1. ./bin/kafka-console-consumer.sh --bootstrap-server 192.168.132.7:9092 --topic nginx --from-beginning
配置elasticsearch(192.168.132.17)
- systemctl restart elasticsearch
logstash(192.168.132.7)
- vim /etc/logstash/conf.d/nginx.conf
- 配置管道 vim pipelines.yml
- systemctl restart logstash
- ss -ntlp | grep 9600
- tailf /var/log/logstash/logstash-plain.log
-
压测(192.168.132.17)
- ab -n 100 -c 100 http://192.168.132.7/index.html
查看索引(192.168.132.17)
- curl -X GET http://192.168.132.17:9200/_cat/indices?v
-
配置LOGSTASH过滤模块(正则匹配)192.168.132.7
- cd /usr/share/logstash/vendor/bundle/jruby/2.5.0/gems/logstash-patterns-core-4.1.2/patterns
- vim nginx_access
- URIPARAM1 [A-Za-z0-9$.+!*'|(){},~@#%&/=:;_?\-\[\]]
- NGINXACCESS %{IPORHOST:client_ip} (%{USER:ident}|-) (%{USER:auth}|-) \[%{HTTPDATE:timestamp}\] "(?:%{WORD:verb} (%{NOTSPACE:request}|-)(?: HTTP/%{NUMBER:http_version})?|-)" %{NUMBER:status} (?:%{NUMBER:bytes}|-) "(?:%{URI:referrer}|-)" "%{GREEDYDATA:agent}"
- logstash grok 内置正则:https://github.com/logstash-plugins/logstash-patterns-core/blob/master/patterns/grok-patterns
- vim /etc/logstash/conf.d/nginx.conf
-
input {
  kafka {
    # bootstrap_servers is a single comma-separated string, not an array.
    bootstrap_servers => "192.168.141.130:9092,192.168.141.146:9092,192.168.141.147:9092"
    group_id => "logstash"
    topics => "nginx"
    consumer_threads => 5
  }
}

filter {
  # Filebeat's Kafka output wraps the event in JSON; unpack it first.
  json {
    source => "message"
  }
  # Drop Beats bookkeeping fields we don't want indexed.
  mutate {
    remove_field => ["@version","fields","prospector","source","host","beat","input","offset","log"]
  }
  # Parse the raw nginx access-log line with the custom NGINXACCESS pattern.
  grok {
    match => { "message" => "%{NGINXACCESS}" }
  }
}

output {
  elasticsearch {
    hosts => "192.168.141.146:9200"
    # Daily index rollover.
    index => "nginx-%{+YYYY.MM.dd}"
  }
}
- systemctl restart logstash
- ss -ntlp | grep 9600
- tailf /var/log/logstash/logstash-plain.log
- 删除原来的索引添加新索引
- ab -n 100 -c 100 http://192.168.132.17/index.html
- curl -X GET http://192.168.132.17:9200/_cat/indices?v
相关文章: