1 | 1 | inputs: |
| 2 | + # - PlainFile: |
| 3 | + # path: |
| 4 | + # - /tmp/file/^file.*\.txt$ |
| 5 | + # heartbeatHost: 172.16.1.52:8854 |
| 6 | + # userToken: kRhisbdoTQMCZU5KqQqGkQ7sDA7BM9kpldnQ5Nf2al8ER9yp |
| 7 | + # offsetDbSyncIntervalMs: 60000 |
| 8 | + # heartbeatIntevalMs: 60000 |
| 9 | + |
2 | 10 | - Kafka: |
3 | 11 | codec: json |
4 | 12 | encoding: UTF8 # default UTF8 |
5 | | - topic: |
6 | | - dt_all_test_log: 6 |
| 13 | + topic: |
| 14 | + {"dt_all_log": 1} |
| 15 | + |
7 | 16 | consumerSettings: |
8 | | - group.id: jlogstashvvvvv |
9 | | - zookeeper.connect: 127.0.0.1:2181 |
| 17 | + group.id: dt_all_log_group_hao |
| 18 | + zookeeper.connect: 172.16.1.181:2181,172.16.1.186:2181,172.16.1.226:2181/kafka2 |
10 | 19 | auto.commit.interval.ms: "1000" |
11 | | - auto.offset.reset: smallest |
12 | | - |
| 20 | + # - KafkaReset: |
| 21 | + # codec: json |
| 22 | + # encoding: UTF8 # default UTF8 |
| 23 | + # minTime: "2017-08-11 20:33:33" |
| 24 | + # maxTime: "2017-08-11 20:34:33" |
| 25 | + # topic: |
| 26 | + # {"dt_all_log": 3} |
| 27 | + # consumerSettings: |
| 28 | + # group.id: dt_all_log_group_hao |
| 29 | + # zookeeper.connect: 172.16.1.181:2181,172.16.1.186:2181,172.16.1.226:2181/kafka2 |
| 30 | + # auto.commit.interval.ms: "1000" |
| 31 | + # - Netty: |
| 32 | + # port: 8635 |
| 33 | + # whiteListPath: |
| 34 | + # codec: json |
| 35 | + # isExtract: false |
13 | 36 | filters: |
14 | | - - dtstack.jdtlogparser.DtLogParser: |
15 | | - redisHost: 127.0.0.1 |
| 37 | + - Performance: |
| 38 | + interval: 1 |
| 39 | + path: /tmp/filter-performance-%{+YYYY.MM.dd}.txt |
| 40 | + timeZone: Asia/Shanghai |
| 41 | + monitorPath: {"/tmp/filter-performance-%{+YYYY.MM.dd}.txt":"8"} |
| 42 | + - dtstack.jdtloguserauth.DtLogUserAuth: |
| 43 | + apiServer: 172.16.1.52:8668 |
| 44 | + useSsl: false |
| 45 | + redisHost: 172.16.1.52 |
16 | 46 | redisPort: 6379 |
17 | | - debug: false |
18 | | - - dtstack.jdtlogipip.DtLogIpIp: |
19 | | - - dtstack.jdtlogsecurity.DtLogSecurity: |
20 | | - - DateISO8601: |
21 | | - match: {"timestamp":{"srcFormat":"dd/MMM/yyyy:HH:mm:ss Z","target":"timestamp","locale":"en"}} |
22 | | - - Remove: |
23 | | - fields: ["user_token","IPORHOST=~/\\./"] |
| 47 | + isRedis: true |
| 48 | + redisDB: 0 |
| 49 | + redisPassword: taukwnlwrd9 |
| 50 | + - dtstack.jdtlogparser.DtLogParser: |
| 51 | + apiServer: 172.16.1.52:82 |
| 52 | + useSsl: false |
| 53 | + redisHost: 172.16.1.52 |
| 54 | + redisPort: 6379 |
| 55 | + isRedis: true |
| 56 | + redisDB: 0 |
| 57 | + redisPassword: taukwnlwrd9 |
| 58 | + timeWasteConfig: {"/tmp/timewaste-%{+yyyy.MM.dd}.log":"7"} # parse time-cost logging config; same format as Performance: the key is the path of the dedicated log file, the value is the number of recent logs to keep |
| 59 | + timeWasteLogMaxFreq: 100 # cap on the write-IO frequency for parse time-cost logging, to avoid overwhelming the disk |
| 60 | + parsedTimeThreshold: 2 |
| 61 | + parseFailedConfig: {"/tmp/parsefailed-%{+yyyy.MM.dd}.log":"7"} # parse-failure logging config; same format as Performance: the key is the path of the dedicated log file, the value is the number of recent logs to keep |
| 62 | + parseFailedLogMaxFreq: 100 # cap on the write-IO frequency for parse-failure logging, to avoid overwhelming the disk |
24 | 63 | |
25 | 64 | outputs: |
26 | | - - File: |
27 | | - path: /Users/sishuyss/ysq_%{tenant_id}_%{+YYYY.MM.dd}.txt |
28 | | - timezone: Asia/Shanghai |
29 | | - - Performance: |
30 | | - path: /Users/sishuyss/performance.txt |
31 | | -# - Elasticsearch: |
32 | | -# hosts: ["172.16.1.185:9300","172.16.1.188:9300"] |
33 | | -# hosts: ["127.0.0.1:9300"] |
34 | | -# indexTimezone: Asia/Shanghai |
35 | | -# cluster: tes_dtstack |
36 | | -# concurrentRequests: 2 |
37 | | -# index: 'dtlog-%{tenant_id}-%{+YYYY.MM.dd}' |
38 | | -# documentType: logs # default logs |
39 | | -# bulkActions: 40000 #default 20000 |
40 | | -# bulkSize: 30 # default 15 MB |
41 | | -# flushInterval: 3 # default 10 seconds |
42 | | -# timezone: "Asia/Shanghai" # default is UTC; only used for string-formatting the index name |
43 | | -# sniff: false #default true |
| 65 | + - Performance: |
| 66 | + interval: 1 |
| 67 | + path: /tmp/beat-performance-%{+YYYY.MM.dd}.txt |
| 68 | + timeZone: Asia/Shanghai |
| 69 | + monitorPath: {"/tmp/beat-performance-%{+YYYY.MM.dd}.txt":"8"} |
| 70 | + - Odps: |
| 71 | + configs: |
| 72 | + redis.address: redis://:taukwnlwrd9@172.16.1.52:6379/1 |
| 73 | + redis.max.idle: 100 |
| 74 | + redis.max.total: 1024 |
| 75 | + redis.max.wait.mills: 3000 |
| 76 | + redis.timeout: 2000 |
| 77 | + redis.map.info.key: od-ps-cfg |
| 78 | + redis.queue.info.key: od-ps-cfg-msg |
| 79 | + http.map.info.api: http://172.16.1.52:81/api/v1/odps/provide_task_list/ |
| 80 | + task.thread.pool.size: 5000 |
| 81 | + #task.thread.cycle.commit.time: 30000 |
| 82 | + task.thread.commit.interval: 30000 |
| 83 | + task.tunnel.timezone: Asia/Shanghai |
| 84 | + task.tunnel.retry.limit: 720 # number of retries when a log commit fails |
| 85 | + task.tunnel.retry.interval: 5 # interval between log commits, in seconds |
| 86 | + task.partitions.lru.size: 30000 |
| 87 | + task.report.status.address: 172.16.1.52:81 |
| 88 | + task.report.status.interval: 300000 |
| 89 | + scala.kafka.producer.brokerlist: 172.16.1.145:9092 |
| 90 | + scala.kafka.zookeeper: 172.16.1.181:2181,172.16.1.186:2181,172.16.1.226:2181/kafka |
| 91 | + task.retry.kafka.groupid: odps_retry_event_group_test |
| 92 | + task.retry.kafka.topic: odps_retry_event_topic_test |
| 93 | + |
| 94 | + # - Elasticsearch5: |
| 95 | + # hosts: ["172.16.1.145:9300"] |
| 96 | + # indexTimezone: "UTC" |
| 97 | + # cluster: poc_dtstack |
| 98 | + # concurrentRequests: 1 |
| 99 | + # index: 'dtlog-%{tenant_id}-%{appname}-%{keeptype}-%{+YYYY.MM.dd}' |
| 100 | + # errorEventLogConfig: {"/tmp/error-event-%{+YYYY.MM.dd}.txt":"3"} |
| 101 | + # ERROR_PROTECT_KEYS: "@timestamp,appname,keeptype,logtype,tag,message,timestamp,local_ip,tenant_id,hostname,path,agent_type,offset,uuid,bajie_test" |
| 102 | + # documentType: logs # default logs |
| 103 | + # consistency: true # default false |
| 104 | + |
| 105 | + |
| 106 | + |
| 107 | +# inputs: |
| 108 | +# # - Beats: |
| 109 | +# # codec: json |
| 110 | +# # port: 8635 |
| 111 | +# - Kafka: |
| 112 | +# codec: json |
| 113 | +# encoding: UTF8 # default UTF8 |
| 114 | +# topic: |
| 115 | +# dt_all_log: 5 |
| 116 | +# consumerSettings: |
| 117 | +# group.id: dt_all_log_group |
| 118 | +# zookeeper.connect: 172.16.1.181:2181,172.16.1.186:2181,172.16.1.226:2181/kafka |
| 119 | +# auto.commit.interval.ms: "1000" |
| 120 | +# filters: |
| 121 | +# # - Performance: |
| 122 | +# # path: /home/admin/jlogserver/logs/beat-filters-performance-%{+YYYY.MM.dd}.txt |
| 123 | +# # timeZone: Asia/Shanghai |
| 124 | +# # monitorPath: {"/home/admin/jlogserver/logs/beat-filters-performance-%{+YYYY.MM.dd}.txt":"8"} |
| 125 | +# - dtstack.jdtloguserauth.DtLogUserAuth: |
| 126 | +# apiServer: 172.16.1.52 |
| 127 | +# useSsl: false |
| 128 | +# redisHost: 172.16.1.52 |
| 129 | +# redisPort: 6379 |
| 130 | +# isRedis: true |
| 131 | +# redisDB: 1 |
| 132 | +# redisPassword: taukwnlwrd9 |
| 133 | +# - Add: |
| 134 | +# fields: {"agent_type":"@metadata.beat","hostname":"beat.hostname","host":"beat.name"} |
| 135 | +# - Remove: |
| 136 | +# fields: ["@metadata","count","offset","beat"] |
| 137 | +# - Rename: |
| 138 | +# fields: {"source":"path"} |
| 139 | +# - dtstack.jdtlogparser.DtLogParser: |
| 140 | +# apiServer: 172.16.1.52:81 |
| 141 | +# useSsl: false |
| 142 | +# redisHost: 172.16.1.52 |
| 143 | +# redisPort: 6379 |
| 144 | +# isRedis: true |
| 145 | +# redisDB: 0 |
| 146 | +# redisPassword: taukwnlwrd9 |
| 147 | +# - dtstack.jdtlogcreatemessage.DtLogCreateMessage: |
| 148 | +# repeatFields: ["path"] |
| 149 | +# - Flow: |
| 150 | +# configs: |
| 151 | +# flow.control.counttype: unset |
| 152 | +# flow.control.threshold: 10KB |
| 153 | +# flow.stat.counttype: unset |
| 154 | +# flow.stat.report.commit.delay.second: 3 |
| 155 | +# flow.stat.report.interval: 1000 |
| 156 | +# flow.stat.report.addr.template: "http://172.16.10.123:8854/api/logagent/test?uuid=%s&time=%s&bandwidth=%s" |
| 157 | +# outputs: |
| 158 | +# # - Performance: |
| 159 | +# # path: /home/admin/jlogserver/logs/beat-performance-%{+YYYY.MM.dd}.txt |
| 160 | +# # timeZone: Asia/Shanghai |
| 161 | +# # monitorPath: {"/tmp/output-performance-%{+YYYY.MM.dd}.txt":"8"} |
| 162 | +# - Netty: |
| 163 | +# host: 172.16.1.58 |
| 164 | +# port: 8635 |
| 165 | +# openCompression: true |
| 166 | +# compressionLevel: 6 |
| 167 | +# openCollectIp: true |
| 168 | +# # format: ${HOSTNAME} ${appname} [${user_token} type=${logtype} tag=${logtag}] ${path} jlogstash/$${timestamp}/$${message} |
| 169 | + |
| 170 | + |
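For readers skimming the diff: the active (uncommented) pipeline this change leaves in place is a Kafka input feeding the DtLogUserAuth and DtLogParser filters, with Performance and Odps outputs. Below is a minimal sketch of that shape with placeholder hosts, credentials, and topic names (none taken from the values above). Two assumptions are baked into the comments: the topic map is read as topic name → consumer thread count (the usual Kafka high-level consumer topic-count map), and each `{path: count}` map follows the Performance convention from the inline comments, i.e. key = log file to write, value = number of recent files to keep.

```yaml
# Minimal sketch of the active pipeline shape (placeholder values, not the ones committed above).
inputs:
  - Kafka:
      codec: json
      encoding: UTF8
      topic:
        {"example_topic": 1}                 # topic name -> consumer thread count (assumed)
      consumerSettings:
        group.id: example_group
        zookeeper.connect: zk1:2181,zk2:2181/kafka   # placeholder ZooKeeper quorum + chroot
        auto.commit.interval.ms: "1000"

filters:
  - Performance:                             # writes filter throughput stats to a rotating file
      interval: 1
      path: /tmp/filter-performance-%{+YYYY.MM.dd}.txt
      timeZone: Asia/Shanghai
      monitorPath: {"/tmp/filter-performance-%{+YYYY.MM.dd}.txt":"8"}   # keep 8 recent files
  - dtstack.jdtloguserauth.DtLogUserAuth:    # user-token auth (API server + Redis, per the keys above)
      apiServer: auth.example:8668           # placeholder
      redisHost: redis.example               # placeholder
      redisPort: 6379
  - dtstack.jdtlogparser.DtLogParser:        # log parsing; slow and failed parses logged per the configs above
      apiServer: parser.example:82           # placeholder
      redisHost: redis.example               # placeholder
      redisPort: 6379

outputs:
  - Performance:
      interval: 1
      path: /tmp/beat-performance-%{+YYYY.MM.dd}.txt
  - Odps:                                    # ODPS (MaxCompute) tunnel upload
      configs:
        redis.address: redis://:password@redis.example:6379/1   # placeholder
        task.thread.pool.size: 5000
        task.tunnel.retry.limit: 720         # retries when a log commit fails
        task.tunnel.retry.interval: 5        # seconds between commits
```

Under that reading, the topic map and task.thread.pool.size control parallelism, while task.tunnel.retry.limit × task.tunnel.retry.interval bounds how long a failed commit keeps being retried (720 × 5 s ≈ 1 hour with the committed values).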