A-A+

suricata.yaml 配置文件详解

2019年01月12日 16:58 学习笔记 暂无评论 共71706字 (阅读 10,344 次)

【注意:此文章为博主原创文章!转载需注意,请带原文链接,至少也要是txt格式!】

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940
1941
1942
1943
1944
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991
1992
1993
1994
1995
1996
1997
1998
1999
2000
2001
2002
2003
2004
2005
2006
2007
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
2020
2021
2022
2023
2024
2025
2026
2027
2028
2029
2030
2031
2032
2033
2034
2035
2036
2037
2038
2039
2040
2041
2042
2043
2044
2045
2046
2047
2048
2049
2050
2051
2052
2053
2054
2055
2056
2057
2058
2059
2060
2061
2062
2063
2064
2065
2066
2067
2068
2069
2070
2071
2072
2073
2074
2075
2076
2077
2078
2079
2080
2081
2082
2083
2084
2085
2086
2087
2088
2089
2090
2091
2092
2093
2094
2095
2096
2097
2098
2099
2100
2101
2102
2103
2104
2105
2106
2107
2108
2109
2110
2111
2112
2113
2114
2115
2116
2117
2118
2119
2120
2121
2122
2123
2124
2125
2126
2127
2128
2129
2130
2131
2132
2133
2134
2135
2136
2137
2138
2139
2140
2141
2142
2143
2144
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176
2177
2178
2179
2180
2181
2182
2183
2184
2185
2186
2187
2188
2189
2190
2191
2192
2193
2194
2195
2196
2197
2198
2199
2200
2201
2202
2203
2204
2205
2206
2207
2208
2209
2210
2211
2212
2213
2214
2215
2216
2217
2218
2219
2220
2221
2222
2223
2224
2225
2226
2227
2228
2229
2230
2231
2232
2233
2234
2235
2236
2237
2238
2239
2240
2241
2242
2243
2244
2245
2246
2247
2248
2249
2250
2251
2252
2253
2254
2255
2256
2257
2258
2259
2260
2261
2262
2263
2264
2265
2266
2267
2268
2269
2270
2271
2272
2273
2274
2275
2276
2277
2278
%YAML 1.1
---
 
#Suricata配置文件。除了描述所有的评论
#此文件中的#个选项,完整文档可在以下位置找到:
#https://suricata.readthedocs.io/en/latest/configuration/suricata-yaml.html
 
##
## Step 1: inform Suricata about your network
## 步骤1:告知Suricata您的网络
 
vars:
  # more specific is better for alert accuracy and performance
  address-groups:
    HOME_NET: "[192.168.0.0/16,10.0.0.0/8,172.16.0.0/12]"
    #HOME_NET: "[192.168.0.0/16]"
    #HOME_NET: "[10.0.0.0/8]"
    #HOME_NET: "[172.16.0.0/12]"
    #HOME_NET: "any"
 
    EXTERNAL_NET: "!$HOME_NET"
    #EXTERNAL_NET: "any"
 
    HTTP_SERVERS: "$HOME_NET"
    SMTP_SERVERS: "$HOME_NET"
    SQL_SERVERS: "$HOME_NET"
    DNS_SERVERS: "$HOME_NET"
    TELNET_SERVERS: "$HOME_NET"
    AIM_SERVERS: "$EXTERNAL_NET"
    DC_SERVERS: "$HOME_NET"
    DNP3_SERVER: "$HOME_NET"
    DNP3_CLIENT: "$HOME_NET"
    MODBUS_CLIENT: "$HOME_NET"
    MODBUS_SERVER: "$HOME_NET"
    ENIP_CLIENT: "$HOME_NET"
    ENIP_SERVER: "$HOME_NET"
 #端口组
  port-groups:
    HTTP_PORTS: "80"
    SHELLCODE_PORTS: "!80"
    ORACLE_PORTS: 1521
    SSH_PORTS: 22
    DNP3_PORTS: 20000
    MODBUS_PORTS: 502
    FILE_DATA_PORTS: "[$HTTP_PORTS,110,143]"
    FTP_PORTS: 21
 
##
## Step 2: select outputs to enable
##  选择要启用的输出
 
# The default logging directory.  Any log or output file will be
# placed here if its not specified with a full path name. This can be
# overridden with the -l command line parameter.
#默认日志记录目录。 如果未使用完整路径名指定任何日志或输出文件,则此处将放置在此处。 
#可以使用-l命令行参数覆盖它。
default-log-dir: /var/log/suricata/
 
# global stats configuration #global stats配置
stats:
  enabled: yes
  # The interval field (in seconds) controls at what interval
  # interval字段(以秒为单位)控制在什么时间间隔
  # the loggers are invoked.
  # 调用记录器。  下面的意思间隔8秒
  interval: 8
  # Add decode events as stats.
  # 将解码事件添加为统计信息。
  #decoder-events: true
  # Add stream events as stats.
  # 将流事件添加为统计信息。
  #stream-events: false
 
# Configure the type of alert (and other) logging you would like.
# 配置您想要的警报(和其他)日志记录的类型。
outputs:
  # a line based alerts log similar to Snort's fast.log
  # 一个基于行的警报日志类似于Snort的fast.log
  - fast:
      enabled: yes
      filename: fast.log
      append: yes
      #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram'
 
  # Extensible Event Format (nicknamed EVE) event log in JSON format
  # JSON格式的可扩展事件格式(昵称EVE)事件日志
  - eve-log:
      enabled: yes
      filetype: regular #regular|syslog|unix_dgram|unix_stream|redis
      filename: eve.json
      #prefix: "@cee: " # prefix to prepend to each log entry
      # the following are valid when type: syslog above
      # 键入以下内容时有效:syslog above
      #identity: "suricata"
      #facility: local5
      #level: Info ## possible levels: Emergency, Alert, Critical,    可能的级别:紧急,警报,严重,
                   ## Error, Warning, Notice, Info, Debug   错误,警告,通知,信息,调试
      #redis:
      #  server: 127.0.0.1
      #  port: 6379
      #  async: true ## if redis replies are read asynchronously   ##如果redis回复是异步读取的
      #  mode: list ## possible values: list|lpush (default), rpush, channel|publish   可能的值:list | lpush(默认),rpush,channel | publish
      #             ## lpush and rpush are using a Redis list. "list" is an alias for lpush   lpush和rpush正在使用Redis列表。“list”是lpush的别名
      #             ## publish is using a Redis channel. "channel" is an alias for publish  publish正在使用Redis频道。“channel”是发布的别名
      #  key: suricata ## key or channel to use (default to suricata) 键或使用的频道(默认为suricata)
      # Redis pipelining set up. This will enable to only do a query every  Redis管道设置。这样就可以只进行查询
      # 'batch-size' events. This should lower the latency induced by network  'batch-size'事件。这应该可以降低网络引起的延迟
      # connection at the cost of some memory. There is no flushing implemented 连接以某些内存为代价。没有实施冲洗
      # so this setting as to be reserved to high traffic suricata. 所以这个设置保留给高流量的suricata。
      #  pipelining:
      #    enabled: yes ## set enable to yes to enable query pipelining set  #enable为yes以启用查询流水线操作
      #    batch-size: 10 ## number of entry to keep in buffer  保留在缓冲区中的条目数
 
      # Include top level metadata. Default yes.  #包括顶级元数据。默认是。
      #metadata: no
 
      # include the name of the input pcap file in pcap file processing mode #包括pcap文件处理模式下输入pcap文件的名称
      pcap-file: false
 
      # Community Flow ID   #社区流ID
      # Adds a 'community_id' field to EVE records. These are meant to give #在EVE记录中添加'community_id'字段。这些都是为了给出
      # a records a predictable flow id that can be used to match records to #a记录可用于匹配记录的可预测流ID
      # output of other tools such as Bro.  #Bro等其他工具的输出。
      #
      # Takes a 'seed' that needs to be same across sensors and tools  #采用传感器和工具需要相同的“种子”
      # to make the id less predictable. #使id不易预测。
 
      # enable/disable the community id feature.  #enable / disable社区ID功能。
      community-id: false
      # Seed value for the ID output. Valid values are 0-65535.  #ID输出的种子值。有效值为0-65535。
      community-id-seed: 0
 
      # HTTP X-Forwarded-For support by adding an extra field or overwriting
      # the source or destination IP address (depending on flow direction)
      # with the one reported in the X-Forwarded-For HTTP header. This is
      # helpful when reviewing alerts for traffic that is being reverse
      # or forward proxied.
      ##HTTP X-Forwarded-通过添加额外字段或覆盖X-Forwarded-For 
      ##HTTP标头中报告的源或目标IP地址(取决于流向)来获得支持。 在查看正在反向或向前代理的流量的警报时,这非常有用。
      xff:
        enabled: no
        # Two operation modes are available, "extra-data" and "overwrite".
        # 有两种操作模式,“额外数据”和“覆盖”。
        mode: extra-data
        # Two proxy deployments are supported, "reverse" and "forward". In
        # a "reverse" deployment the IP address used is the last one, in a
        # "forward" deployment the first IP address is used.
        # 支持两种代理部署,“反向”和“转发”。 
        # 在“反向”部署中,使用的IP地址是最后一个,在“转发”部署中使用第一个IP地址。
        deployment: reverse
        # Header name where the actual IP address will be reported, if more
        # than one IP address is present, the last IP address will be the
        # one taken into consideration.
        # 将报告实际IP地址的标头名称,如果存在多个IP地址,则最后一个IP地址将被考虑。
        header: X-Forwarded-For
 
      types:
        - alert:
            # payload: yes             # 在Base64中启用转储有效负载 enable dumping payload in Base64 
            # payload-buffer-size: 4kb # 在eve-log中输出的有效负载缓冲区的最大大小 max size of payload buffer to output in eve-log
            # payload-printable: yes   # 以可打印(有损)格式启用转储有效负载 enable dumping payload in printable (lossy) format
            # packet: yes              # 启用数据包转储(没有流段)enable dumping of packet (without stream segments)
            # http-body: yes           # 启用在Base64中转储http正文enable dumping of http body in Base64
            # http-body-printable: yes # 启用以可打印格式转储http正文enable dumping of http body in printable format
            # metadata: no             # 启用包含警报的应用层元数据。 默认是 enable inclusion of app layer metadata with alert. Default yes
 
            # 使用以下命令启用已标记数据包的记录 Enable the logging of tagged packets for rules using the
            # "tag" keyword.
            tagged-packets: yes
        - http:
            extended: yes     # 启用此功能以获取扩展日志记录 enable this for extended logging information
            # custom允许在eve-log中包含其他http字段 custom allows additional http fields to be included in eve-log
            # 下面的示例在取消注释时添加了三个附加字段 the example below adds three additional fields when uncommented
            #custom: [Accept-Encoding, Accept-Language, Authorization]
        - dns:
            # 此配置使用新的DNS日志记录格式 This configuration uses the new DNS logging format,
            # 旧配置仍然可用 the old configuration is still available:
            # http://suricata.readthedocs.io/en/latest/configuration/suricata-yaml.html#eve-extensible-event-format
            # 使用新格式的版本2日志记录 Use version 2 logging with the new format:
            # DNS answers will be logged in one single event
            # rather than an event for each of it.
            # DNS应答将记录在一个事件中,而不是每个事件的事件中。
            # Without setting a version the version
            # will fallback to 1 for backwards compatibility.
            # 在不设置版本的情况下,版本将回退到1以实现向后兼容性。
            version: 2
 
            # Enable/disable this logger.开启或关闭这个记录器默认启用 Default: enabled.
            #enabled: no
 
            # 控制请求和响应的日志记录 Control logging of requests and responses:
            # 请求:启用DNS查询的记录 - requests: enable logging of DNS queries
            # 响应:启用DNS应答的记录- responses: enable logging of DNS answers
            # 默认情况下,会记录请求和响应。 By default both requests and responses are logged.
            #requests: no
            #responses: no
 
            # 应答记录的格式 Format of answer logging:
            # 详细:每个应答的数组项目 - detailed: array item per answer
            # 分组:按类型汇总的应答 - grouped: answers aggregated by type
            # 默认所有 Default: all
            #formats: [detailed, grouped]
 
            # 应答要记录的类型。 Answer types to log.
            # 默认全部 Default: all
            #types: [a, aaaa, cname, mx, ns, ptr, txt]
        - tls:
            extended: yes     # 启用此功能以获取扩展日志记录  enable this for extended logging information
            # 输出TLS事务,其中使用一个会话来恢复  output TLS transaction where the session is resumed using a
            # session id
            #session-resumption: no
            # custom允许控制包含的tls字段 custom allows to control which tls fields that are included
            # in eve-log
            #custom: [subject, issuer, session_resumed, serial, fingerprint, sni, version, not_before, not_after, certificate, chain, ja3]
        - files:
            force-magic: no   # 在所有记录的文件上强制记录魔法 force logging magic on all logged files
            # 强制记录校验和,可用的哈希函数是md5 force logging of checksums, available hash functions are md5,
            # sha1 and sha256
            #force-hash: [md5]
        #- drop:
        #    alerts: yes      # 记录导致丢弃的警报 log alerts that caused drops
        #    flows: all       # 开始或全部:'开始'只记录一滴 start or all: 'start' logs only a single drop
        #                     # 每个流动方向。 所有日志都丢弃了pkt。 per flow direction. All logs each dropped pkt.
        - smtp:
            #extended: yes # 启用此功能以获取扩展日志记录 enable this for extended logging information
            # this includes: bcc, message-id, subject, x_mailer, user-agent
            # 从列表中记录自定义字段 custom fields logging from the list:
            #  reply-to, bcc, message-id, subject, x-mailer, user-agent, received,
            #  x-originating-ip, in-reply-to, references, importance, priority,
            #  sensitivity, organization, content-md5, date
            #custom: [received, x-mailer, x-originating-ip, relays, reply-to, bcc]
            # output md5 of fields: body, subject
            # 对于你需要设置app-layer.protocols.smtp.mime.body-md5 for the body you need to set app-layer.protocols.smtp.mime.body-md5
            # to yes
            #md5: [body, subject]
 
        #- dnp3
        #- nfs
        #- smb
        #- tftp
        #- ikev2
        #- krb5
        - dhcp:
            # DHCP日志记录需要Rust DHCP logging requires Rust.
            enabled: no
            # When extended mode is on, all DHCP messages are logged
            # with full detail. When extended mode is off (the
            # default), just enough information to map a MAC address
            # to an IP address is logged.
            # 启用扩展模式后,将记录所有DHCP消息的完整详细信息。 
            # 当扩展模式关闭(默认)时,会记录足够的信息以将MAC地址映射到IP地址。
            extended: no
        - ssh
        - stats:
            totals: yes       # 所有线程的统计信息合并在一起 stats for all threads merged together
            threads: no       # 每线程统计  per thread stats
            deltas: no        # 包括delta值 include delta values
        # 双向流动 bi-directional flows
        - flow
        # 单向流动 uni-directional flows
        #- netflow
 
        # Metadata event type. Triggered whenever a pktvar is saved
        # and will include the pktvars, flowvars, flowbits and flowints.
        # 元数据事件类型。 保存pktvar时触发,包括pktvars,flowvars,flowbits和flowint。
        #- metadata
 
  # alert output for use with Barnyard2  用于Barnyard2的警报输出
  - unified2-alert:    # unified2警报
      enabled: no
      filename: unified2.alert
 
      # File size limit.  Can be specified in kb, mb, gb.  Just a number
      # is parsed as bytes.
      # 文件大小限制。 可以用kb,mb,gb指定。 只是一个数字被解析为字节。
      #limit: 32mb
 
      # By default unified2 log files have the file creation time (in
      # unix epoch format) appended to the filename. Set this to yes to
      # disable this behaviour.
      # 默认情况下,unified2日志文件的文件数据在(以unix epoch格式)附加到文件名中。 
      # 将此设置为yes可禁用此行为。
      #nostamp: no
 
      # Sensor ID field of unified2 alerts. 统一2警报的传感器ID字段。
      #sensor-id: 0
 
      # Include payload of packets related to alerts. Defaults to true, set to
      # false if payload is not required.
      # 包括与警报相关的数据包的有效负载。 默认为true,如果不需要有效负载,则设置为false。
      #payload: yes
 
      # HTTP X-Forwarded-For support by adding the unified2 extra header or
      # overwriting the source or destination IP address (depending on flow
      # direction) with the one reported in the X-Forwarded-For HTTP header.
      # This is helpful when reviewing alerts for traffic that is being reverse
      # or forward proxied.
      # HTTP X-Forwarded-通过添加unified2额外标头或覆盖X-Forwarded-For HTTP标头中报告的源或目标IP地址(取决于流向)来获得支持。
      # 在查看正在反向或向前代理的流量的警报时,这很有用。
      xff:
        enabled: no
        # Two operation modes are available, "extra-data" and "overwrite". Note
        # that in the "overwrite" mode, if the reported IP address in the HTTP
        # X-Forwarded-For header is of a different version of the packet
        # received, it will fall-back to "extra-data" mode.
        # 有两种操作模式,“额外数据”和“覆盖”。 请注意,在“覆盖”模式下,
        # 如果HTTP X-Forwarded-For标头中报告的IP地址是收到的数据包的不同版本,则它将回退到“额外数据”模式。
        mode: extra-data
        # Two proxy deployments are supported, "reverse" and "forward". In
        # a "reverse" deployment the IP address used is the last one, in a
        # "forward" deployment the first IP address is used.
        # 支持两种代理部署,“反向”和“转发”。 在“反向”部署中,使用的IP地址是最后一个,在“转发”部署中使用第一个IP地址。
        deployment: reverse
        # Header name where the actual IP address will be reported, if more
        # than one IP address is present, the last IP address will be the
        # one taken into consideration.
        # 将报告实际IP地址的标头名称,如果存在多个IP地址,则最后一个IP地址将被考虑。
        header: X-Forwarded-For
 
  # a line based log of HTTP requests (no alerts)
  - http-log:
      enabled: no
      filename: http.log
      append: yes
      #extended: yes     # 启用此功能以获取扩展日志记录 enable this for extended logging information
      #custom: yes       # 启用自定义日志记录格式(由customformat定义) enabled the custom logging format (defined by customformat)
      #customformat: "%{%D-%H:%M:%S}t.%z %{X-Forwarded-For}i %H %m %h %u %s %B %a:%p -> %A:%P"
      #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram'
 
  # a line based log of TLS handshake parameters (no alerts)
  # 基于行的TLS握手参数日志(无警报)
  - tls-log:
      enabled: no  # 记录TLS连接。 Log TLS connections.
      filename: tls.log # 用于存储TLS日志的文件 File to store TLS logs.
      append: yes
      #extended: yes     # 记录指纹等扩展信息  Log extended information like fingerprint
      #custom: yes       #  启用自定义日志记录格式(由customformat定义) enabled the custom logging format (defined by customformat)
      #customformat: "%{%D-%H:%M:%S}t.%z %a:%p -> %A:%P %v %n %d %D"
      #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram'
      # 输出TLS事务,其中使用一个会话恢复 output TLS transaction where the session is resumed using a
      # session id
      #session-resumption: no
 
  # output module to store certificates chain to disk
  # 输出模块将证书链存储到磁盘
  - tls-store:
      enabled: no
      #certs-log-dir: certs # 用于存储证书文件的目录 directory to store the certificates files
 
  # a line based log of DNS requests and/or replies (no alerts)
  # 基于行的DNS请求和/或回复日志(无警报)
  # Note: not available when Rust is enabled (--enable-rust).
  # 注意:启用Rust时不可用(--enable-rust)。
  - dns-log:
      enabled: no
      filename: dns.log
      append: yes
      #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram'
 
  # Packet log... log packets in pcap format. 3 modes of operation: "normal"
  # "multi" and "sguil".
  # 数据包日志...以pcap格式记录数据包。 3种操作模式:“正常”“多”和“sguil”。
  # In normal mode a pcap file "filename" is created in the default-log-dir,
  # or are as specified by "dir".
  # 在正常模式下,pcap文件“filename”在default-log-dir中创建,或者由“dir”指定。
  # In multi mode, a file is created per thread. This will perform much
  # better, but will create multiple files where 'normal' would create one.
  # 在多模式下,每个线程创建一个文件。 这将表现得更好,但会创建多个文件,其中'normal'会创建一个。
  # In multi mode the filename takes a few special variables:
  # 在多模式下,文件名采用一些特殊变量
  # - %n -- thread number
  # - %i -- thread id
  # - %t -- timestamp (secs or secs.usecs based on 'ts-format'
  # E.g. filename: pcap.%n.%t
  #
  # Note that it's possible to use directories, but the directories are not
  # created by Suricata. E.g. filename: pcaps/%n/log.%s will log into the
  # per thread directory.
  # 请注意,可以使用目录,但Suricata不会创建目录。 例如。 filename:pcaps /%n / log。%s将登录到每个线程目录。
  #
  # Also note that the limit and max-files settings are enforced per thread.
  # 另请注意,每个线程都强制执行limit和max-files设置。
  # So the size limit when using 8 threads with 1000mb files and 2000 files
  # is: 8*1000*2000 ~ 16TiB.
  # 因此,当使用8个线程与1000mb文件和2000个文件时的大小限制是:8 * 1000 * 2000~16TiB。
  #
  # In Sguil mode "dir" indicates the base directory. In this base dir the
  # pcaps are created in th directory structure Sguil expects:
  # 在Sguil模式中,“dir”表示基目录。 在这个基础目录中,pcaps是在目录结构中创建的,Sguil期望:
  #
  # $sguil-base-dir/YYYY-MM-DD/$filename.<timestamp>
  #
  # By default all packets are logged except:
  # 默认情况下,将记录所有数据包,
  # - TCP streams beyond stream.reassembly.depth  TCP流超出stream.reassembly.depth
  # - encrypted streams after the key exchange  密钥交换后的加密流
  #
  - pcap-log:
      enabled: no
      filename: log.pcap
 
      # File size limit.  Can be specified in kb, mb, gb.  Just a number
      # is parsed as bytes.
      # 文件大小限制。 可以用kb,mb,gb指定。 只是一个数字被解析为字节。
      limit: 1000mb
 
      # If set to a value will enable ring buffer mode. Will keep Maximum of "max-files" of size "limit"
      # 如果设置为某个值,则启用环形缓冲区模式。 将保持最大“max-files”大小“限制”
      max-files: 2000
 
      # Compression algorithm for pcap files. Possible values: none, lz4.
      # pcap文件的压缩算法。 可能的值:none,lz4。
      # Enabling compression is incompatible with the sguil mode. Note also
      # that on Windows, enabling compression will *increase* disk I/O.
      # 启用压缩与sguil模式不兼容。 另请注意,在Windows上,启用压缩将*增加*磁盘I / O.
      compression: none
 
      # Further options for lz4 compression. The compression level can be set
      # to a value between 0 and 16, where higher values result in higher
      # compression.
      # lz4压缩的更多选项。 压缩级别可以设置为0到16之间的值,其中较高的值会导致较高的压缩。
      #lz4-checksum: no
      #lz4-level: 0
 
      mode: normal # normal, multi or sguil.
 
      # Directory to place pcap files. If not provided the default log
      # directory will be used. Required for "sguil" mode.
      # 放置pcap文件的目录。 如果未提供,将使用默认日志目录。 “sguil”模式需要。
      #dir: /nsm_data/
 
      #ts-format: usec # sec or usec second format (default) is filename.sec usec is filename.sec.usec
                       # sec或usec第二种格式(默认)是filename.sec usec是filename.sec.usec
      use-stream-depth: no #If set to "yes" packets seen after reaching stream inspection depth are ignored. "no" logs all packets
                           # 如果设置为“是”,则在到达流检查深度后看到的数据包将被忽略。 “no”记录所有数据包
      honor-pass-rules: no # If set to "yes", flows in which a pass rule matched will stopped being logged.
                           # 如果设置为“yes”,则将停止记录匹配的传递规则的流。
 
  # a full alerts log containing much information for signature writers
  # or for investigating suspected false positives.
  # 一个完整的警报日志,包含签名编写者的大量信息或调查可疑的误报。
  - alert-debug:
      enabled: no
      filename: alert-debug.log
      append: yes
      #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram'
 
  # alert output to prelude (https://www.prelude-siem.org/) only
  # available if Suricata has been compiled with --enable-prelude
  # 警报输出前奏(https://www.prelude-siem.org/)仅在Suricata编译时使用--enable-prelude时才可用
  - alert-prelude:
      enabled: no
      profile: suricata
      log-packet-content: no
      log-packet-header: yes
 
  # Stats.log contains data from various counters of the Suricata engine.
  # Stats.log包含来自Suricata引擎的各种计数器的数据。
  - stats:
      enabled: yes
      filename: stats.log
      append: yes       # 附加到文件(是)或覆盖它(否) append to file (yes) or overwrite it (no)
      totals: yes       # 所有线程的统计信息合并在一起 stats for all threads merged together
      threads: no       # 每线程统计 per thread stats
      #null-values: yes  # 打印值为0的计数器 print counters that have value 0
 
  # a line based alerts log similar to fast.log into syslog
  # 基于行的警报日志类似于fast.log进入syslog
  - syslog:
      enabled: no
      # reported identity to syslog. If ommited the program name (usually
      # suricata) will be used.
      # 向syslog报告身份。 如果省略,将使用程序名称(通常是suricata)。
      #identity: "suricata"
      facility: local5
      #level: Info ## possible levels: Emergency, Alert, Critical,
                   ## Error, Warning, Notice, Info, Debug
 
  # a line based information for dropped packets in IPS mode
  # IPS模式下丢弃的数据包的基于行的信息
  - drop:
      enabled: no
      filename: drop.log
      append: yes
      #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram'
 
  # Output module for storing files on disk. Files are stored in a
  # directory names consisting of the first 2 characters of the
  # SHA256 of the file. Each file is given its SHA256 as a filename.
  #用于在磁盘上存储文件的输出模块。 文件存储在由文件SHA256的前2个字符组成的目录名中。 每个文件都以SHA256作为文件名。
  # When a duplicate file is found, the existing file is touched to
  # have its timestamps updated.
  # 找到重复文件时,会触摸现有文件以更新其时间戳。
  # Unlike the older filestore, metadata is not written out by default
  # as each file should already have a "fileinfo" record in the
  # eve.log. If write-fileinfo is set to yes, the each file will have
  # one more associated .json files that consists of the fileinfo
  # record. A fileinfo file will be written for each occurrence of the
  # file seen using a filename suffix to ensure uniqueness.
  #与旧文件存储库不同,默认情况下不会写出元数据,因为每个文件应该已经在eve.log中具有“fileinfo”记录。 
  #如果write-fileinfo设置为yes,则每个文件将包含一个由fileinfo记录组成的关联.json文件。 
  #将使用文件名后缀为每次出现的文件写入fileinfo文件,以确保唯一性。
  # To prune the filestore directory see the "suricatactl filestore
  # prune" command which can delete files over a certain age.
  # 要修剪filestore目录,请参阅“suricatactl filestore prune”命令,该命令可以删除特定年龄的文件。
  - file-store:
      version: 2
      enabled: no
 
      # Set the directory for the filestore. If the path is not
      # absolute it will be relative to the default-log-dir.
      # 设置文件存储的目录。 如果路径不是绝对路径,则相对于 default-log-dir。
      #dir: filestore
 
      # Write out a fileinfo record for each occurrence of a
      # file. Disabled by default as each occurrence is already logged
      # as a fileinfo record to the main eve-log.
      # 为每次出现的文件写出一个fileinfo记录。
      # 默认情况下禁用,因为每次出现都已作为fileinfo记录记录到主eve-log。
      #write-fileinfo: yes
 
      # Force storing of all files. Default: no.
      # 强制存储所有文件。 默认值:no。
      #force-filestore: yes
 
      # Override the global stream-depth for sessions in which we want
      # to perform file extraction. Set to 0 for unlimited.
      # 覆盖我们要执行文件提取的会话的全局流深度。 设置为0表示无限制。
      #stream-depth: 0
 
      # Uncomment the following variable to define how many files can
      # remain open for filestore by Suricata. Default value is 0 which
      # means files get closed after each write
      # 取消注释以下变量以定义Suricata为文件存储保留的文件数。 
      # 默认值为0表示文件在每次写入后关闭
      #max-open-files: 1000
 
      # Force logging of checksums, available hash functions are md5,
      # sha1 and sha256. Note that SHA256 is automatically forced by
      # the use of this output module as it uses the SHA256 as the
      # file naming scheme.
      #强制记录校验和,可用的哈希函数是md5,sha1和sha256。 
      #请注意,使用此输出模块会自动强制使用SHA256,因为它使用SHA256作为文件命名方案。
      #
      #force-hash: [sha1, md5]
      # NOTE: X-Forwarded configuration is ignored if write-fileinfo is disabled
      # HTTP X-Forwarded-For support by adding an extra field or overwriting
      # the source or destination IP address (depending on flow direction)
      # with the one reported in the X-Forwarded-For HTTP header. This is
      # helpful when reviewing alerts for traffic that is being reverse
      # or forward proxied.
      # 注意:如果禁用write-fileinfo,则忽略X-Forwarded配置
      # 支持 HTTP X-Forwarded-For:通过添加额外字段,或用 X-Forwarded-For HTTP 头中
      # 报告的地址覆盖源或目标 IP 地址(取决于流向)。在审查经过反向代理或正向代理的流量的警报时,这非常有用。
      xff:
        enabled: no
        # Two operation modes are available, "extra-data" and "overwrite".
        # 有两种操作模式,“额外数据”和“覆盖”。
        mode: extra-data
        # Two proxy deployments are supported, "reverse" and "forward". In
        # a "reverse" deployment the IP address used is the last one, in a
        # "forward" deployment the first IP address is used.
        # 支持两种代理部署,“反向”和“转发”。 
        # 在“反向”部署中,使用的IP地址是最后一个,在“转发”部署中使用第一个IP地址。
        deployment: reverse
        # Header name where the actual IP address will be reported, if more
        # than one IP address is present, the last IP address will be the
        # one taken into consideration.
        # 将报告实际IP地址的标头名称,如果存在多个IP地址,则最后一个IP地址将被考虑。
        header: X-Forwarded-For
 
  # output module to store extracted files to disk (old style, deprecated)
  # 输出模块将提取的文件存储到磁盘(旧样式,已弃用)
  # The files are stored to the log-dir in a format "file.<id>" where <id> is
  # an incrementing number starting at 1. For each file "file.<id>" a meta
  # file "file.<id>.meta" is created. Before they are finalized, they will
  # have a ".tmp" suffix to indicate that they are still being processed.
  #
  #这些文件以“file.<id>”格式存储到log-dir中,其中<id>是从1开始的递增编号。
  #对于每个文件“file.<id>”,会创建对应的元文件“file.<id>.meta”。
  # 在最终确定之前,它们将具有“.tmp”后缀,表示它们仍在处理中。
  #
  # If include-pid is yes, then the files are instead "file.<pid>.<id>", with
  # meta files named as "file.<pid>.<id>.meta"
  # 如果include-pid为yes,则文件为“file。<pid>。<id>”,其中元文件名为“file。<pid>。<id> .meta”
  # File extraction depends on a lot of things to be fully done:
  # 文件提取取决于很多事情要完全完成
  # - file-store stream-depth. For optimal results, set this to 0 (unlimited)
  # 文件存储流 - 深度。 为获得最佳效果,请将其设置为0(无限制)
  # - http request / response body sizes. Again set to 0 for optimal results.
  # http请求/响应正文大小。 再次设置为0以获得最佳结果。
  # - rules that contain the "filestore" keyword.
  # 包含“filestore”关键字的规则。
  - file-store:
      enabled: no       # 设置为yes以启用 set to yes to enable
      log-dir: files    # 用于存储文件的目录 directory to store the files
      force-magic: no   # 强制记录所有存储文件的魔法 force logging magic on all stored files
      # force logging of checksums, available hash functions are md5,sha1 and sha256
      # 强制记录校验和,可用的哈希函数是md5,sha1和sha256 
      #force-hash: [md5]
      force-filestore: no # 强制存储所有文件 force storing of all files
      # override global stream-depth for sessions in which we want to
      # perform file extraction. Set to 0 for unlimited.
      # 覆盖我们要执行文件提取的会话的全局流深度。 设置为0表示无限制。
      #stream-depth: 0
      #waldo: file.waldo # waldo文件,用于跨运行存储file_id waldo file to store the file_id across runs
      # 取消注释以禁用元文件写入 uncomment to disable meta file writing
      #write-meta: no
      # uncomment the following variable to define how many files can
      # remain open for filestore by Suricata. Default value is 0 which
      # means files get closed after each write
      # 取消注释以下变量以定义Suricata可以为文件存储保持打开的文件数。 
      # 默认值为0表示文件在每次写入后关闭
      #max-open-files: 1000
      include-pid: no # set to yes to include pid in file names
 
  # output module to log files tracked in a easily parsable JSON format
  # 输出模块以便以易于分析的JSON格式跟踪文件
  - file-log:
      enabled: no
      filename: files-json.log
      append: yes
      #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram'
 
      force-magic: no   # 在所有记录的文件上强制记录魔法 force logging magic on all logged files
      # force logging of checksums, available hash functions are md5, sha1 and sha256
      # 强制记录校验和,可用的哈希函数是md5,sha1和sha256
      #force-hash: [md5]
 
  # Log TCP data after stream normalization
  # 流规范化后记录TCP数据
  # 2 types: file or dir. File logs into a single logfile. Dir creates
  # 2 files per TCP session and stores the raw TCP data into them.
  # 2 种类型:文件(file)或目录(dir)。file 记录到单个日志文件中。
  # Dir为每个TCP会话创建2个文件,并将原始TCP数据存储到它们中。
  # Using 'both' will enable both file and dir modes.
  # 使用'both'将启用文件和目录模式。
  # 注意:受stream.depth的限制 Note: limited by stream.depth
  - tcp-data:
      enabled: no
      type: file
      filename: tcp-data.log
 
  # Log HTTP body data after normalization, dechunking and unzipping.
  #在规范化,去块和解压缩后记录HTTP正文数据。
  # 2 types: file or dir. File logs into a single logfile. Dir creates
  # 2 files per HTTP session and stores the normalized data into them.
  # 2 种类型:文件(file)或目录(dir)。file 记录到单个日志文件中。 dir 为每个 HTTP 会话创建 2 个文件,并将规范化数据存储到其中。
  # Using 'both' will enable both file and dir modes.
  # 使用'both'将启用文件和目录模式。
  # 注意:受内容限制设置的限制 Note: limited by the body limit settings
  - http-body-data:
      enabled: no
      type: file
      filename: http-data.log
 
  # Lua Output Support - execute lua script to generate alert and event output.
  # Lua输出支持 - 执行lua脚本以生成警报和事件输出。 
  # Documented at:
  # https://suricata.readthedocs.io/en/latest/output/lua-output.html
  - lua:
      enabled: no
      #scripts-dir: /etc/suricata/lua-output/
      scripts:
      #   - script1.lua
 
# Logging configuration.  This is not about logging IDS alerts/events, but
# output about what Suricata is doing, like startup messages, errors, etc.
#记录配置。 这不是关于记录IDS警报/事件,而是关于Suricata正在做什么的输出,例如启动消息,错误等。
logging:
  # The default log level, can be overridden in an output section.
  # Note that debug level logging will only be emitted if Suricata was
  # compiled with the --enable-debug configure option.
  #可以在输出节中覆盖默认日志级别。
  # 请注意,只有在使用--enable-debug configure选项编译Suricata时才会发出调试级别日志记录。
  # This value is overridden by the SC_LOG_LEVEL env var.
  # SC_LOG_LEVEL env var会覆盖此值。
  default-log-level: notice
 
  # The default output format.  Optional parameter, should default to
  # something reasonable if not provided.  Can be overridden in an
  # output section.  You can leave this out to get the default.
  # 默认输出格式。 可选参数,如果没有提供,应该默认为合理的。 
  # 可以在输出节中重写。 您可以将其保留以获取默认值。
  #
  # This value is overridden by the SC_LOG_FORMAT env var.
  # SC_LOG_FORMAT env var会覆盖此值
  #default-log-format: "[%i] %t - (%f:%l) <%d> (%n) -- "
 
  # A regex to filter output.  Can be overridden in an output section.
  # 用于过滤输出的正则表达式。 可以在输出节中重写。
  # Defaults to empty (no filter).
  # 默认为空(无过滤器)。
  # This value is overridden by the SC_LOG_OP_FILTER env var.
  # SC_LOG_OP_FILTER env var会覆盖此值。
  default-output-filter:
 
  # Define your logging outputs.  If none are defined, or they are all
  # disabled you will get the default - console output.
  #定义日志记录输出。 如果没有定义,或者它们都被禁用,您将获得默认 - 控制台输出。
  outputs:
  - console:
      enabled: yes
      # type: json
  - file:
      enabled: yes
      level: info
      filename: /var/log/suricata/suricata.log
      # type: json
  - syslog:
      enabled: no
      facility: local5
      format: "[%i] <%d> -- "
      # type: json
 
 
##
## Step 4: configure common capture settings
##         配置常见捕获设置
## See "Advanced Capture Options" below for more options, including NETMAP
## and PF_RING.
##有关更多选项,请参阅下面的“高级捕获选项”,包括NETMAP和PF_RING。
 
# Linux high speed capture support
# Linux高速捕获支持
af-packet:
  - interface: eth0
    # Number of receive threads. "auto" uses the number of cores
    # 接收线程数。 “auto”使用核心数
    #threads: auto
    # Default clusterid. AF_PACKET will load balance packets based on flow.
    # 默认的clusterid。 AF_PACKET将根据流量对数据包进行负载均衡。
    cluster-id: 99
    # Default AF_PACKET cluster type. AF_PACKET can load balance per flow or per hash.
    # 默认AF_PACKET群集类型。 AF_PACKET可以为每个流或每个哈希加载平衡。
    # This is only supported for Linux kernel > 3.1
    # 这仅适用于Linux内核> 3.1
    # possible value are(可以使用的值):
    #  * cluster_round_robin: 循环负载平衡 round robin load balancing 
    #  * cluster_flow: 给定流的所有数据包都发送到同一个套接字 all packets of a given flow are send to the same socket
    #  * cluster_cpu: CPU在内核中处理的所有数据包都发送到同一个套接字 all packets treated in kernel by a CPU are send to the same socket
    #  * cluster_qm: 所有通过网卡链接到RSS队列的数据包都被发送到同一个数据包 all packets linked by network card to a RSS queue are sent to the same
    #  socket. 至少需要Linux 3.14。 Requires at least Linux 3.14.
    #  * cluster_random: 数据包随机发送到套接字,但带有均分。 packets are sent randomly to sockets but with an equipartition.
    #  至少需要Linux 3.14。 Requires at least Linux 3.14.
    #  * cluster_rollover: 内核在插入每个套接字的插槽之间旋转,然后移动到下一个插槽 至少需要Linux 3.10。 kernel rotates between sockets filling each socket before moving to the next. Requires at least Linux 3.10.
    #  * cluster_ebpf: eBPF文件负载均衡。 有关详细信息,请参阅doc / userguide / capture-hardware / ebpf-xdp.rst。 eBPF file load balancing. See doc/userguide/capture-hardware/ebpf-xdp.rst for more info.
    # Recommended modes are cluster_flow on most boxes and cluster_cpu or cluster_qm on system
    # with capture card using RSS (require cpu affinity tuning and system irq tuning)
    # 建议的模式是大多数盒子上的cluster_flow和使用RSS的捕获卡系统上的cluster_cpu或cluster_qm(需要cpu亲和性调整和系统irq调整)
    cluster-type: cluster_flow
    # In some fragmentation case, the hash can not be computed. If "defrag" is set
    # to yes, the kernel will do the needed defragmentation before sending the packets.
    # 在某些碎片情况下,无法计算散列。 如果“defrag”设置为yes,则内核将在发送数据包之前执行所需的碎片整理。
    defrag: yes
    # After Linux kernel 3.10 it is possible to activate the rollover option: if a socket is
    # full then kernel will send the packet on the next socket with room available. This option
    # can minimize packet drop and increase the treated bandwidth on single intensive flow.
    #在Linux内核3.10之后,可以激活翻转选项:如果套接字已满,则内核将在具有可用空间的下一个套接字上发送数据包。 
    #此选项可以最大限度地减少数据包丢失并增加单个密集流上的处理带宽。
    #rollover: yes
    # To use the ring feature of AF_PACKET, set 'use-mmap' to yes
    # 要使用 AF_PACKET 的环形缓冲区(ring)功能,请将 'use-mmap' 设置为 yes
    #use-mmap: yes
    # Lock memory map to avoid it goes to swap. Be careful that over subscribing could lock
    # your system
    # 锁定内存映射以避免它进行交换。 请注意,超过订阅可能会锁定您的系统
    #mmap-locked: yes
    # Use tpacket_v3 capture mode, only active if use-mmap is true
    # Don't use it in IPS or TAP mode as it causes severe latency
    #使用 tpacket_v3 捕获模式,仅在 use-mmap 为 true 时才生效。不要在 IPS 或 TAP 模式下使用它,因为它会导致严重的延迟
    #tpacket-v3: yes
    # Ring size will be computed with respect to max_pending_packets and number
    # of threads. You can set manually the ring size in number of packets by setting
    # the following value. If you are using flow cluster-type and have really network
    # intensive single-flow you could want to set the ring-size independently of the number
    # of threads:
    #将根据max_pending_packets和线程数计算环大小。 您可以通过设置以下值手动设置环数大小的数据包。 
    #如果您正在使用流集群类型且具有真正的网络密集型单流,您可能希望设置环大小而与线程数无关
    #ring-size: 2048
    # Block size is used by tpacket_v3 only. It should set to a value high enough to contain
    # a decent number of packets. Size is in bytes so please consider your MTU. It should be
    # a power of 2 and it must be multiple of page size (usually 4096).
    # 块大小仅由tpacket_v3使用。 它应该设置为足够高的值以包含相当数量的数据包。 
    #大小以字节为单位,请考虑您的MTU。 它应该是2的幂,它必须是页面大小的倍数(通常为4096)。
    #block-size: 32768
    # tpacket_v3 block timeout: an open block is passed to userspace if it is not
    # filled after block-timeout milliseconds.
    # tpacket_v3块超时:如果在块超时毫秒后未填充打开块,则将其传递给用户空间。
    #block-timeout: 10
    # On busy system, this could help to set it to yes to recover from a packet drop
    # phase. This will result in some packets (at max a ring flush) being non treated.
    # 在繁忙的系统上,这可以帮助将其设置为是以从数据包丢弃阶段恢复。 
    # 这将导致一些数据包(最多一个环形缓冲区刷新的量)未被处理。
    #use-emergency-flush: yes
    # recv buffer size, increase value could improve performance
    # recv缓冲区大小,增加值可以提高性能
    # buffer-size: 32768
    # Set to yes to disable promiscuous mode
    # 设置为yes以禁用混杂模式
    # disable-promisc: no
    # Choose checksum verification mode for the interface. At the moment
    # of the capture, some packets may be with an invalid checksum due to
    # offloading to the network card of the checksum computation.
    # 选择接口的校验和验证模式。 在捕获时,由于校验和计算的网卡卸载,一些分组可能具有无效的校验和。
    # Possible values are(可以使用的值):
    #  - kernel: 使用内核为每个数据包发送的指示 默认use indication sent by kernel for each packet (default)
    #  - yes: 校验和验证是强制的  checksum validation is forced
    #  - no: 校验和验证已禁用 checksum validation is disabled
    #  - auto: suricata uses a statistical approach to detect when checksum off-loading is used.
    #          suricata使用统计方法来检测何时使用校验和卸载。
    # Warning: 'checksum-validation' must be set to yes to have any validation
    # 警告:'checksum-validation'必须设置为yes才能进行任何验证
    #checksum-checks: kernel
    # BPF filter to apply to this interface. The pcap filter syntax apply here.
    # BPF过滤器适用于此接口。 pcap过滤器语法适用于此处。
    #bpf-filter: port 80 or udp
    # You can use the following variables to activate AF_PACKET tap or IPS mode.
    # If copy-mode is set to ips or tap, the traffic coming to the current
    # interface will be copied to the copy-iface interface. If 'tap' is set, the
    # copy is complete. If 'ips' is set, the packet matching a 'drop' action
    # will not be copied.
    # 您可以使用以下变量激活AF_PACKET tap或IPS模式。 
    # 如果copy-mode设置为ips或tap,则进入当前接口的流量将被复制到copy-iface界面。
    # 如果设置了 'tap',则进行完整复制。 如果设置了 'ips',则不会复制与 'drop' 操作匹配的数据包。
    #copy-mode: ips
    #copy-iface: eth1
    #  For eBPF and XDP setup including bypass, filter and load balancing, please
    #  see doc/userguide/capture-hardware/ebpf-xdp.rst for more info.
    # 有关 eBPF 和 XDP 设置(包括旁路、过滤和负载均衡),请参阅 doc/userguide/capture-hardware/ebpf-xdp.rst 了解更多信息。
 
  # Put default values here. These will be used for an interface that is not in the list above.
  # 在此处输入默认值。 这些将用于不在上面列表中的接口。
  - interface: default
    #threads: auto
    #use-mmap: no
    #rollover: yes
    #tpacket-v3: yes
 
# Cross platform libpcap capture support
# 跨平台libpcap捕获支持
pcap:
  - interface: eth0
    # On Linux, pcap will try to use mmaped capture and will use buffer-size
    # as total of memory used by the ring. So set this to something bigger
    # than 1% of your bandwidth.
    #在Linux上,pcap将尝试使用mmaped捕获,并将使用缓冲区大小作为环使用的内存总量。 
    #因此,将其设置为大于带宽的1%。
    #buffer-size: 16777216
    #bpf-filter: "tcp and port 25"
    # Choose checksum verification mode for the interface. At the moment
    # of the capture, some packets may be with an invalid checksum due to
    # offloading to the network card of the checksum computation.
    # 选择接口的校验和验证模式。 在捕获时,由于校验和计算的网卡卸载,一些分组可能具有无效的校验和。
    # Possible values are:
    #  - yes: checksum validation is forced
    #  - no: checksum validation is disabled
    #  - auto: Suricata uses a statistical approach to detect when
    #  checksum off-loading is used. (default)
    # Warning: 'checksum-validation' must be set to yes to have any validation
    # 警告:'checksum-validation'必须设置为yes才能进行任何验证
    #checksum-checks: auto
    # With some accelerator cards using a modified libpcap (like myricom), you
    # may want to have the same number of capture threads as the number of capture
    # rings. In this case, set up the threads variable to N to start N threads
    # listening on the same interface.
    # 有些加速器卡使用修改后的libpcap(如myricom),您可能希望获得与捕获环数相同数量的捕获线程。
    # 在这种情况下,将threads变量设置为N以启动在同一接口上侦听的N个线程。
    #threads: 16
    # set to no to disable promiscuous mode:
    # 设置为no以禁用混杂模式:
    #promisc: no
    # set snaplen, if not set it defaults to MTU if MTU can be known
    # via ioctl call and to full capture if not.
    #设置snaplen,如果没有设置,如果可以通过ioctl调用知道MTU,则默认为MTU,否则设置为完全捕获。
    #snaplen: 1518
  # 在此处输入默认值 Put default values here
  - interface: default
    #checksum-checks: auto
 
# Settings for reading pcap files
# 用于读取pcap文件的设置
pcap-file:
  # Possible values are:
  #  - yes: checksum validation is forced
  #  - no: checksum validation is disabled
  #  - auto: Suricata uses a statistical approach to detect when
  #  checksum off-loading is used. (default)
  # Warning: 'checksum-validation' must be set to yes to have checksum tested
  checksum-checks: auto
 
# See "Advanced Capture Options" below for more options, including NETMAP and PF_RING.
# 有关更多选项,请参阅下面的“高级捕获选项”,包括NETMAP和PF_RING。
 
##
## Step 5: App Layer Protocol Configuration
##    应用层协议配置
 
# Configure the app-layer parsers. The protocols section details each protocol.
# 配置应用层解析器。 协议部分详述了每个协议。
#
# The option "enabled" takes 3 values - "yes", "no", "detection-only".
# "yes" enables both detection and the parser, "no" disables both, and
# "detection-only" enables protocol detection only (parser disabled).
#“启用”选项需要3个值 - “是”,“否”,“仅检测”。 
#“yes”启用检测和解析器,“no”禁用两者,“仅检测”仅启用协议检测(禁用解析器)。
app-layer:
  protocols:
    krb5:
      enabled: no
    ikev2:
      enabled: yes
    tls:
      enabled: yes
      detection-ports:
        dp: 443
 
      # 从客户端hello生成JA3指纹 Generate JA3 fingerprint from client hello
      ja3-fingerprints: no
 
      # What to do when the encrypted communications start:
      # 加密通信开始时该怎么办:
      # - default: keep tracking TLS session, check for protocol anomalies,
      #            inspect tls_* keywords. Disables inspection of unmodified
      #            'content' signatures.
      # 继续跟踪TLS会话,检查协议异常,检查tls_ *关键字。 禁用对未修改的“内容”签名的检查。
      # - bypass:  stop processing this flow as much as possible. No further
      #            TLS parsing and inspection. Offload flow bypass to kernel
      #            or hardware if possible.
      # 尽可能地停止处理这个流程。 没有进一步的TLS解析和检查。 如果可能,将流量旁路卸载到内核或硬件。
      # - full:    keep tracking and inspection as normal. Unmodified content
      #            keyword signatures are inspected as well.
      # 保持跟踪和检查正常。 还会检查未修改的内容关键字签名。
      # For best performance, select 'bypass'.
      # 为获得最佳性能,请选择“旁路”。
      #encryption-handling: default
 
    dcerpc:
      enabled: yes
    ftp:
      enabled: yes
      # memcap: 64mb
    ssh:
      enabled: yes
    smtp:
      enabled: yes
      # Configure SMTP-MIME Decoder
      mime:
        # Decode MIME messages from SMTP transactions
        # (may be resource intensive)
        # This field supersedes all others because it turns the entire
        # process on or off
        # 从SMTP事务解码MIME消息(可能是资源密集型)此字段取代所有其他字段,因为它会打开或关闭整个过程
        decode-mime: yes
 
        # Decode MIME entity bodies (ie. base64, quoted-printable, etc.)
        # 解码MIME实体主体(即base64,quoted-printable等)
        decode-base64: yes
        decode-quoted-printable: yes
 
        # Maximum bytes per header data value stored in the data structure
        # 存储在数据结构中的每个标头数据值的最大字节数
        # (default is 2000)
        header-value-depth: 2000
 
        # Extract URLs and save in state data structure
        # 提取URL并保存在状态数据结构中
        extract-urls: yes
        # Set to yes to compute the md5 of the mail body. You will then
        # be able to journalize it.
        # 设置为yes以计算邮件正文的md5。 然后,您就可以对其进行日记。
        body-md5: no
      # Configure inspected-tracker for file_data keyword
      # 为 file_data 关键字配置 inspected-tracker
      inspected-tracker:
        content-limit: 100000
        content-inspect-min-size: 32768
        content-inspect-window: 4096
    imap:
      enabled: detection-only
    msn:
      enabled: detection-only
    # Note: --enable-rust is required for full SMB1/2 support. W/o rust
    # only minimal SMB1 support is available.
    #注意:完全的 SMB1/2 支持需要 --enable-rust。没有 Rust 时仅提供最小的 SMB1 支持。
    smb:
      enabled: yes
      detection-ports:
        dp: 139, 445
 
      # Stream reassembly size for SMB streams. By default track it completely.
      # SMB流的流重组大小。 默认情况下完全跟踪它。
      #stream-depth: 0
 
    # Note: NFS parser depends on Rust support: pass --enable-rust
    # to configure.
    # 注意:NFS解析器依赖于Rust支持:传递--enable-rust进行配置。
    nfs:
      enabled: no
    tftp:
      enabled: no
    dns:
      # memcaps。 全球和每个流量/州。 memcaps. Globally and per flow/state.
      #global-memcap: 16mb
      #state-memcap: 512kb
 
      # How many unreplied DNS requests are considered a flood.
      # 多少个未应答的 DNS 请求会被视为泛洪(flood)。
      # If the limit is reached, app-layer-event:dns.flooded; will match.
      # 如果达到限制,app-layer-event:dns.flooded; 会匹配。
      #request-flood: 500
 
      tcp:
        enabled: yes
        detection-ports:
          dp: 53
      udp:
        enabled: yes
        detection-ports:
          dp: 53
    http:
      enabled: yes
      # memcap: 64mb
 
      # default-config:           在没有server-config匹配时使用 Used when no server-config matches
      #   personality:            默认使用的个性列表 List of personalities used by default
      #   request-body-limit:     Limit reassembly of request body for inspection
      #                           by http_client_body & pcre /P option.
      #         通过http_client_body和pcre / P选项限制请求体的重组以供检查。
      #   response-body-limit:    Limit reassembly of response body for inspection
      #                           by file_data, http_server_body & pcre /Q option.
      #         通过file_data,http_server_body和pcre / Q选项限制响应主体的重组以供检查。
      #   double-decode-path:     URI的双重解码路径部分 Double decode path section of the URI
      #   double-decode-query:    URI的双重解码查询部分 Double decode query section of the URI
      #   response-body-decompress-layer-limit:
      #                           Limit to how many layers of compression will be
      #                           decompressed. Defaults to 2.
      #           限制将解压缩多少层压缩。 默认为2。
      # server-config:            地址匹配时要使用的服务器配置列表 List of server configurations to use if address matches
      #   address:                此块的IP地址或网络列表 List of IP addresses or networks for this block
      #   personalitiy:           此块使用的个性列表 List of personalities used by this block
      #   request-body-limit:     Limit reassembly of request body for inspection
      #                           by http_client_body & pcre /P option.
      #       通过http_client_body和pcre / P选项限制请求体的重组以供检查。
      #   response-body-limit:    Limit reassembly of response body for inspection
      #                           by file_data, http_server_body & pcre /Q option.
      #    通过file_data,http_server_body和pcre / Q选项限制响应主体的重组以供检查。
      #   double-decode-path:     URI的双重解码路径部分  Double decode path section of the URI
      #   double-decode-query:    URI的双重解码查询部分  Double decode query section of the URI
      #
      #   uri-include-all:        Include all parts of the URI. By default the
      #                           'scheme', username/password, hostname and port
      #                           are excluded. Setting this option to true adds
      #                           all of them to the normalized uri as inspected
      #                           by http_uri, urilen, pcre with /U and the other
      #                           keywords that inspect the normalized uri.
      #                           Note that this does not affect http_raw_uri.
      #                           Also, note that including all was the default in
      #                           1.4 and 2.0beta1.
      #包括URI的所有部分。 默认情况下,排除“方案”,用户名/密码,主机名和端口。 
      #将此选项设置为true会将所有这些选项添加到规范化的uri中,如http_uri,urilen,pcre和/ U以及检查规范化uri的其他关键字一样。 
      #请注意,这不会影响http_raw_uri。 另请注意,包含all是1.4和2.0beta1中的默认值。
      #
      #   meta-field-limit:       Hard size limit for request and response size
      #                           limits. Applies to request line and headers,
      #                           response line and headers. Does not apply to
      #                           request or response bodies. Default is 18k.
      #                           If this limit is reached an event is raised.
      #请求和响应大小限制的硬大小限制。 适用于请求行和标题,响应行和标题。 
      #不适用于请求或响应机构。 默认值是18k。 如果达到此限制,则会引发事件。
      # 目前可用的个性  Currently Available Personalities:
      #   Minimal, Generic, IDS (default), IIS_4_0, IIS_5_0, IIS_5_1, IIS_6_0,
      #   IIS_7_0, IIS_7_5, Apache_2
      libhtp:
         default-config:
           personality: IDS
 
           # Can be specified in kb, mb, gb.  Just a number indicates it's in bytes.
           # 可以用kb,mb,gb指定。 只是一个数字表示它是以字节为单位。
           request-body-limit: 100kb
           response-body-limit: 100kb
 
           # 检查限制 inspection limits
           request-body-minimal-inspect-size: 32kb
           request-body-inspect-window: 4kb
           response-body-minimal-inspect-size: 40kb
           response-body-inspect-window: 16kb
 
           # 响应体减压(0禁用) response body decompression (0 disables)
           response-body-decompress-layer-limit: 2
 
           # auto will use http-body-inline mode in IPS mode, yes or no set it statically
           #auto将在IPS模式下使用http-body-inline模式,是或否静态设置
           http-body-inline: auto
 
           # 解压缩SWF文件。  Decompress SWF files.
           # 2 types: 'deflate', 'lzma', 'both' will decompress deflate and lzma
           # compress-depth:
           # 2种类型:'deflate','lzma','both'将解压缩deflate和lzma compress-depth:
           # Specifies the maximum amount of data to decompress,
           # set 0 for unlimited.
           #指定要解压缩的最大数据量,将0设置为无限制。
           # decompress-depth:
           # Specifies the maximum amount of decompressed data to obtain,
           # set 0 for unlimited.
           # 指定要获取的最大解压缩数据量,将0设置为无限制。
           swf-decompression:
             enabled: yes
             type: both
             compress-depth: 0
             decompress-depth: 0
 
           # Take a random value for inspection sizes around the specified value.
           # 对指定值附近的检验大小取随机值。
           # This lower the risk of some evasion technics but could lead
           # detection change between runs. It is set to 'yes' by default.
           # 这降低了一些逃避技术的风险,但可能导致运行之间的检测变化。 默认设置为“是”。
           #randomize-inspection-sizes: yes
           # If randomize-inspection-sizes is active, the value of various
           # inspection size will be choosen in the [1 - range%, 1 + range%] range
           #如果randomize-inspection-sizes有效,则各种检验大小的值将在[1  -  range%,1 + range%]范围内选择。
           # Default value of randomize-inspection-range is 10.
           #  randomize-inspection-range的默认值为10。   
           #randomize-inspection-range: 10
 
           # decoding
           double-decode-path: no
           double-decode-query: no
 
         server-config:
 
           #- apache:
           #    address: [192.168.1.0/24, 127.0.0.0/8, "::1"]
           #    personality: Apache_2
           #    #Can be specified in kb, mb, gb.  Just a number indicates it's in bytes.
           # 可以用kb,mb,gb指定。 只是一个数字表示它是以字节为单位。
           #    request-body-limit: 4096
           #    response-body-limit: 4096
           #    double-decode-path: no
           #    double-decode-query: no
 
           #- iis7:
           #    address:
           #      - 192.168.0.0/24
           #      - 192.168.10.0/24
           #    personality: IIS_7_0
           #    # Can be specified in kb, mb, gb.  Just a number indicates
           #    # it's in bytes.
           #    request-body-limit: 4096
           #    response-body-limit: 4096
           #    double-decode-path: no
           #    double-decode-query: no
 
    # Note: Modbus probe parser is minimalist due to the poor significant field
    # Only Modbus message length (greater than Modbus header length)
    # And Protocol ID (equal to 0) are checked in probing parser
    # It is important to enable detection port and define Modbus port
    # to avoid false positive
    # 注意:由于缺乏足够显著的字段,Modbus 探测解析器非常简化:探测解析器仅检查
    # Modbus 消息长度(需大于 Modbus 报头长度)和协议 ID(需等于 0)。
    # 因此,启用检测端口并定义 Modbus 端口以避免误报非常重要。
    modbus:
      # How many unreplied Modbus requests are considered a flood.
      # 多少个未应答的 Modbus 请求会被视为泛洪(flood)。
      # If the limit is reached, app-layer-event:modbus.flooded; will match.
      # 如果达到限制,app-layer-event:modbus.flooded; 会匹配。
      #request-flood: 500
 
      enabled: no
      detection-ports:
        dp: 502
      # According to MODBUS Messaging on TCP/IP Implementation Guide V1.0b, it
      # is recommended to keep the TCP connection opened with a remote device
      # and not to open and close it for each MODBUS/TCP transaction. In that
      # case, it is important to set the depth of the stream reassembling as
      # unlimited (stream.reassembly.depth: 0)
      #根据TCP / IP实施指南V1.0b上的MODBUS消息传递,建议使用远程设备保持TCP连接打开,
      #而不是为每个MODBUS / TCP事务打开和关闭它。 
      #在这种情况下,重要的是将流重组的深度设置为无限制(stream.reassembly.depth:0)
 
      # Stream reassembly size for modbus. By default track it completely.
      # 用于modbus的流重组大小。 默认情况下完全跟踪它。
      stream-depth: 0
 
    # DNP3
    dnp3:
      enabled: no
      detection-ports:
        dp: 20000
 
    # SCADA EtherNet/IP and CIP protocol support
    # SCADA EtherNet / IP和CIP协议支持
    enip:
      enabled: no
      detection-ports:
        dp: 44818
        sp: 44818
 
    # Note: parser depends on Rust support
    # 注意:解析器依赖于Rust支持
    ntp:
      enabled: no
 
    dhcp:
      enabled: no
 
# Limit for the maximum number of asn1 frames to decode (default 256)
# 限制要解码的asn1帧的最大数量(默认为256)
asn1-max-frames: 256
 
 
##############################################################################
##
## Advanced settings below
## 高级设置如下
##############################################################################
 
##
## Run Options
##
 
# Run suricata as user and group.
# 以用户和组的身份运行suricata。
#run-as:
#  user: suri
#  group: suri
 
# Some logging module will use that name in event as identifier. The default
# value is the hostname
# 某些日志记录模块将在事件中使用该名称作为标识符。 默认值是主机名
#sensor-name: suricata
 
# Default location of the pid file. The pid file is only used in
# daemon mode (start Suricata with -D). If not running in daemon mode
# the --pidfile command line option must be used to create a pid file.
#pid文件的默认位置。 pid文件仅用于守护进程模式(使用-D启动Suricata)。 
#如果未在守护程序模式下运行,则必须使用--pidfile命令行选项来创建pid文件。
#pid-file: /var/run/suricata.pid
 
# 守护进程工作目录 Daemon working directory
# Suricata will change directory to this one if provided
# 如果提供,Suricata会将目录更改为此目录
# Default: "/"
#daemon-directory: "/"
 
# Umask.
# Suricata will use this umask if it is provided. By default it will use the
# umask passed on by the shell.
#如果提供,Suricata将使用此umask。 默认情况下,它将使用shell传递的umask。
#umask: 022
 
# Suricata core dump configuration. Limits the size of the core dump file to
# approximately max-dump. The actual core dump size will be a multiple of the
# page size. Core dumps that would be larger than max-dump are truncated. On
# Linux, the actual core dump size may be a few pages larger than max-dump.
#Suricata核心转储配置。 将核心转储文件的大小限制为大约最大转储。 
#实际核心转储大小将是页面大小的倍数。 将截断大于max-dump的核心转储。 
#在Linux上,实际核心转储大小可能比max-dump大几页。
# Setting max-dump to 0 disables core dumping.
# 将max-dump设置为0将禁用核心转储。
# Setting max-dump to 'unlimited' will give the full core dump file.
# 将max-dump设置为'unlimited'将提供完整的核心转储文件。
# On 32-bit Linux, a max-dump value >= ULONG_MAX may cause the core dump size
# to be 'unlimited'.
#在32位Linux上,max-dump 值 >= ULONG_MAX 可能会导致核心转储大小为“无限制”。
 
coredump:
  max-dump: unlimited
 
# If Suricata box is a router for the sniffed networks, set it to 'router'. If
# it is a pure sniffing setup, set it to 'sniffer-only'.
#如果Suricata框是嗅探网络的路由器,请将其设置为“路由器”。 如果它是纯粹的嗅探设置,请将其设置为“仅限嗅探器”。
# If set to auto, the variable is internally switch to 'router' in IPS mode
# and 'sniffer-only' in IDS mode.
#如果设置为auto,则变量在IPS模式下内部切换到'router',在IDS模式下切换为'sniffer-only'。
# This feature is currently only used by the reject* keywords.
# 此功能目前仅由reject *关键字使用。
host-mode: auto
 
# Number of packets preallocated per thread. The default is 1024. A higher number 
# will make sure each CPU will be more easily kept busy, but may negatively 
# impact caching.
#每个线程预分配的数据包数。 默认值为1024.较高的数字将确保每个CPU更容易保持忙碌,但可能会对缓存产生负面影响。
#max-pending-packets: 1024
 
# Runmode the engine should use. Please check --list-runmodes to get the available
# runmodes for each packet acquisition method. Defaults to "autofp" (auto flow pinned
# load balancing).
#指定引擎应使用的运行模式(runmode)。请检查 --list-runmodes 以获取每种数据包捕获方法可用的运行模式。默认为“autofp”(自动流固定负载均衡)。
#runmode: autofp
 
# Specifies the kind of flow load balancer used by the flow pinned autofp mode.
#指定 autofp(流固定)模式使用的流负载均衡器类型。
# Supported schedulers are:
#支持的调度程序是:
# round-robin       - 以循环方式分配给线程的流。 Flows assigned to threads in a round robin fashion.
# active-packets    - Flows assigned to threads that have the lowest number of unprocessed packets (default).
#                 分配给具有最少未处理数据包数的线程的流(默认)。
# hash              - Flow allocated using the address hash. More of a random
#                     technique. Was the default in Suricata 1.2.1 and older.
# 使用地址哈希分配的流。 更多随机技术。 是Suricata 1.2.1及更早版本的默认设置。
#autofp-scheduler: active-packets
 
# Preallocated size for packet. Default is 1514 which is the classical
# size for pcap on ethernet. You should adjust this value to the highest
# packet size (MTU + hardware header) on your system.
#数据包的预分配大小。 默认值为1514,这是以太网上pcap的经典大小。 
#您应该将此值调整为系统上的最大数据包大小(MTU +硬件标头)。
#default-packet-size: 1514
 
# Unix command socket can be used to pass commands to Suricata.
# An external tool can then connect to get information from Suricata
# or trigger some modifications of the engine. Set enabled to yes
# to activate the feature. In auto mode, the feature will only be
# activated in live capture mode. You can use the filename variable to set
# the file name of the socket.
#Unix命令套接字可用于将命令传递给Suricata。
#然后可以连接外部工具以从Suricata获取信息或触发引擎的某些修改。 
#将 enabled 设置为 yes 以激活该功能。在 auto 模式下,该功能仅在实时捕获模式下激活。
#您可以使用filename变量来设置套接字的文件名。
unix-command:
  enabled: auto
  #filename: custom.socket
 
# Magic file. The extension .mgc is added to the value here.
#魔术文件。 扩展名.mgc将添加到此处的值中。
#magic-file: /usr/share/file/magic
#magic-file: 
 
legacy:
  uricontent: enabled
 
##
## Detection settings
##检测设置
 
# Set the order of alerts based on actions
# The default order is pass, drop, reject, alert
#根据操作设置警报的顺序
#默认顺序是pass,drop,reject,alert
# action-order:
#   - pass
#   - drop
#   - reject
#   - alert
 
# IP Reputation
#reputation-categories-file: /etc/suricata/iprep/categories.txt    #信誉类别文件 
#default-reputation-path: /etc/suricata/iprep     #默认信誉路径
#reputation-files:
# - reputation.list
 
# When run with the option --engine-analysis, the engine will read each of
# the parameters below, and print reports for each of the enabled sections
# and exit.  The reports are printed to a file in the default log dir
# given by the parameter "default-log-dir", with engine reporting
# subsection below printing reports in its own report file.
#当使用选项--engine-analysis运行时,引擎将读取下面的每个参数,并打印每个已启用部分的报告并退出。 
#报告打印到参数“default-log-dir”给出的默认日志目录中的文件,下面的引擎报告子部分在其自己的报告文件中打印报告。
engine-analysis:
  # enables printing reports for fast-pattern for every rule.
  #启用为每条规则打印 fast-pattern 报告。
  rules-fast-pattern: yes
  # enables printing reports for each rule
  # 为每个规则启用打印报告
  rules: yes
 
#recursion and match limits for PCRE where supported
#支持的PCRE的递归和匹配限制
pcre:
  match-limit: 3500
  match-limit-recursion: 1500
 
##
## Advanced Traffic Tracking and Reconstruction Settings
##高级流量跟踪和重建设置
 
# Host specific policies for defragmentation and TCP stream
# reassembly. The host OS lookup is done using a radix tree, just
# like a routing table so the most specific entry matches.
#用于碎片整理和TCP流重组的主机特定策略。 
#主机OS查找使用基数树完成,就像路由表一样,因此最具体的条目匹配。
host-os-policy:
  # Make the default policy windows.
  #将默认策略设置为 windows。
  windows: [0.0.0.0/0]
  bsd: []
  bsd-right: []
  old-linux: []
  linux: []
  old-solaris: []
  solaris: []
  hpux10: []
  hpux11: []
  irix: []
  macos: []
  vista: []
  windows2k3: []
 
# Defrag settings:
 
defrag:
  memcap: 32mb
  hash-size: 65536
  trackers: 65535 # 要遵循的碎片整理流的数量 number of defragmented flows to follow
  max-frags: 65535 # 要保留的碎片数量(高于跟踪器) number of fragments to keep (higher than trackers)
  prealloc: yes
  timeout: 60
 
# 为每个主机设置启用碎片整理  Enable defrag per host settings
#  host-config:
#
#    - dmz:
#        timeout: 30
#        address: [192.168.1.0/24, 127.0.0.0/8, 1.1.1.0/24, 2.2.2.0/24, "1.1.1.1", "2.2.2.2", "::1"]
#
#    - lan:
#        timeout: 45
#        address:
#          - 192.168.0.0/24
#          - 192.168.10.0/24
#          - 172.16.14.0/24
 
# 流量设置 Flow settings:
# By default, the reserved memory (memcap) for flows is 32MB. This is the limit
# for flow allocation inside the engine. You can change this value to allow
# more memory usage for flows.
#默认情况下,流的保留内存(memcap)为32MB。 这是引擎内部流量分配的限制。 
#您可以更改此值以允许更多内存使用流量。
# The hash-size determine the size of the hash used to identify flows inside
# the engine, and by default the value is 65536.
#散列大小确定用于标识引擎内部流的散列的大小,默认情况下,该值为65536。
# At the startup, the engine can preallocate a number of flows, to get a better
# performance. The number of flows preallocated is 10000 by default.
#在启动时,引擎可以预先分配多个流,以获得更好的性能。 默认情况下,预分配的流数为10000。
# emergency-recovery is the percentage of flows that the engine need to
# prune before unsetting the emergency state. The emergency state is activated
# when the memcap limit is reached, allowing to create new flows, but
# pruning them with the emergency timeouts (they are defined below).
#紧急恢复是在取消紧急状态之前发动机需要修剪的流量的百分比。 
#当达到memcap限制时,紧急状态被激活,允许创建新流,但是使用紧急超时修剪它们(它们在下面定义)。
# If the memcap is reached, the engine will try to prune flows
# with the default timeouts. If it doesn't find a flow to prune, it will set
# the emergency bit and it will try again with more aggressive timeouts.
# If that doesn't work, then it will try to kill the last time seen flows
# not in use.
#如果到达memcap,引擎将尝试使用默认超时修剪流。 如果它没有找到修剪流程,它将设置紧急位,它将再次尝试更积极的超时。
#如果这不起作用,那么它将尝试杀死上次看到的未使用的流。
# The memcap can be specified in kb, mb, gb.  Just a number indicates it's
# in bytes.
#memcap可以用kb,mb,gb指定。 只是一个数字表示它是以字节为单位。
 
flow:
  memcap: 128mb
  hash-size: 65536
  prealloc: 10000
  emergency-recovery: 30
  #managers: 1 # 默认为一个流管理器 default to one flow manager
  #recyclers: 1 # 默认为一个流循环器线程  default to one flow recycler thread
 
# This option controls the use of vlan ids in the flow (and defrag)
# hashing. Normally this should be enabled, but in some (broken)
# setups where both sides of a flow are not tagged with the same vlan
# tag, we can ignore the vlan id's in the flow hashing.
#此选项控制在流(和碎片整理)散列中使用vlan ID。 
#通常应该启用此功能,但在某些(损坏的)设置中,
#流的两端都没有使用相同的vlan标记进行标记,我们可以忽略流哈希中的vlan id。
vlan:
  use-for-tracking: true
 
# Specific timeouts for flows. Here you can specify the timeouts that the
# active flows will wait to transit from the current state to another, on each
# protocol. 
#流量的具体超时。 在这里,您可以指定活动流在每个协议上等待从当前状态转换到另一个状态的超时。
#The value of "new" determine the seconds to wait after a handshake or
# stream startup before the engine free the data of that flow it doesn't
# change the state to established (usually if we don't receive more packets
# of that flow). 
#“new”的值确定在引擎释放该流的数据之前握手或流启动之后等待的秒数,
#它不会将状态更改为已建立(通常如果我们没有收到该流的更多数据包)。
#The value of "established" is the amount of
# seconds that the engine will wait to free the flow if it spend that amount
# without receiving new packets or closing the connection. 
#“已建立”的值是引擎在没有接收新数据包或关闭连接的情况下花费该数量时等待释放流量的秒数。
#"closed" is the amount of time to wait after a flow is closed (usually zero). "bypassed"
# timeout controls locally bypassed flows. For these flows we don't do any other
# tracking. If no packets have been seen after this timeout, the flow is discarded.
#“关闭”是流量关闭后等待的时间(通常为零)。 “绕过”超时控制本地绕过的流量。 
#对于这些流程,我们不进行任何其他跟踪。 如果在此超时后未看到任何数据包,则丢弃该流。
# There's an emergency mode that will become active under attack circumstances,
# making the engine to check flow status faster. This configuration variables
# use the prefix "emergency-" and work similar as the normal ones.
#紧急模式将在攻击情况下变为活动状态,使引擎更快地检查流动状态。 
#此配置变量使用前缀“emergency-”,其工作方式与普通类似。
# Some timeouts doesn't apply to all the protocols, like "closed", for udp and
# icmp.
#对于udp和icmp,某些超时不适用于所有协议,如“已关闭”。
 
flow-timeouts:
 
  default:
    new: 30
    established: 300
    closed: 0
    bypassed: 100
    emergency-new: 10
    emergency-established: 100
    emergency-closed: 0
    emergency-bypassed: 50
  tcp:
    new: 60
    established: 600
    closed: 60
    bypassed: 100
    emergency-new: 5
    emergency-established: 100
    emergency-closed: 10
    emergency-bypassed: 50
  udp:
    new: 30
    established: 300
    bypassed: 100
    emergency-new: 10
    emergency-established: 100
    emergency-bypassed: 50
  icmp:
    new: 30
    established: 300
    bypassed: 100
    emergency-new: 10
    emergency-established: 100
    emergency-bypassed: 50
 
# Stream engine settings. Here the TCP stream tracking and reassembly
# engine is configured.
# 流引擎设置。 这里配置了TCP流跟踪和重组引擎。
# stream:    #流
#   memcap: 32mb                # Can be specified in kb, mb, gb.  Just a
#                               # number indicates it's in bytes.
                      #可以用kb,mb,gb指定。 只是一个数字表示它是以字节为单位。
#   checksum-validation: yes    # To validate the checksum of received
#                               # packet. If csum validation is specified as
#                               # "yes", then packet with invalid csum will not
#                               # be processed by the engine stream/app layer.
#验证收到的数据包的校验和。 如果将csum验证指定为“yes”,则引擎流/应用层将不会处理具有无效csum的数据包。
#                               # Warning: locally generated traffic can be
#                               # generated without checksum due to hardware offload
#                               # of checksum. You can control the handling of checksum
#                               # on a per-interface basis via the 'checksum-checks'
               #警告:由于校验和的硬件卸载,可以在没有校验和的情况下生成本地生成的流量。 
               #您可以通过“校验和检查”来控制每个接口的校验和处理
#                               # option
#   prealloc-sessions: 2k       # 每个流线程预分配2k个会话    2k sessions prealloc'd per stream thread
#   midstream: false            # 不允许中途会话接听    don't allow midstream session pickups
#   async-oneside: false        # 不要启用异步流处理    don't enable async stream handling
#   inline: no                  # 流内联模式    stream inline mode
#   drop-invalid: yes           # 在内联模式下,丢弃与流引擎无关的数据包  in inline mode, drop packets that are invalid with regards to streaming engine
#   max-synack-queued: 5        # 最大不同的SYN / ACK到队列   Max different SYN/ACKs to queue
#   bypass: no                  # 到达stream.depth时绕过数据包   Bypass packets when stream.depth is reached
#
#   reassembly:    #重组
#     memcap: 64mb              # Can be specified in kb, mb, gb.  Just a number
#                               # indicates it's in bytes.
                  #可以用kb,mb,gb指定。 只是一个数字表示它是以字节为单位。
#     depth: 1mb                # Can be specified in kb, mb, gb.  Just a number
#                               # indicates it's in bytes.
#     toserver-chunk-size: 2560 # inspect raw stream in chunks of at least
#                               # this size.  Can be specified in kb, mb,
#                               # gb.  Just a number indicates it's in bytes.
#检查至少这个大小的块的原始流。 可以用kb,mb,gb指定。 只是一个数字表示它是以字节为单位。
#     toclient-chunk-size: 2560 # inspect raw stream in chunks of at least
#                               # this size.  Can be specified in kb, mb,
#                               # gb.  Just a number indicates it's in bytes.
#     randomize-chunk-size: yes # Take a random value for chunk size around the specified value.
#                               # This lower the risk of some evasion technics but could lead
#                               # detection change between runs. It is set to 'yes' by default.
#获取指定值周围的块大小的随机值。 这降低了一些逃避技术的风险,但可能导致运行之间的检测变化。 默认设置为“是”。
#     randomize-chunk-range: 10 # If randomize-chunk-size is active, the value of chunk-size is
#                               # a random value between (1 - randomize-chunk-range/100)*toserver-chunk-size
#                               # and (1 + randomize-chunk-range/100)*toserver-chunk-size and the same
#                               # calculation for toclient-chunk-size.
#如果randomize-chunk-size是活动的,则chunk-size的值是(1  -  randomize-chunk-range / 100)* toserver-chunk-size和(1 + randomize-chunk-range / 100)之间的随机值* toserver-chunk-size和toclient-chunk-size的相同计算。
#                               # randomize-chunk-range的默认值为10。  Default value of randomize-chunk-range is 10.
#
#     raw: yes                  # 'Raw' reassembly enabled or disabled.
#                               # raw is for content inspection by detection
#                               # engine.
#                        #启用或禁用“原始”重组。 raw用于检测引擎的内容检查。
#
#     segment-prealloc: 2048    # 每个线程预分配的段数 number of segments preallocated per thread
#
#     check-overlap-different-data: true|false
#                               # check if a segment contains different data
#                               # than what we've already seen for that
#                               # position in the stream.
#                     #检查一个段是否包含与我们在流中已经看到的位置不同的数据。
#                               # This is enabled automatically if inline mode
#                               # is used or when stream-event:reassembly_overlap_different_data;
#                               # is used in a rule.
#      如果使用内联模式或stream-event,则会自动启用此选项:reassembly_overlap_different_data; 在规则中使用。
#
stream:
  memcap: 64mb
  checksum-validation: yes      # 拒绝错误的csums  reject wrong csums
  inline: auto                  # auto将在IPS模式下使用内联模式,是或否静态设置  auto will use inline mode in IPS mode, yes or no set it statically
  reassembly:
    memcap: 256mb
    depth: 1mb                  # 将1mb重新组装成流  reassemble 1mb into a stream
    toserver-chunk-size: 2560
    toclient-chunk-size: 2560
    randomize-chunk-size: yes
    #randomize-chunk-range: 10
    #raw: yes
    #segment-prealloc: 2048
    #check-overlap-different-data: true
 
# Host table:
#
# 主机表由标记和每个主机阈值子系统使用。  Host table is used by tagging and per host thresholding subsystems.
#
host:
  hash-size: 4096
  prealloc: 1000
  memcap: 32mb
 
# IP对表: IP Pair table:
#
# Used by xbits 'ippair' tracking.
# 由xbits的'ippair'跟踪使用。
#ippair:
#  hash-size: 4096
#  prealloc: 1000
#  memcap: 32mb
 
# Decoder settings  #Decoder settings
 
decoder:
  # Teredo decoder is known to not be completely accurate
  # it will sometimes detect non-teredo as teredo.
#已知Teredo解码器不完全准确,有时会将非teredo检测为teredo。
  teredo:
    enabled: true
 
 
##
## Performance tuning and profiling
## 性能调整和分析
 
# The detection engine builds internal groups of signatures. The engine
# allow us to specify the profile to use for them, to manage memory on an
# efficient way keeping a good performance. For the profile keyword you
# can use the words "low", "medium", "high" or "custom". If you use custom
# make sure to define the values at "- custom-values" as your convenience.
# Usually you would prefer medium/high/low.
#检测引擎构建内部签名组。 引擎允许我们指定用于它们的配置文件,以有效的方式管理内存,保持良好的性能。 
#对于 profile 关键字,您可以使用“low”、“medium”、“high”或“custom”。
#如果您使用 custom,请确保在“custom-values”中定义相应的值。通常建议使用 medium/high/low。
# "sgh mpm-context", indicates how the staging should allot mpm contexts for
# the signature groups.  "single" indicates the use of a single context for
# all the signature group heads.  "full" indicates a mpm-context for each
# group head.  "auto" lets the engine decide the distribution of contexts
# based on the information the engine gathers on the patterns from each
# group head.
#“sgh mpm-context”表示分段应如何为签名组分配mpm上下文。 
#“single”表示对所有签名组头使用单个上下文。 “full”表示每个组头的mpm-context。 
#“auto”允许引擎根据引擎收集来自每个组头的模式的信息来决定上下文的分布。
# The option inspection-recursion-limit is used to limit the recursive calls
# in the content inspection code.  For certain payload-sig combinations, we
# might end up taking too much time in the content inspection code.
# If the argument specified is 0, the engine uses an internally defined
# default limit.  On not specifying a value, we use no limits on the recursion.
#选项inspection-recursion-limit用于限制内容检查代码中的递归调用。 
#对于某些有效负载sig组合,我们最终可能会在内容检查代码中花费太多时间。 
#如果指定的参数为0,则引擎使用内部定义的默认限制。 在不指定值的情况下,我们对递归使用没有限制。
detect:
  profile: medium
  custom-values:
    toclient-groups: 3
    toserver-groups: 25
  sgh-mpm-context: auto
  inspection-recursion-limit: 3000
  # If set to yes, the loading of signatures will be made after the capture
  # is started. This will limit the downtime in IPS mode.
#如果设置为yes,将在捕获开始后加载签名。 这将限制IPS模式下的停机时间。
  #delayed-detect: yes
 
  prefilter:
    # default prefiltering setting. "mpm" only creates MPM/fast_pattern
    # engines. "auto" also sets up prefilter engines for other keywords.
    # Use --list-keywords=all to see which keywords support prefiltering.
##默认预过滤设置。 “mpm”仅创建MPM / fast_pattern引擎。 
##“auto”还为其他关键字设置了前置过滤引擎。 使用--list-keywords = all可以查看哪些关键字支持预过滤。
    default: mpm
 
  # the grouping values above control how many groups are created per
  # direction. Port whitelisting forces that port to get it's own group.
  # Very common ports will benefit, as well as ports with many expensive
  # rules.
##上面的分组值控制每个方向创建的组数。 端口白名单强制端口获取它自己的组。 
##非常常见的端口将受益,以及具有许多昂贵规则的端口。
  grouping:
    #tcp-whitelist: 53, 80, 139, 443, 445, 1433, 3306, 3389, 6666, 6667, 8080
    #udp-whitelist: 53, 135, 5060
 
  profiling:
    # Log the rules that made it past the prefilter stage, per packet
    # default is off. The threshold setting determines how many rules
    # must have made it past pre-filter for that rule to trigger the
    # logging.
##记录使其超过预过滤阶段的规则,每个数据包默认为关闭。 
##阈值设置确定必须使多少规则使其超过该规则的预过滤器才能触发日志记录。
    #inspect-logging-threshold: 200
    grouping:
      dump-to-disk: false
      include-rules: false      # very verbose
      include-mpm-stats: false
 
# Select the multi pattern algorithm you want to run for scan/search the
# in the engine.
#选择要在引擎中扫描/搜索的多模式算法。
# The supported algorithms are:   #支持的算法是:
# "ac"      - Aho-Corasick, default implementation    #默认实现
# "ac-bs"   - Aho-Corasick, reduced memory implementation    #减少内存实现
# "ac-ks"   - Aho-Corasick, "Ken Steele" variant   #“肯斯蒂尔”变种
# "hs"      - Hyperscan, available when built with Hyperscan support   #在使用Hyperscan支持构建时可用
#
# The default mpm-algo value of "auto" will use "hs" if Hyperscan is
# available, "ac" otherwise.
#如果Hyperscan可用,则“auto”的默认mpm-algo值将使用“hs”,否则将使用“ac”。
#
# The mpm you choose also decides the distribution of mpm contexts for
# signature groups, specified by the conf - "detect.sgh-mpm-context".
#您选择的mpm还决定了由conf  - “detect.sgh-mpm-context”指定的签名组的mpm上下文的分布。
# Selecting "ac" as the mpm would require "detect.sgh-mpm-context"
# to be set to "single", because of ac's memory requirements, unless the
# ruleset is small enough to fit in one's memory, in which case one can
# use "full" with "ac".  Rest of the mpms can be run in "full" mode.
#选择“ac”作为mpm需要将“detect.sgh-mpm-context”设置为“single”,因为ac的内存要求,
##除非规则集足够小、可以完全放入内存,在这种情况下可以对“ac”使用“full”。
##其余的 mpm 算法均可在“full”模式下运行。
#
 
mpm-algo: auto
 
# Select the matching algorithm you want to use for single-pattern searches.
#选择要用于单模式搜索的匹配算法。
#
# Supported algorithms are "bm" (Boyer-Moore) and "hs" (Hyperscan, only
# available if Suricata has been built with Hyperscan support).
#支持的算法是“bm”(Boyer-Moore)和“hs”(Hyperscan,只有在使用Hyperscan支持构建Suricata时才可用)。
#
# The default of "auto" will use "hs" if available, otherwise "bm".
#如果可用,默认值“auto”将使用“hs”,否则使用“bm”。
 
spm-algo: auto
 
# Suricata is multi-threaded. Here the threading can be influenced.
#Suricata是多线程的。 这里可以影响线程。
threading:
  set-cpu-affinity: no
  # Tune cpu affinity of threads. Each family of threads can be bound
  # on specific CPUs.
# 调整线程的cpu亲和性。 每个线程族都可以绑定在特定的CPU上。
  #
  # These 2 apply to the all runmodes:    #这两个适用于所有runmodes
  # management-cpu-set is used for flow timeout handling, counters
  # worker-cpu-set is used for 'worker' threads
#management-cpu-set用于流超时处理,计数器worker-cpu-set用于'worker'线程
  #
  # Additionally, for autofp these apply:   ##此外,对于autofp这些适用
  # receive-cpu-set用于捕获线程    receive-cpu-set is used for capture threads
  # verdict-cpu-set用于IPS判定线程   verdict-cpu-set is used for IPS verdict threads
  #
  cpu-affinity:
    - management-cpu-set:
        cpu: [ 0 ]  # 仅在亲和性设置中包含这些CPU    include only these CPUs in affinity settings
    - receive-cpu-set:
        cpu: [ 0 ]  #仅在亲和性设置中包含这些CPU     include only these CPUs in affinity settings
    - worker-cpu-set:
        cpu: [ "all" ]
        mode: "exclusive"
        # 明确使用 3 个线程,而不是通过 detect-thread-ratio 变量计算线程数:
        # Use explicitly 3 threads and don't compute number by using
        # the detect-thread-ratio variable:
        # threads: 3
        prio:
          low: [ 0 ]
          medium: [ "1-2" ]
          high: [ 3 ]
          default: "medium"
    #- verdict-cpu-set:
    #    cpu: [ 0 ]
    #    prio:
    #      default: "high"
  #
  # By default Suricata creates one "detect" thread per available CPU/CPU core.
  # This setting allows controlling this behaviour. A ratio setting of 2 will
  # create 2 detect threads for each CPU/CPU core. So for a dual core CPU this
  # will result in 4 detect threads. If values below 1 are used, less threads
  # are created. So on a dual core CPU a setting of 0.5 results in 1 detect
  # thread being created. Regardless of the setting at a minimum 1 detect
  # thread will always be created.
#默认情况下,Suricata为每个可用的CPU / CPU核心创建一个“检测”线程。
#此设置允许控制此行为。 比率设置为2将为每个CPU / CPU核心创建2个检测线程。 
#因此对于双核CPU,这将导致4个检测线程。 如果使用低于1的值,则会创建更少的线程。 
#因此,在双核CPU上,设置为0.5会导致只创建1个检测线程。无论如何设置,始终至少会创建1个检测线程。
  #
  detect-thread-ratio: 1.0
 
# Luajit has a strange memory requirement, it's 'states' need to be in the
# first 2G of the process' memory.
#Luajit有一个奇怪的内存要求,它的“状态”需要位于进程内存的前2G内。
#
# 'luajit.states' is used to control how many states are preallocated.
# State use: per detect script: 1 per detect thread. Per output script: 1 per
# script.
#'luajit.states'用于控制预分配的状态数。 
#状态使用:每个检测脚本:每个检测线程1个。 每个输出脚本:每个脚本1个。
luajit:
  states: 128
 
# Profiling settings. Only effective if Suricata has been built with the
# the --enable-profiling configure flag.
#分析设置。 只有在使用--enable-profiling配置标志构建Suricata时才有效。
#
profiling:
  # Run profiling for every xth packet. The default is 1, which means we
  # profile every packet. If set to 1000, one packet is profiled for every
  # 1000 received.
#对每个第x个数据包运行分析。 默认值为1,这意味着我们会分析每个数据包。 
#如果设置为1000,则每接收1000个数据包就会分析一个数据包。
  #sample-rate: 1000
 
  # rule profiling
  rules:
 
    # Profiling can be disabled here, but it will still have a
    # performance impact if compiled in.
#可以在此处禁用分析,但如果编译,它仍将对性能产生影响。
    enabled: yes
    filename: rule_perf.log
    append: yes
 
    # Sort options: ticks, avgticks, checks, matches, maxticks
    # If commented out all the sort options will be used.
#排序选项:ticks、avgticks、checks、matches、maxticks。如果注释掉,将使用所有排序选项。
    #sort: avgticks
 
    # Limit the number of sids for which stats are shown at exit (per sort).
##限制退出时显示统计数据的sids数量(每种)。
    limit: 10
 
    # output to json
    json: yes
 
  # 每个关键字分析  per keyword profiling
  keywords:
    enabled: yes
    filename: keyword_perf.log
    append: yes
 
  prefilter:
    enabled: yes
    filename: prefilter_perf.log
    append: yes
 
  # 每个规则组分析    per rulegroup profiling
  rulegroups:
    enabled: yes
    filename: rule_group_perf.log
    append: yes
 
  # 包分析  packet profiling
  packets:
 
    # Profiling can be disabled here, but it will still have a
    # performance impact if compiled in.
##可以在此处禁用分析,但如果编译,它仍将对性能产生影响。
    enabled: yes
    filename: packet_stats.log
    append: yes
 
    # per packet csv output
##每个包csv输出
    csv:
 
      # Output can be disabled here, but it will still have a
      # performance impact if compiled in.
  ##此处可以禁用输出,但如果编译,它仍将对性能产生影响。
      enabled: no
      filename: packet_stats.csv
 
  # profiling of locking. Only available when Suricata was built with
  # --enable-profiling-locks.
##锁定的分析。 只有当Suricata使用--enable-profiling-locks构建时才可用。
  locks:
    enabled: no
    filename: lock_stats.log
    append: yes
 
  pcap-log:
    enabled: no
    filename: pcaplog_stats.log
    append: yes
 
##
## Netfilter integration
## Netfilter集成
 
# When running in NFQ inline mode, it is possible to use a simulated
# non-terminal NFQUEUE verdict.
#在NFQ内联模式下运行时,可以使用模拟的非终端NFQUEUE判定。
# This permit to do send all needed packet to Suricata via this a rule:
# 这允许通过如下规则将所有需要的数据包发送到 Suricata:
#        iptables -I FORWARD -m mark ! --mark $MARK/$MASK -j NFQUEUE
# And below, you can have your standard filtering ruleset. To activate
# this mode, you need to set mode to 'repeat'
# 在下面,您可以拥有标准过滤规则集。 要激活此模式,您需要将模式设置为“重复”
# If you want packet to be sent to another queue after an ACCEPT decision
# set mode to 'route' and set next-queue value.
# 如果希望数据包在 ACCEPT 判定之后被发送到另一个队列,请将模式设置为“route”并设置 next-queue 的值。
# On linux >= 3.1, you can set batchcount to a value > 1 to improve performance
# by processing several packets before sending a verdict (worker runmode only).
# On linux >= 3.6, you can set the fail-open option to yes to have the kernel
# accept the packet if Suricata is not able to keep pace.
# 在linux> = 3.1上,您可以将batchcount设置为> 1,以便在发送判定(仅限worker worker)之前处理多个数据包以提高性能。 
# 在linux> = 3.6时,您可以将fail-open选项设置为yes,以使内核在Suricata无法跟上时接受数据包。
# bypass mark and mask can be used to implement NFQ bypass. If bypass mark is
# set then the NFQ bypass is activated. Suricata will set the bypass mark/mask
# on packet of a flow that need to be bypassed. The Nefilter ruleset has to
# directly accept all packets of a flow once a packet has been marked.
#旁路标记和掩码可用于实现NFQ旁路。 如果设置了旁路标记,则激活NFQ旁路。 
#Suricata将在需要绕过的流的数据包上设置旁路标记/掩码。
# 一旦数据包被标记,Nefilter规则集必须直接接受流的所有数据包。
nfq:
#  mode: accept
#  repeat-mark: 1
#  repeat-mask: 1
#  bypass-mark: 1
#  bypass-mask: 1
#  route-queue: 2
#  batchcount: 20
#  fail-open: yes
 
#nflog support
nflog:
    # netlink multicast group
    # (与iptables --nflog-group param相同) (the same as the iptables --nflog-group param)
    # 内核使用组0,因此您无法使用它 Group 0 is used by the kernel, so you can't use it
  - group: 2
    # netlink缓冲区大小 netlink buffer size
    buffer-size: 18432
    # 把默认值放在这里 put default value here
  - group: default
    # 设置内核中队列的数量 set number of packet to queue inside kernel
    qthreshold: 1
    # 在内核队列中刷新数据包之前设置延迟 set the delay before flushing packet in the queue inside kernel
    qtimeout: 100
    # netlink最大缓冲区大小 netlink max buffer size
    max-size: 20000
 
##
## Advanced Capture Options
## 高级捕获选项
 
# general settings affecting packet capture
# 影响数据包捕获的常规设置
capture:
  # disable NIC offloading. It's restored when Suricata exits.
  #禁用NIC卸载。 当Suricata退出时它会恢复。 默认启用。
  # Enabled by default.
  #disable-offloading: false
  #
  # disable checksum validation. Same as setting '-k none' on the commandline.
  # 禁用校验和验证。 与在命令行上设置'-k none'相同。 
  #checksum-validation: none
 
# Netmap support
#Netmap支持
# Netmap operates with NIC directly in driver, so you need FreeBSD which have
# built-in netmap support or compile and install netmap module and appropriate
# NIC driver on your Linux system.
# Netmap直接在驱动程序中使用NIC,因此您需要具有内置netmap支持的FreeBSD,或者在Linux系统上编译和安装netmap模块以及相应的NIC驱动程序。
# To reach maximum throughput disable all receive-, segmentation-,
# checksum- offloadings on NIC.
# 要达到最大吞吐量,请禁用NIC上的所有接收,分段和校验和卸载。
# Disabling Tx checksum offloading is *required* for connecting OS endpoint
# with NIC endpoint.
# 对于连接OS端点和NIC端点,*需要禁用Tx校验和卸载*。
# You can find more information at https://github.com/luigirizzo/netmap
#
netmap:
   # To specify OS endpoint add plus sign at the end (e.g. "eth0+")
   # 指定OS端点在末尾添加加号(例如“eth0 +”)
 - interface: eth2
   # Number of receive threads. "auto" uses number of RSS queues on interface.
   # 接收线程数。 “auto”使用接口上的RSS队列数。
   #threads: auto
   # You can use the following variables to activate netmap tap or IPS mode.
   # If copy-mode is set to ips or tap, the traffic coming to the current
   # interface will be copied to the copy-iface interface. If 'tap' is set, the
   # copy is complete. If 'ips' is set, the packet matching a 'drop' action
   # will not be copied.
   #您可以使用以下变量来激活netmap tap或IPS模式。 
   #如果copy-mode设置为ips或tap,则进入当前接口的流量将被复制到copy-iface界面。
   # 如果设置为“tap”,则进行完整复制。如果设置为“ips”,则不会复制与“drop”操作匹配的数据包。
   # To specify the OS as the copy-iface (so the OS can route packets, or forward
   # to a service running on the same machine) add a plus sign at the end
   # (e.g. "copy-iface: eth0+"). Don't forget to set up a symmetrical eth0+ -> eth0
   # for return packets. Hardware checksumming must be *off* on the interface if
   # using an OS endpoint (e.g. 'ifconfig eth0 -rxcsum -txcsum -rxcsum6 -txcsum6' for FreeBSD
   # or 'ethtool -K eth0 tx off rx off' for Linux).
   #要将操作系统指定为copy-iface(因此操作系统可以路由数据包,或转发到在同一台机器上运行的服务),
   #最后添加一个加号(例如“copy-iface:eth0 +”)。 不要忘记为返回数据包设置对称的eth0 +  - > eth0。
   # 如果使用OS端点,则接口上的硬件校验和必须为* off *
   #(例如,对于FreeBSD为'ifconfig eth0 -rxcsum -txcsum -rxcsum6 -txcsum6'或对于Linux为'ethtool -K eth0 tx off rx off')。
   #copy-mode: tap
   #copy-iface: eth3
   # 设置为yes以禁用混杂模式 Set to yes to disable promiscuous mode
   # disable-promisc: no
   # Choose checksum verification mode for the interface. At the moment
   # of the capture, some packets may be with an invalid checksum due to
   # offloading to the network card of the checksum computation.
   # 选择接口的校验和验证模式。 在捕获时,由于校验和计算的网卡卸载,一些分组可能具有无效的校验和。
   #  可选的值是 Possible values are:
   #  - yes: checksum validation is forced
   #  - no: checksum validation is disabled
   #  - auto:  Suricata使用统计方法来检测何时 Suricata uses a statistical approach to detect when
   #  校验和卸载使用。 checksum off-loading is used.
   # Warning: 'checksum-validation' must be set to yes to have any validation
   # 警告:'checksum-validation'必须设置为yes才能进行任何验证
   #checksum-checks: auto
   # BPF filter to apply to this interface. The pcap filter syntax apply here.
   # BPF过滤器适用于此接口。 pcap过滤器语法适用于此处。
   #bpf-filter: port 80 or udp
 #- interface: eth3
   #threads: auto
   #copy-mode: tap
   #copy-iface: eth2
   # Put default values here
 - interface: default
 
# PF_RING configuration. for use with native PF_RING support
# PF_RING配置。 用于本机PF_RING支持
# for more info see http://www.ntop.org/products/pf_ring/
pfring:
  - interface: eth0
    # Number of receive threads. If set to 'auto' Suricata will first try
    # to use CPU (core) count and otherwise RSS queue count.
    # 接收线程数。 如果设置为'auto',Suricata将首先尝试使用CPU(核心)计数以及RSS队列计数。
    threads: auto
 
    # Default clusterid.  PF_RING will load balance packets based on flow.
    # 默认的clusterid。 PF_RING将根据流量对数据包进行负载平衡。
    # All threads/processes that will participate need to have the same clusterid.
    # 将参与的所有线程/进程需要具有相同的clusterid。 
    cluster-id: 99
 
    # Default PF_RING cluster type. PF_RING can load balance per flow.
    # 默认PF_RING群集类型。 PF_RING可以按流量进行负载平衡。
    # Possible values are cluster_flow or cluster_round_robin.
    # 可能的值是cluster_flow或cluster_round_robin。
    cluster-type: cluster_flow
 
    # bpf filter for this interface
    # bpf过滤器用于此接口
    #bpf-filter: tcp
 
    # If bypass is set then the PF_RING hw bypass is activated, when supported
    # by the interface in use. Suricata will instruct the interface to bypass
    # all future packets for a flow that need to be bypassed.
    # 如果设置了旁路,则在使用的接口支持时激活PF_RING hw旁路。 Suricata将指示接口绕过所有未来的数据包以获取需要绕过的流。
    #bypass: yes
 
    # Choose checksum verification mode for the interface. At the moment
    # of the capture, some packets may be with an invalid checksum due to
    # offloading to the network card of the checksum computation.
    # 选择接口的校验和验证模式。 在捕获时,由于校验和计算的网卡卸载,一些分组可能具有无效的校验和。
    # 可能的值是 Possible values are:
    #  - rxonly: #仅计算网卡接收的数据包的校验和。only compute checksum for packets received by network card.
    #  - yes: # 校验和验证是强制的 checksum validation is forced
    #  - no: #校验和验证已禁用 checksum validation is disabled
    #  - auto:  #Suricata使用统计方法来检测何时 Suricata uses a statistical approach to detect when
    #  checksum off-loading is used. (default)
    #  校验和卸载使用。 (默认)
    # Warning: 'checksum-validation' must be set to yes to have any validation
    # 警告:'checksum-validation'必须设置为yes才能进行任何验证
    #checksum-checks: auto  #校验和检查
  # 第二个界面 Second interface
  #- interface: eth1
  #  threads: 3
  #  cluster-id: 93
  #  cluster-type: cluster_flow
  # Put default values here
  - interface: default
    #threads: 2
 
# For FreeBSD ipfw(8) divert(4) support.
# Please make sure you have ipfw_load="YES" and ipdivert_load="YES"
# in /etc/loader.conf or kldload'ing the appropriate kernel modules.
# Additionally, you need to have an ipfw rule for the engine to see
# the packets from ipfw.  For Example:
#   ipfw add 100 divert 8000 ip from any to any
# The 8000 above should be the same number you passed on the command
# line, i.e. -d 8000
ipfw:

  # Reinject packets at the specified ipfw rule number.  This config
  # option is the ipfw rule number AT WHICH rule processing continues
  # in the ipfw processing system after the engine has finished
  # inspecting the packet for acceptance.  If no rule number is specified,
  # accepted packets are reinjected at the divert rule which they entered
  # and IPFW rule processing continues.  No check is done to verify
  # that this rule number makes sense, so care must be taken to avoid
  # loops in ipfw.
  #
  # The following example tells the engine to reinject packets
  # back into the ipfw firewall AT rule number 5500:
  # ipfw-reinjection-rule-number: 5500
 
 
napatech:
    # The Host Buffer Allowance for all streams
    # (-1 = OFF, 1 - 100 = percentage of the host buffer that can be held back)
    # This may be enabled when sharing streams with another application.
    # Otherwise, it should be turned off.
    hba: -1

    # use-all-streams set to "yes" will query the Napatech service for all
    # configured streams and listen on all of them. When set to "no" the
    # streams config array below will be used instead.
    use-all-streams: yes

    # The streams to listen on.  This can be either:
    #   a list of individual streams (e.g. streams: [0,1,2,3])
    # or
    #   a range of streams (e.g. streams: ["0-3"])
    streams: ["0-3"]
 
# Tilera mpipe configuration. For use on Tilera TILE-Gx.
mpipe:

  # Load balancing modes: "static", "dynamic", "sticky", or "round-robin".
  load-balance: dynamic

  # Number of packets in each ingress packet queue.
  # Must be 128, 512, 2048 or 65536.
  iqueue-packets: 2048

  # List of interfaces we will listen on.
  inputs:
  - interface: xgbe2
  - interface: xgbe3
  - interface: xgbe4


  # Relative weight of memory for packets of each mPipe buffer size.
  stack:
    size128: 0
    size256: 9
    size512: 0
    size1024: 0
    size1664: 7
    size4096: 0
    size10386: 0
    size16384: 0
 
##
## Configure Suricata to load Suricata-Update managed rules.
##
## If this section is completely commented out move down to the "Advanced rule
## file configuration".
##

default-rule-path: /var/lib/suricata/rules
rule-files:
 - suricata.rules
 
##
## Advanced rule file configuration.
##
## If this section is completely commented out then your configuration
## is setup for suricata-update as it was most likely bundled and
## installed with Suricata.
##

#default-rule-path: /var/lib/suricata/rules

#rule-files:
# - botcc.rules
# # - botcc.portgrouped.rules
# - ciarmy.rules
# - compromised.rules
# - drop.rules
# - dshield.rules
## - emerging-activex.rules
# - emerging-attack_response.rules
# - emerging-chat.rules
# - emerging-current_events.rules
# - emerging-dns.rules
# - emerging-dos.rules
# - emerging-exploit.rules
# - emerging-ftp.rules
## - emerging-games.rules
## - emerging-icmp_info.rules
## - emerging-icmp.rules
# - emerging-imap.rules
## - emerging-inappropriate.rules
## - emerging-info.rules
# - emerging-malware.rules
# - emerging-misc.rules
# - emerging-mobile_malware.rules
# - emerging-netbios.rules
# - emerging-p2p.rules
# - emerging-policy.rules
# - emerging-pop3.rules
# - emerging-rpc.rules
## - emerging-scada.rules
## - emerging-scada_special.rules
# - emerging-scan.rules
## - emerging-shellcode.rules
# - emerging-smtp.rules
# - emerging-snmp.rules
# - emerging-sql.rules
# - emerging-telnet.rules
# - emerging-tftp.rules
# - emerging-trojan.rules
# - emerging-user_agents.rules
# - emerging-voip.rules
# - emerging-web_client.rules
# - emerging-web_server.rules
## - emerging-web_specific_apps.rules
# - emerging-worm.rules
# - tor.rules
## - decoder-events.rules # available in suricata sources under rules dir
## - stream-events.rules  # available in suricata sources under rules dir
# - http-events.rules    # available in suricata sources under rules dir
# - smtp-events.rules    # available in suricata sources under rules dir
# - dns-events.rules     # available in suricata sources under rules dir
# - tls-events.rules     # available in suricata sources under rules dir
## - modbus-events.rules  # available in suricata sources under rules dir
## - app-layer-events.rules  # available in suricata sources under rules dir
## - dnp3-events.rules       # available in suricata sources under rules dir
## - ntp-events.rules       # available in suricata sources under rules dir
## - ipsec-events.rules       # available in suricata sources under rules dir
## - kerberos-events.rules       # available in suricata sources under rules dir
 
##
## Auxiliary configuration files.
##

classification-file: /etc/suricata/classification.config
reference-config-file: /etc/suricata/reference.config
# threshold-file: /etc/suricata/threshold.config
 
##
## Include other configs
##

# Includes.  Files included here will be handled as if they were
# inlined in this configuration file.
#include: include1.yaml
#include: include2.yaml

布施恩德可便相知重

微信扫一扫打赏

支付宝扫一扫打赏

×

给我留言