
suricata.yaml - Srijan Nandi, 08/31/2019 06:17 AM
%YAML 1.1
---

vars:
  address-groups:
    HOME_NET: "any"

    EXTERNAL_NET: "any"

    HTTP_SERVERS: "$HOME_NET"
    SMTP_SERVERS: "$HOME_NET"
    SQL_SERVERS: "$HOME_NET"
    DNS_SERVERS: "$HOME_NET"
    TELNET_SERVERS: "$HOME_NET"
    AIM_SERVERS: "$EXTERNAL_NET"
    DC_SERVERS: "$HOME_NET"
    DNP3_SERVER: "$HOME_NET"
    DNP3_CLIENT: "$HOME_NET"
    MODBUS_CLIENT: "$HOME_NET"
    MODBUS_SERVER: "$HOME_NET"
    ENIP_CLIENT: "$HOME_NET"
    ENIP_SERVER: "$HOME_NET"
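    # A hedged example (the CIDRs are placeholders, not from this ticket):
    # if HOME_NET is later narrowed from "any", the usual form is a CIDR
    # list, with EXTERNAL_NET as its negation:
    #HOME_NET: "[192.168.0.0/16,10.0.0.0/8,172.16.0.0/12]"
    #EXTERNAL_NET: "!$HOME_NET"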

  port-groups:
    HTTP_PORTS: "80"
    SHELLCODE_PORTS: "!80"
    ORACLE_PORTS: 1521
    SSH_PORTS: 22
    DNP3_PORTS: 20000
    MODBUS_PORTS: 502
    FILE_DATA_PORTS: "[$HTTP_PORTS,110,143]"
    FTP_PORTS: 21

default-log-dir: /var/log/suricata/

# global stats configuration
stats:
  enabled: yes
  interval: 8
  #decoder-events: true
  decoder-events-prefix: "decoder.event"
  #stream-events: false

outputs:
  - stats:
      enabled: yes
      filename: stats.log
      append: yes   # append to file (yes) or overwrite it (no)
      totals: yes   # stats for all threads merged together
      threads: no   # per thread stats

logging:
  default-log-level: notice
  default-output-filter:
  outputs:
  - console:
      enabled: yes
  - file:
      enabled: yes
      level: info
      filename: /var/log/suricata/suricata.log

# Linux high speed capture support
af-packet:
  - interface: enp94s0f0
    threads: 20
    defrag: no
    cluster-type: cluster_flow
    xdp-mode: driver
    xdp-filter-file: /etc/suricata/ebpf/xdp_filter.bpf
    bypass: yes
    cluster-id: 98
    copy-mode: ips
    use-mmap: yes
    ring-size: 500000
    buffer-size: 5368709120
    rollover: no
    use-emergency-flush: yes
    copy-iface: enp94s0f1
  - interface: enp94s0f1
    threads: 20
    cluster-id: 97
    defrag: no
    cluster-type: cluster_flow
    xdp-mode: driver
    xdp-filter-file: /etc/suricata/ebpf/xdp_filter.bpf
    bypass: yes
    copy-mode: ips
    use-mmap: yes
    ring-size: 500000
    buffer-size: 5368709120
    rollover: no
    use-emergency-flush: yes
    copy-iface: enp94s0f0
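# A setup sketch, not part of the original attachment: for af-packet IPS
# with xdp-mode: driver, NIC offloads are usually disabled so Suricata sees
# frames as they appear on the wire. The interface names match this config;
# the exact offload set that matters is driver-dependent:
#   ethtool -K enp94s0f0 gro off lro off tso off gso off
#   ethtool -K enp94s0f1 gro off lro off tso off gso off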

## Step 5: App Layer Protocol Configuration

app-layer:
  protocols:
    krb5:
      enabled: yes
    ikev2:
      enabled: yes
    tls:
      enabled: yes
      detection-ports:
        dp: 443
      ja3-fingerprints: no
      encryption-handling: default
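      # Possible alternative (an assumption, not from this ticket):
      # "bypass" stops inspecting TLS flows once the handshake completes,
      # which pairs naturally with the af-packet bypass enabled above:
      #encryption-handling: bypass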

    dcerpc:
      enabled: yes
    ftp:
      enabled: yes
      # memcap: 64mb
    ssh:
      enabled: yes
    smtp:
      enabled: yes
      mime:
        decode-mime: yes
        decode-base64: yes
        decode-quoted-printable: yes
        header-value-depth: 2000
        extract-urls: yes
        body-md5: no
      # Configure inspected-tracker for file_data keyword
      inspected-tracker:
        content-limit: 100000
        content-inspect-min-size: 32768
        content-inspect-window: 4096
    imap:
      enabled: detection-only
    msn:
      enabled: detection-only
    smb:
      enabled: yes
      detection-ports:
        dp: 139, 445
      #stream-depth: 0
    nfs:
      enabled: yes
    tftp:
      enabled: yes
    dns:
      #global-memcap: 16mb
      #state-memcap: 512kb
      #request-flood: 500
      tcp:
        enabled: yes
        detection-ports:
          dp: 53
      udp:
        enabled: yes
        detection-ports:
          dp: 53
    http:
      enabled: yes
      # memcap: 64mb
      libhtp:
        default-config:
          personality: IDS
          request-body-limit: 200kb
          response-body-limit: 200kb
          request-body-minimal-inspect-size: 32kb
          request-body-inspect-window: 4kb
          response-body-minimal-inspect-size: 40kb
          response-body-inspect-window: 16kb
          response-body-decompress-layer-limit: 2
          http-body-inline: auto
          swf-decompression:
            enabled: yes
            type: both
            compress-depth: 0
            decompress-depth: 0
          double-decode-path: no
          double-decode-query: no

        server-config:

    modbus:
      enabled: no
      detection-ports:
        dp: 502
      stream-depth: 0

    # DNP3
    dnp3:
      enabled: no
      detection-ports:
        dp: 20000

    # Note: parser depends on Rust support
    ntp:
      enabled: yes

    dhcp:
      enabled: yes

# Limit for the maximum number of asn1 frames to decode (default 256)
asn1-max-frames: 256

## Advanced settings below

coredump:
  max-dump: unlimited

host-mode: auto

max-pending-packets: 1024

runmode: workers

#autofp-scheduler: active-packets
#default-packet-size: 1514

unix-command:
  enabled: auto

legacy:
  uricontent: enabled

action-order:
  - pass
  - drop
  - reject
  - alert
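# This matches Suricata's default order: actions are applied in the order
# listed, so a matching pass rule wins over drop/reject/alert for the same
# traffic.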


engine-analysis:
  rules-fast-pattern: yes
  rules: yes

#recursion and match limits for PCRE where supported
pcre:
  match-limit: 3500
  match-limit-recursion: 1500


host-os-policy:
  windows: [0.0.0.0/0]
  bsd: []
  bsd-right: []
  old-linux: []
  linux: []
  old-solaris: []
  solaris: []
  hpux10: []
  hpux11: []
  irix: []
  macos: []
  vista: []
  windows2k3: []

# Defrag settings:

defrag:
  memcap: 1gb
  hash-size: 65536
  trackers: 65535    # number of defragmented flows to follow
  max-frags: 1000000 # number of fragments to keep (higher than trackers)
  prealloc: yes
  timeout: 30

flow:
  memcap: 1gb
  hash-size: 65536
  prealloc: 1000000
  emergency-recovery: 30
  prune-flows: 5
  managers: 2   # default to one flow manager
  recyclers: 2  # default to one flow recycler thread

vlan:
  use-for-tracking: true

flow-timeouts:
  default:
    new: 10
    established: 100
    closed: 0
    bypassed: 50
    emergency-new: 5
    emergency-established: 50
    emergency-closed: 0
    emergency-bypassed: 10
  tcp:
    new: 10
    established: 100
    closed: 5
    bypassed: 50
    emergency-new: 2
    emergency-established: 50
    emergency-closed: 5
    emergency-bypassed: 10
  udp:
    new: 10
    established: 100
    bypassed: 50
    emergency-new: 2
    emergency-established: 50
    emergency-bypassed: 10
  icmp:
    new: 10
    established: 100
    bypassed: 50
    emergency-new: 2
    emergency-established: 50
    emergency-bypassed: 10
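# Values are in seconds. These are much shorter than stock defaults (tcp
# established is 600 in the shipped suricata.yaml, if memory serves), which
# trades flow state retention for table headroom on a busy sensor.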
stream:
  #memcap: 12gb
  #checksum-validation: no   # reject wrong csums
  #inline: yes               # auto will use inline mode in IPS mode, yes or no set it statically
  #prealloc-sessions: 1000000
  #bypass: yes
  #midstream: false          # do not allow midstream session pickups
  #async-oneside: false      # do not enable async stream handling
  #drop-invalid: no          # drop invalid packets
  #reassembly:
  #  memcap: 18gb
  #  depth: 1mb              # reassemble 1mb into a stream
  #  toserver-chunk-size: 2560
  #  toclient-chunk-size: 2560
  #  randomize-chunk-size: yes
  #  randomize-chunk-range: 10
  memcap: 2gb
  checksum-validation: yes   # reject wrong csums
  inline: auto               # auto will use inline mode in IPS mode, yes or no set it statically
  reassembly:
    memcap: 4gb
    depth: 1mb               # reassemble 1mb into a stream
    toserver-chunk-size: 2560
    toclient-chunk-size: 2560
    randomize-chunk-size: yes
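# Side note on checksum-validation: yes (general Suricata behavior, not a
# finding from this ticket): packets whose checksums were deferred to NIC
# offload can be seen as invalid and rejected; the ethtool sketch above, or
# setting this to no, are the usual workarounds.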

host:
  hash-size: 4096
  prealloc: 1000
  memcap: 32mb

# Decoder settings

decoder:
  teredo:
    enabled: true

detect:
  profile: custom
  custom-values:
    toclient-groups: 300
    toserver-groups: 300
    toclient-sp-groups: 300
    toclient-dp-groups: 300
    toserver-src-groups: 300
    toserver-dst-groups: 5400
    toserver-sp-groups: 300
    toserver-dp-groups: 350
  sgh-mpm-context: full
  inspection-recursion-limit: 3000

  prefilter:
    default: mpm

  grouping:
    #tcp-whitelist: 53, 80, 139, 443, 445, 1433, 3306, 3389, 6666, 6667, 8080
    #udp-whitelist: 53, 135, 5060

  profiling:
    #inspect-logging-threshold: 200
    grouping:
      dump-to-disk: false
      include-rules: false      # very verbose
      include-mpm-stats: false
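# Trade-off note (my reading of the docs, not from the ticket):
# sgh-mpm-context: full gives each signature group its own MPM context,
# spending memory for faster matching; the large custom group counts above
# amplify that memory cost.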

mpm-algo: hs

spm-algo: hs
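# "hs" selects Hyperscan for both multi-pattern and single-pattern matching;
# it requires a Suricata build with Hyperscan support, otherwise startup
# fails. A quick check (suggestion, not from the ticket): suricata --build-info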

threading:
  set-cpu-affinity: yes
  cpu-affinity:
    - management-cpu-set:
        cpu: [ 0,1,2,3,4,5 ]  # include only these CPUs in affinity settings
        mode: "balanced"
        prio:
          default: "high"
    - worker-cpu-set:
        cpu: [ "20-39","60-79" ]
        mode: "exclusive"
        prio:
          default: "high"
  detect-thread-ratio: 1.0
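# Sanity check (plain arithmetic on this file, nothing new): the worker
# ranges "20-39" and "60-79" cover 40 CPUs, matching the 2 x 20 af-packet
# threads above, so each worker can be pinned to its own CPU in exclusive
# mode.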

luajit:
  states: 128

default-rule-path: /etc/suricata/rules
rule-files:
  - custom.rules

classification-file: /etc/suricata/classification.config
reference-config-file: /etc/suricata/reference.config
threshold-file: /etc/suricata/threshold.config
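# Illustrative only (sid and msg are made up, not from this ticket): a
# minimal entry for custom.rules, and a config self-test to run before
# restarting the service:
#   alert tcp $EXTERNAL_NET any -> $HOME_NET 22 (msg:"SSH to HOME_NET"; sid:1000001; rev:1;)
#   suricata -T -c /etc/suricata/suricata.yaml -v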