Actions
Bug #7365
Closed
flow-manager: multi Flow Manager memory leak problem
Affected Versions:
Effort:
Difficulty:
Label:
Description
Problem
When I set the number of flow managers to 2, I found that Suricata sometimes never outputs the flow log.
Configuration
%YAML 1.1
---
suricata-version: "8.0"
vars:
address-groups:
HOME_NET: "any"
EXTERNAL_NET: "any"
HTTP_SERVERS: "any"
SMTP_SERVERS: "any"
SQL_SERVERS: "any"
DNS_SERVERS: "any"
TELNET_SERVERS: "any"
AIM_SERVERS: "any"
DC_SERVERS: "any"
DNP3_SERVER: "any"
DNP3_CLIENT: "any"
MODBUS_CLIENT: "any"
MODBUS_SERVER: "any"
ENIP_CLIENT: "any"
ENIP_SERVER: "any"
port-groups:
HTTP_PORTS: "80"
SHELLCODE_PORTS: "!80"
ORACLE_PORTS: 1521
SSH_PORTS: 22
DNP3_PORTS: 20000
MODBUS_PORTS: 502
FILE_DATA_PORTS: "[$HTTP_PORTS,110,143]"
FTP_PORTS: 21
GENEVE_PORTS: 6081
VXLAN_PORTS: 4789
TEREDO_PORTS: 2544
SIP_PORTS: "[5060, 5061]"
tap-mode: 2
default-log-dir: /var/log/suricata
stats:
enabled: yes
interval: 10
exception-policy: {}
outputs:
- fast:
enabled: no
filename: fast.log
append: yes
filetype: regular
- eve-log:
enabled: yes
filetype: regular
filename: eve.json
threaded: false
pcap-file: false
community-id: false
community-id-seed: 0
xff:
enabled: no
mode: extra-data
deployment: reverse
header: X-Forwarded-For
redis: null
types:
- alert:
tagged-packets: yes
metadata: yes
http-body-printable: yes
http-body: yes
payload: no
- frame:
enabled: no
- anomaly:
enabled: yes
- http:
extended: yes
- dns:
enabled: yes
- tls:
extended: yes
- files:
force-magic: no
- smtp: {}
- websocket: {}
- ftp: {}
- rdp: {}
- nfs: {}
- smb: {}
- tftp: {}
- ike: {}
- dcerpc: {}
- krb5: {}
- bittorrent-dht: {}
- snmp: {}
- rfb: {}
- sip: {}
- quic: {}
- ldap: {}
- arp:
enabled: no
- dhcp:
enabled: yes
extended: no
- ssh: {}
- mqtt: {}
- http2: {}
- doh2: {}
- flow: {}
- http-log:
enabled: no
filename: http.log
append: yes
- tls-log:
enabled: no
filename: tls.log
append: yes
- tls-store:
enabled: no
- pcap-log:
enabled: no
filename: log.pcap
limit: "1000 MiB"
max-files: 2000
compression: none
mode: normal
use-stream-depth: no
honor-pass-rules: no
- alert-debug:
enabled: yes
filename: alert-debug.log
append: yes
- stats:
enabled: yes
filename: stats.log
append: no
totals: yes
threads: yes
- syslog:
enabled: no
facility: local5
- file-store:
version: 2
enabled: no
xff:
enabled: no
mode: extra-data
deployment: reverse
header: X-Forwarded-For
- tcp-data:
enabled: no
type: file
filename: tcp-data.log
- http-body-data:
enabled: no
type: file
filename: http-data.log
- lua:
enabled: no
scripts-dir: /etc/caracal/lua-outputs/
logging:
default-log-level: info
default-output-filter: {}
outputs:
- console:
enabled: yes
- file:
enabled: yes
level: info
filename: caracal.log
- syslog:
enabled: no
facility: local5
format: '[%i] <%d> -- '
af-packet:
- interface: docker0
cluster-id: 99
cluster-type: cluster_flow
defrag: yes
use-mmap: yes
tpacket-v3: yes
ring-size: 100000
block-size: 1048576
block-timeout: 10
buffer-size: 1048576
checksum-checks: no
use-emergency-flush: no
dpdk: null
pcap-file:
checksum-checks: auto
app-layer:
protocols:
telnet:
enabled: yes
rfb:
enabled: yes
detection-ports:
dp: "5900, 5901, 5902, 5903, 5904, 5905, 5906, 5907, 5908, 5909"
mqtt:
enabled: yes
krb5:
enabled: yes
bittorrent-dht:
enabled: yes
snmp:
enabled: yes
ike:
enabled: yes
detection-ports:
dp: "500, 4500"
sp: "500, 4500"
tls:
enabled: yes
detection-ports:
dp: "443"
pgsql:
enabled: yes
stream-depth: 0
detection-ports:
dp: "30000:65535"
dcerpc:
enabled: yes
ftp:
enabled: yes
websocket: {}
rdp: {}
ssh:
enabled: yes
doh2:
enabled: yes
http2:
enabled: yes
max-streams: 4096
max-table-size: 65536
max-reassembly-size: 102400
smtp:
enabled: no
raw-extraction: no
mime:
decode-mime: yes
decode-base64: yes
decode-quoted-printable: yes
header-value-depth: 2000
extract-urls: yes
body-md5: no
inspected-tracker:
content-limit: 100000
content-inspect-min-size: 32768
content-inspect-window: 4096
imap:
enabled: detection-only
pop3:
enabled: detection-only
smb:
enabled: yes
detection-ports:
dp: "139, 445"
nfs:
enabled: yes
tftp:
enabled: yes
dns:
tcp:
enabled: yes
detection-ports:
dp: "53"
udp:
enabled: yes
detection-ports:
dp: "53"
http:
enabled: yes
libhtp:
default-config:
personality: IDS
request-body-limit: 100 KiB
response-body-limit: 100 KiB
request-body-minimal-inspect-size: 32 KiB
request-body-inspect-window: 4 KiB
response-body-minimal-inspect-size: 40 KiB
response-body-inspect-window: 16 KiB
response-body-decompress-layer-limit: 2
http-body-inline: auto
swf-decompression:
enabled: no
type: both
compress-depth: 100 KiB
decompress-depth: 100 KiB
double-decode-path: no
double-decode-query: no
server-config: {}
modbus:
enabled: no
detection-ports:
dp: "502"
stream-depth: 0
dnp3:
enabled: no
detection-ports:
dp: "20000"
enip:
enabled: no
detection-ports:
dp: "44818"
sp: "44818"
ntp:
enabled: yes
quic:
enabled: yes
dhcp:
enabled: yes
sip: {}
ldap:
enabled: yes
asn1-max-frames: 256
datasets:
defaults: {}
rules: {}
security:
limit-noproc: true
landlock:
enabled: no
directories:
read:
- /usr/local
- /etc/caracal
lua: {}
sensor-name: 1
coredump:
max-dump: unlimited
host-mode: auto
unix-command:
enabled: auto
legacy:
uricontent: enabled
exception-policy: auto
engine-analysis:
rules-fast-pattern: yes
rules: yes
pcre:
match-limit: 3500
match-limit-recursion: 1500
host-os-policy:
windows:
- 0.0.0.0/0
bsd: []
bsd-right: []
old-linux: []
linux: []
old-solaris: []
solaris: []
hpux10: []
hpux11: []
irix: []
macos: []
vista: []
windows2k3: []
defrag:
memcap: "32 MiB"
hash-size: 65536
trackers: 65536
max-frags: 65535
prealloc: yes
timeout: 60
flow:
memcap: "6 GiB"
hash-size: 65536
prealloc: 10000
emergency-recovery: 30
managers: 2
recyclers: 2
vlan:
use-for-tracking: true
livedev:
use-for-tracking: true
flow-timeouts:
default:
new: 30
established: 60
closed: 0
bypassed: 100
emergency-new: 10
emergency-established: 10
emergency-closed: 0
emergency-bypassed: 50
tcp:
new: 60
established: 60
closed: 0
bypassed: 100
emergency-new: 5
emergency-established: 10
emergency-closed: 0
emergency-bypassed: 50
udp:
new: 30
established: 60
closed: 0
bypassed: 100
emergency-new: 10
emergency-established: 10
emergency-closed: 0
emergency-bypassed: 50
icmp:
new: 30
established: 300
closed: 0
bypassed: 100
emergency-new: 10
emergency-established: 100
emergency-closed: 0
emergency-bypassed: 50
stream:
memcap: "8 GiB"
checksum-validation: no
inline: no
reassembly:
memcap: "10 GiB"
depth: "512 MiB"
toserver-chunk-size: 2560
toclient-chunk-size: 2560
randomize-chunk-size: yes
host:
hash-size: 4096
prealloc: 1000
memcap: "32 MiB"
decoder:
teredo:
enabled: true
ports: $TEREDO_PORTS
vxlan:
enabled: true
ports: $VXLAN_PORTS
geneve:
enabled: true
ports: $GENEVE_PORTS
detect:
profile: medium
custom-values:
toclient-groups: 3
toserver-groups: 25
sgh-mpm-context: auto
inspection-recursion-limit: 3000
prefilter:
default: mpm
grouping: {}
thresholds:
hash-size: 16384
memcap: "16 MiB"
profiling:
grouping:
dump-to-disk: false
include-rules: false
include-mpm-stats: false
mpm-algo: hs
spm-algo: hs
threading:
set-cpu-affinity: yes
cpu-affinity:
- management-cpu-set:
cpu:
- "7"
- receive-cpu-set:
cpu:
- "0"
- worker-cpu-set:
cpu:
- all
mode: exclusive
prio:
low:
- 0
medium:
- 1-2
high:
- 3
default: medium
detect-thread-ratio: 1
profiling:
rules:
enabled: yes
filename: rule_perf.log
append: yes
limit: 10
json: yes
keywords:
enabled: yes
filename: keyword_perf.log
append: yes
prefilter:
enabled: yes
filename: prefilter_perf.log
append: yes
rulegroups:
enabled: yes
filename: rule_group_perf.log
append: yes
packets:
enabled: yes
filename: packet_stats.log
append: yes
csv:
enabled: no
filename: packet_stats.csv
locks:
enabled: no
filename: lock_stats.log
append: yes
pcap-log:
enabled: no
filename: pcaplog_stats.log
append: yes
Log
I added some logging in flow-manager.c, and I found that flow manager instance 1 never steps forward.
Info: unix-manager: unix socket '/var/run/suricata/suricata-command.socket' [UnixNew:unix-manager.c:136]
Info: flow-manager: instance: 0 hash 0:32768 slice starting at 0 with 3276 rows [FlowManager:flow-manager.c:857]
Info: flow-manager: instance: 1 hash 32768:65536 slice starting at 0 with 3276 rows [FlowManager:flow-manager.c:857]
Info: flow-manager: instance: 0 hash 0:32768 slice starting at 3276 with 3276 rows [FlowManager:flow-manager.c:857]
Info: flow-manager: instance: 1 hash 32768:65536 slice starting at 36044 with 3276 rows [FlowManager:flow-manager.c:857]
Notice: threads: Threads created -> W: 18 FM: 2 FR: 1 Engine started. [TmThreadWaitOnThreadRunning:tm-threads.c:1894]
Info: flow-manager: instance: 0 hash 0:32768 slice starting at 6552 with 3276 rows [FlowManager:flow-manager.c:857]
Info: flow-manager: instance: 1 hash 32768:65536 slice starting at 36044 with 3276 rows [FlowManager:flow-manager.c:857]
Info: flow-manager: instance: 1 hash 32768:65536 slice starting at 36044 with 3276 rows [FlowManager:flow-manager.c:857]
Info: flow-manager: instance: 0 hash 0:32768 slice starting at 9828 with 3276 rows [FlowManager:flow-manager.c:857]
Info: flow-manager: instance: 1 hash 32768:65536 slice starting at 36044 with 3276 rows [FlowManager:flow-manager.c:857]
Info: flow-manager: instance: 0 hash 0:32768 slice starting at 13104 with 3276 rows [FlowManager:flow-manager.c:857]
Info: flow-manager: instance: 1 hash 32768:65536 slice starting at 36044 with 3276 rows [FlowManager:flow-manager.c:857]
Info: flow-manager: instance: 0 hash 0:32768 slice starting at 16380 with 3276 rows [FlowManager:flow-manager.c:857]
^CNotice: caracal: Signal Received. Stopping engine. [SuricataMainLoop:caracal.c:2820]
Info: flow-manager: instance: 1 hash 32768:65536 slice starting at 36044 with 3276 rows [FlowManager:flow-manager.c:857]
Info: flow-manager: instance: 0 hash 0:32768 slice starting at 19656 with 3276 rows [FlowManager:flow-manager.c:857]
Updated by Victor Julien 19 days ago
- Subject changed from Multi Flow Manager memory leak problem to flow-manager: multi Flow Manager memory leak problem
- Status changed from New to In Progress
- Assignee changed from OISF Dev to Victor Julien
- Priority changed from Urgent to Normal
- Target version changed from TBD to 8.0.0-beta1
Thanks for your report QianKai Lin, I can confirm there is an issue. Looking into it.
Updated by Victor Julien 19 days ago
- Status changed from In Progress to In Review
- Label Needs backport to 7.0 added
Updated by Victor Julien 19 days ago
- Related to Bug #6499: tcp.active_sessions and flow.active count will never reduce when using trex added
Updated by Victor Julien 13 days ago
- Status changed from In Review to Resolved
Actions