Bug #6565


coverity: new issues after updating to 2023.6.2

Added by Victor Julien about 1 year ago.

Status:
New
Priority:
Normal

Description

** CID 1554240:  Data race undermines locking  (LOCK_EVASION)
/src/host.c: 353 in HostCleanup()

________________________________________________________________________________________________________
*** CID 1554240:  Data race undermines locking  (LOCK_EVASION)
/src/host.c: 353 in HostCleanup()
347     
348         if (host_hash != NULL) {
349             for (u = 0; u < host_config.hash_size; u++) {
350                 h = host_hash[u].head;
351                 HostHashRow *hb = &host_hash[u];
352                 HRLOCK_LOCK(hb);
>>>     CID 1554240:  Data race undermines locking  (LOCK_EVASION)
>>>     Thread2 checks "head", reading it after Thread1 assigns to "head" but before some of the correlated field assignments can occur. It sees the condition "h" as being false. It continues on before the critical section has completed, and can read data changed by that critical section while it is in an inconsistent state.
353                 while (h) {
354                     if ((SC_ATOMIC_GET(h->use_cnt) > 0) && (h->iprep != NULL)) {
355                         /* iprep is attached to host only clear local storage */
356                         HostFreeStorage(h);
357                         h = h->hnext;
358                     } else {
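
Note: Coverity's complaint here is that `h` is read from `host_hash[u].head` on line 350, before the row lock is taken on line 352. Cleanup may well be single-threaded at this point, but the shape that satisfies the checker is to read the head under the lock. A minimal sketch reusing the snippet's names (not the committed fix; HRLOCK_UNLOCK assumed as the counterpart macro):

    for (u = 0; u < host_config.hash_size; u++) {
        HostHashRow *hb = &host_hash[u];
        HRLOCK_LOCK(hb);
        h = hb->head;              /* read the list head under the row lock */
        while (h) {
            /* ... per-host cleanup as in the original loop ... */
            h = h->hnext;
        }
        HRLOCK_UNLOCK(hb);
    }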

** CID 1554239:  Concurrent data access violations  (MISSING_LOCK)
/src/util-var-name.c: 324 in VarNameStoreLookupByName()

________________________________________________________________________________________________________
*** CID 1554239:  Concurrent data access violations  (MISSING_LOCK)
/src/util-var-name.c: 324 in VarNameStoreLookupByName()
318         return name;
319     }
320     
321     /** \brief find name for id+type at packet time. */
322     uint32_t VarNameStoreLookupByName(const char *name, const enum VarTypes type)
323     {
>>>     CID 1554239:  Concurrent data access violations  (MISSING_LOCK)
>>>     Accessing "active_sc_atomic__" without holding lock "base_lock". Elsewhere, "active_sc_atomic__" is written to with "base_lock" held 1 out of 1 times.
324         const VarNameStore *current = SC_ATOMIC_GET(active);
325         if (current) {
326             VariableName lookup = { .name = (char *)name, .type = type };
327             const VariableName *found = HashListTableLookup(current->names, (void *)&lookup, 0);
328             if (found) {
329                 return found->id;
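
Note: this looks like the common atomic-swap pattern, where only writers take `base_lock` and readers go through `SC_ATOMIC_GET` on a pointer to an immutable store. In plain C11 terms the pattern is (illustrative only, hypothetical names):

    #include <pthread.h>
    #include <stdatomic.h>

    typedef struct VarNameStore VarNameStore;

    static _Atomic(VarNameStore *) active;   /* readers load this lock-free */
    static pthread_mutex_t base_lock = PTHREAD_MUTEX_INITIALIZER;

    /* reader: packet-time lookup, deliberately takes no lock */
    const VarNameStore *Current(void)
    {
        return atomic_load(&active);
    }

    /* writer: publish a fully built replacement under the lock */
    void Publish(VarNameStore *next)
    {
        pthread_mutex_lock(&base_lock);
        atomic_store(&active, next);
        pthread_mutex_unlock(&base_lock);
    }

If that is the intent here, this finding is a candidate for dismissal or annotation rather than a code change.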

** CID 1554238:  Program hangs  (BAD_CHECK_OF_WAIT_COND)
/src/counters.c: 501 in StatsWakeupThread()

________________________________________________________________________________________________________
*** CID 1554238:  Program hangs  (BAD_CHECK_OF_WAIT_COND)
/src/counters.c: 501 in StatsWakeupThread()
495             struct timespec cond_time = FROM_TIMEVAL(cur_timev);
496             cond_time.tv_sec += STATS_WUT_TTS;
497     
498             /* wait for the set time, or until we are woken up by
499              * the shutdown procedure */
500             SCCtrlMutexLock(tv_local->ctrl_mutex);
>>>     CID 1554238:  Program hangs  (BAD_CHECK_OF_WAIT_COND)
>>>     The wait condition prompting the wait upon "ThreadVars_.ctrl_mutex" is not checked correctly. This code can wait for a condition that has already been satisfied, which can cause a never-ending wait.
501             SCCtrlCondTimedwait(tv_local->ctrl_cond, tv_local->ctrl_mutex, &cond_time);
502             SCCtrlMutexUnlock(tv_local->ctrl_mutex);
503     
504             SCMutexLock(&tv_root_lock);
505             ThreadVars *tv = tv_root[TVT_PPT];
506             while (tv != NULL) {
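
Note: the textbook remedy for BAD_CHECK_OF_WAIT_COND is to guard the wait with a predicate loop, so that both spurious wakeups and an already-signalled shutdown are handled. A self-contained plain-pthreads sketch (the `shutdown_requested` predicate is hypothetical, not Suricata's actual flag):

    #include <errno.h>
    #include <pthread.h>
    #include <stdbool.h>
    #include <time.h>

    static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static bool shutdown_requested;  /* set under 'mutex' by the shutdown path */

    static void WaitForTickOrShutdown(const struct timespec *deadline)
    {
        pthread_mutex_lock(&mutex);
        while (!shutdown_requested) {
            int rc = pthread_cond_timedwait(&cond, &mutex, deadline);
            if (rc == ETIMEDOUT)
                break;               /* the periodic stats run is due */
        }
        pthread_mutex_unlock(&mutex);
    }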

** CID 1554237:  Resource leaks  (RESOURCE_LEAK)
/src/detect-xbits.c: 375 in DetectXbitSetup()

________________________________________________________________________________________________________
*** CID 1554237:  Resource leaks  (RESOURCE_LEAK)
/src/detect-xbits.c: 375 in DetectXbitSetup()
369                             DETECT_SM_LIST_POSTMATCH) == NULL) {
370                     goto error;
371                 }
372                 break;
373         }
374     
>>>     CID 1554237:  Resource leaks  (RESOURCE_LEAK)
>>>     Variable "cd" going out of scope leaks the storage it points to.
375         return 0;
376     
377     error:
378         if (cd != NULL)
379             SCFree(cd);
380         return -1;
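
Note: the generic shape of a fix for this leak is an ownership rule: every path either hands `cd` to a SigMatch or frees it before returning. A self-contained illustration of that rule (names hypothetical, not the committed patch):

    #include <stdlib.h>

    typedef struct Ctx { int id; } Ctx;

    static Ctx *g_attached;                  /* stand-in for the SigMatch list */
    static int attach(Ctx *cd) { g_attached = cd; return 0; }

    int Setup(void)
    {
        Ctx *cd = calloc(1, sizeof(*cd));
        if (cd == NULL)
            return -1;
        if (attach(cd) != 0) {               /* not attached: we still own it */
            free(cd);
            return -1;
        }
        return 0;                            /* ownership transferred, no leak */
    }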

** CID 1554236:  Program hangs  (BAD_CHECK_OF_WAIT_COND)
/src/counters.c: 431 in StatsMgmtThread()

________________________________________________________________________________________________________
*** CID 1554236:  Program hangs  (BAD_CHECK_OF_WAIT_COND)
/src/counters.c: 431 in StatsMgmtThread()
425             struct timespec cond_time = FROM_TIMEVAL(cur_timev);
426             cond_time.tv_sec += (stats_tts);
427     
428             /* wait for the set time, or until we are woken up by
429              * the shutdown procedure */
430             SCCtrlMutexLock(tv_local->ctrl_mutex);
>>>     CID 1554236:  Program hangs  (BAD_CHECK_OF_WAIT_COND)
>>>     The wait condition prompting the wait upon "ThreadVars_.ctrl_mutex" is not checked correctly. This code can wait for a condition that has already been satisfied, which can cause a never-ending wait.
431             SCCtrlCondTimedwait(tv_local->ctrl_cond, tv_local->ctrl_mutex, &cond_time);
432             SCCtrlMutexUnlock(tv_local->ctrl_mutex);
433     
434             SCMutexLock(&stats_table_mutex);
435             StatsOutput(tv_local);
436             SCMutexUnlock(&stats_table_mutex);

** CID 1554235:  Concurrent data access violations  (MISSING_LOCK)
/src/defrag-hash.c: 280 in DefragInitConfig()

________________________________________________________________________________________________________
*** CID 1554235:  Concurrent data access violations  (MISSING_LOCK)
/src/defrag-hash.c: 280 in DefragInitConfig()
274                         SCLogError("preallocating defrag failed: %s", strerror(errno));
275                         exit(EXIT_FAILURE);
276                     }
277                     DefragTrackerEnqueue(&defragtracker_spare_q,h);
278                 }
279                 if (!quiet) {
>>>     CID 1554235:  Concurrent data access violations  (MISSING_LOCK)
>>>     Accessing "defragtracker_spare_q.len" without holding lock "DefragTrackerQueue_.m". Elsewhere, "DefragTrackerQueue_.len" is written to with "DefragTrackerQueue_.m" held 2 out of 2 times.
280                     SCLogConfig("preallocated %" PRIu32 " defrag trackers of size %" PRIuMAX "",
281                             defragtracker_spare_q.len, (uintmax_t)sizeof(DefragTracker));
282                 }
283             }
284         }
285     
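
Note: this runs at init and is likely single-threaded, so it may be benign, but the lock-clean version reads the queue length under its own mutex. Sketch using the snippet's names (assuming the mutex member is `m`, as Coverity's message suggests):

    SCMutexLock(&defragtracker_spare_q.m);
    const uint32_t qlen = defragtracker_spare_q.len;
    SCMutexUnlock(&defragtracker_spare_q.m);
    SCLogConfig("preallocated %" PRIu32 " defrag trackers of size %" PRIuMAX "",
            qlen, (uintmax_t)sizeof(DefragTracker));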

** CID 1554234:  Concurrent data access violations  (BAD_CHECK_OF_WAIT_COND)
/src/tmqh-flow.c: 109 in TmqhInputFlow()

________________________________________________________________________________________________________
*** CID 1554234:  Concurrent data access violations  (BAD_CHECK_OF_WAIT_COND)
/src/tmqh-flow.c: 109 in TmqhInputFlow()
103     
104         StatsSyncCountersIfSignalled(tv);
105     
106         SCMutexLock(&q->mutex_q);
107         if (q->len == 0) {
108             /* if we have no packets in queue, wait... */
>>>     CID 1554234:  Concurrent data access violations  (BAD_CHECK_OF_WAIT_COND)
>>>     The wait condition prompting the wait upon "PacketQueue_.mutex_q" is not checked correctly. If a spurious wakeup occurs, the thread could continue its task before the wait condition is satisfied.
109             SCCondWait(&q->cond_q, &q->mutex_q);
110         }
111     
112         if (q->len > 0) {
113             Packet *p = PacketDequeue(q);
114             SCMutexUnlock(&q->mutex_q);
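
Note: the fix Coverity suggests here is mechanical: turn the `if` into a `while` so a spurious wakeup re-checks the queue length before proceeding. Following the snippet's names:

    SCMutexLock(&q->mutex_q);
    while (q->len == 0) {
        /* re-checked on every wakeup, spurious or not */
        SCCondWait(&q->cond_q, &q->mutex_q);
    }
    Packet *p = PacketDequeue(q);
    SCMutexUnlock(&q->mutex_q);

That said, the original `if` may be deliberate so that shutdown can wake the thread with an empty queue; in that case the later `if (q->len > 0)` is the real guard and this is working as intended.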

** CID 1554233:  Null pointer dereferences  (REVERSE_INULL)
/src/detect-xbits.c: 378 in DetectXbitSetup()

________________________________________________________________________________________________________
*** CID 1554233:  Null pointer dereferences  (REVERSE_INULL)
/src/detect-xbits.c: 378 in DetectXbitSetup()
372                 break;
373         }
374     
375         return 0;
376     
377     error:
>>>     CID 1554233:  Null pointer dereferences  (REVERSE_INULL)
>>>     Null-checking "cd" suggests that it may be null, but it has already been dereferenced on all paths leading to the check.
378         if (cd != NULL)
379             SCFree(cd);
380         return -1;
381     }
382     
383     static void DetectXbitFree (DetectEngineCtx *de_ctx, void *ptr)
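
Note: if `cd` really is non-NULL on every path into `error`, the minimal cleanup is to drop the dead check:

    error:
        SCFree(cd);    /* cd already dereferenced on all paths, check was dead */
        return -1;

Otherwise the early-dereference paths above the snippet are the actual bug and need the NULL guard moved up; which applies depends on the code before the excerpt.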

** CID 1554232:  Data race undermines locking  (LOCK_EVASION)
/src/ippair.c: 351 in IPPairCleanup()

________________________________________________________________________________________________________
*** CID 1554232:  Data race undermines locking  (LOCK_EVASION)
/src/ippair.c: 351 in IPPairCleanup()
345     
346         if (ippair_hash != NULL) {
347             for (u = 0; u < ippair_config.hash_size; u++) {
348                 h = ippair_hash[u].head;
349                 IPPairHashRow *hb = &ippair_hash[u];
350                 HRLOCK_LOCK(hb);
>>>     CID 1554232:  Data race undermines locking  (LOCK_EVASION)
>>>     Thread2 checks "head", reading it after Thread1 assigns to "head" but before some of the correlated field assignments can occur. It sees the condition "h" as being false. It continues on before the critical section has completed, and can read data changed by that critical section while it is in an inconsistent state.
351                 while (h) {
352                     if ((SC_ATOMIC_GET(h->use_cnt) > 0)) {
353                         /* iprep is attached to ippair only clear local storage */
354                         IPPairFreeStorage(h);
355                         h = h->hnext;
356                     } else {

** CID 1554231:    (LOCK_EVASION)
/src/detect-mark.c: 245 in DetectMarkPacket()

________________________________________________________________________________________________________
*** CID 1554231:    (LOCK_EVASION)
/src/detect-mark.c: 245 in DetectMarkPacket()
239                  * are fine. */
240                 if (p->flags & PKT_REBUILT_FRAGMENT) {
241                     Packet *tp = p->root ? p->root : p;
242                     SCSpinLock(&tp->persistent.tunnel_lock);
243                     tp->nfq_v.mark = (nf_data->mark & nf_data->mask)
244                         | (tp->nfq_v.mark & ~(nf_data->mask));
>>>     CID 1554231:    (LOCK_EVASION)
>>>     Thread1 sets "flags" to a new value. Now the two threads have an inconsistent view of "flags" and updates to fields correlated with "flags" may be lost.
245                     tp->flags |= PKT_MARK_MODIFIED;
246                     SCSpinUnlock(&tp->persistent.tunnel_lock);
247                 }
248             }
249         }
250     #endif
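
Note: the complaint is that `tp->flags` is updated under `tunnel_lock` here but touched elsewhere without it. If all the flag writers cannot be brought under one lock, an atomic read-modify-write makes the bit-set race-free; in C11 terms (illustrative only, assuming the field became an atomic type; the bit value is hypothetical):

    #include <stdatomic.h>
    #include <stdint.h>

    #define PKT_MARK_MODIFIED (1u << 5)      /* hypothetical bit value */

    static _Atomic uint32_t flags;

    void MarkModified(void)
    {
        atomic_fetch_or(&flags, PKT_MARK_MODIFIED);   /* atomic bit set */
    }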

** CID 1554230:  Concurrent data access violations  (BAD_CHECK_OF_WAIT_COND)
/src/tmqh-simple.c: 57 in TmqhInputSimple()

________________________________________________________________________________________________________
*** CID 1554230:  Concurrent data access violations  (BAD_CHECK_OF_WAIT_COND)
/src/tmqh-simple.c: 57 in TmqhInputSimple()
51         StatsSyncCountersIfSignalled(t);
52     
53         SCMutexLock(&q->mutex_q);
54     
55         if (q->len == 0) {
56             /* if we have no packets in queue, wait... */
>>>     CID 1554230:  Concurrent data access violations  (BAD_CHECK_OF_WAIT_COND)
>>>     The wait condition prompting the wait upon "PacketQueue_.mutex_q" is not checked correctly. If a spurious wakeup occurs, the thread could continue its task before the wait condition is satisfied.
57             SCCondWait(&q->cond_q, &q->mutex_q);
58         }
59     
60         if (q->len > 0) {
61             Packet *p = PacketDequeue(q);
62             SCMutexUnlock(&q->mutex_q);

** CID 1554229:  Concurrent data access violations  (MISSING_LOCK)
/src/util-pool-thread.c: 218 in PoolThreadReturnRaw()

________________________________________________________________________________________________________
*** CID 1554229:  Concurrent data access violations  (MISSING_LOCK)
/src/util-pool-thread.c: 218 in PoolThreadReturnRaw()
212     }
213     
214     void PoolThreadReturnRaw(PoolThread *pt, PoolThreadId id, void *data)
215     {
216         BUG_ON(pt == NULL || id >= pt->size);
217         PoolThreadElement *e = &pt->array[id];
>>>     CID 1554229:  Concurrent data access violations  (MISSING_LOCK)
>>>     Accessing "e->pool" without holding lock "PoolThreadElement_.lock". Elsewhere, "PoolThreadElement_.pool" is written to with "PoolThreadElement_.lock" held 5 out of 6 times (2 of these accesses strongly imply that it is necessary).
218         PoolReturn(e->pool, data);
219     }
220     
221     void PoolThreadUnlock(PoolThread *pt, PoolThreadId id)
222     {
223         BUG_ON(pt == NULL || id >= pt->size);
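
Note: a lock-clean variant would take the element lock around the return, assuming `lock` is the member Coverity refers to. Sketch only (hypothetical function name); if callers are required to hold the element lock already, this report is a false positive instead:

    void PoolThreadReturnRawLocked(PoolThread *pt, PoolThreadId id, void *data)
    {
        BUG_ON(pt == NULL || id >= pt->size);
        PoolThreadElement *e = &pt->array[id];
        SCMutexLock(&e->lock);
        PoolReturn(e->pool, data);
        SCMutexUnlock(&e->lock);
    }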

** CID 1554228:  Program hangs  (BAD_CHECK_OF_WAIT_COND)
/src/tmqh-packetpool.c: 76 in PacketPoolWait()

________________________________________________________________________________________________________
*** CID 1554228:  Program hangs  (BAD_CHECK_OF_WAIT_COND)
/src/tmqh-packetpool.c: 76 in PacketPoolWait()
70     {
71         PktPool *my_pool = GetThreadPacketPool();
72     
73         if (PacketPoolIsEmpty(my_pool)) {
74             SCMutexLock(&my_pool->return_stack.mutex);
75             SC_ATOMIC_ADD(my_pool->return_stack.sync_now, 1);
>>>     CID 1554228:  Program hangs  (BAD_CHECK_OF_WAIT_COND)
>>>     The wait condition prompting the wait upon "PktPoolLockedStack_.mutex" is not checked correctly. This code can wait for a condition that has already been satisfied, which can cause a never-ending wait.
76             SCCondWait(&my_pool->return_stack.cond, &my_pool->return_stack.mutex);
77             SCMutexUnlock(&my_pool->return_stack.mutex);
78         }
79     
80         while(PacketPoolIsEmpty(my_pool))
81             cc_barrier();

** CID 1554227:  Concurrent data access violations  (MISSING_LOCK)
/src/tm-threads.c: 2292 in TmThreadsInjectFlowById()

________________________________________________________________________________________________________
*** CID 1554227:  Concurrent data access violations  (MISSING_LOCK)
/src/tm-threads.c: 2292 in TmThreadsInjectFlowById()
2286     void TmThreadsInjectFlowById(Flow *f, const int id)
2287     {
2288         BUG_ON(id <= 0 || id > (int)thread_store.threads_size);
2289     
2290         int idx = id - 1;
2291     
>>>     CID 1554227:  Concurrent data access violations  (MISSING_LOCK)
>>>     Accessing "thread_store.threads" without holding lock "thread_store_lock". Elsewhere, "Threads_.threads" is written to with "thread_store_lock" held 3 out of 3 times.
2292         Thread *t = &thread_store.threads[idx];
2293         ThreadVars *tv = t->tv;
2294     
2295         BUG_ON(tv == NULL || tv->flow_queue == NULL);
2296     
2297         FlowEnqueue(tv->flow_queue, f);
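
Note: if `thread_store.threads` can still be reallocated while flows are being injected, the pointer must be captured under `thread_store_lock`; if the store is immutable by the time packets flow, the finding is benign. The locked variant would look roughly like:

    SCMutexLock(&thread_store_lock);
    Thread *t = &thread_store.threads[idx];
    ThreadVars *tv = t->tv;
    SCMutexUnlock(&thread_store_lock);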

** CID 1554226:    (LOCK_EVASION)
/src/app-layer-htp-range.c: 211 in HttpRangeContainersTimeoutHash()

________________________________________________________________________________________________________
*** CID 1554226:    (LOCK_EVASION)
/src/app-layer-htp-range.c: 211 in HttpRangeContainersTimeoutHash()
205                     /* remove from the hash */
206                     if (h->prev != NULL)
207                         h->prev->next = h->next;
208                     if (h->next != NULL)
209                         h->next->prev = h->prev;
210                     if (hb->head == h)
>>>     CID 1554226:    (LOCK_EVASION)
>>>     Thread1 sets "head" to a new value. Now the two threads have an inconsistent view of "head" and updates to fields of "head" or fields correlated with "head" may be lost.
211                         hb->head = h->next;
212                     if (hb->tail == h)
213                         hb->tail = h->prev;
214                     h->next = NULL;
215                     h->prev = NULL;
216                     // we should log the timed out file somehow...

** CID 1554225:  Concurrent data access violations  (MISSING_LOCK)
/src/detect-engine.c: 3806 in DetectEngineMultiTenantEnabled()

________________________________________________________________________________________________________
*** CID 1554225:  Concurrent data access violations  (MISSING_LOCK)
/src/detect-engine.c: 3806 in DetectEngineMultiTenantEnabled()
3800     }
3801     
3802     /** TODO locking? Not needed if this is a one time setting at startup */
3803     int DetectEngineMultiTenantEnabled(void)
3804     {
3805         DetectEngineMasterCtx *master = &g_master_de_ctx;
>>>     CID 1554225:  Concurrent data access violations  (MISSING_LOCK)
>>>     Accessing "master->multi_tenant_enabled" without holding lock "DetectEngineMasterCtx_.lock". Elsewhere, "DetectEngineMasterCtx_.multi_tenant_enabled" is written to with "DetectEngineMasterCtx_.lock" held 1 out of 1 times (1 of these accesses strongly imply that it is necessary).
3806         return (master->multi_tenant_enabled);
3807     }
3808     
3809     /** \internal
3810      *  \brief load a tenant from a yaml file
3811      *
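
Note: the code's own TODO says locking is unneeded if this is a one-time startup setting. If that guarantee is wanted without taking `DetectEngineMasterCtx_.lock` on a per-packet path, a C11 atomic gives a race-free read; minimal sketch with hypothetical names:

    #include <stdatomic.h>

    static atomic_int multi_tenant_enabled;   /* written once during startup */

    int MultiTenantEnabledSketch(void)
    {
        return atomic_load(&multi_tenant_enabled);
    }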

** CID 1554224:  Concurrent data access violations  (MISSING_LOCK)
/src/util-var-name.c: 327 in VarNameStoreLookupByName()

________________________________________________________________________________________________________
*** CID 1554224:  Concurrent data access violations  (MISSING_LOCK)
/src/util-var-name.c: 327 in VarNameStoreLookupByName()
321     /** \brief find name for id+type at packet time. */
322     uint32_t VarNameStoreLookupByName(const char *name, const enum VarTypes type)
323     {
324         const VarNameStore *current = SC_ATOMIC_GET(active);
325         if (current) {
326             VariableName lookup = { .name = (char *)name, .type = type };
>>>     CID 1554224:  Concurrent data access violations  (MISSING_LOCK)
>>>     Accessing "current->names" without holding lock "base_lock". Elsewhere, "VarNameStore_.names" is written to with "base_lock" held 3 out of 3 times (1 of these accesses strongly imply that it is necessary).
327             const VariableName *found = HashListTableLookup(current->names, (void *)&lookup, 0);
328             if (found) {
329                 return found->id;
330             }
331         }
332     

** CID 1554223:  Data race undermines locking  (LOCK_EVASION)
/src/flow-queue.c: 126 in FlowQueueAppendPrivate()

________________________________________________________________________________________________________
*** CID 1554223:  Data race undermines locking  (LOCK_EVASION)
/src/flow-queue.c: 126 in FlowQueueAppendPrivate()
120     {
121         if (fqc->top == NULL)
122             return;
123     
124         FQLOCK_LOCK(fq);
125         if (fq->qbot == NULL) {
>>>     CID 1554223:  Data race undermines locking  (LOCK_EVASION)
>>>     Thread1 sets "top" to a new value. Now the two threads have an inconsistent view of "top" and updates to fields of "top" or fields correlated with "top" may be lost.
126             fq->qtop = fqc->top;
127             fq->qbot = fqc->bot;
128             fq->qlen = fqc->len;
129         } else {
130             fq->qbot->next = fqc->top;
131             fq->qbot = fqc->bot;

** CID 1554222:    (LOCK_EVASION)
/src/tmqh-packetpool.c: 310 in TmqhOutputPacketpool()
/src/tmqh-packetpool.c: 397 in TmqhOutputPacketpool()

________________________________________________________________________________________________________
*** CID 1554222:    (LOCK_EVASION)
/src/tmqh-packetpool.c: 310 in TmqhOutputPacketpool()
304     {
305         bool proot = false;
306     
307         SCEnter();
308         SCLogDebug("Packet %p, p->root %p, alloced %s", p, p->root, BOOL2STR(p->pool == NULL));
309     
>>>     CID 1554222:    (LOCK_EVASION)
>>>     Thread2 checks "flags", reading it after Thread1 assigns to "flags" but before some of the correlated field assignments can occur. It sees the condition "p->flags & 0x2000UL" as being false. It continues on before the critical section has completed, and can read data changed by that critical section while it is in an inconsistent state.
310         if (IS_TUNNEL_PKT(p)) {
311             SCLogDebug("Packet %p is a tunnel packet: %s",
312                 p,p->root ? "upper layer" : "tunnel root");
313     
314             /* get a lock to access root packet fields */
315             SCSpinlock *lock = p->root ? &p->root->persistent.tunnel_lock : &p->persistent.tunnel_lock;
/src/tmqh-packetpool.c: 397 in TmqhOutputPacketpool()
391         /* we're done with the tunnel root now as well */
392         if (proot == true) {
393             SCLogDebug("getting rid of root pkt... alloc'd %s", BOOL2STR(p->root->pool == NULL));
394     
395             PacketReleaseRefs(p->root);
396             p->root->ReleasePacket(p->root);
>>>     CID 1554222:    (LOCK_EVASION)
>>>     Thread1 sets "root" to a new value. Now the two threads have an inconsistent view of "root" and updates to fields of "root" or fields correlated with "root" may be lost.
397             p->root = NULL;
398         }
399     
400         PACKET_PROFILING_END(p);
401     
402         PacketReleaseRefs(p);

** CID 1554221:  Concurrent data access violations  (MISSING_LOCK)
/src/detect-engine.c: 3103 in DetectEngineThreadCtxInitForMT()

________________________________________________________________________________________________________
*** CID 1554221:  Concurrent data access violations  (MISSING_LOCK)
/src/detect-engine.c: 3103 in DetectEngineThreadCtxInitForMT()
3097         uint32_t map_array_size = 0;
3098         uint32_t map_cnt = 0;
3099         uint32_t max_tenant_id = 0;
3100         DetectEngineCtx *list = master->list;
3101         HashTable *mt_det_ctxs_hash = NULL;
3102     
>>>     CID 1554221:  Concurrent data access violations  (MISSING_LOCK)
>>>     Accessing "master->tenant_selector" without holding lock "DetectEngineMasterCtx_.lock". Elsewhere, "DetectEngineMasterCtx_.tenant_selector" is written to with "DetectEngineMasterCtx_.lock" held 4 out of 4 times.
3103         if (master->tenant_selector == TENANT_SELECTOR_UNKNOWN) {
3104             SCLogError("no tenant selector set: " 
3105                        "set using multi-detect.selector");
3106             return TM_ECODE_FAILED;
3107         }
3108     
