Bug Summary

File: nsd/sched.c
Warning: line 1131, column 9: Duplicate code detected
Note: line 1133, column 9: Similar code here

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-unknown-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name sched.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model pic -pic-level 2 -fhalf-no-semantic-interposition -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/home/isvv/naviserver/nsd -resource-dir /usr/local/lib/clang/15.0.0 -D _FORTIFY_SOURCE=2 -D NDEBUG -D SYSTEM_MALLOC -I ../include -I /usr/include/tcl8.6 -D HAVE_CONFIG_H -internal-isystem /usr/local/lib/clang/15.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/11/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -std=c99 -fdebug-compilation-dir=/home/isvv/naviserver/nsd -ferror-limit 19 -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-checker alpha -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2022-07-23-130959-11103-1 -x c sched.c
1/*
2 * The contents of this file are subject to the Mozilla Public License
3 * Version 1.1 (the "License"); you may not use this file except in
4 * compliance with the License. You may obtain a copy of the License at
5 * http://mozilla.org/.
6 *
7 * Software distributed under the License is distributed on an "AS IS"
8 * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
9 * the License for the specific language governing rights and limitations
10 * under the License.
11 *
12 * The Original Code is AOLserver Code and related documentation
13 * distributed by AOL.
14 *
15 * The Initial Developer of the Original Code is America Online,
16 * Inc. Portions created by AOL are Copyright (C) 1999 America Online,
17 * Inc. All Rights Reserved.
18 *
19 * Alternatively, the contents of this file may be used under the terms
20 * of the GNU General Public License (the "GPL"), in which case the
21 * provisions of GPL are applicable instead of those above. If you wish
22 * to allow use of your version of this file only under the terms of the
23 * GPL and not to allow others to use your version of this file under the
24 * License, indicate your decision by deleting the provisions above and
25 * replace them with the notice and other provisions required by the GPL.
26 * If you do not delete the provisions above, a recipient may use your
27 * version of this file under either the License or the GPL.
28 */
29
30/*
31 * sched.c --
32 *
33 * Support for the background task and scheduled procedure interfaces. The
34 * implementation of the priority queue is based on a binary heap. A binary heap
35 * has the following characteristics:
36 *
37 * - Cost of insertion: O(log N)
38 * - Cost of deletion: O(log N)
39 * - Cost of change of key value: O(log N) (not used here)
40 * - Cost of smallest (largest) value: O(1)
41 *
42 * The binary heap code is based on:
43 *
44 * "Chapter 9. Priority Queues and Heapsort", Sedgewick "Algorithms
45 * in C, 3rd Edition", Addison-Wesley, 1998.
46 *
47 * https://algs4.cs.princeton.edu/24pq/
48 */
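
For orientation, the heap below is stored in a 1-based array: the parent of slot k is k/2 and the children of a slot p are 2p and 2p+1, which is exactly the index arithmetic used later in QueueEvent() and DeQueueEvent(). A minimal standalone sketch of that mapping (illustration only, not part of sched.c):

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
        int k;

        /* 1-based heap indexing: parent(k) = k/2, children of p are 2p and 2p+1 */
        for (k = 2; k <= 7; k++) {
            int parent = k / 2;
            int left = 2 * parent, right = left + 1;

            assert(k == left || k == right);
            printf("slot %d: parent %d, children of parent: %d and %d\n",
                   k, parent, left, right);
        }
        return 0;
    }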
49
50#include "nsd.h"
51
52/*
53 * The following two defines can be used to turn on consistency checking and
54 * intense tracing of the scheduling/unscheduling of the commands.
55 *
56 * #define NS_SCHED_CONSISTENCY_CHECK
57 * #define NS_SCHED_TRACE_EVENTS
58 */
59
60/*
61 * The following structure defines a scheduled event.
62 */
63
64typedef struct Event {
65 struct Event *nextPtr;
66 Tcl_HashEntry *hPtr; /* Entry in event hash or NULL if deleted. */
67 int id; /* Unique event id. */
68 int qid; /* Current priority queue id. */
69 Ns_Time nextqueue; /* Next time to queue for run. */
70 Ns_Time lastqueue; /* Last time queued for run. */
71 Ns_Time laststart; /* Last time run started. */
72 Ns_Time lastend; /* Last time run finished. */
73 Ns_Time interval; /* Interval specification. */
74 Ns_Time scheduled; /* The scheduled time. */
75 Ns_SchedProc *proc; /* Procedure to execute. */
76 void *arg; /* Client data for procedure. */
77 Ns_SchedProc *deleteProc; /* Procedure to cleanup when done (if any). */
78 unsigned int flags; /* One or more of NS_SCHED_ONCE, NS_SCHED_THREAD,
79 * NS_SCHED_DAILY, or NS_SCHED_WEEKLY. */
80} Event;
81
82/*
83 * Local functions defined in this file.
84 */
85
86static Ns_ThreadProc SchedThread; /* Detached event firing thread. */
87static Ns_ThreadProc EventThread; /* Proc for NS_SCHED_THREAD events. */
88static Event *DeQueueEvent(int k); /* Remove event from heap. */
89static void FreeEvent(Event *ePtr) /* Free completed or cancelled event. */
90 NS_GNUC_NONNULL(1);
91static void QueueEvent(Event *ePtr) /* Queue event on heap. */
92 NS_GNUC_NONNULL(1);
93static void Exchange(int i, int j); /* Exchange elements in the global queue */
94static bool Larger(int j, int k); /* Function defining the sorting
95 criterion of the binary heap */
96
97
98/*
99 * Static variables defined in this file.
100 */
101
102static Tcl_HashTable eventsTable; /* Hash table of events. */
103static Ns_Mutex lock; /* Lock around heap and hash table. */
104static Ns_Cond schedcond; /* Condition to wakeup SchedThread. */
105static Ns_Cond eventcond; /* Condition to wakeup EventThread(s). */
106static Event **queue = NULL; /* Heap priority queue (dynamically re-sized). */
107static Event *firstEventPtr = NULL; /* Pointer to the first event */
108static int nqueue = 0; /* Number of events in queue. */
109static int maxqueue = 0; /* Max queue events (dynamically re-sized). */
110
111static int nThreads = 0; /* Total number of running threads */
112static int nIdleThreads = 0; /* Number of idle threads */
113
114static bool running = NS_FALSE;
115static bool shutdownPending = NS_FALSE;
116static Ns_Thread schedThread;
117
118
119/*
120 *----------------------------------------------------------------------
121 *
122 * Exchange --
123 *
124 * Helper function to exchange two events in the global queue,
125 * used in QueueEvent() and DeQueueEvent().
126 *
127 * Results:
128 * None.
129 *
130 * Side effects:
131 * Queue elements flipped.
132 *
133 *----------------------------------------------------------------------
134 */
135
136static void Exchange(int i, int j) {
137 Event *tmp = queue[i];
138
139 queue[i] = queue[j];
140 queue[j] = tmp;
141 queue[i]->qid = i;
142 queue[j]->qid = j;
143}
144
145
146/*
147 *----------------------------------------------------------------------
148 *
149 * NsInitSched --
150 *
151 * Initialize scheduler API.
152 *
153 * Results:
154 * None.
155 *
156 * Side effects:
157 * None.
158 *
159 *----------------------------------------------------------------------
160 */
161
162void
163NsInitSched(void)
164{
165 Ns_MutexInit(&lock);
166 Ns_MutexSetName(&lock, "ns:sched");
167 Tcl_InitHashTable(&eventsTable, TCL_ONE_WORD_KEYS);
168}
169
170
171/*
172 *----------------------------------------------------------------------
173 *
174 * Ns_After --
175 *
176 * Schedule a one-shot event after the specified delay in seconds.
177 *
178 * Results:
179 * Event id or NS_ERROR if delay is out of range.
180 *
181 * Side effects:
182 * See Ns_ScheduleProcEx().
183 *
184 *----------------------------------------------------------------------
185 */
186
187int
188Ns_After(const Ns_Time *interval, Ns_SchedProc *proc, void *arg, ns_funcptr_t deleteProc)
189{
190 int result;
191
192 NS_NONNULL_ASSERT(proc != NULL);
193 NS_NONNULL_ASSERT(interval != NULL);
194
195 if (interval->sec < 0 || interval->usec < 0) {
196 result = (int)NS_ERROR;
197 } else {
198 result = Ns_ScheduleProcEx(proc, arg, NS_SCHED_ONCE, interval, (Ns_SchedProc *)deleteProc);
199 }
200 return result;
201}
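
A hypothetical caller of Ns_After(), assuming this is compiled as part of the server with nsd.h available; MyOneShot, ScheduleOneShot, and the log messages are illustrative names, not from the original file:

    static void MyOneShot(void *arg, int id)
    {
        Ns_Log(Notice, "one-shot event %d fired for '%s'", id, (const char *)arg);
    }

    static void ScheduleOneShot(void)
    {
        Ns_Time delay;

        delay.sec = 5;                      /* run once, five seconds from now */
        delay.usec = 0;
        if (Ns_After(&delay, MyOneShot, (void *)"example", NULL) == (int)NS_ERROR) {
            Ns_Log(Warning, "could not schedule one-shot event");
        }
    }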
202
203
204/*
205 *----------------------------------------------------------------------
206 *
207 * Ns_ScheduleProc --
208 *
209 * Schedule a proc to run at a given interval.
210 *
211 * Results:
212 * Event id or NS_ERROR if interval is invalid.
213 *
214 * Side effects:
215 * See Ns_ScheduleProcEx().
216 *
217 *----------------------------------------------------------------------
218 */
219
220int
221Ns_ScheduleProc(Ns_SchedProc *proc, void *arg, int thread, int secs)
222{
223 Ns_Time interval;
224
225 NS_NONNULL_ASSERT(proc != NULL);
226
227 interval.sec = secs;
228 interval.usec = 0;
229 return Ns_ScheduleProcEx(proc, arg, (thread != 0) ? NS_SCHED_THREAD : 0u,
230 &interval, NULL);
231}
232
233
234/*
235 *----------------------------------------------------------------------
236 *
237 * Ns_ScheduleDaily --
238 *
239 * Schedule a proc to run once a day.
240 *
241 * Results:
242 * Event id or NS_ERROR if hour and/or minute is out of range.
243 *
244 * Side effects:
245 * See Ns_ScheduleProcEx
246 *
247 *----------------------------------------------------------------------
248 */
249
250int
251Ns_ScheduleDaily(Ns_SchedProc *proc, void *clientData, unsigned int flags,
252 int hour, int minute, Ns_SchedProc *cleanupProc)
253{
254 int result;
255
256 NS_NONNULL_ASSERT(proc != NULL);
257
258 if (hour > 23 || hour < 0 || minute > 59 || minute < 0) {
259 result = (int)NS_ERROR;
260 } else {
261 Ns_Time interval;
262
263 interval.sec = (hour * 3600) + (minute * 60);
264 interval.usec = 0;
265 result = Ns_ScheduleProcEx(proc, clientData, flags | NS_SCHED_DAILY,
266 &interval, cleanupProc);
267 }
268 return result;
269}
270
271
272/*
273 *----------------------------------------------------------------------
274 *
275 * Ns_ScheduleWeekly --
276 *
277 * Schedule a proc to run once a week.
278 *
279 * Results:
280 * Event id or NS_ERROR if day, hour, and/or minute is out of range.
281 *
282 * Side effects:
283 * See Ns_ScheduleProcEx
284 *
285 *----------------------------------------------------------------------
286 */
287
288int
289Ns_ScheduleWeekly(Ns_SchedProc *proc, void *clientData, unsigned int flags,
290 int day, int hour, int minute, Ns_SchedProc *cleanupProc)
291{
292 int result;
293
294 NS_NONNULL_ASSERT(proc != NULL);
295
296 if (day < 0 || day > 6 || hour > 23 || hour < 0 || minute > 59 || minute < 0) {
297 result = (int)NS_ERROR;
298 } else {
299 Ns_Time interval;
300
301 interval.sec = (((day * 24) + hour) * 3600) + (minute * 60);
302 interval.usec = 0;
303 result = Ns_ScheduleProcEx(proc, clientData, flags | NS_SCHED_WEEKLY,
304 &interval, cleanupProc);
305 }
306 return result;
307}
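
Both helpers encode the run time as an offset in seconds: Ns_ScheduleDaily() counts from local midnight (hour*3600 + minute*60), and Ns_ScheduleWeekly() additionally folds in the day ((day*24 + hour)*3600 + minute*60), with day 0 apparently corresponding to Sunday, given the tm_wday adjustment later in QueueEvent(). A small worked example of the arithmetic (illustration only):

    #include <stdio.h>

    int main(void)
    {
        int day = 2, hour = 2, minute = 30;   /* hypothetical: Tuesday, 02:30 */
        long daily  = (hour * 3600L) + (minute * 60L);
        long weekly = (((day * 24L) + hour) * 3600L) + (minute * 60L);

        printf("daily:  %ld seconds after local midnight\n", daily);   /* 9000 */
        printf("weekly: %ld seconds after the week start\n", weekly);  /* 181800 */
        return 0;
    }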
308
309
310/*
311 *----------------------------------------------------------------------
312 *
313 * Ns_ScheduleProcEx --
314 *
315 * Schedule a proc to run at a given interval. The interpretation
316 * of interval (whether iterative, daily, or weekly) is handled
317 * by QueueEvent.
318 *
319 * Results:
320 * Event ID or NS_ERROR when interval is out of range.
321 *
322 * Side effects:
323 * Event is allocated, hashed, and queued.
324 *
325 *----------------------------------------------------------------------
326 */
327
328int
329Ns_ScheduleProcEx(Ns_SchedProc *proc, void *clientData, unsigned int flags,
330 const Ns_Time *interval, Ns_SchedProc *cleanupProc)
331{
332 int id;
333
334 NS_NONNULL_ASSERT(proc != NULL);
335 NS_NONNULL_ASSERT(interval != NULL);
336
337 if (unlikely(interval->sec < 0 || interval->usec < 0)) {
338 id = (int)NS_ERROR;
339
340 } else {
341 Event *ePtr;
342 int isNew;
343 Ns_Time now;
344
345 Ns_GetTime(&now);
346 ePtr = ns_malloc(sizeof(Event));
347 ePtr->flags = flags;
348 ePtr->nextqueue.sec = 0;
349 ePtr->nextqueue.usec = 0;
350 ePtr->lastqueue.sec = ePtr->laststart.sec = ePtr->lastend.sec = -1;
351 ePtr->lastqueue.usec = ePtr->laststart.usec = ePtr->lastend.usec = 0;
352 ePtr->interval = *interval;
353 ePtr->proc = proc;
354 ePtr->deleteProc = cleanupProc;
355 ePtr->arg = clientData;
356
357 Ns_MutexLock(&lock);
358 if (shutdownPending) {
359 id = (int)NS_ERROR;
360 ns_free(ePtr);
361 } else {
362 do {
363 static int nextId = 0;
364
365 id = nextId++;
366 if (nextId < 0) {
367 nextId = 0;
368 }
369 ePtr->hPtr = Tcl_CreateHashEntry(&eventsTable, INT2PTR(id), &isNew);
370 } while (isNew == 0);
371 Tcl_SetHashValue(ePtr->hPtr, ePtr);
372 ePtr->id = id;
373 ePtr->scheduled = now;
374 QueueEvent(ePtr);
375 }
376 Ns_MutexUnlock(&lock);
377 }
378
379 return id;
380}
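
The do/while loop above allocates ids from a counter that restarts at 0 when it would go negative, and relies on the isNew flag of Tcl_CreateHashEntry() to skip ids that are still registered. A standalone sketch of the same pattern, with a hypothetical InUse() standing in for the hash-table lookup (illustration only):

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for "is this id still registered?"; in sched.c this is the
     * isNew flag returned by Tcl_CreateHashEntry(). */
    static bool InUse(int id)
    {
        return (id == 0);                 /* pretend id 0 is still taken */
    }

    static int NextFreeId(void)
    {
        static int nextId = 0;
        int id;

        do {
            id = nextId++;
            if (nextId < 0) {             /* counter would go negative: start over */
                nextId = 0;
            }
        } while (InUse(id));
        return id;
    }

    int main(void)
    {
        printf("allocated id %d\n", NextFreeId());   /* prints 1, since 0 is "in use" */
        return 0;
    }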
381
382
383/*
384 *----------------------------------------------------------------------
385 *
386 * Ns_Cancel, Ns_UnscheduleProc --
387 *
388 * Cancel a previously scheduled event.
389 *
390 * Results:
391 * Ns_UnscheduleProc: None.
392 * Ns_Cancel: NS_TRUE if cancelled, NS_FALSE otherwise.
393 *
394 * Side effects:
395 * See FreeEvent().
396 *
397 *----------------------------------------------------------------------
398 */
399
400void
401Ns_UnscheduleProc(int id)
402{
403 (void) Ns_Cancel(id);
404}
405
406bool
407Ns_Cancel(int id)
408{
409 Event *ePtr = NULL;
410 bool cancelled = NS_FALSE;
411
412 Ns_MutexLock(&lock);
413 if (!shutdownPending) {
414 Tcl_HashEntry *hPtr = Tcl_FindHashEntry(&eventsTable, INT2PTR(id));
415
416 if (hPtr != NULL) {
417 ePtr = Tcl_GetHashValue(hPtr);
418 Tcl_DeleteHashEntry(hPtr);
419 ePtr->hPtr = NULL;
420 if (ePtr->qid > 0) {
421 (void) DeQueueEvent(ePtr->qid);
422 cancelled = NS_TRUE;
423 }
424 }
425 }
426 Ns_MutexUnlock(&lock);
427 if (cancelled) {
428 FreeEvent(ePtr);
429 }
430 return cancelled;
431}
432
433
434/*
435 *----------------------------------------------------------------------
436 *
437 * Ns_Pause --
438 *
439 * Pause a scheduled procedure.
440 *
441 * Results:
442 * NS_TRUE if proc paused, NS_FALSE otherwise.
443 *
444 * Side effects:
445 * Proc will not run at the next scheduled time.
446 *
447 *----------------------------------------------------------------------
448 */
449
450bool
451Ns_Pause(int id)
452{
453 bool paused = NS_FALSE;
454
455 Ns_MutexLock(&lock);
456 if (!shutdownPending) {
457 const Tcl_HashEntry *hPtr = Tcl_FindHashEntry(&eventsTable, INT2PTR(id));
458
459 if (hPtr != NULL) {
460 Event *ePtr;
461
462 ePtr = Tcl_GetHashValue(hPtr);
463 if ((ePtr->flags & NS_SCHED_PAUSED) == 0u) {
464 ePtr->flags |= NS_SCHED_PAUSED;
465 if (ePtr->qid > 0) {
466 (void) DeQueueEvent(ePtr->qid);
467 }
468 paused = NS_TRUE;
469 }
470 }
471 }
472 Ns_MutexUnlock(&lock);
473 return paused;
474}
475
476
477/*
478 *----------------------------------------------------------------------
479 *
480 * Ns_Resume --
481 *
482 * Resume a scheduled proc.
483 *
484 * Results:
485 * NS_TRUE if proc resumed, NS_FALSE otherwise.
486 *
487 * Side effects:
488 * Proc will be rescheduled.
489 *
490 *----------------------------------------------------------------------
491 */
492
493bool
494Ns_Resume(int id)
495{
496 bool resumed = NS_FALSE;
497
498 Ns_MutexLock(&lock);
499 if (!shutdownPending) {
500 const Tcl_HashEntry *hPtr = Tcl_FindHashEntry(&eventsTable, INT2PTR(id));
501
502 if (hPtr != NULL) {
503 Event *ePtr;
504
505 ePtr = Tcl_GetHashValue(hPtr);
506 if ((ePtr->flags & NS_SCHED_PAUSED) != 0u) {
507 Ns_Time now;
508
509 ePtr->flags &= ~NS_SCHED_PAUSED;
510 Ns_GetTime(&now);
511 ePtr->scheduled = now;
512 QueueEvent(ePtr);
513 resumed = NS_TRUE;
514 }
515 }
516 }
517 Ns_MutexUnlock(&lock);
518
519 return resumed;
520}
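
A hypothetical pause/resume sequence around a maintenance window, assuming an id previously returned by one of the scheduling calls above (the function name and log messages are illustrative):

    static void MaintenanceWindow(int eventId)
    {
        if (Ns_Pause(eventId)) {
            Ns_Log(Notice, "event %d paused for maintenance", eventId);
            /* ... perform maintenance work ... */
            if (!Ns_Resume(eventId)) {
                Ns_Log(Warning, "event %d could not be resumed", eventId);
            }
        }
    }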
521
522
523/*
524 *----------------------------------------------------------------------
525 *
526 * NsStartSchedShutdown, NsWaitSchedShutdown --
527 *
528 * Initiate and then wait for sched shutdown.
529 *
530 * Results:
531 * None.
532 *
533 * Side effects:
534 * May time out waiting for sched shutdown.
535 *
536 *----------------------------------------------------------------------
537 */
538
539void
540NsStartSchedShutdown(void)
541{
542 Ns_MutexLock(&lock);
543 if (running) {
544 Ns_Log(Notice, "sched: shutdown pending");
545 shutdownPending = NS_TRUE;
546 Ns_CondSignal(&schedcond);
547 }
548 Ns_MutexUnlock(&lock);
549}
550
551void
552NsWaitSchedShutdown(const Ns_Time *toPtr)
553{
554 Ns_ReturnCode status;
555
556 Ns_MutexLock(&lock);
557 status = NS_OK;
558 while (status == NS_OK && running) {
559 status = Ns_CondTimedWait(&schedcond, &lock, toPtr);
560 }
561 Ns_MutexUnlock(&lock);
562 if (status != NS_OK) {
563 Ns_Log(Warning, "sched: timeout waiting for sched exit");
564 } else if (schedThread != NULL) {
565 Ns_ThreadJoin(&schedThread, NULL);
566 }
567}
568
569static bool
570Larger(int j, int k)
571{
572 return (Ns_DiffTime(&queue[j]->nextqueue, &queue[k]->nextqueue, NULL) == 1);
573}
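
As used here, Ns_DiffTime() acts as a three-way comparison, so Larger(j, k) is true exactly when the event in slot j is due strictly later than the one in slot k; slot 1 therefore always holds the earliest due event. A plain-integer sketch of the same ordering (hypothetical helper, illustration only):

    /* Equivalent comparison on plain microsecond timestamps. */
    static int LaterDue(const long long *dueUsec, int j, int k)
    {
        return dueUsec[j] > dueUsec[k];   /* min-heap: slot 1 is the earliest */
    }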
574
575
576#ifndef NS_SCHED_CONSISTENCY_CHECK
577static void QueueConsistencyCheck(const char *UNUSED(startMsg), int UNUSED(n), bool UNUSED(runAsserts)) {
578}
579#else
580
581
582static void
583QueueConsistencyCheck(const char *startMsg, int n, bool runAsserts)
584{
585 int k;
586
587 Ns_Log(Notice, "=== %s (%d) ", startMsg, n);
588
589#ifdef NS_SCHED_TRACE_EVENTS
590 Event *ePtr;
591 Tcl_DString ds;
592 time_t s;
593
594 Tcl_DStringInit(&ds);
595
596 Ns_DStringPrintf(&ds, "=== %s (%d) ", startMsg, n);
597 if (n > 1) {
598 s = queue[1]->nextqueue.sec;
599 }
600 for (k = 1; k <= n; k++) {
601 ePtr = queue[k];
602 Ns_DStringPrintf(&ds, "[%d] (%p id %d qid %d " NS_TIME_FMT ") ",
603 k, (void*)ePtr, ePtr->id, ePtr->qid,
604 (int64_t)ePtr->nextqueue.sec, ePtr->nextqueue.usec);
605 }
606 Ns_Log(Notice, "%s", ds.string);
607 Tcl_DStringFree(&ds);
608#endif
609
610 /*
611 * Check if all parent nodes (k/2) are earlier than the child nodes.
612 */
613 for (k = 2; k <= n; k++) {
614 int j = k/2;
615 bool ok = !Larger(j, k);
616
617 if (!ok) {
618 Ns_Log(Error, "=== %s: parent node [%d] (id %d " NS_TIME_FMT
619 ") is later than child [%d] (id %d " NS_TIME_FMT ")",
620 startMsg,
621 j, queue[j]->id, (int64_t)queue[j]->nextqueue.sec, queue[j]->nextqueue.usec,
622 k, queue[k]->id, (int64_t)queue[k]->nextqueue.sec, queue[k]->nextqueue.usec);
623 if (runAsserts) {
624 assert(ok);
625 }
626 }
627 }
628
629 /*
630 * Check whether all qids correspond to the array position.
631 */
632 for (k = 1; k <= n; k++) {
633 if (queue[k]->qid != k) {
634 Ns_Log(Error, "=== %s inconsistent qid on pos %d (id %d): is %d, should be %d",
635 startMsg, k, queue[k]->id, queue[k]->qid, k);
636 if (runAsserts) {
637 assert(queue[k]->qid == k);
638 }
639 }
640 }
641}
642#endif
643
644
645/*
646 *----------------------------------------------------------------------
647 *
648 * QueueEvent --
649 *
650 * Add an event to the priority queue heap.
651 *
652 * Results:
653 * None.
654 *
655 * Side effects:
656 * SchedThread() may be created and/or signaled.
657 *
658 *----------------------------------------------------------------------
659 */
660
661static void
662QueueEvent(Event *ePtr)
663{
664 if ((ePtr->flags & NS_SCHED_PAUSED) == 0u) {
665 long d;
666
667 /*
668 * Calculate the time from now in seconds this event should run.
669 */
670 if ((ePtr->flags & (NS_SCHED_DAILY | NS_SCHED_WEEKLY)) != 0u) {
671 struct tm *tp;
672 time_t secs = ePtr->scheduled.sec;
673
674 tp = ns_localtime(&secs);
675 tp->tm_sec = (int)ePtr->interval.sec;
676 tp->tm_hour = 0;
677 tp->tm_min = 0;
678 if ((ePtr->flags & NS_SCHED_WEEKLY) != 0u) {
679 tp->tm_mday -= tp->tm_wday;
680 }
681 ePtr->nextqueue.sec = mktime(tp);
682 ePtr->nextqueue.usec = 0;
683 d = Ns_DiffTime(&ePtr->nextqueue, &ePtr->scheduled, NULL);
684 Ns_Log(Debug, "SCHED_DAILY: scheduled " NS_TIME_FMT " next " NS_TIME_FMT
685 " diff %ld secdiff %ld",
686 (int64_t)ePtr->scheduled.sec, ePtr->scheduled.usec,
687 (int64_t)ePtr->nextqueue.sec, ePtr->nextqueue.usec,
688 d, (long)ePtr->nextqueue.sec-(long)ePtr->scheduled.sec);
689
690 if (d <= 0) {
691 tp->tm_mday += ((ePtr->flags & NS_SCHED_WEEKLY) != 0u) ? 7 : 1;
692 ePtr->nextqueue.sec = mktime(tp);
693 ePtr->nextqueue.usec = 0;
694 Ns_Log(Debug, "SCHED_DAILY: final next " NS_TIME_FMT"%" "l" "d" ".%06ld" ,
695 (int64_t)ePtr->nextqueue.sec, ePtr->nextqueue.usec);
696 }
697 ePtr->scheduled = ePtr->nextqueue;
698 } else {
699 Ns_Time diff, now;
700
701 ePtr->nextqueue = ePtr->scheduled;
702 Ns_IncrTime(&ePtr->nextqueue, ePtr->interval.sec, ePtr->interval.usec);
703 /*
704 * The update time is the next scheduled time.
705 */
706 ePtr->scheduled = ePtr->nextqueue;
707
708 Ns_GetTime(&now);
709 d = Ns_DiffTime(&ePtr->nextqueue, &now, &diff);
710 Ns_Log(Debug, "sched: compute next run time based on: scheduled " NS_TIME_FMT"%" "l" "d" ".%06ld"
711 " diff %ld",
712 (int64_t)ePtr->scheduled.sec, ePtr->scheduled.usec, d);
713
714 if (d == -1) {
715 /*
716 * The last execution took longer than the schedule
717 * interval. Re-schedule after 10ms.
718 */
719 ePtr->nextqueue = now;
720 Ns_IncrTime(&ePtr->nextqueue, 0, 10000);
721 Ns_Log(Warning, "sched id %d: last execution overlaps with scheduled execution; "
722 "running late", ePtr->id);
723 }
724 }
725
726 ePtr->qid = ++nqueue;
727 /*
728 * The queue array is extended if necessary.
729 */
730 if (maxqueue <= nqueue) {
731 maxqueue += 25;
732 queue = ns_realloc(queue, sizeof(Event *) * ((size_t)maxqueue + 1u));
733 }
734 /*
735 * Place the new event at the end of the queue array.
736 */
737 queue[nqueue] = ePtr;
738
739 if (nqueue > 1) {
740 int j, k;
741
742 QueueConsistencyCheck("Queue event", nqueue - 1, NS_FALSE0);
743
744 /*
745 * Bottom-up reheapify: the new node "swims up" in the heap. When a
746 * parent node is due later than its child, the nodes have to be swapped.
747 *
748 * In the implementation below, "j" is always k/2 and represents
749 * the parent node in the binary tree.
750 */
751 k = nqueue;
752 j = k / 2;
753 while (k > 1 && Larger(j, k)) {
754 Exchange(j, k);
755 k = j;
756 j = k / 2;
757 }
758 QueueConsistencyCheck("Queue event end", nqueue, NS_TRUE1);
759 }
760 Ns_Log(Debug, "QueueEvent (id %d qid %d " NS_TIME_FMT"%" "l" "d" ".%06ld" ")",
761 ePtr->id, ePtr->qid,
762 (int64_t)ePtr->nextqueue.sec, ePtr->nextqueue.usec);
763
764 /*
765 * Signal or create the SchedThread if necessary.
766 */
767
768 if (running) {
769 Ns_CondSignal(&schedcond);
770 } else {
771 running = NS_TRUE;
772 Ns_ThreadCreate(SchedThread, NULL, 0, &schedThread);
773 }
774 }
775}
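
A standalone sketch of the bottom-up reheapify ("swim") performed above, operating on a plain 1-based array of due times instead of Event pointers (illustration only, not part of sched.c):

    #include <stdio.h>

    static void Swim(long *due, int k)
    {
        while (k > 1 && due[k / 2] > due[k]) {   /* parent due later than child? */
            long tmp = due[k / 2];

            due[k / 2] = due[k];
            due[k] = tmp;
            k = k / 2;
        }
    }

    int main(void)
    {
        /* slot 0 unused; heap occupies slots 1..4 after appending due time 5 */
        long due[] = {0, 10, 20, 30, 5};

        Swim(due, 4);
        printf("earliest due time is now at slot 1: %ld\n", due[1]);   /* prints 5 */
        return 0;
    }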
776
777
778/*
779 *----------------------------------------------------------------------
780 *
781 * DeQueueEvent --
782 *
783 * Remove an event from the priority queue heap.
784 *
785 * Results:
786 * Pointer to removed event.
787 *
788 * Side effects:
789 * None.
790 *
791 *----------------------------------------------------------------------
792 */
793
794static Event *
795DeQueueEvent(int k)
796{
797 Event *ePtr;
798
799 Ns_Log(Debug, "DeQueueEvent (id %d qid %d " NS_TIME_FMT"%" "l" "d" ".%06ld" ")",
800 queue[k]->id, k,
801 (int64_t)queue[k]->nextqueue.sec, queue[k]->nextqueue.usec);
802
803 QueueConsistencyCheck("Dequeue event start", nqueue, NS_TRUE1);
804
805 /*
806 * Remove an element qid (named k in Sedgewick) from the priority queue.
807 *
808 * 1) Exchange the element to be deleted with the node at the end. The
809 * moved element will in most cases violate the heap order.
810 * 2) Sink down the element.
811 */
812
813 Exchange(k, nqueue);
814 ePtr = queue[nqueue--];
815 ePtr->qid = 0;
816
817 for (;;) {
818 int j = 2 * k;
819
820 if (j > nqueue) {
821 break;
822 }
823
824 if (j < nqueue && Larger(j, j+1)) {
825 ++j;
826 }
827
828 if (!Larger(k, j)) {
829 break;
830 }
831 Exchange(k, j);
832 k = j;
833 }
834 QueueConsistencyCheck("Dequeue event end", nqueue, NS_TRUE1);
835
836 return ePtr;
837}
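
The matching top-down step ("sink") used above restores heap order after the last element has been moved into slot k. Sketched again on a plain 1-based array of due times, as a companion to the Swim sketch earlier (illustration only):

    static void Sink(long *due, int k, int n)
    {
        for (;;) {
            int j = 2 * k;                    /* left child */

            if (j > n) {
                break;                        /* no children left */
            }
            if (j < n && due[j] > due[j + 1]) {
                ++j;                          /* pick the earlier of the two children */
            }
            if (due[k] <= due[j]) {
                break;                        /* heap order restored */
            }
            { long tmp = due[k]; due[k] = due[j]; due[j] = tmp; }
            k = j;
        }
    }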
838
839
840/*
841 *----------------------------------------------------------------------
842 *
843 * EventThread --
844 *
845 * Run detached thread events.
846 *
847 * Results:
848 * None.
849 *
850 * Side effects:
851 * See FinishEvent().
852 *
853 *----------------------------------------------------------------------
854 */
855
856static void
857EventThread(void *arg)
858{
859 Ns_Time now;
860 Event *ePtr;
861 int jpt, njobs;
862 uintptr_t jobId;
863
864 jpt = njobs = nsconf.sched.jobsperthread;
865 jobId = 0u;
866
867 Ns_ThreadSetName("-sched:idle%" PRIuPTR "-", (uintptr_t)arg);
868 Ns_Log(Notice, "starting");
869
870 Ns_MutexLock(&lock);
871 while (jpt == 0 || njobs > 0) {
872 while (firstEventPtr == NULL && !shutdownPending) {
873 Ns_CondWait(&eventcond, &lock);
874 }
875 if (firstEventPtr == NULL) {
876 break;
877 }
878 ePtr = firstEventPtr;
879 firstEventPtr = ePtr->nextPtr;
880 if (firstEventPtr != NULL) {
881 Ns_CondSignal(&eventcond);
882 }
883 --nIdleThreads;
884 Ns_MutexUnlock(&lock);
885
886 Ns_ThreadSetName("-sched:%" PRIuPTR ":%" PRIuPTR ":%d-",
887 (uintptr_t)arg, ++jobId, ePtr->id);
888 (*ePtr->proc) (ePtr->arg, ePtr->id);
889 Ns_ThreadSetName("-sched:idle%" PRIuPTR "-", (uintptr_t)arg);
890 Ns_GetTime(&now);
891
892 Ns_MutexLock(&lock);
893 ++nIdleThreads;
894 if (ePtr->hPtr == NULL) {
895 Ns_MutexUnlock(&lock);
896 FreeEvent(ePtr);
897 Ns_MutexLock(&lock);
898 } else {
899 ePtr->flags &= ~NS_SCHED_RUNNING;
900 ePtr->lastend = now;
901 /*
902 * EventThread triggers QueueEvent() based on lastqueue.
903 */
904 Ns_Log(Debug, "QueueEvent (%d) based on lastqueue "NS_TIME_FMT"%" "l" "d" ".%06ld"" or nextqueue "NS_TIME_FMT"%" "l" "d" ".%06ld",
905 ePtr->id,
906 (int64_t)ePtr->lastqueue.sec, ePtr->lastqueue.usec,
907 (int64_t)ePtr->nextqueue.sec, ePtr->nextqueue.usec
908 );
909 QueueEvent(ePtr);
910 }
911 /* Served given # of jobs in this thread */
912 if (jpt != 0 && --njobs <= 0) {
913 break;
914 }
915 }
916 --nThreads;
917 --nIdleThreads;
918 Ns_Log(Notice, "exiting, %d threads, %d idle", nThreads, nIdleThreads);
919
920 Ns_CondSignal(&schedcond);
921 Ns_MutexUnlock(&lock);
922}
923
924
925/*
926 *----------------------------------------------------------------------
927 *
928 * FreeEvent --
929 *
930 * Free an event after it has run.
931 *
932 * Results:
933 * None.
934 *
935 * Side effects:
936 * Event is freed or re-queued.
937 *
938 *----------------------------------------------------------------------
939 */
940
941static void
942FreeEvent(Event *ePtr)
943{
944 NS_NONNULL_ASSERT(ePtr != NULL);
945
946 if (ePtr->deleteProc != NULL) {
947 (*ePtr->deleteProc) (ePtr->arg, ePtr->id);
948 }
949 ns_free(ePtr);
950}
951
952
953/*
954 *----------------------------------------------------------------------
955 *
956 * SchedThread --
957 *
958 * Detached thread to fire events on time.
959 *
960 * Results:
961 * None.
962 *
963 * Side effects:
964 * Depends on event procedures.
965 *
966 *----------------------------------------------------------------------
967 */
968
969static void
970SchedThread(void *UNUSED(arg))
971{
972 Ns_Time now;
973 Ns_Time timeout = {0, 0};
974 Event *ePtr, *readyPtr = NULL;
975
976 (void) Ns_WaitForStartup();
977
978 Ns_ThreadSetName("-sched-");
979 Ns_Log(Notice, "sched: starting");
980
981 Ns_MutexLock(&lock);
982 while (!shutdownPending) {
983
984 /*
985 * For events ready to run, either create a thread for
986 * detached events or add to a list of synchronous events.
987 */
988
989 Ns_GetTime(&now);
990 while (nqueue > 0 && Ns_DiffTime(&queue[1]->nextqueue, &now, NULL) <= 0) {
991 ePtr = DeQueueEvent(1);
992
993#ifdef NS_SCHED_TRACE_EVENTS
994 Ns_Log(Notice, "... dequeue event (id %d) " NS_TIME_FMT"%" "l" "d" ".%06ld",
995 ePtr->id,
996 (int64_t)ePtr->nextqueue.sec, ePtr->nextqueue.usec);
997#endif
998 if ((ePtr->flags & NS_SCHED_ONCE) != 0u) {
999 Tcl_DeleteHashEntry(ePtr->hPtr);
1000 ePtr->hPtr = NULL;
1001 }
1002 ePtr->lastqueue = now;
1003 if ((ePtr->flags & NS_SCHED_THREAD) != 0u) {
1004 ePtr->flags |= NS_SCHED_RUNNING;
1005 ePtr->laststart = now;
1006 ePtr->nextPtr = firstEventPtr;
1007 firstEventPtr = ePtr;
1008 } else {
1009 ePtr->nextPtr = readyPtr;
1010 readyPtr = ePtr;
1011 }
1012 }
1013
1014#ifdef NS_SCHED_TRACE_EVENTS
1015 if (readyPtr != NULL || firstEventPtr != NULL) {
1016 Ns_Log(Notice, "... dequeuing done ready %p ready-nextPtr %p first %p",
1017 (void*)readyPtr, (void*)(readyPtr ? readyPtr->nextPtr : NULL),
1018 (void*)firstEventPtr);
1019 }
1020#endif
1021
1022 /*
1023 * Dispatch any threaded events.
1024 */
1025
1026 if (firstEventPtr != NULL) {
1027 if (nIdleThreads == 0) {
1028 Ns_ThreadCreate(EventThread, INT2PTR(nThreads), 0, NULL);
1029 ++nIdleThreads;
1030 ++nThreads;
1031 }
1032 Ns_CondSignal(&eventcond);
1033 }
1034
1035 /*
1036 * Run and re-queue or free synchronous events.
1037 */
1038
1039 while ((ePtr = readyPtr) != NULL) {
1040 Ns_Time diff;
1041
1042 readyPtr = ePtr->nextPtr;
1043 ePtr->laststart = now;
1044 ePtr->flags |= NS_SCHED_RUNNING;
1045 Ns_MutexUnlock(&lock);
1046 (*ePtr->proc) (ePtr->arg, ePtr->id);
1047 Ns_GetTime(&now);
1048
1049 (void)Ns_DiffTime(&ePtr->laststart, &now, &diff);
1050 if (Ns_DiffTime(&diff, &nsconf.sched.maxelapsed, NULL) == 1) {
1051 Ns_Log(Warning, "sched: excessive time taken by proc %d (" NS_TIME_FMT " seconds)",
1052 ePtr->id, (int64_t)diff.sec, diff.usec);
1053 }
1054 if (ePtr->hPtr == NULL) {
1055 FreeEvent(ePtr);
1056 ePtr = NULL;
1057 }
1058 Ns_MutexLock(&lock);
1059 if (ePtr != NULL) {
1060 ePtr->flags &= ~NS_SCHED_RUNNING;
1061 ePtr->lastend = now;
1062 /*
1063 * Base the repeating event on the last queue time, and not on
1064 * the last end time, to avoid a growing time shift for events
1065 * that should run at fixed intervals.
1066 *
1067 * SchedThread triggers QueueEvent() based on lastqueue.
1068 */
1069 Ns_Log(Debug, "QueueEvent (%d) based on lastqueue", ePtr->id);
1070 QueueEvent(ePtr);
1071 }
1072 }
1073
1074 /*
1075 * Wait for the next ready event.
1076 */
1077 if (nqueue == 0) {
1078 Ns_CondWait(&schedcond, &lock);
1079 } else if (!shutdownPending) {
1080 timeout = queue[1]->nextqueue;
1081 (void) Ns_CondTimedWait(&schedcond, &lock, &timeout);
1082 }
1083
1084 }
1085
1086 /*
1087 * Wait for any detached event threads to exit
1088 * and then cleanup the scheduler and signal
1089 * shutdown complete.
1090 */
1091
1092 Ns_Log(Notice, "sched: shutdown started");
1093 if (nThreads > 0) {
1094 Ns_Log(Notice, "sched: waiting for %d/%d event threads...",
1095 nThreads, nIdleThreads);
1096 Ns_CondBroadcast(&eventcond);
1097 while (nThreads > 0) {
1098 (void) Ns_CondTimedWait(&schedcond, &lock, &timeout);
1099 }
1100 }
1101 Ns_MutexUnlock(&lock);
1102 while (nqueue > 0) {
1103 FreeEvent(queue[nqueue--]);
1104 }
1105 ns_free(queue);
1106 Tcl_DeleteHashTable(&eventsTable);
1107 Ns_Log(Notice, "sched: shutdown complete");
1108
1109 Ns_MutexLock(&lock);
1110 running = NS_FALSE;
1111 Ns_CondBroadcast(&schedcond);
1112 Ns_MutexUnlock(&lock);
1113}
1114
1115
1116void
1117NsGetScheduled(Tcl_DString *dsPtr)
1118{
1119 const Tcl_HashEntry *hPtr;
1120 Tcl_HashSearch search;
1121
1122 NS_NONNULL_ASSERT(dsPtr != NULL);
1123
1124 Ns_MutexLock(&lock);
1125 hPtr = Tcl_FirstHashEntry(&eventsTable, &search);
1126 while (hPtr != NULL) {
1127 const Event *ePtr = Tcl_GetHashValue(hPtr);
1128
1129 Tcl_DStringStartSublist(dsPtr);
1130 Ns_DStringPrintf(dsPtr, "%d %d ", ePtr->id, ePtr->flags);
1131 Ns_DStringAppendTime(dsPtr, &ePtr->interval);
Duplicate code detected
1132 Tcl_DStringAppend(dsPtr, " ", 1);
1133 Ns_DStringAppendTime(dsPtr, &ePtr->nextqueue);
Similar code here
1134 Tcl_DStringAppend(dsPtr, " ", 1);
1135 Ns_DStringAppendTime(dsPtr, &ePtr->lastqueue);
1136 Tcl_DStringAppend(dsPtr, " ", 1);
1137 Ns_DStringAppendTime(dsPtr, &ePtr->laststart);
1138 Tcl_DStringAppend(dsPtr, " ", 1);
1139 Ns_DStringAppendTime(dsPtr, &ePtr->lastend);
1140 Tcl_DStringAppend(dsPtr, " ", 1);
1141 Ns_GetProcInfo(dsPtr, (ns_funcptr_t)ePtr->proc, ePtr->arg);
1142 Tcl_DStringEndSublist(dsPtr);
1143 hPtr = Tcl_NextHashEntry(&search);
1144 }
1145 Ns_MutexUnlock(&lock);
1146}
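
The duplicate-code warning above points at the repeated pair of Ns_DStringAppendTime() followed by a single-space Tcl_DStringAppend(). One possible response (a sketch only, not a change present in the original file) is to factor the pair into a small static helper and call it once per time field:

    static void AppendTimeAndSpace(Tcl_DString *dsPtr, const Ns_Time *timePtr)
    {
        Ns_DStringAppendTime(dsPtr, timePtr);
        Tcl_DStringAppend(dsPtr, " ", 1);
    }

    /* ... the loop body in NsGetScheduled() would then read: */
        Tcl_DStringStartSublist(dsPtr);
        Ns_DStringPrintf(dsPtr, "%d %d ", ePtr->id, ePtr->flags);
        AppendTimeAndSpace(dsPtr, &ePtr->interval);
        AppendTimeAndSpace(dsPtr, &ePtr->nextqueue);
        AppendTimeAndSpace(dsPtr, &ePtr->lastqueue);
        AppendTimeAndSpace(dsPtr, &ePtr->laststart);
        AppendTimeAndSpace(dsPtr, &ePtr->lastend);
        Ns_GetProcInfo(dsPtr, (ns_funcptr_t)ePtr->proc, ePtr->arg);
        Tcl_DStringEndSublist(dsPtr);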
1147
1148/*
1149 * Local Variables:
1150 * mode: c
1151 * c-basic-offset: 4
1152 * fill-column: 78
1153 * indent-tabs-mode: nil
1154 * End:
1155 */