author      Sergey Poznyakoff <gray@gnu.org>  2020-04-27 13:44:31 +0300
committer   Sergey Poznyakoff <gray@gnu.org>  2020-04-27 13:44:31 +0300
commit      9dff021e31a300dcd5eda941d9e4992a6a8e465a (patch)
tree        9ba56aa2ccbd7c24b1d1e8535c8e3eeef25f27cf
parent      e19cf8e1303700b5b2e4f3e525285f63a3c7b51f (diff)
download    wydawca-9dff021e31a300dcd5eda941d9e4992a6a8e465a.tar.gz
            wydawca-9dff021e31a300dcd5eda941d9e4992a6a8e465a.tar.bz2
Use BSD queue macros to implement queues and linked lists.
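The pattern applied throughout the commit is the replacement of hand-rolled next/prev pointer chains with the 4.4BSD <sys/queue.h> macros, a local copy of which is added as src/queue.h. As a minimal sketch of the conversion (the struct and list names below are made up for illustration; only the macro usage mirrors the actual hunks):

/* Illustrative sketch only: "struct item" and "items" are invented names
   used to show the conversion pattern applied in the hunks below. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "queue.h"              /* the BSD macros added as src/queue.h */

struct item {
	char name[32];
	STAILQ_ENTRY(item) link;    /* replaces a hand-rolled "struct item *next" */
};

/* Replaces the former pair "static struct item *head, *tail;". */
static STAILQ_HEAD(, item) items = STAILQ_HEAD_INITIALIZER(items);

int
main(void)
{
	struct item *p;

	/* Append without touching head/tail pointers by hand. */
	p = malloc(sizeof(*p));
	strcpy(p->name, "first");
	STAILQ_INSERT_TAIL(&items, p, link);

	/* Traverse. */
	STAILQ_FOREACH(p, &items, link)
		printf("%s\n", p->name);

	/* Drain. */
	while ((p = STAILQ_FIRST(&items)) != NULL) {
		STAILQ_REMOVE_HEAD(&items, link);
		free(p);
	}
	return 0;
}

The same three-step shape (an *_ENTRY member in the element, an *_HEAD head with a static initializer, and FOREACH/INSERT/REMOVE calls instead of pointer surgery) recurs in module.c, net.c, spool.c and the notification code below.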
-rw-r--r--  modules/mailutils/mod_mailutils.c             2
-rw-r--r--  src/Makefile.am                              13
-rw-r--r--  src/config.c                                 18
-rw-r--r--  src/directive.c                              14
-rw-r--r--  src/event.c                                  43
-rw-r--r--  src/gpg.c                                     5
-rw-r--r--  src/module.c                                 30
-rw-r--r--  src/net.c                                    49
-rw-r--r--  src/queue.h                                 574
-rw-r--r--  src/spool.c (renamed from src/process.c)     84
-rw-r--r--  src/sql.c                                    10
-rw-r--r--  src/triplet.c                                99
-rw-r--r--  src/watcher.c                                44
-rw-r--r--  src/wydawca.h                                66
-rw-r--r--  tests/mailnotify.at                          12
15 files changed, 777 insertions, 286 deletions
diff --git a/modules/mailutils/mod_mailutils.c b/modules/mailutils/mod_mailutils.c
index 00ea697..759fa14 100644
--- a/modules/mailutils/mod_mailutils.c
+++ b/modules/mailutils/mod_mailutils.c
@@ -605,3 +605,3 @@ wy_config(grecs_node_t *node)
void
-wy_flush()
+wy_flush(void)
{
diff --git a/src/Makefile.am b/src/Makefile.am
index 5af4c2c..5b70f5e 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -18,6 +18,2 @@ sbin_PROGRAMS=wydawca
-if COND_INOTIFY
- WATCHER_C=watcher.c
-endif
-
wydawca_SOURCES=\
@@ -32,3 +28,2 @@ wydawca_SOURCES=\
exec.c\
- event.c\
gpg.c\
@@ -38,3 +33,3 @@ wydawca_SOURCES=\
pidfile.c\
- process.c\
+ spool.c\
sql.c\
@@ -51,3 +46,7 @@ wydawca_SOURCES=\
thread_name.c\
- $(WATCHER_C)
+ queue.h
+
+if COND_INOTIFY
+ wydawca_SOURCES += watcher.c
+endif
diff --git a/src/config.c b/src/config.c
index a7ceb92..ee9f0c3 100644
--- a/src/config.c
+++ b/src/config.c
@@ -127,3 +127,3 @@ static struct archive_descr default_archive_descr = {
static struct dictionary *default_dictionary[dictionary_count];
-struct notification *default_notification = NULL;
+NOTIFYQ default_notification = NOTIFYQ_INITIALIZER(default_notification);
@@ -881,5 +881,3 @@ cb_notify_event(enum grecs_callback_command cmd, grecs_node_t * node,
delay this check until config_finish */
- struct notification **p = (struct notification **) varptr;
- ntf->next = *p;
- *p = ntf;
+ NOTIFYQ_APPEND((NOTIFYQ*)varptr, ntf);
/* FIXME: check ev and tgt? */
@@ -1137,3 +1135,3 @@ static struct grecs_keyword spool_kw[] = {
grecs_type_section, GRECS_MULT,
- NULL, offsetof(struct spool, notification),
+ NULL, offsetof(struct spool, notification_queue),
cb_notify_event, NULL, notify_event_kw },
@@ -1209,10 +1207,12 @@ cb_spool(enum grecs_callback_command cmd, grecs_node_t * node,
- if (rc)
+ if (rc) {
+	/* FIXME: free spool */
return rc;
+ }
- if (!spool->notification)
- spool->notification = default_notification;
+ //FIXME
+ if (NOTIFYQ_EMPTY(&spool->notification_queue))
+ spool->notification_queue = default_notification;
spool->dest_dir = wy_url_printable(spool->dest_url);
register_spool(spool);
- free(spool);
*pdata = NULL;
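The NOTIFYQ type and the NOTIFYQ_INITIALIZER, NOTIFYQ_APPEND, NOTIFYQ_EMPTY and (below) NOTIFYQ_FOREACH macros come from src/wydawca.h, whose hunks are not reproduced in this excerpt. Assuming they are thin wrappers over the tail-queue macros in src/queue.h, their shape would be roughly:

/* Assumed shape only: the real definitions live in src/wydawca.h, which
   is not shown here.  The field list is illustrative. */
struct notification {
	enum wy_event ev;                /* event this notification reacts to */
	char *modname;                   /* module to invoke, if any */
	void *modcfg;                    /* per-module configuration data */
	TAILQ_ENTRY(notification) link;  /* replaces the old "next" pointer */
};

typedef TAILQ_HEAD(notifyq, notification) NOTIFYQ;

#define NOTIFYQ_INITIALIZER(q)  TAILQ_HEAD_INITIALIZER(q)
#define NOTIFYQ_EMPTY(q)        TAILQ_EMPTY(q)
#define NOTIFYQ_APPEND(q, n)    TAILQ_INSERT_TAIL(q, n, link)
#define NOTIFYQ_FOREACH(n, q)   TAILQ_FOREACH(n, q, link)

If that guess is right, the FIXME above the struct assignment spool->notification_queue = default_notification is warranted: copying a tail-queue head by value leaves the elements' back-pointers referring to the original head, so the copy is only safe as long as neither queue is modified afterwards.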
diff --git a/src/directive.c b/src/directive.c
index 8e7afeb..bb5ce4b 100644
--- a/src/directive.c
+++ b/src/directive.c
@@ -60,6 +60,8 @@ str_dirname_sig(char const *a, char const *b)
if (arg2) {
- if (ISSPACE(*a) && strncmp(b, ".sig", 4) == 0) {
+ if (ISSPACE(*a)
+ && strncmp(b, SUF_SIG, SUF_SIG_LEN) == 0) {
arg2 = 1;
b += 4;
- } else if (ISSPACE(*b) && strncmp(a, ".sig", 4) == 0) {
+ } else if (ISSPACE(*b)
+ && strncmp(a, SUF_SIG, SUF_SIG_LEN) == 0) {
arg2 = -1;
@@ -79,6 +81,6 @@ str_dirname_sig(char const *a, char const *b)
- if (*a == 0 && strcmp(b, ".sig") == 0)
+ if (*a == 0 && strcmp(b, SUF_SIG) == 0)
return (arg2 == 0 || arg2 == 1) ? 1 : 0;
- if (*b == 0 && strcmp(a, ".sig") == 0)
+ if (*b == 0 && strcmp(a, SUF_SIG) == 0)
return (arg2 == 0 || arg2 == -1) ? -1 : 0;
@@ -651,3 +653,3 @@ external_check(struct wy_triplet *trp)
wydawca_stat_incr(WY_STAT_CHECK_FAIL);
- notify(spool->notification, trp, wy_ev_check_fail);
+ notify(&spool->notification_queue, trp, wy_ev_check_fail);
}
@@ -761,3 +763,3 @@ process_directives(struct wy_triplet *trp)
timer_stop(WY_TIMER_TRIPLET);
- notify(spool->notification, trp, wy_ev_success);
+ notify(&spool->notification_queue, trp, wy_ev_success);
return 0;
diff --git a/src/event.c b/src/event.c
deleted file mode 100644
index da26a30..0000000
--- a/src/event.c
+++ /dev/null
@@ -1,43 +0,0 @@
-/* wydawca - automatic release submission daemon
- Copyright (C) 2007-2020 Sergey Poznyakoff
-
- Wydawca is free software; you can redistribute it and/or modify it
- under the terms of the GNU General Public License as published by the
- Free Software Foundation; either version 3 of the License, or (at your
- option) any later version.
-
- Wydawca is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License along
- with wydawca. If not, see <http://www.gnu.org/licenses/>. */
-
-#include "wydawca.h"
-
-void
-notify(struct notification *n, struct wy_triplet *t, enum wy_event e)
-{
- for (; n; n = n->next)
- if (n->ev == e) {
- if (n->modname)
- module_notify(n->modname, n->modcfg, e, t);
- }
-}
-
-void
-notify_finish(void)
-{
- notify(default_notification, NULL, wy_ev_finish);
-}
-
-void
-notify_flush(struct spool *sp)
-{
- struct notification *n;
-
- for (n = sp->notification; n; n = n->next)
- if (n->modcfg)
- module_flush(n->modname, n->modcfg);
-}
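With event.c removed, its walkers over the hand-rolled ->next chain presumably survive elsewhere in queue form; their callers in directive.c and gpg.c below now pass &spool->notification_queue. A sketch of what a queue-based notify() would look like, reusing the assumed NOTIFYQ wrappers sketched above:

/* Sketch only: where the replacement actually lives is not visible in
   this excerpt; the body mirrors the deleted loop one-for-one. */
void
notify(NOTIFYQ *nq, struct wy_triplet *t, enum wy_event e)
{
	struct notification *n;

	NOTIFYQ_FOREACH(n, nq) {
		if (n->ev == e && n->modname)
			module_notify(n->modname, n->modcfg, e, t);
	}
}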
diff --git a/src/gpg.c b/src/gpg.c
index 41ac723..1aafb11 100644
--- a/src/gpg.c
+++ b/src/gpg.c
@@ -330,3 +330,3 @@ verify_directive_signature(struct wy_triplet *trp)
wydawca_stat_incr(WY_STAT_BAD_SIGNATURE);
- notify(trp->spool->notification, trp,
+ notify(&trp->spool->notification_queue, trp,
wy_ev_bad_directive_signature);
@@ -378,3 +378,4 @@ verify_detached_signature(struct wy_triplet *trp)
wy_log(LOG_ERR, _("BAD detached signature for %s"), trp->name);
- notify(trp->spool->notification, trp, wy_ev_bad_detached_signature);
+ notify(&trp->spool->notification_queue, trp,
+ wy_ev_bad_detached_signature);
rc = 1;
diff --git a/src/module.c b/src/module.c
index 22a9c6e..2b47ba0 100644
--- a/src/module.c
+++ b/src/module.c
@@ -19,3 +19,3 @@
-static struct module *mod_head, *mod_tail;
+static STAILQ_HEAD(,module) mod_head = STAILQ_HEAD_INITIALIZER(mod_head);
struct grecs_list *module_load_path, *module_prepend_load_path;
@@ -26,6 +26,6 @@ modlookup(const char *name)
struct module *p;
-
- for (p = mod_head; p; p = p->next)
+ STAILQ_FOREACH(p, &mod_head, link) {
if (strcmp(p->name, name) == 0)
- return p;;
+ return p;
+ }
return NULL;
@@ -55,7 +55,3 @@ modinstall(const char *name, const char *path, grecs_locus_t * loc)
- if (mod_tail)
- mod_tail->next = p;
- else
- mod_head = p;
- mod_tail = p;
+ STAILQ_INSERT_TAIL(&mod_head, p, link);
@@ -137,5 +133,6 @@ modload(struct module *mod, lt_dladvise advise)
static int
-conf_notification_modules(struct notification *np)
+conf_notification_modules(NOTIFYQ *nq)
{
- for (; np; np = np->next) {
+ struct notification *np;
+ NOTIFYQ_FOREACH(np, nq) {
if (np->modname) {
@@ -162,3 +159,3 @@ spoolmodcfg(struct spool *spool, void *unused)
{
- return conf_notification_modules(spool->notification);
+ return conf_notification_modules(&spool->notification_queue);
}
@@ -196,3 +193,3 @@ modules_load()
- for (mod = mod_head; mod; mod = mod->next) {
+ STAILQ_FOREACH(mod, &mod_head, link) {
if (modload(mod, advise))
@@ -206,3 +203,3 @@ modules_load()
}
- conf_notification_modules(default_notification);
+ conf_notification_modules(&default_notification);
}
@@ -210,3 +207,3 @@ modules_load()
void
-modules_close()
+modules_close(void)
{
@@ -214,3 +211,3 @@ modules_close()
- for (mod = mod_head; mod; mod = mod->next) {
+ while ((mod = STAILQ_FIRST(&mod_head)) != NULL) {
if (mod->close)
@@ -218,2 +215,3 @@ modules_close()
lt_dlclose(mod->handle);
+ STAILQ_REMOVE_HEAD(&mod_head, link);
}
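For the STAILQ_FOREACH, STAILQ_INSERT_TAIL and STAILQ_REMOVE_HEAD calls above to compile, struct module must now carry an STAILQ_ENTRY member named link; that declaration lives in a header not shown here (presumably src/wydawca.h). A minimal sketch, listing only the fields this file actually touches plus the new linkage:

/* Assumed declaration: only name, handle and close are referenced in
   the hunks above; the rest of the real struct is omitted. */
struct module {
	char *name;                 /* matched by modlookup() */
	lt_dlhandle handle;         /* libltdl handle passed to lt_dlclose() */
	void (*close)(void);        /* optional cleanup hook */
	STAILQ_ENTRY(module) link;  /* replaces the old "struct module *next" */
};

The rewritten modules_close() also switches from plain iteration to the take-first-then-STAILQ_REMOVE_HEAD pattern, which is the idiomatic way to drain a singly-linked tail queue: STAILQ has no O(1) removal of an arbitrary element, but removing the head is constant time.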
diff --git a/src/net.c b/src/net.c
index eccd078..400585c 100644
--- a/src/net.c
+++ b/src/net.c
@@ -91,20 +91,12 @@ struct wydawca_connection {
FILE *fp;
- struct wydawca_connection *next, *prev;
+ TAILQ_ENTRY(wydawca_connection) link;
};
-struct wydawca_connection_queue {
- struct wydawca_connection *head, *tail;
-};
+typedef TAILQ_HEAD(,wydawca_connection) WYDAWCA_CONNECTION_QUEUE;
static inline void
-wydawca_connection_enqueue(struct wydawca_connection_queue *q,
+wydawca_connection_enqueue(WYDAWCA_CONNECTION_QUEUE *q,
struct wydawca_connection *conn)
{
- conn->next = NULL;
- conn->prev = q->tail;
- if (q->tail)
- q->tail->next = conn;
- else
- q->head = conn;
- q->tail = conn;
+ TAILQ_INSERT_TAIL(q, conn, link);
}
@@ -112,14 +104,6 @@ wydawca_connection_enqueue(struct wydawca_connection_queue *q,
static inline void
-wydawca_connection_dequeue(struct wydawca_connection_queue *q,
+wydawca_connection_dequeue(WYDAWCA_CONNECTION_QUEUE *q,
struct wydawca_connection *conn)
{
- if (conn->prev)
- conn->prev->next = conn->next;
- else
- q->head = conn->next;
- if (conn->next)
- conn->next->prev = conn->prev;
- else
- q->tail = conn->prev;
- conn->prev = conn->next = NULL;
+ TAILQ_REMOVE(q, conn, link);
}
@@ -127,3 +111,4 @@ wydawca_connection_dequeue(struct wydawca_connection_queue *q,
static struct wydawca_connection *conn_table;
-static struct wydawca_connection_queue conn_avail, conn_idle;
+static WYDAWCA_CONNECTION_QUEUE conn_avail = TAILQ_HEAD_INITIALIZER(conn_avail),
+ conn_idle = TAILQ_HEAD_INITIALIZER(conn_idle);
static pthread_mutex_t conn_mutex = PTHREAD_MUTEX_INITIALIZER;
@@ -148,4 +133,3 @@ connection_start(int fd)
pthread_mutex_lock(&conn_mutex);
- if (conn_avail.head) {
- conn = conn_avail.head;
+ if ((conn = TAILQ_FIRST(&conn_avail)) != NULL) {
wydawca_connection_dequeue(&conn_avail, conn);
@@ -157,3 +141,3 @@ connection_start(int fd)
pthread_cond_broadcast(&conn_cond);
- } else {
+ } else {
wy_log(LOG_ERR, "connection table is full");
@@ -265,12 +249,13 @@ wy_thr_connection_watcher(void *ptr)
while (1) {
- if (conn_idle.head) {
+ struct wydawca_connection *conn;
+
+ if ((conn = TAILQ_FIRST(&conn_idle)) != NULL) {
struct timespec ts;
- pthread_cond_timedwait(&conn_cond, &conn_mutex,
- &conn_idle.head->ts);
- if (conn_idle.head) {
+ pthread_cond_timedwait(&conn_cond, &conn_mutex, &conn->ts);
+ if ((conn = TAILQ_FIRST(&conn_idle)) != NULL) {
clock_gettime(CLOCK_REALTIME, &ts);
- if (timespec_cmp(&ts, &conn_idle.head->ts) >= 0) {
+ if (timespec_cmp(&ts, &conn->ts) >= 0) {
void *ret;
- pthread_t tid = conn_idle.head->tid;
+ pthread_t tid = conn->tid;
pthread_cancel(tid);
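The hand-rolled doubly linked enqueue/dequeue collapses to single TAILQ_INSERT_TAIL/TAILQ_REMOVE calls because TAILQ_ENTRY stores, per element, the address of the previous element's next pointer, so removal never needs to consult the head. One detail worth noting when reusing the pattern: TAILQ_HEAD_INITIALIZER only works for heads that are named variables, as above; a heap-allocated head must be set up with TAILQ_INIT. A sketch (function name illustrative):

#include <stdlib.h>
#include "queue.h"

struct wydawca_connection;   /* the head only stores pointers, so an
                                incomplete element type suffices here */
typedef TAILQ_HEAD(,wydawca_connection) WYDAWCA_CONNECTION_QUEUE;

/* Named (static) head: the initializer macro is fine. */
static WYDAWCA_CONNECTION_QUEUE conn_free = TAILQ_HEAD_INITIALIZER(conn_free);

/* Heap-allocated head: must be initialized at run time. */
WYDAWCA_CONNECTION_QUEUE *
connection_queue_create(void)
{
	WYDAWCA_CONNECTION_QUEUE *q = malloc(sizeof(*q));
	if (q)
		TAILQ_INIT(q);       /* the initializer macro cannot be used here */
	return q;
}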
diff --git a/src/queue.h b/src/queue.h
new file mode 100644
index 0000000..daf4553
--- /dev/null
+++ b/src/queue.h
@@ -0,0 +1,574 @@
+/*
+ * Copyright (c) 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)queue.h 8.5 (Berkeley) 8/20/94
+ */
+
+#ifndef _SYS_QUEUE_H_
+#define _SYS_QUEUE_H_
+
+/*
+ * This file defines five types of data structures: singly-linked lists,
+ * lists, simple queues, tail queues, and circular queues.
+ *
+ * A singly-linked list is headed by a single forward pointer. The
+ * elements are singly linked for minimum space and pointer manipulation
+ * overhead at the expense of O(n) removal for arbitrary elements. New
+ * elements can be added to the list after an existing element or at the
+ * head of the list. Elements being removed from the head of the list
+ * should use the explicit macro for this purpose for optimum
+ * efficiency. A singly-linked list may only be traversed in the forward
+ * direction. Singly-linked lists are ideal for applications with large
+ * datasets and few or no removals or for implementing a LIFO queue.
+ *
+ * A list is headed by a single forward pointer (or an array of forward
+ * pointers for a hash table header). The elements are doubly linked
+ * so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before
+ * or after an existing element or at the head of the list. A list
+ * may only be traversed in the forward direction.
+ *
+ * A simple queue is headed by a pair of pointers, one the head of the
+ * list and the other to the tail of the list. The elements are singly
+ * linked to save space, so elements can only be removed from the
+ * head of the list. New elements can be added to the list after
+ * an existing element, at the head of the list, or at the end of the
+ * list. A simple queue may only be traversed in the forward direction.
+ *
+ * A tail queue is headed by a pair of pointers, one to the head of the
+ * list and the other to the tail of the list. The elements are doubly
+ * linked so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before or
+ * after an existing element, at the head of the list, or at the end of
+ * the list. A tail queue may be traversed in either direction.
+ *
+ * A circle queue is headed by a pair of pointers, one to the head of the
+ * list and the other to the tail of the list. The elements are doubly
+ * linked so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before or after
+ * an existing element, at the head of the list, or at the end of the list.
+ * A circle queue may be traversed in either direction, but has a more
+ * complex end of list detection.
+ *
+ * For details on the use of these macros, see the queue(3) manual page.
+ */
+
+/*
+ * List definitions.
+ */
+#define LIST_HEAD(name, type) \
+struct name { \
+ struct type *lh_first; /* first element */ \
+}
+
+#define LIST_HEAD_INITIALIZER(head) \
+ { NULL }
+
+#define LIST_ENTRY(type) \
+struct { \
+ struct type *le_next; /* next element */ \
+ struct type **le_prev; /* address of previous next element */ \
+}
+
+/*
+ * List functions.
+ */
+#define LIST_INIT(head) do { \
+ (head)->lh_first = NULL; \
+} while (/*CONSTCOND*/0)
+
+#define LIST_INSERT_AFTER(listelm, elm, field) do { \
+ if (((elm)->field.le_next = (listelm)->field.le_next) != NULL) \
+ (listelm)->field.le_next->field.le_prev = \
+ &(elm)->field.le_next; \
+ (listelm)->field.le_next = (elm); \
+ (elm)->field.le_prev = &(listelm)->field.le_next; \
+} while (/*CONSTCOND*/0)
+
+#define LIST_INSERT_BEFORE(listelm, elm, field) do { \
+ (elm)->field.le_prev = (listelm)->field.le_prev; \
+ (elm)->field.le_next = (listelm); \
+ *(listelm)->field.le_prev = (elm); \
+ (listelm)->field.le_prev = &(elm)->field.le_next; \
+} while (/*CONSTCOND*/0)
+
+#define LIST_INSERT_HEAD(head, elm, field) do { \
+ if (((elm)->field.le_next = (head)->lh_first) != NULL) \
+ (head)->lh_first->field.le_prev = &(elm)->field.le_next;\
+ (head)->lh_first = (elm); \
+ (elm)->field.le_prev = &(head)->lh_first; \
+} while (/*CONSTCOND*/0)
+
+#define LIST_REMOVE(elm, field) do { \
+ if ((elm)->field.le_next != NULL) \
+ (elm)->field.le_next->field.le_prev = \
+ (elm)->field.le_prev; \
+ *(elm)->field.le_prev = (elm)->field.le_next; \
+} while (/*CONSTCOND*/0)
+
+#define LIST_FOREACH(var, head, field) \
+ for ((var) = ((head)->lh_first); \
+ (var); \
+ (var) = ((var)->field.le_next))
+
+/*
+ * List access methods.
+ */
+#define LIST_EMPTY(head) ((head)->lh_first == NULL)
+#define LIST_FIRST(head) ((head)->lh_first)
+#define LIST_NEXT(elm, field) ((elm)->field.le_next)
+
+
+/*
+ * Singly-linked List definitions.
+ */
+#define SLIST_HEAD(name, type) \
+struct name { \
+ struct type *slh_first; /* first element */ \
+}
+
+#define SLIST_HEAD_INITIALIZER(head) \
+ { NULL }
+
+#define SLIST_ENTRY(type) \
+struct { \
+ struct type *sle_next; /* next element */ \
+}
+
+/*
+ * Singly-linked List functions.
+ */
+#define SLIST_INIT(head) do { \
+ (head)->slh_first = NULL; \
+} while (/*CONSTCOND*/0)
+
+#define SLIST_INSERT_AFTER(slistelm, elm, field) do { \
+ (elm)->field.sle_next = (slistelm)->field.sle_next; \
+ (slistelm)->field.sle_next = (elm); \
+} while (/*CONSTCOND*/0)
+
+#define SLIST_INSERT_HEAD(head, elm, field) do { \
+ (elm)->field.sle_next = (head)->slh_first; \
+ (head)->slh_first = (elm); \
+} while (/*CONSTCOND*/0)
+
+#define SLIST_REMOVE_HEAD(head, field) do { \
+ (head)->slh_first = (head)->slh_first->field.sle_next; \
+} while (/*CONSTCOND*/0)
+
+#define SLIST_REMOVE(head, elm, type, field) do { \
+ if ((head)->slh_first == (elm)) { \
+ SLIST_REMOVE_HEAD((head), field); \
+ } \
+ else { \
+ struct type *curelm = (head)->slh_first; \
+ while(curelm->field.sle_next != (elm)) \
+ curelm = curelm->field.sle_next; \
+ curelm->field.sle_next = \
+ curelm->field.sle_next->field.sle_next; \
+ } \
+} while (/*CONSTCOND*/0)
+
+#define SLIST_FOREACH(var, head, field) \
+ for((var) = (head)->slh_first; (var); (var) = (var)->field.sle_next)
+
+/*
+ * Singly-linked List access methods.
+ */
+#define SLIST_EMPTY(head) ((head)->slh_first == NULL)
+#define SLIST_FIRST(head) ((head)->slh_first)
+#define SLIST_NEXT(elm, field) ((elm)->field.sle_next)
+
+
+/*
+ * Singly-linked Tail queue declarations.
+ */
+#define STAILQ_HEAD(name, type) \
+struct name { \
+ struct type *stqh_first; /* first element */ \
+ struct type **stqh_last; /* addr of last next element */ \
+}
+
+#define STAILQ_HEAD_INITIALIZER(head) \
+ { NULL, &(head).stqh_first }
+
+#define STAILQ_ENTRY(type) \
+struct { \
+ struct type *stqe_next; /* next element */ \
+}
+
+/*
+ * Singly-linked Tail queue functions.
+ */
+#define STAILQ_INIT(head) do { \
+ (head)->stqh_first = NULL; \
+ (head)->stqh_last = &(head)->stqh_first; \
+} while (/*CONSTCOND*/0)
+
+#define STAILQ_INSERT_HEAD(head, elm, field) do { \
+ if (((elm)->field.stqe_next = (head)->stqh_first) == NULL) \
+ (head)->stqh_last = &(elm)->field.stqe_next; \
+ (head)->stqh_first = (elm); \
+} while (/*CONSTCOND*/0)
+
+#define STAILQ_INSERT_TAIL(head, elm, field) do { \
+ (elm)->field.stqe_next = NULL; \
+ *(head)->stqh_last = (elm); \
+ (head)->stqh_last = &(elm)->field.stqe_next; \
+} while (/*CONSTCOND*/0)
+
+#define STAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
+ if (((elm)->field.stqe_next = (listelm)->field.stqe_next) == NULL)\
+ (head)->stqh_last = &(elm)->field.stqe_next; \
+ (listelm)->field.stqe_next = (elm); \
+} while (/*CONSTCOND*/0)
+
+#define STAILQ_REMOVE_HEAD(head, field) do { \
+ if (((head)->stqh_first = (head)->stqh_first->field.stqe_next) == NULL) \
+ (head)->stqh_last = &(head)->stqh_first; \
+} while (/*CONSTCOND*/0)
+
+#define STAILQ_REMOVE(head, elm, type, field) do { \
+ if ((head)->stqh_first == (elm)) { \
+ STAILQ_REMOVE_HEAD((head), field); \
+ } else { \
+ struct type *curelm = (head)->stqh_first; \
+ while (curelm->field.stqe_next != (elm)) \
+ curelm = curelm->field.stqe_next; \
+ if ((curelm->field.stqe_next = \
+ curelm->field.stqe_next->field.stqe_next) == NULL) \
+ (head)->stqh_last = &(curelm)->field.stqe_next; \
+ } \
+} while (/*CONSTCOND*/0)
+
+#define STAILQ_FOREACH(var, head, field) \
+ for ((var) = ((head)->stqh_first); \
+ (var); \
+ (var) = ((var)->field.stqe_next))
+
+#define STAILQ_CONCAT(head1, head2) do { \
+ if (!STAILQ_EMPTY((head2))) { \
+ *(head1)->stqh_last = (head2)->stqh_first; \
+ (head1)->stqh_last = (head2)->stqh_last; \
+ STAILQ_INIT((head2)); \
+ } \
+} while (/*CONSTCOND*/0)
+
+/*
+ * Singly-linked Tail queue access methods.
+ */
+#define STAILQ_EMPTY(head) ((head)->stqh_first == NULL)
+#define STAILQ_FIRST(head) ((head)->stqh_first)
+#define STAILQ_NEXT(elm, field) ((elm)->field.stqe_next)
+
+
+/*
+ * Simple queue definitions.
+ */
+#define SIMPLEQ_HEAD(name, type) \
+struct name { \
+ struct type *sqh_first; /* first element */ \
+ struct type **sqh_last; /* addr of last next element */ \
+}
+
+#define SIMPLEQ_HEAD_INITIALIZER(head) \
+ { NULL, &(head).sqh_first }
+
+#define SIMPLEQ_ENTRY(type) \
+struct { \
+ struct type *sqe_next; /* next element */ \
+}
+
+/*
+ * Simple queue functions.
+ */
+#define SIMPLEQ_INIT(head) do { \
+ (head)->sqh_first = NULL; \
+ (head)->sqh_last = &(head)->sqh_first; \
+} while (/*CONSTCOND*/0)
+
+#define SIMPLEQ_INSERT_HEAD(head, elm, field) do { \
+ if (((elm)->field.sqe_next = (head)->sqh_first) == NULL) \
+ (head)->sqh_last = &(elm)->field.sqe_next; \
+ (head)->sqh_first = (elm); \
+} while (/*CONSTCOND*/0)
+
+#define SIMPLEQ_INSERT_TAIL(head, elm, field) do { \
+ (elm)->field.sqe_next = NULL; \
+ *(head)->sqh_last = (elm); \
+ (head)->sqh_last = &(elm)->field.sqe_next; \
+} while (/*CONSTCOND*/0)
+
+#define SIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
+ if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL)\
+ (head)->sqh_last = &(elm)->field.sqe_next; \
+ (listelm)->field.sqe_next = (elm); \
+} while (/*CONSTCOND*/0)
+
+#define SIMPLEQ_REMOVE_HEAD(head, field) do { \
+ if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == NULL) \
+ (head)->sqh_last = &(head)->sqh_first; \
+} while (/*CONSTCOND*/0)
+
+#define SIMPLEQ_REMOVE(head, elm, type, field) do { \
+ if ((head)->sqh_first == (elm)) { \
+ SIMPLEQ_REMOVE_HEAD((head), field); \
+ } else { \
+ struct type *curelm = (head)->sqh_first; \
+ while (curelm->field.sqe_next != (elm)) \
+ curelm = curelm->field.sqe_next; \
+ if ((curelm->field.sqe_next = \
+ curelm->field.sqe_next->field.sqe_next) == NULL) \
+ (head)->sqh_last = &(curelm)->field.sqe_next; \
+ } \
+} while (/*CONSTCOND*/0)
+
+#define SIMPLEQ_FOREACH(var, head, field) \
+ for ((var) = ((head)->sqh_first); \
+ (var); \
+ (var) = ((var)->field.sqe_next))
+
+/*
+ * Simple queue access methods.
+ */
+#define SIMPLEQ_EMPTY(head) ((head)->sqh_first == NULL)
+#define SIMPLEQ_FIRST(head) ((head)->sqh_first)
+#define SIMPLEQ_NEXT(elm, field) ((elm)->field.sqe_next)
+
+
+/*
+ * Tail queue definitions.
+ */
+#define _TAILQ_HEAD(name, type, qual) \
+struct name { \
+ qual type *tqh_first; /* first element */ \
+ qual type *qual *tqh_last; /* addr of last next element */ \
+}
+#define TAILQ_HEAD(name, type) _TAILQ_HEAD(name, struct type,)
+
+#define TAILQ_HEAD_INITIALIZER(head) \
+ { NULL, &(head).tqh_first }
+
+#define _TAILQ_ENTRY(type, qual) \
+struct { \
+ qual type *tqe_next; /* next element */ \
+ qual type *qual *tqe_prev; /* address of previous next element */\
+}
+#define TAILQ_ENTRY(type) _TAILQ_ENTRY(struct type,)
+
+/*
+ * Tail queue functions.
+ */
+#define TAILQ_INIT(head) do { \
+ (head)->tqh_first = NULL; \
+ (head)->tqh_last = &(head)->tqh_first; \
+} while (/*CONSTCOND*/0)
+
+#define TAILQ_INSERT_HEAD(head, elm, field) do { \
+ if (((elm)->field.tqe_next = (head)->tqh_first) != NULL) \
+ (head)->tqh_first->field.tqe_prev = \
+ &(elm)->field.tqe_next; \
+ else \
+ (head)->tqh_last = &(elm)->field.tqe_next; \
+ (head)->tqh_first = (elm); \
+ (elm)->field.tqe_prev = &(head)->tqh_first; \
+} while (/*CONSTCOND*/0)
+
+#define TAILQ_INSERT_TAIL(head, elm, field) do { \
+ (elm)->field.tqe_next = NULL; \
+ (elm)->field.tqe_prev = (head)->tqh_last; \
+ *(head)->tqh_last = (elm); \
+ (head)->tqh_last = &(elm)->field.tqe_next; \
+} while (/*CONSTCOND*/0)
+
+#define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
+ if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != NULL)\
+ (elm)->field.tqe_next->field.tqe_prev = \
+ &(elm)->field.tqe_next; \
+ else \
+ (head)->tqh_last = &(elm)->field.tqe_next; \
+ (listelm)->field.tqe_next = (elm); \
+ (elm)->field.tqe_prev = &(listelm)->field.tqe_next; \
+} while (/*CONSTCOND*/0)
+
+#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \
+ (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
+ (elm)->field.tqe_next = (listelm); \
+ *(listelm)->field.tqe_prev = (elm); \
+ (listelm)->field.tqe_prev = &(elm)->field.tqe_next; \
+} while (/*CONSTCOND*/0)
+
+#define TAILQ_REMOVE(head, elm, field) do { \
+ if (((elm)->field.tqe_next) != NULL) \
+ (elm)->field.tqe_next->field.tqe_prev = \
+ (elm)->field.tqe_prev; \
+ else \
+ (head)->tqh_last = (elm)->field.tqe_prev; \
+ *(elm)->field.tqe_prev = (elm)->field.tqe_next; \
+} while (/*CONSTCOND*/0)
+
+#define TAILQ_FOREACH(var, head, field) \
+ for ((var) = ((head)->tqh_first); \
+ (var); \
+ (var) = ((var)->field.tqe_next))
+
+#define TAILQ_FOREACH_REVERSE(var, head, headname, field) \
+ for ((var) = (*(((struct headname *)((head)->tqh_last))->tqh_last)); \
+ (var); \
+ (var) = (*(((struct headname *)((var)->field.tqe_prev))->tqh_last)))
+
+#define TAILQ_CONCAT(head1, head2, field) do { \
+ if (!TAILQ_EMPTY(head2)) { \
+ *(head1)->tqh_last = (head2)->tqh_first; \
+ (head2)->tqh_first->field.tqe_prev = (head1)->tqh_last; \
+ (head1)->tqh_last = (head2)->tqh_last; \
+ TAILQ_INIT((head2)); \
+ } \
+} while (/*CONSTCOND*/0)
+
+/*
+ * Tail queue access methods.
+ */
+#define TAILQ_EMPTY(head) ((head)->tqh_first == NULL)
+#define TAILQ_FIRST(head) ((head)->tqh_first)
+#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
+
+#define TAILQ_LAST(head, headname) \
+ (*(((struct headname *)((head)->tqh_last))->tqh_last))
+#define TAILQ_PREV(elm, headname, field) \
+ (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
+
+
+/*
+ * Circular queue definitions.
+ */
+#define CIRCLEQ_HEAD(name, type) \
+struct name { \
+ struct type *cqh_first; /* first element */ \
+ struct type *cqh_last; /* last element */ \
+}
+
+#define CIRCLEQ_HEAD_INITIALIZER(head) \
+ { (void *)&head, (void *)&head }
+
+#define CIRCLEQ_ENTRY(type) \
+struct { \
+ struct type *cqe_next; /* next element */ \
+ struct type *cqe_prev; /* previous element */ \
+}
+
+/*
+ * Circular queue functions.
+ */
+#define CIRCLEQ_INIT(head) do { \
+ (head)->cqh_first = (void *)(head); \
+ (head)->cqh_last = (void *)(head); \
+} while (/*CONSTCOND*/0)
+
+#define CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
+ (elm)->field.cqe_next = (listelm)->field.cqe_next; \
+ (elm)->field.cqe_prev = (listelm); \
+ if ((listelm)->field.cqe_next == (void *)(head)) \
+ (head)->cqh_last = (elm); \
+ else \
+ (listelm)->field.cqe_next->field.cqe_prev = (elm); \
+ (listelm)->field.cqe_next = (elm); \
+} while (/*CONSTCOND*/0)
+
+#define CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do { \
+ (elm)->field.cqe_next = (listelm); \
+ (elm)->field.cqe_prev = (listelm)->field.cqe_prev; \
+ if ((listelm)->field.cqe_prev == (void *)(head)) \
+ (head)->cqh_first = (elm); \
+ else \
+ (listelm)->field.cqe_prev->field.cqe_next = (elm); \
+ (listelm)->field.cqe_prev = (elm); \
+} while (/*CONSTCOND*/0)
+
+#define CIRCLEQ_INSERT_HEAD(head, elm, field) do { \
+ (elm)->field.cqe_next = (head)->cqh_first; \
+ (elm)->field.cqe_prev = (void *)(head); \
+ if ((head)->cqh_last == (void *)(head)) \
+ (head)->cqh_last = (elm); \
+ else \
+ (head)->cqh_first->field.cqe_prev = (elm); \
+ (head)->cqh_first = (elm); \
+} while (/*CONSTCOND*/0)
+
+#define CIRCLEQ_INSERT_TAIL(head, elm, field) do { \
+ (elm)->field.cqe_next = (void *)(head); \
+ (elm)->field.cqe_prev = (head)->cqh_last; \
+ if ((head)->cqh_first == (void *)(head)) \
+ (head)->cqh_first = (elm); \
+ else \
+ (head)->cqh_last->field.cqe_next = (elm); \
+ (head)->cqh_last = (elm); \
+} while (/*CONSTCOND*/0)
+
+#define CIRCLEQ_REMOVE(head, elm, field) do { \
+ if ((elm)->field.cqe_next == (void *)(head)) \
+ (head)->cqh_last = (elm)->field.cqe_prev; \
+ else \
+ (elm)->field.cqe_next->field.cqe_prev = \
+ (elm)->field.cqe_prev; \
+ if ((elm)->field.cqe_prev == (void *)(head)) \
+ (head)->cqh_first = (elm)->field.cqe_next; \
+ else \
+ (elm)->field.cqe_prev->field.cqe_next = \
+ (elm)->field.cqe_next; \
+} while (/*CONSTCOND*/0)
+
+#define CIRCLEQ_FOREACH(var, head, field) \
+ for ((var) = ((head)->cqh_first); \
+ (var) != (const void *)(head); \
+ (var) = ((var)->field.cqe_next))
+
+#define CIRCLEQ_FOREACH_REVERSE(var, head, field) \
+ for ((var) = ((head)->cqh_last); \
+ (var) != (const void *)(head); \
+ (var) = ((var)->field.cqe_prev))
+
+/*
+ * Circular queue access methods.
+ */
+#define CIRCLEQ_EMPTY(head) ((head)->cqh_first == (void *)(head))
+#define CIRCLEQ_FIRST(head) ((head)->cqh_first)
+#define CIRCLEQ_LAST(head) ((head)->cqh_last)
+#define CIRCLEQ_NEXT(elm, field) ((elm)->field.cqe_next)
+#define CIRCLEQ_PREV(elm, field) ((elm)->field.cqe_prev)
+
+#define CIRCLEQ_LOOP_NEXT(head, elm, field) \
+ (((elm)->field.cqe_next == (void *)(head)) \
+ ? ((head)->cqh_first) \
+ : (elm->field.cqe_next))
+#define CIRCLEQ_LOOP_PREV(head, elm, field) \
+ (((elm)->field.cqe_prev == (void *)(head)) \
+ ? ((head)->cqh_last) \
+ : (elm->field.cqe_prev))
+
+#endif /* sys/queue.h */
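The imported header is the classic NetBSD descendant of 4.4BSD <sys/queue.h>, added verbatim so the build does not depend on the host's system copy. Of the five families it defines, the hunks in this commit use STAILQ (module and spool lists) and TAILQ (connection queues, and presumably the NOTIFYQ wrappers). A compact, self-contained demonstration of those two families, with illustrative names:

/* Demo of the two macro families this commit actually uses; the types
   and values are illustrative, not taken from wydawca. */
#include <stdio.h>
#include <stdlib.h>
#include "queue.h"

struct job {
	int id;
	STAILQ_ENTRY(job) sq;    /* singly-linked tail queue linkage */
	TAILQ_ENTRY(job) tq;     /* doubly-linked tail queue linkage */
};

static STAILQ_HEAD(, job) pending = STAILQ_HEAD_INITIALIZER(pending);
static TAILQ_HEAD(jobtq, job) active = TAILQ_HEAD_INITIALIZER(active);

int
main(void)
{
	struct job *j;
	int i;

	for (i = 0; i < 3; i++) {
		j = malloc(sizeof(*j));
		j->id = i;
		STAILQ_INSERT_TAIL(&pending, j, sq);
	}

	/* Move everything from the STAILQ to the TAILQ. */
	while ((j = STAILQ_FIRST(&pending)) != NULL) {
		STAILQ_REMOVE_HEAD(&pending, sq);
		TAILQ_INSERT_TAIL(&active, j, tq);
	}

	/* TAILQ allows O(1) removal of an arbitrary element... */
	TAILQ_FOREACH(j, &active, tq)
		if (j->id == 1)
			break;
	if (j) {
		TAILQ_REMOVE(&active, j, tq);
		free(j);
	}

	/* ...and reverse traversal via the named head type. */
	TAILQ_FOREACH_REVERSE(j, &active, jobtq, tq)
		printf("job %d\n", j->id);

	/* Drain. */
	while ((j = TAILQ_FIRST(&active)) != NULL) {
		TAILQ_REMOVE(&active, j, tq);
		free(j);
	}
	return 0;
}

An element can sit on several queues at once as long as it carries one *_ENTRY member per queue, which is why struct job above has both sq and tq.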
diff --git a/src/process.c b/src/spool.c
index 323dbfe..39ddc7b 100644
--- a/src/process.c
+++ b/src/spool.c
@@ -18,8 +18,3 @@
-struct spool_list {
- struct spool_list *next;
- struct spool spool;
-};
-
-static struct spool_list *spool_head, *spool_tail;
+static STAILQ_HEAD(,spool) spool_list = STAILQ_HEAD_INITIALIZER(spool_list);
size_t spool_count;
@@ -29,6 +24,6 @@ for_each_spool(int (*fun) (struct spool *, void *), void *data)
{
- struct spool_l