Implement kqueue(2) support via CONFIG_ELOOP_KQUEUE
NOTE: The kqueue descriptor has to be closed and rebuilt after forking. epoll
*should* need the same treatment, but wpa_supplicant appears to get away
without it, at least for now. The epoll registration code has been reworked
slightly (moved into an eloop_sock_queue() function mirroring the kqueue one)
so that requeueing epoll would be trivial to add later if needed.

Signed-off-by: Roy Marples <roy@marples.name>
parent 2e69bdd16a
commit f9982b3212

3 changed files with 246 additions and 42 deletions
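Background on the fork note above: a kqueue descriptor is not inherited across
fork(2), so a forking caller is expected to invoke eloop_sock_requeue() in the
child to rebuild the queue and re-register every tracked socket. A minimal
sketch of that call pattern follows; the daemonize helper is illustrative only
and not part of this change.

/* Illustrative only: how a forking caller might use eloop_sock_requeue().
 * The helper name and surrounding logic are hypothetical. */
#include <sys/types.h>
#include <unistd.h>
#include "eloop.h"

static int daemonize_and_requeue(void)
{
        pid_t pid = fork();

        if (pid < 0)
                return -1;      /* fork failed */
        if (pid > 0)
                _exit(0);       /* parent exits; child keeps running */

        /*
         * In the child the pre-fork kqueue descriptor is unusable, so
         * rebuild it and re-add all registered read/write/exception sockets.
         */
        return eloop_sock_requeue();
}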
@@ -140,6 +140,10 @@ ifdef CONFIG_ELOOP_EPOLL
 CFLAGS += -DCONFIG_ELOOP_EPOLL
 endif
 
+ifdef CONFIG_ELOOP_KQUEUE
+CFLAGS += -DCONFIG_ELOOP_KQUEUE
+endif
+
 OBJS += ../src/utils/common.o
 OBJS_c += ../src/utils/common.o
 OBJS += ../src/utils/wpa_debug.o
@@ -18,7 +18,12 @@
 #error Do not define both of poll and epoll
 #endif
 
-#if !defined(CONFIG_ELOOP_POLL) && !defined(CONFIG_ELOOP_EPOLL)
+#if defined(CONFIG_ELOOP_POLL) && defined(CONFIG_ELOOP_KQUEUE)
+#error Do not define both of poll and kqueue
+#endif
+
+#if !defined(CONFIG_ELOOP_POLL) && !defined(CONFIG_ELOOP_EPOLL) && \
+    !defined(CONFIG_ELOOP_KQUEUE)
 #define CONFIG_ELOOP_SELECT
 #endif
 
@@ -30,6 +35,10 @@
 #include <sys/epoll.h>
 #endif /* CONFIG_ELOOP_EPOLL */
 
+#ifdef CONFIG_ELOOP_KQUEUE
+#include <sys/event.h>
+#endif /* CONFIG_ELOOP_KQUEUE */
+
 struct eloop_sock {
         int sock;
         void *eloop_data;
@@ -75,13 +84,20 @@ struct eloop_data {
         struct pollfd *pollfds;
         struct pollfd **pollfds_map;
 #endif /* CONFIG_ELOOP_POLL */
+#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
+        int max_fd;
+        struct eloop_sock *fd_table;
+#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
 #ifdef CONFIG_ELOOP_EPOLL
         int epollfd;
         int epoll_max_event_num;
-        int epoll_max_fd;
-        struct eloop_sock *epoll_table;
         struct epoll_event *epoll_events;
 #endif /* CONFIG_ELOOP_EPOLL */
+#ifdef CONFIG_ELOOP_KQUEUE
+        int kqueuefd;
+        int kqueue_nevents;
+        struct kevent *kqueue_events;
+#endif /* CONFIG_ELOOP_KQUEUE */
         struct eloop_sock_table readers;
         struct eloop_sock_table writers;
         struct eloop_sock_table exceptions;
@@ -153,10 +169,20 @@ int eloop_init(void)
                            __func__, strerror(errno));
                 return -1;
         }
+#endif /* CONFIG_ELOOP_EPOLL */
+#ifdef CONFIG_ELOOP_KQUEUE
+        eloop.kqueuefd = kqueue();
+        if (eloop.kqueuefd < 0) {
+                wpa_printf(MSG_ERROR, "%s: kqueue failed: %s",
+                           __func__, strerror(errno));
+                return -1;
+        }
+#endif /* CONFIG_ELOOP_KQUEUE */
+#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
         eloop.readers.type = EVENT_TYPE_READ;
         eloop.writers.type = EVENT_TYPE_WRITE;
         eloop.exceptions.type = EVENT_TYPE_EXCEPTION;
-#endif /* CONFIG_ELOOP_EPOLL */
+#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
 #ifdef WPA_TRACE
         signal(SIGSEGV, eloop_sigsegv_handler);
 #endif /* WPA_TRACE */
@@ -164,15 +190,80 @@ int eloop_init(void)
 }
 
 
+#ifdef CONFIG_ELOOP_EPOLL
+static int eloop_sock_queue(int sock, eloop_event_type type)
+{
+        struct epoll_event ev;
+
+        os_memset(&ev, 0, sizeof(ev));
+        switch (type) {
+        case EVENT_TYPE_READ:
+                ev.events = EPOLLIN;
+                break;
+        case EVENT_TYPE_WRITE:
+                ev.events = EPOLLOUT;
+                break;
+        /*
+         * Exceptions are always checked when using epoll, but I suppose it's
+         * possible that someone registered a socket *only* for exception
+         * handling.
+         */
+        case EVENT_TYPE_EXCEPTION:
+                ev.events = EPOLLERR | EPOLLHUP;
+                break;
+        }
+        ev.data.fd = sock;
+        if (epoll_ctl(eloop.epollfd, EPOLL_CTL_ADD, sock, &ev) < 0) {
+                wpa_printf(MSG_ERROR, "%s: epoll_ctl(ADD) for fd=%d failed: %s",
+                           __func__, sock, strerror(errno));
+                return -1;
+        }
+        return 0;
+}
+#endif /* CONFIG_ELOOP_EPOLL */
+
+
+#ifdef CONFIG_ELOOP_KQUEUE
+static int eloop_sock_queue(int sock, eloop_event_type type)
+{
+        int filter;
+        struct kevent ke;
+
+        switch (type) {
+        case EVENT_TYPE_READ:
+                filter = EVFILT_READ;
+                break;
+        case EVENT_TYPE_WRITE:
+                filter = EVFILT_WRITE;
+                break;
+        default:
+                filter = 0;
+        }
+        EV_SET(&ke, sock, filter, EV_ADD, 0, 0, NULL);
+        if (kevent(eloop.kqueuefd, &ke, 1, NULL, 0, NULL) == -1) {
+                wpa_printf(MSG_ERROR, "%s: kevent(ADD) for fd=%d failed: %s",
+                           __func__, sock, strerror(errno));
+                return -1;
+        }
+        return 0;
+}
+#endif /* CONFIG_ELOOP_KQUEUE */
+
+
 static int eloop_sock_table_add_sock(struct eloop_sock_table *table,
                                      int sock, eloop_sock_handler handler,
                                      void *eloop_data, void *user_data)
 {
 #ifdef CONFIG_ELOOP_EPOLL
-        struct eloop_sock *temp_table;
-        struct epoll_event ev, *temp_events;
-        int next;
+        struct epoll_event *temp_events;
 #endif /* CONFIG_ELOOP_EPOLL */
+#ifdef CONFIG_ELOOP_KQUEUE
+        struct kevent *temp_events;
+#endif /* CONFIG_ELOOP_KQUEUE */
+#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
+        struct eloop_sock *temp_table;
+        int next;
+#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
         struct eloop_sock *tmp;
         int new_max_sock;
 
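A difference from epoll that eloop_sock_queue() glosses over: epoll keeps one
entry per descriptor with an OR'd event mask, whereas kqueue treats each
(ident, filter) pair as its own event, so a socket watched for both read and
write ends up registered twice. A small standalone illustration, not part of
the patch (the descriptor and helper name are arbitrary):

/* Illustration: one fd, two kqueue registrations (EVFILT_READ and
 * EVFILT_WRITE); both changes are submitted in a single kevent() call. */
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>

static int watch_read_and_write(int kq, int fd)
{
        struct kevent changes[2];

        EV_SET(&changes[0], fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
        EV_SET(&changes[1], fd, EVFILT_WRITE, EV_ADD, 0, 0, NULL);
        return kevent(kq, changes, 2, NULL, 0, NULL);
}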
@@ -208,18 +299,20 @@ static int eloop_sock_table_add_sock(struct eloop_sock_table *table,
                 eloop.pollfds = n;
         }
 #endif /* CONFIG_ELOOP_POLL */
-#ifdef CONFIG_ELOOP_EPOLL
-        if (new_max_sock >= eloop.epoll_max_fd) {
-                next = eloop.epoll_max_fd == 0 ? 16 : eloop.epoll_max_fd * 2;
-                temp_table = os_realloc_array(eloop.epoll_table, next,
+#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
+        if (new_max_sock >= eloop.max_fd) {
+                next = eloop.max_fd == 0 ? 16 : eloop.max_fd * 2;
+                temp_table = os_realloc_array(eloop.fd_table, next,
                                               sizeof(struct eloop_sock));
                 if (temp_table == NULL)
                         return -1;
 
-                eloop.epoll_max_fd = next;
-                eloop.epoll_table = temp_table;
+                eloop.max_fd = next;
+                eloop.fd_table = temp_table;
         }
+#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
 
+#ifdef CONFIG_ELOOP_EPOLL
         if (eloop.count + 1 > eloop.epoll_max_event_num) {
                 next = eloop.epoll_max_event_num == 0 ? 8 :
                         eloop.epoll_max_event_num * 2;
@@ -235,6 +328,22 @@ static int eloop_sock_table_add_sock(struct eloop_sock_table *table,
                 eloop.epoll_events = temp_events;
         }
 #endif /* CONFIG_ELOOP_EPOLL */
+#ifdef CONFIG_ELOOP_KQUEUE
+        if (eloop.count + 1 > eloop.kqueue_nevents) {
+                next = eloop.kqueue_nevents == 0 ? 8 : eloop.kqueue_nevents * 2;
+                temp_events = os_malloc(next * sizeof(*temp_events));
+                if (!temp_events) {
+                        wpa_printf(MSG_ERROR,
+                                   "%s: malloc for kqueue failed: %s",
+                                   __func__, strerror(errno));
+                        return -1;
+                }
+
+                os_free(eloop.kqueue_events);
+                eloop.kqueue_events = temp_events;
+                eloop.kqueue_nevents = next;
+        }
+#endif /* CONFIG_ELOOP_KQUEUE */
 
         eloop_trace_sock_remove_ref(table);
         tmp = os_realloc_array(table->table, table->count + 1,
@@ -256,33 +365,12 @@ static int eloop_sock_table_add_sock(struct eloop_sock_table *table,
         table->changed = 1;
         eloop_trace_sock_add_ref(table);
 
-#ifdef CONFIG_ELOOP_EPOLL
-        os_memset(&ev, 0, sizeof(ev));
-        switch (table->type) {
-        case EVENT_TYPE_READ:
-                ev.events = EPOLLIN;
-                break;
-        case EVENT_TYPE_WRITE:
-                ev.events = EPOLLOUT;
-                break;
-        /*
-         * Exceptions are always checked when using epoll, but I suppose it's
-         * possible that someone registered a socket *only* for exception
-         * handling.
-         */
-        case EVENT_TYPE_EXCEPTION:
-                ev.events = EPOLLERR | EPOLLHUP;
-                break;
-        }
-        ev.data.fd = sock;
-        if (epoll_ctl(eloop.epollfd, EPOLL_CTL_ADD, sock, &ev) < 0) {
-                wpa_printf(MSG_ERROR, "%s: epoll_ctl(ADD) for fd=%d "
-                           "failed. %s\n", __func__, sock, strerror(errno));
+#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
+        if (eloop_sock_queue(sock, table->type) < 0)
                 return -1;
-        }
-        os_memcpy(&eloop.epoll_table[sock], &table->table[table->count - 1],
+        os_memcpy(&eloop.fd_table[sock], &table->table[table->count - 1],
                   sizeof(struct eloop_sock));
-#endif /* CONFIG_ELOOP_EPOLL */
+#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
         return 0;
 }
 
@@ -290,6 +378,9 @@ static int eloop_sock_table_add_sock(struct eloop_sock_table *table,
 static void eloop_sock_table_remove_sock(struct eloop_sock_table *table,
                                          int sock)
 {
+#ifdef CONFIG_ELOOP_KQUEUE
+        struct kevent ke;
+#endif /* CONFIG_ELOOP_KQUEUE */
         int i;
 
         if (table == NULL || table->table == NULL || table->count == 0)
@@ -317,8 +408,17 @@ static void eloop_sock_table_remove_sock(struct eloop_sock_table *table,
                            "failed. %s\n", __func__, sock, strerror(errno));
                 return;
         }
-        os_memset(&eloop.epoll_table[sock], 0, sizeof(struct eloop_sock));
+        os_memset(&eloop.fd_table[sock], 0, sizeof(struct eloop_sock));
 #endif /* CONFIG_ELOOP_EPOLL */
+#ifdef CONFIG_ELOOP_KQUEUE
+        EV_SET(&ke, sock, 0, EV_DELETE, 0, 0, NULL);
+        if (kevent(eloop.kqueuefd, &ke, 1, NULL, 0, NULL) < 0) {
+                wpa_printf(MSG_ERROR, "%s: kevent(DEL) for fd=%d failed: %s",
+                           __func__, sock, strerror(errno));
+                return;
+        }
+        os_memset(&eloop.fd_table[sock], 0, sizeof(struct eloop_sock));
+#endif /* CONFIG_ELOOP_KQUEUE */
 }
 
 
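Note that kqueue identifies a pending event by the (ident, filter) pair, so an
EV_DELETE with a zero filter, as in the removal above, may not match an event
that was added with EVFILT_READ or EVFILT_WRITE on every system. A
filter-aware removal could look like the sketch below; the helper name is
hypothetical and this is not part of the patch:

/* Hypothetical sketch: delete a kqueue event using the same filter that
 * eloop_sock_queue() used when registering it. */
static int eloop_sock_dequeue(int sock, eloop_event_type type)
{
        struct kevent ke;
        int filter;

        switch (type) {
        case EVENT_TYPE_READ:
                filter = EVFILT_READ;
                break;
        case EVENT_TYPE_WRITE:
                filter = EVFILT_WRITE;
                break;
        default:
                filter = 0;
        }
        EV_SET(&ke, sock, filter, EV_DELETE, 0, 0, NULL);
        return kevent(eloop.kqueuefd, &ke, 1, NULL, 0, NULL);
}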
@@ -511,7 +611,7 @@ static void eloop_sock_table_dispatch(struct epoll_event *events, int nfds)
         int i;
 
         for (i = 0; i < nfds; i++) {
-                table = &eloop.epoll_table[events[i].data.fd];
+                table = &eloop.fd_table[events[i].data.fd];
                 if (table->handler == NULL)
                         continue;
                 table->handler(table->sock, table->eloop_data,
@@ -525,9 +625,64 @@ static void eloop_sock_table_dispatch(struct epoll_event *events, int nfds)
 #endif /* CONFIG_ELOOP_EPOLL */
 
 
+#ifdef CONFIG_ELOOP_KQUEUE
+
+static void eloop_sock_table_dispatch(struct kevent *events, int nfds)
+{
+        struct eloop_sock *table;
+        int i;
+
+        for (i = 0; i < nfds; i++) {
+                table = &eloop.fd_table[events[i].ident];
+                if (table->handler == NULL)
+                        continue;
+                table->handler(table->sock, table->eloop_data,
+                               table->user_data);
+                if (eloop.readers.changed ||
+                    eloop.writers.changed ||
+                    eloop.exceptions.changed)
+                        break;
+        }
+}
+
+
+static int eloop_sock_table_requeue(struct eloop_sock_table *table)
+{
+        int i, r;
+
+        r = 0;
+        for (i = 0; i < table->count && table->table; i++) {
+                if (eloop_sock_queue(table->table[i].sock, table->type) == -1)
+                        r = -1;
+        }
+        return r;
+}
+
+#endif /* CONFIG_ELOOP_KQUEUE */
+
+
 int eloop_sock_requeue(void)
 {
-        return 0;
+        int r = 0;
+
+#ifdef CONFIG_ELOOP_KQUEUE
+        close(eloop.kqueuefd);
+        eloop.kqueuefd = kqueue();
+        if (eloop.kqueuefd < 0) {
+                wpa_printf(MSG_ERROR, "%s: kqueue failed: %s",
+                           __func__, strerror(errno));
+                return -1;
+        }
+
+        if (eloop_sock_table_requeue(&eloop.readers) < 0)
+                r = -1;
+        if (eloop_sock_table_requeue(&eloop.writers) < 0)
+                r = -1;
+        if (eloop_sock_table_requeue(&eloop.exceptions) < 0)
+                r = -1;
+#endif /* CONFIG_ELOOP_KQUEUE */
+
+        return r;
 }
 
 
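For readers new to kqueue, the cycle the new code implements around eloop_run()
(add with EV_ADD, block in kevent(), then map each returned event back to its
socket via the ident field, which is what the fd_table indexing above relies
on) is shown in the standalone sketch below. It is illustrative only and
independent of eloop:

/* Standalone illustration of register / wait / dispatch with kqueue. */
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        struct kevent ke, ev;
        int kq = kqueue();

        if (kq < 0)
                return 1;
        /* Register: wait for stdin to become readable. */
        EV_SET(&ke, STDIN_FILENO, EVFILT_READ, EV_ADD, 0, 0, NULL);
        if (kevent(kq, &ke, 1, NULL, 0, NULL) == -1)
                return 1;
        /* Wait: a NULL timeout blocks until an event fires. */
        if (kevent(kq, NULL, 0, &ev, 1, NULL) == 1) {
                /* Dispatch: ev.ident is the descriptor that triggered,
                 * just as events[i].ident indexes eloop.fd_table above. */
                printf("fd %d readable, %ld bytes pending\n",
                       (int) ev.ident, (long) ev.data);
        }
        close(kq);
        return 0;
}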
@@ -911,6 +1066,9 @@ void eloop_run(void)
 #ifdef CONFIG_ELOOP_EPOLL
         int timeout_ms = -1;
 #endif /* CONFIG_ELOOP_EPOLL */
+#ifdef CONFIG_ELOOP_KQUEUE
+        struct timespec ts;
+#endif /* CONFIG_ELOOP_KQUEUE */
         int res;
         struct os_reltime tv, now;
 
@@ -955,6 +1113,10 @@ void eloop_run(void)
                 _tv.tv_sec = tv.sec;
                 _tv.tv_usec = tv.usec;
 #endif /* CONFIG_ELOOP_SELECT */
+#ifdef CONFIG_ELOOP_KQUEUE
+                ts.tv_sec = tv.sec;
+                ts.tv_nsec = tv.usec * 1000L;
+#endif /* CONFIG_ELOOP_KQUEUE */
         }
 
 #ifdef CONFIG_ELOOP_POLL
@@ -980,6 +1142,15 @@ void eloop_run(void)
                                            eloop.count, timeout_ms);
                 }
 #endif /* CONFIG_ELOOP_EPOLL */
+#ifdef CONFIG_ELOOP_KQUEUE
+                if (eloop.count == 0) {
+                        res = 0;
+                } else {
+                        res = kevent(eloop.kqueuefd, NULL, 0,
+                                     eloop.kqueue_events, eloop.kqueue_nevents,
+                                     timeout ? &ts : NULL);
+                }
+#endif /* CONFIG_ELOOP_KQUEUE */
                 if (res < 0 && errno != EINTR && errno != 0) {
                         wpa_printf(MSG_ERROR, "eloop: %s: %s",
 #ifdef CONFIG_ELOOP_POLL
@@ -991,6 +1162,10 @@ void eloop_run(void)
 #ifdef CONFIG_ELOOP_EPOLL
                                    "epoll"
 #endif /* CONFIG_ELOOP_EPOLL */
+#ifdef CONFIG_ELOOP_KQUEUE
+                                   "kqueue"
+#endif /* CONFIG_ELOOP_KQUEUE */
+
                                    , strerror(errno));
                         goto out;
                 }
@@ -1001,6 +1176,7 @@ void eloop_run(void)
 
                 eloop_process_pending_signals();
 
+
                 /* check if some registered timeouts have occurred */
                 timeout = dl_list_first(&eloop.timeout, struct eloop_timeout,
                                         list);
@@ -1046,6 +1222,9 @@ void eloop_run(void)
 #ifdef CONFIG_ELOOP_EPOLL
                 eloop_sock_table_dispatch(eloop.epoll_events, res);
 #endif /* CONFIG_ELOOP_EPOLL */
+#ifdef CONFIG_ELOOP_KQUEUE
+                eloop_sock_table_dispatch(eloop.kqueue_events, res);
+#endif /* CONFIG_ELOOP_KQUEUE */
         }
 
         eloop.terminate = 0;
@@ -1098,11 +1277,17 @@ void eloop_destroy(void)
         os_free(eloop.pollfds);
         os_free(eloop.pollfds_map);
 #endif /* CONFIG_ELOOP_POLL */
+#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
+        os_free(eloop.fd_table);
+#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
 #ifdef CONFIG_ELOOP_EPOLL
-        os_free(eloop.epoll_table);
         os_free(eloop.epoll_events);
         close(eloop.epollfd);
 #endif /* CONFIG_ELOOP_EPOLL */
+#ifdef CONFIG_ELOOP_KQUEUE
+        os_free(eloop.kqueue_events);
+        close(eloop.kqueuefd);
+#endif /* CONFIG_ELOOP_KQUEUE */
 }
 
 
@@ -1141,6 +1326,17 @@ void eloop_wait_for_read_sock(int sock)
         FD_SET(sock, &rfds);
         select(sock + 1, &rfds, NULL, NULL, NULL);
 #endif /* defined(CONFIG_ELOOP_SELECT) || defined(CONFIG_ELOOP_EPOLL) */
+#ifdef CONFIG_ELOOP_KQUEUE
+        int kfd;
+        struct kevent ke1, ke2;
+
+        kfd = kqueue();
+        if (kfd == -1)
+                return;
+        EV_SET(&ke1, sock, EVFILT_READ, EV_ADD | EV_ONESHOT, 0, 0, NULL);
+        kevent(kfd, &ke1, 1, &ke2, 1, NULL);
+        close(kfd);
+#endif /* CONFIG_ELOOP_KQUEUE */
 }
 
 #ifdef CONFIG_ELOOP_SELECT
@@ -170,6 +170,10 @@ ifdef CONFIG_ELOOP_EPOLL
 CFLAGS += -DCONFIG_ELOOP_EPOLL
 endif
 
+ifdef CONFIG_ELOOP_KQUEUE
+CFLAGS += -DCONFIG_ELOOP_KQUEUE
+endif
+
 ifdef CONFIG_EAPOL_TEST
 CFLAGS += -Werror -DEAPOL_TEST
 endif