uloop: fix corner cases with recursive uloop_run calls
With multiple recursive calls to uloop_run, the callback for the same fd can
be run multiple times from different levels in the stack. Prevent this by
tracking the stack of uloop_fd callbacks and buffering new incoming events
for fds already on the stack.

Signed-off-by: Felix Fietkau <nbd@openwrt.org>
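
The technique is easiest to see in isolation. The following standalone sketch is
not libubox code; the names dispatch, dispatch_frame and buffer_if_active are
invented for illustration. It applies the same idea to a generic handler
dispatcher: each dispatch pushes a frame onto a stack, events for a handler that
is already on the stack are ORed into its frame instead of re-entering the
handler, and the outermost frame replays them once the handler returns.

/* Standalone illustration only -- not libubox code. */
#include <stdbool.h>
#include <stdio.h>

#define EV_READ  (1 << 0)
#define EV_WRITE (1 << 1)

struct handler;
typedef void (*handler_cb)(struct handler *h, unsigned int events);

struct handler {
	handler_cb cb;
	const char *name;
};

/* One frame per handler that is currently being dispatched. */
struct dispatch_frame {
	struct dispatch_frame *next;
	struct handler *h;
	unsigned int buffered;
};

static struct dispatch_frame *frames;

/* Buffer the event if its handler is already somewhere on the stack. */
static bool buffer_if_active(struct handler *h, unsigned int events)
{
	struct dispatch_frame *f;

	for (f = frames; f; f = f->next) {
		if (f->h != h)
			continue;
		f->buffered |= events;
		return true;
	}
	return false;
}

static void dispatch(struct handler *h, unsigned int events)
{
	struct dispatch_frame frame = { .next = frames, .h = h };

	if (buffer_if_active(h, events))
		return;	/* the frame already on the stack will replay it */

	frames = &frame;
	do {
		frame.buffered = 0;
		h->cb(h, events);
		events = frame.buffered; /* replay events that arrived meanwhile */
	} while (events);
	frames = frame.next;
}

static struct handler a, b;

static void a_cb(struct handler *h, unsigned int events)
{
	printf("%s: events=0x%x\n", h->name, events);
	if (events & EV_WRITE)
		return;

	/* Simulate a nested loop picking up new events for both handlers. */
	dispatch(&b, EV_READ);  /* not on the stack: runs immediately */
	dispatch(&a, EV_WRITE); /* already on the stack: buffered for later */
}

static void b_cb(struct handler *h, unsigned int events)
{
	printf("%s: events=0x%x\n", h->name, events);
}

int main(void)
{
	a = (struct handler){ .cb = a_cb, .name = "a" };
	b = (struct handler){ .cb = b_cb, .name = "b" };

	/* Prints a:0x1, b:0x1, a:0x2 -- a_cb is never re-entered. */
	dispatch(&a, EV_READ);
	return 0;
}

This mirrors what the do/while replay loop added to uloop_run_events() below
achieves for uloop_fd callbacks.
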
parent 35cee2c206
commit b9ebdbcc64
2 changed files with 58 additions and 2 deletions

uloop.c (53 changes)
--- a/uloop.c
+++ b/uloop.c
@@ -43,6 +43,14 @@ struct uloop_fd_event {
 	unsigned int events;
 };
 
+struct uloop_fd_stack {
+	struct uloop_fd_stack *next;
+	struct uloop_fd *fd;
+	unsigned int events;
+};
+
+static struct uloop_fd_stack *fd_stack = NULL;
+
 #define ULOOP_MAX_EVENTS 10
 
 static struct list_head timeouts = LIST_HEAD_INIT(timeouts);
@@ -285,6 +293,32 @@ static int uloop_fetch_events(int timeout)
 
 #endif
 
+static bool uloop_fd_stack_event(struct uloop_fd *fd, int events)
+{
+	struct uloop_fd_stack *cur;
+
+	/*
+	 * Do not buffer events for level-triggered fds, they will keep firing.
+	 * Caller needs to take care of recursion issues.
+	 */
+	if (!(fd->flags & ULOOP_EDGE_TRIGGER))
+		return false;
+
+	for (cur = fd_stack; cur; cur = cur->next) {
+		if (cur->fd != fd)
+			continue;
+
+		if (events < 0)
+			cur->fd = NULL;
+		else
+			cur->events |= events | ULOOP_EVENT_BUFFERED;
+
+		return true;
+	}
+
+	return false;
+}
+
 static void uloop_run_events(int timeout)
 {
 	struct uloop_fd_event *cur;
@@ -298,17 +332,33 @@ static void uloop_run_events(int timeout)
 	}
 
 	while (cur_nfds > 0) {
+		struct uloop_fd_stack stack_cur;
+		unsigned int events;
+
 		cur = &cur_fds[cur_fd++];
 		cur_nfds--;
 
 		fd = cur->fd;
+		events = cur->events;
 		if (!fd)
 			continue;
 
 		if (!fd->cb)
 			continue;
 
-		fd->cb(fd, cur->events);
+		if (uloop_fd_stack_event(fd, cur->events))
+			continue;
+
+		stack_cur.next = fd_stack;
+		stack_cur.fd = fd;
+		fd_stack = &stack_cur;
+		do {
+			stack_cur.events = 0;
+			fd->cb(fd, events);
+			events = stack_cur.events & ULOOP_EVENT_MASK;
+		} while (stack_cur.fd && events);
+		fd_stack = stack_cur.next;
+
 		return;
 	}
 }
@@ -352,6 +402,7 @@ int uloop_fd_delete(struct uloop_fd *fd)
 		cur_fds[cur_fd + i].fd = NULL;
 	}
 	fd->registered = false;
+	uloop_fd_stack_event(fd, -1);
 	return __uloop_fd_delete(fd);
 }
 

uloop.h (7 changes)
--- a/uloop.h
+++ b/uloop.h
@@ -44,8 +44,13 @@ typedef void (*uloop_process_handler)(struct uloop_process *c, int ret);
 #define ULOOP_WRITE		(1 << 1)
 #define ULOOP_EDGE_TRIGGER	(1 << 2)
 #define ULOOP_BLOCKING		(1 << 3)
+
+#define ULOOP_EVENT_MASK	(ULOOP_READ | ULOOP_WRITE)
+
+/* internal flags */
+#define ULOOP_EVENT_BUFFERED	(1 << 4)
 #ifdef USE_KQUEUE
-#define ULOOP_EDGE_DEFER	(1 << 4)
+#define ULOOP_EDGE_DEFER	(1 << 5)
 #endif
 
 struct uloop_fd
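
For completeness, a small self-contained check of the flag arithmetic used by the
patch: the internal ULOOP_EVENT_BUFFERED bit is stripped by ULOOP_EVENT_MASK
before a buffered event is replayed to the callback, and it occupies bit 4, which
is why the kqueue-only ULOOP_EDGE_DEFER flag moves to bit 5. The flag values below
are copied from the patch; the check program itself is illustrative and not part
of the commit.

/* Illustrative check only -- flag values copied from uloop.h above. */
#include <assert.h>
#include <stdio.h>

#define ULOOP_READ		(1 << 0)
#define ULOOP_WRITE		(1 << 1)
#define ULOOP_EDGE_TRIGGER	(1 << 2)
#define ULOOP_BLOCKING		(1 << 3)
#define ULOOP_EVENT_MASK	(ULOOP_READ | ULOOP_WRITE)
#define ULOOP_EVENT_BUFFERED	(1 << 4)
#define ULOOP_EDGE_DEFER	(1 << 5)	/* must not collide with BUFFERED */

int main(void)
{
	/* What uloop_fd_stack_event() stores for a buffered ULOOP_READ... */
	unsigned int buffered = ULOOP_READ | ULOOP_EVENT_BUFFERED;

	/* ...and what the replay loop passes back to the callback. */
	unsigned int replayed = buffered & ULOOP_EVENT_MASK;

	assert(replayed == ULOOP_READ);
	assert((ULOOP_EVENT_BUFFERED & ULOOP_EDGE_DEFER) == 0);
	printf("buffered=0x%x replayed=0x%x\n", buffered, replayed);
	return 0;
}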