2019-12-12 10:05:48 +01:00
|
|
|
/*
|
|
|
|
* Copyright (C) 2011-2014 Felix Fietkau <nbd@openwrt.org>
|
|
|
|
*
|
|
|
|
* SPDX-License-Identifier: LGPL-2.1-only
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <sys/socket.h>
|
|
|
|
#include <sys/stat.h>
|
2020-12-04 10:53:11 +01:00
|
|
|
#include <sys/types.h>
|
2019-12-12 10:05:48 +01:00
|
|
|
#ifdef FreeBSD
|
|
|
|
#include <sys/param.h>
|
|
|
|
#endif
|
2020-12-04 10:53:11 +01:00
|
|
|
#include <string.h>
|
2019-12-12 10:05:48 +01:00
|
|
|
#include <syslog.h>
|
|
|
|
|
|
|
|
#include <libubox/usock.h>
|
|
|
|
|
|
|
|
#include "ubusd.h"
|
|
|
|
|
|
|
|
static void handle_client_disconnect(struct ubus_client *cl)
|
|
|
|
{
|
ubusd: convert tx_queue to linked list
ubusd maintains a per-client tx_queue containing references to message
buffers that have not been sent yet (due to the socket blocking). This
is a fixed-size, 64-element queue.
When more than 64 elements are queued, subsequent elements are simply
dropped. Thus, a client that is waiting for those messages will block
indefinitely. In particular, this happens when more than +- 250 objects
are registered on the bus and either "ubus list" or "ubus wait_for" is
called. The responses to these requests consist of a message buffer per
object. Since in practice, ubusd will not yield between the sends of
these message buffers, the client has no time to process them and
eventually the output socket blocks. After 64 more objects, the rest is
dropped, including the final message that indicates termination. Thus,
the client waits indefinitely for the termination message.
To solve this, turn the tx_queue into a variable-sized linked list
instead of a fixed-size queue.
To maintain the linked list, an additional structure ubus_msg_buf_list
is created. It is not possible to add the linked list to ubus_msg_buf,
because that is shared between clients.
Note that this infinite tx_queue opens the door to a DoS attack. You can
open a client and a server connection, then send messages from the
client to the server without ever reading anything on the server side.
This will eventually lead to an out-of-memory. However, such a DoS
already existed anyway, it just requires opening multiple server
connections and filling up the fixed-size queue on each one. To protect
against such DoS attacks, we'd need to:
- keep a global maximum queue size that applies to all rx and tx queues
together;
- stop reading from any connection when the maximum is reached;
- close any connection when it hasn't become writeable after some
timeout.
Fixes: https://bugs.openwrt.org/index.php?do=details&task_id=1525
Signed-off-by: Arnout Vandecappelle (Essensium/Mind) <arnout@mind.be>
2021-03-25 22:45:01 +01:00
|
|
|
struct ubus_msg_buf_list *ubl, *ubl2;
|
|
|
|
list_for_each_entry_safe(ubl, ubl2, &cl->tx_queue, list)
|
|
|
|
ubus_msg_list_free(ubl);
|
2019-12-12 10:05:48 +01:00
|
|
|
|
|
|
|
ubusd_monitor_disconnect(cl);
|
|
|
|
ubusd_proto_free_client(cl);
|
|
|
|
if (cl->pending_msg_fd >= 0)
|
|
|
|
close(cl->pending_msg_fd);
|
|
|
|
uloop_fd_delete(&cl->sock);
|
|
|
|
close(cl->sock.fd);
|
|
|
|
free(cl);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void client_cb(struct uloop_fd *sock, unsigned int events)
|
|
|
|
{
|
|
|
|
struct ubus_client *cl = container_of(sock, struct ubus_client, sock);
|
2019-12-27 14:48:32 +01:00
|
|
|
uint8_t fd_buf[CMSG_SPACE(sizeof(int))] = { 0 };
|
|
|
|
struct msghdr msghdr = { 0 };
|
2019-12-12 10:05:48 +01:00
|
|
|
struct ubus_msg_buf *ub;
|
ubusd: convert tx_queue to linked list
ubusd maintains a per-client tx_queue containing references to message
buffers that have not been sent yet (due to the socket blocking). This
is a fixed-size, 64-element queue.
When more than 64 elements are queued, subsequent elements are simply
dropped. Thus, a client that is waiting for those messages will block
indefinitely. In particular, this happens when more than +- 250 objects
are registered on the bus and either "ubus list" or "ubus wait_for" is
called. The responses to these requests consist of a message buffer per
object. Since in practice, ubusd will not yield between the sends of
these message buffers, the client has no time to process them and
eventually the output socket blocks. After 64 more objects, the rest is
dropped, including the final message that indicates termination. Thus,
the client waits indefinitely for the termination message.
To solve this, turn the tx_queue into a variable-sized linked list
instead of a fixed-size queue.
To maintain the linked list, an additional structure ubus_msg_buf_list
is created. It is not possible to add the linked list to ubus_msg_buf,
because that is shared between clients.
Note that this infinite tx_queue opens the door to a DoS attack. You can
open a client and a server connection, then send messages from the
client to the server without ever reading anything on the server side.
This will eventually lead to an out-of-memory. However, such a DoS
already existed anyway, it just requires opening multiple server
connections and filling up the fixed-size queue on each one. To protect
against such DoS attacks, we'd need to:
- keep a global maximum queue size that applies to all rx and tx queues
together;
- stop reading from any connection when the maximum is reached;
- close any connection when it hasn't become writeable after some
timeout.
Fixes: https://bugs.openwrt.org/index.php?do=details&task_id=1525
Signed-off-by: Arnout Vandecappelle (Essensium/Mind) <arnout@mind.be>
2021-03-25 22:45:01 +01:00
|
|
|
struct ubus_msg_buf_list *ubl, *ubl2;
|
2019-12-12 10:05:48 +01:00
|
|
|
static struct iovec iov;
|
2019-12-27 14:48:32 +01:00
|
|
|
struct cmsghdr *cmsg;
|
|
|
|
int *pfd;
|
|
|
|
|
|
|
|
msghdr.msg_iov = &iov,
|
|
|
|
msghdr.msg_iovlen = 1,
|
|
|
|
msghdr.msg_control = fd_buf;
|
|
|
|
msghdr.msg_controllen = sizeof(fd_buf);
|
|
|
|
|
|
|
|
cmsg = CMSG_FIRSTHDR(&msghdr);
|
|
|
|
cmsg->cmsg_type = SCM_RIGHTS;
|
|
|
|
cmsg->cmsg_level = SOL_SOCKET;
|
|
|
|
cmsg->cmsg_len = CMSG_LEN(sizeof(int));
|
|
|
|
|
|
|
|
pfd = (int *) CMSG_DATA(cmsg);
|
|
|
|
msghdr.msg_controllen = cmsg->cmsg_len;
|
2019-12-12 10:05:48 +01:00
|
|
|
|
|
|
|
/* first try to tx more pending data */
|
ubusd: convert tx_queue to linked list
ubusd maintains a per-client tx_queue containing references to message
buffers that have not been sent yet (due to the socket blocking). This
is a fixed-size, 64-element queue.
When more than 64 elements are queued, subsequent elements are simply
dropped. Thus, a client that is waiting for those messages will block
indefinitely. In particular, this happens when more than +- 250 objects
are registered on the bus and either "ubus list" or "ubus wait_for" is
called. The responses to these requests consist of a message buffer per
object. Since in practice, ubusd will not yield between the sends of
these message buffers, the client has no time to process them and
eventually the output socket blocks. After 64 more objects, the rest is
dropped, including the final message that indicates termination. Thus,
the client waits indefinitely for the termination message.
To solve this, turn the tx_queue into a variable-sized linked list
instead of a fixed-size queue.
To maintain the linked list, an additional structure ubus_msg_buf_list
is created. It is not possible to add the linked list to ubus_msg_buf,
because that is shared between clients.
Note that this infinite tx_queue opens the door to a DoS attack. You can
open a client and a server connection, then send messages from the
client to the server without ever reading anything on the server side.
This will eventually lead to an out-of-memory. However, such a DoS
already existed anyway, it just requires opening multiple server
connections and filling up the fixed-size queue on each one. To protect
against such DoS attacks, we'd need to:
- keep a global maximum queue size that applies to all rx and tx queues
together;
- stop reading from any connection when the maximum is reached;
- close any connection when it hasn't become writeable after some
timeout.
Fixes: https://bugs.openwrt.org/index.php?do=details&task_id=1525
Signed-off-by: Arnout Vandecappelle (Essensium/Mind) <arnout@mind.be>
2021-03-25 22:45:01 +01:00
|
|
|
list_for_each_entry_safe(ubl, ubl2, &cl->tx_queue, list) {
|
2019-12-12 10:05:48 +01:00
|
|
|
ssize_t written;
|
|
|
|
|
ubusd: convert tx_queue to linked list
ubusd maintains a per-client tx_queue containing references to message
buffers that have not been sent yet (due to the socket blocking). This
is a fixed-size, 64-element queue.
When more than 64 elements are queued, subsequent elements are simply
dropped. Thus, a client that is waiting for those messages will block
indefinitely. In particular, this happens when more than +- 250 objects
are registered on the bus and either "ubus list" or "ubus wait_for" is
called. The responses to these requests consist of a message buffer per
object. Since in practice, ubusd will not yield between the sends of
these message buffers, the client has no time to process them and
eventually the output socket blocks. After 64 more objects, the rest is
dropped, including the final message that indicates termination. Thus,
the client waits indefinitely for the termination message.
To solve this, turn the tx_queue into a variable-sized linked list
instead of a fixed-size queue.
To maintain the linked list, an additional structure ubus_msg_buf_list
is created. It is not possible to add the linked list to ubus_msg_buf,
because that is shared between clients.
Note that this infinite tx_queue opens the door to a DoS attack. You can
open a client and a server connection, then send messages from the
client to the server without ever reading anything on the server side.
This will eventually lead to an out-of-memory. However, such a DoS
already existed anyway, it just requires opening multiple server
connections and filling up the fixed-size queue on each one. To protect
against such DoS attacks, we'd need to:
- keep a global maximum queue size that applies to all rx and tx queues
together;
- stop reading from any connection when the maximum is reached;
- close any connection when it hasn't become writeable after some
timeout.
Fixes: https://bugs.openwrt.org/index.php?do=details&task_id=1525
Signed-off-by: Arnout Vandecappelle (Essensium/Mind) <arnout@mind.be>
2021-03-25 22:45:01 +01:00
|
|
|
ub = ubl->msg;
|
2019-12-12 10:05:48 +01:00
|
|
|
written = ubus_msg_writev(sock->fd, ub, cl->txq_ofs);
|
|
|
|
if (written < 0) {
|
|
|
|
switch(errno) {
|
|
|
|
case EINTR:
|
|
|
|
case EAGAIN:
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
goto disconnect;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
cl->txq_ofs += written;
|
2021-03-25 22:45:02 +01:00
|
|
|
cl->txq_len -= written;
|
2019-12-12 10:05:48 +01:00
|
|
|
if (cl->txq_ofs < ub->len + sizeof(ub->hdr))
|
|
|
|
break;
|
|
|
|
|
ubusd: convert tx_queue to linked list
ubusd maintains a per-client tx_queue containing references to message
buffers that have not been sent yet (due to the socket blocking). This
is a fixed-size, 64-element queue.
When more than 64 elements are queued, subsequent elements are simply
dropped. Thus, a client that is waiting for those messages will block
indefinitely. In particular, this happens when more than +- 250 objects
are registered on the bus and either "ubus list" or "ubus wait_for" is
called. The responses to these requests consist of a message buffer per
object. Since in practice, ubusd will not yield between the sends of
these message buffers, the client has no time to process them and
eventually the output socket blocks. After 64 more objects, the rest is
dropped, including the final message that indicates termination. Thus,
the client waits indefinitely for the termination message.
To solve this, turn the tx_queue into a variable-sized linked list
instead of a fixed-size queue.
To maintain the linked list, an additional structure ubus_msg_buf_list
is created. It is not possible to add the linked list to ubus_msg_buf,
because that is shared between clients.
Note that this infinite tx_queue opens the door to a DoS attack. You can
open a client and a server connection, then send messages from the
client to the server without ever reading anything on the server side.
This will eventually lead to an out-of-memory. However, such a DoS
already existed anyway, it just requires opening multiple server
connections and filling up the fixed-size queue on each one. To protect
against such DoS attacks, we'd need to:
- keep a global maximum queue size that applies to all rx and tx queues
together;
- stop reading from any connection when the maximum is reached;
- close any connection when it hasn't become writeable after some
timeout.
Fixes: https://bugs.openwrt.org/index.php?do=details&task_id=1525
Signed-off-by: Arnout Vandecappelle (Essensium/Mind) <arnout@mind.be>
2021-03-25 22:45:01 +01:00
|
|
|
ubus_msg_list_free(ubl);
|
2019-12-12 10:05:48 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/* prevent further ULOOP_WRITE events if we don't have data
|
|
|
|
* to send anymore */
|
ubusd: convert tx_queue to linked list
ubusd maintains a per-client tx_queue containing references to message
buffers that have not been sent yet (due to the socket blocking). This
is a fixed-size, 64-element queue.
When more than 64 elements are queued, subsequent elements are simply
dropped. Thus, a client that is waiting for those messages will block
indefinitely. In particular, this happens when more than +- 250 objects
are registered on the bus and either "ubus list" or "ubus wait_for" is
called. The responses to these requests consist of a message buffer per
object. Since in practice, ubusd will not yield between the sends of
these message buffers, the client has no time to process them and
eventually the output socket blocks. After 64 more objects, the rest is
dropped, including the final message that indicates termination. Thus,
the client waits indefinitely for the termination message.
To solve this, turn the tx_queue into a variable-sized linked list
instead of a fixed-size queue.
To maintain the linked list, an additional structure ubus_msg_buf_list
is created. It is not possible to add the linked list to ubus_msg_buf,
because that is shared between clients.
Note that this infinite tx_queue opens the door to a DoS attack. You can
open a client and a server connection, then send messages from the
client to the server without ever reading anything on the server side.
This will eventually lead to an out-of-memory. However, such a DoS
already existed anyway, it just requires opening multiple server
connections and filling up the fixed-size queue on each one. To protect
against such DoS attacks, we'd need to:
- keep a global maximum queue size that applies to all rx and tx queues
together;
- stop reading from any connection when the maximum is reached;
- close any connection when it hasn't become writeable after some
timeout.
Fixes: https://bugs.openwrt.org/index.php?do=details&task_id=1525
Signed-off-by: Arnout Vandecappelle (Essensium/Mind) <arnout@mind.be>
2021-03-25 22:45:01 +01:00
|
|
|
if (list_empty(&cl->tx_queue) && (events & ULOOP_WRITE))
|
2019-12-12 10:05:48 +01:00
|
|
|
uloop_fd_add(sock, ULOOP_READ | ULOOP_EDGE_TRIGGER);
|
|
|
|
|
|
|
|
retry:
|
|
|
|
if (!sock->eof && cl->pending_msg_offset < (int) sizeof(cl->hdrbuf)) {
|
|
|
|
int offset = cl->pending_msg_offset;
|
|
|
|
int bytes;
|
|
|
|
|
2019-12-27 14:48:32 +01:00
|
|
|
*pfd = -1;
|
2019-12-12 10:05:48 +01:00
|
|
|
|
|
|
|
iov.iov_base = ((char *) &cl->hdrbuf) + offset;
|
|
|
|
iov.iov_len = sizeof(cl->hdrbuf) - offset;
|
|
|
|
|
|
|
|
if (cl->pending_msg_fd < 0) {
|
2019-12-27 14:48:32 +01:00
|
|
|
msghdr.msg_control = fd_buf;
|
|
|
|
msghdr.msg_controllen = cmsg->cmsg_len;
|
2019-12-12 10:05:48 +01:00
|
|
|
} else {
|
|
|
|
msghdr.msg_control = NULL;
|
|
|
|
msghdr.msg_controllen = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
bytes = recvmsg(sock->fd, &msghdr, 0);
|
|
|
|
if (bytes < 0)
|
|
|
|
goto out;
|
|
|
|
|
2019-12-27 14:48:32 +01:00
|
|
|
if (*pfd >= 0)
|
|
|
|
cl->pending_msg_fd = *pfd;
|
2019-12-12 10:05:48 +01:00
|
|
|
|
|
|
|
cl->pending_msg_offset += bytes;
|
|
|
|
if (cl->pending_msg_offset < (int) sizeof(cl->hdrbuf))
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
if (blob_pad_len(&cl->hdrbuf.data) > UBUS_MAX_MSGLEN)
|
|
|
|
goto disconnect;
|
|
|
|
|
|
|
|
cl->pending_msg = ubus_msg_new(NULL, blob_raw_len(&cl->hdrbuf.data), false);
|
|
|
|
if (!cl->pending_msg)
|
|
|
|
goto disconnect;
|
|
|
|
|
|
|
|
cl->hdrbuf.hdr.seq = be16_to_cpu(cl->hdrbuf.hdr.seq);
|
|
|
|
cl->hdrbuf.hdr.peer = be32_to_cpu(cl->hdrbuf.hdr.peer);
|
|
|
|
|
|
|
|
memcpy(&cl->pending_msg->hdr, &cl->hdrbuf.hdr, sizeof(cl->hdrbuf.hdr));
|
|
|
|
memcpy(cl->pending_msg->data, &cl->hdrbuf.data, sizeof(cl->hdrbuf.data));
|
|
|
|
}
|
|
|
|
|
|
|
|
ub = cl->pending_msg;
|
|
|
|
if (ub) {
|
|
|
|
int offset = cl->pending_msg_offset - sizeof(ub->hdr);
|
|
|
|
int len = blob_raw_len(ub->data) - offset;
|
|
|
|
int bytes = 0;
|
|
|
|
|
|
|
|
if (len > 0) {
|
|
|
|
bytes = read(sock->fd, (char *) ub->data + offset, len);
|
|
|
|
if (bytes <= 0)
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (bytes < len) {
|
|
|
|
cl->pending_msg_offset += bytes;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* accept message */
|
|
|
|
ub->fd = cl->pending_msg_fd;
|
|
|
|
cl->pending_msg_fd = -1;
|
|
|
|
cl->pending_msg_offset = 0;
|
|
|
|
cl->pending_msg = NULL;
|
|
|
|
ubusd_monitor_message(cl, ub, false);
|
|
|
|
ubusd_proto_receive_message(cl, ub);
|
|
|
|
goto retry;
|
|
|
|
}
|
|
|
|
|
|
|
|
out:
|
ubusd: convert tx_queue to linked list
ubusd maintains a per-client tx_queue containing references to message
buffers that have not been sent yet (due to the socket blocking). This
is a fixed-size, 64-element queue.
When more than 64 elements are queued, subsequent elements are simply
dropped. Thus, a client that is waiting for those messages will block
indefinitely. In particular, this happens when more than +- 250 objects
are registered on the bus and either "ubus list" or "ubus wait_for" is
called. The responses to these requests consist of a message buffer per
object. Since in practice, ubusd will not yield between the sends of
these message buffers, the client has no time to process them and
eventually the output socket blocks. After 64 more objects, the rest is
dropped, including the final message that indicates termination. Thus,
the client waits indefinitely for the termination message.
To solve this, turn the tx_queue into a variable-sized linked list
instead of a fixed-size queue.
To maintain the linked list, an additional structure ubus_msg_buf_list
is created. It is not possible to add the linked list to ubus_msg_buf,
because that is shared between clients.
Note that this infinite tx_queue opens the door to a DoS attack. You can
open a client and a server connection, then send messages from the
client to the server without ever reading anything on the server side.
This will eventually lead to an out-of-memory. However, such a DoS
already existed anyway, it just requires opening multiple server
connections and filling up the fixed-size queue on each one. To protect
against such DoS attacks, we'd need to:
- keep a global maximum queue size that applies to all rx and tx queues
together;
- stop reading from any connection when the maximum is reached;
- close any connection when it hasn't become writeable after some
timeout.
Fixes: https://bugs.openwrt.org/index.php?do=details&task_id=1525
Signed-off-by: Arnout Vandecappelle (Essensium/Mind) <arnout@mind.be>
2021-03-25 22:45:01 +01:00
|
|
|
if (!sock->eof || !list_empty(&cl->tx_queue))
|
2019-12-12 10:05:48 +01:00
|
|
|
return;
|
|
|
|
|
|
|
|
disconnect:
|
|
|
|
handle_client_disconnect(cl);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool get_next_connection(int fd)
|
|
|
|
{
|
|
|
|
struct ubus_client *cl;
|
|
|
|
int client_fd;
|
|
|
|
|
|
|
|
client_fd = accept(fd, NULL, 0);
|
|
|
|
if (client_fd < 0) {
|
|
|
|
switch (errno) {
|
|
|
|
case ECONNABORTED:
|
|
|
|
case EINTR:
|
|
|
|
return true;
|
|
|
|
default:
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
cl = ubusd_proto_new_client(client_fd, client_cb);
|
|
|
|
if (cl)
|
|
|
|
uloop_fd_add(&cl->sock, ULOOP_READ | ULOOP_EDGE_TRIGGER);
|
|
|
|
else
|
|
|
|
close(client_fd);
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void server_cb(struct uloop_fd *fd, unsigned int events)
|
|
|
|
{
|
|
|
|
bool next;
|
|
|
|
|
|
|
|
do {
|
|
|
|
next = get_next_connection(fd->fd);
|
|
|
|
} while (next);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* listening socket; fd is filled in by main() before registration */
static struct uloop_fd server_fd = {
	.cb = server_cb,
};
|
|
|
|
|
|
|
|
/*
 * Print command-line usage to stderr.
 * Always returns 1 so main() can `return usage(argv[0]);` on bad options.
 */
static int usage(const char *progname)
{
	fprintf(stderr,
		"Usage: %s [<options>]\n"
		"Options: \n"
		"  -A <path>:		Set the path to ACL files\n"
		"  -s <socket>:		Set the unix domain socket to listen on\n"
		"\n", progname);
	return 1;
}
|
|
|
|
|
|
|
|
/* SIGHUP handler: reload the ACL files at runtime.
 * NOTE(review): ubusd_acl_load() is presumably not async-signal-safe
 * (it likely allocates and does file I/O) — confirm whether reload
 * should instead be deferred to the uloop main loop. */
static void sighup_handler(int sig)
{
	ubusd_acl_load();
}
|
|
|
|
|
2020-12-04 10:53:11 +01:00
|
|
|
static void mkdir_sockdir()
|
|
|
|
{
|
|
|
|
char *ubus_sock_dir, *tmp;
|
|
|
|
|
|
|
|
ubus_sock_dir = strdup(UBUS_UNIX_SOCKET);
|
|
|
|
tmp = strrchr(ubus_sock_dir, '/');
|
|
|
|
if (tmp) {
|
|
|
|
*tmp = '\0';
|
|
|
|
mkdir(ubus_sock_dir, 0755);
|
|
|
|
}
|
|
|
|
free(ubus_sock_dir);
|
|
|
|
}
|
|
|
|
|
2019-12-12 10:05:48 +01:00
|
|
|
int main(int argc, char **argv)
|
|
|
|
{
|
|
|
|
const char *ubus_socket = UBUS_UNIX_SOCKET;
|
|
|
|
int ret = 0;
|
|
|
|
int ch;
|
|
|
|
|
|
|
|
signal(SIGPIPE, SIG_IGN);
|
|
|
|
signal(SIGHUP, sighup_handler);
|
|
|
|
|
|
|
|
openlog("ubusd", LOG_PID, LOG_DAEMON);
|
|
|
|
uloop_init();
|
|
|
|
|
|
|
|
while ((ch = getopt(argc, argv, "A:s:")) != -1) {
|
|
|
|
switch (ch) {
|
|
|
|
case 's':
|
|
|
|
ubus_socket = optarg;
|
|
|
|
break;
|
|
|
|
case 'A':
|
|
|
|
ubusd_acl_dir = optarg;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return usage(argv[0]);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-12-04 10:53:11 +01:00
|
|
|
mkdir_sockdir();
|
2019-12-12 10:05:48 +01:00
|
|
|
unlink(ubus_socket);
|
|
|
|
umask(0111);
|
|
|
|
server_fd.fd = usock(USOCK_UNIX | USOCK_SERVER | USOCK_NONBLOCK, ubus_socket, NULL);
|
|
|
|
if (server_fd.fd < 0) {
|
|
|
|
perror("usock");
|
|
|
|
ret = -1;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
uloop_fd_add(&server_fd, ULOOP_READ | ULOOP_EDGE_TRIGGER);
|
|
|
|
ubusd_acl_load();
|
|
|
|
|
|
|
|
uloop_run();
|
|
|
|
unlink(ubus_socket);
|
|
|
|
|
|
|
|
out:
|
|
|
|
uloop_done();
|
|
|
|
return ret;
|
|
|
|
}
|