Unverified Commit 35a68d6d by Stéphane Graber Committed by GitHub

Merge pull request #3681 from brauner/2021-02-18/cgroups

cgroups: fixes & bpf rework
parents 599a0c6c ad755295
......@@ -385,10 +385,6 @@ AM_COND_IF([ENABLE_CAP],
AC_CHECK_LIB(cap,cap_get_file, AC_DEFINE(LIBCAP_SUPPORTS_FILE_CAPABILITIES,1,[Have cap_get_file]),[],[])
AC_SUBST([CAP_LIBS], [-lcap])])
AC_CHECK_HEADERS([linux/bpf.h], [
AC_CHECK_TYPES([struct bpf_cgroup_dev_ctx], [], [], [[#include <linux/bpf.h>]])
], [], [])
# Configuration examples
AC_ARG_ENABLE([examples],
[AS_HELP_STRING([--enable-examples], [install examples [default=yes]])],
......
This source diff could not be displayed because it is too large. You can view the blob instead.
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 * In-tree copy of the kernel UAPI header <linux/bpf_common.h> (presumably
 * vendored so the build does not depend on kernel headers — see the
 * ../include/bpf_common.h entries added to the Makefiles in this commit).
 * These are the classic BPF instruction-encoding constants shared by cBPF
 * and eBPF.  The values are kernel ABI and must never be changed.
 */
#ifndef _UAPI__LINUX_BPF_COMMON_H__
#define _UAPI__LINUX_BPF_COMMON_H__
/* Instruction classes: the low three bits of an opcode. */
#define BPF_CLASS(code) ((code) & 0x07)
#define BPF_LD 0x00
#define BPF_LDX 0x01
#define BPF_ST 0x02
#define BPF_STX 0x03
#define BPF_ALU 0x04
#define BPF_JMP 0x05
#define BPF_RET 0x06
#define BPF_MISC 0x07
/* ld/ldx fields: operand size, bits 3-4 of the opcode. */
#define BPF_SIZE(code) ((code) & 0x18)
#define BPF_W 0x00 /* 32-bit */
#define BPF_H 0x08 /* 16-bit */
#define BPF_B 0x10 /* 8-bit */
/* eBPF BPF_DW 0x18 64-bit */
/* Addressing mode for loads/stores, bits 5-7 of the opcode. */
#define BPF_MODE(code) ((code) & 0xe0)
#define BPF_IMM 0x00
#define BPF_ABS 0x20
#define BPF_IND 0x40
#define BPF_MEM 0x60
#define BPF_LEN 0x80
#define BPF_MSH 0xa0
/* alu/jmp fields: the operation, bits 4-7 of the opcode. */
#define BPF_OP(code) ((code) & 0xf0)
#define BPF_ADD 0x00
#define BPF_SUB 0x10
#define BPF_MUL 0x20
#define BPF_DIV 0x30
#define BPF_OR 0x40
#define BPF_AND 0x50
#define BPF_LSH 0x60
#define BPF_RSH 0x70
#define BPF_NEG 0x80
#define BPF_MOD 0x90
#define BPF_XOR 0xa0
/* Jump conditions (share the BPF_OP field with ALU operations). */
#define BPF_JA 0x00
#define BPF_JEQ 0x10
#define BPF_JGT 0x20
#define BPF_JGE 0x30
#define BPF_JSET 0x40
/* Source operand selector: immediate (K) or register (X). */
#define BPF_SRC(code) ((code) & 0x08)
#define BPF_K 0x00
#define BPF_X 0x08
#ifndef BPF_MAXINSNS
#define BPF_MAXINSNS 4096
#endif
#endif /* _UAPI__LINUX_BPF_COMMON_H__ */
......@@ -6,6 +6,8 @@ pkginclude_HEADERS = attach_options.h \
noinst_HEADERS = api_extensions.h \
attach.h \
../include/bpf.h \
../include/bpf_common.h \
caps.h \
cgroups/cgroup.h \
cgroups/cgroup_utils.h \
......@@ -99,6 +101,8 @@ lib_LTLIBRARIES = liblxc.la
liblxc_la_SOURCES = af_unix.c af_unix.h \
api_extensions.h \
attach.c attach.h \
../include/bpf.h \
../include/bpf_common.h \
caps.c caps.h \
cgroups/cgfsng.c \
cgroups/cgroup.c cgroups/cgroup.h \
......
......@@ -34,9 +34,7 @@ static char *api_extensions[] = {
"network_gateway_device_route",
"network_phys_macvlan_mtu",
"network_veth_router",
#ifdef HAVE_STRUCT_BPF_CGROUP_DEV_CTX
"cgroup2_devices",
#endif
"cgroup2",
"pidfd",
"cgroup_advanced_isolation",
......
......@@ -797,9 +797,7 @@ static int cgroup_tree_remove(struct hierarchy **hierarchies, const char *path_p
else
TRACE("Removed cgroup tree %d(%s)", h->dfd_base, path_prune);
if (h->container_limit_path != h->container_full_path)
free_disarm(h->container_limit_path);
free_disarm(h->container_full_path);
free_equal(h->container_limit_path, h->container_full_path);
}
return 0;
......@@ -864,11 +862,9 @@ __cgfsng_ops static void cgfsng_payload_destroy(struct cgroup_ops *ops,
return;
}
#ifdef HAVE_STRUCT_BPF_CGROUP_DEV_CTX
ret = bpf_program_cgroup_detach(handler->cgroup_ops->cgroup2_devices);
if (ret < 0)
WARN("Failed to detach bpf program from cgroup");
#endif
if (!lxc_list_empty(&handler->conf->id_map)) {
struct generic_userns_exec_data wrap = {
......@@ -1050,11 +1046,11 @@ static int __cgroup_tree_create(int dfd_base, const char *path, mode_t mode,
char buf[PATH_MAX];
if (is_empty_string(path))
return ret_errno(-EINVAL);
return ret_errno(EINVAL);
len = strlcpy(buf, path, sizeof(buf));
if (len >= sizeof(buf))
return -E2BIG;
return ret_errno(E2BIG);
lxc_iterate_parts(cur, buf, "/") {
/*
......@@ -1172,7 +1168,6 @@ static bool cgroup_tree_create(struct cgroup_ops *ops, struct lxc_conf *conf,
h->container_limit_path = h->container_full_path;
} else {
h->cgfd_mon = move_fd(fd_final);
h->monitor_full_path = move_ptr(path);
}
return true;
......@@ -1188,18 +1183,13 @@ static void cgroup_tree_prune_leaf(struct hierarchy *h, const char *path_prune,
if (h->cgfd_limit < 0)
prune = false;
if (h->container_full_path != h->container_limit_path)
free_disarm(h->container_limit_path);
free_disarm(h->container_full_path);
close_prot_errno_disarm(h->cgfd_con);
close_prot_errno_disarm(h->cgfd_limit);
free_equal(h->container_full_path, h->container_limit_path);
close_equal(h->cgfd_con, h->cgfd_limit);
} else {
/* Check whether we actually created the cgroup to prune. */
if (h->cgfd_mon < 0)
prune = false;
free_disarm(h->monitor_full_path);
close_prot_errno_disarm(h->cgfd_mon);
}
......@@ -1386,7 +1376,7 @@ __cgfsng_ops static bool cgfsng_monitor_create(struct cgroup_ops *ops, struct lx
monitor_cgroup, NULL, false))
continue;
DEBUG("Failed to create cgroup \"%s\"", maybe_empty(ops->hierarchies[i]->monitor_full_path));
DEBUG("Failed to create cgroup %s)", monitor_cgroup);
for (int j = 0; j <= i; j++)
cgroup_tree_prune_leaf(ops->hierarchies[j],
monitor_cgroup, false);
......@@ -1548,18 +1538,18 @@ __cgfsng_ops static bool cgfsng_monitor_enter(struct cgroup_ops *ops,
ret = lxc_writeat(h->cgfd_mon, "cgroup.procs", monitor, monitor_len);
if (ret)
return log_error_errno(false, errno, "Failed to enter cgroup \"%s\"", h->monitor_full_path);
return log_error_errno(false, errno, "Failed to enter cgroup %d", h->cgfd_mon);
TRACE("Moved monitor into %s cgroup via %d", h->monitor_full_path, h->cgfd_mon);
TRACE("Moved monitor into cgroup %d", h->cgfd_mon);
if (handler->transient_pid <= 0)
continue;
ret = lxc_writeat(h->cgfd_mon, "cgroup.procs", transient, transient_len);
if (ret)
return log_error_errno(false, errno, "Failed to enter cgroup \"%s\"", h->monitor_full_path);
return log_error_errno(false, errno, "Failed to enter cgroup %d", h->cgfd_mon);
TRACE("Moved transient process into %s cgroup via %d", h->monitor_full_path, h->cgfd_mon);
TRACE("Moved transient process into cgroup %d", h->cgfd_mon);
/*
* we don't keep the fds for non-unified hierarchies around
......@@ -1770,9 +1760,9 @@ __cgfsng_ops static void cgfsng_payload_finalize(struct cgroup_ops *ops)
}
/* cgroup-full:* is done, no need to create subdirs */
static inline bool cg_mount_needs_subdirs(int cg_flags)
static inline bool cg_mount_needs_subdirs(int cgroup_automount_type)
{
switch (cg_flags) {
switch (cgroup_automount_type) {
case LXC_AUTO_CGROUP_RO:
return true;
case LXC_AUTO_CGROUP_RW:
......@@ -1788,7 +1778,7 @@ static inline bool cg_mount_needs_subdirs(int cg_flags)
* remount controller ro if needed and bindmount the cgroupfs onto
* control/the/cg/path.
*/
static int cg_legacy_mount_controllers(int cg_flags, struct hierarchy *h,
static int cg_legacy_mount_controllers(int cgroup_automount_type, struct hierarchy *h,
char *controllerpath, char *cgpath,
const char *container_cgroup)
{
......@@ -1796,7 +1786,8 @@ static int cg_legacy_mount_controllers(int cg_flags, struct hierarchy *h,
int ret, remount_flags;
int flags = MS_BIND;
if ((cg_flags == LXC_AUTO_CGROUP_RO) || (cg_flags == LXC_AUTO_CGROUP_MIXED)) {
if ((cgroup_automount_type == LXC_AUTO_CGROUP_RO) ||
(cgroup_automount_type == LXC_AUTO_CGROUP_MIXED)) {
ret = mount(controllerpath, controllerpath, "cgroup", MS_BIND, NULL);
if (ret < 0)
return log_error_errno(-1, errno, "Failed to bind mount \"%s\" onto \"%s\"",
......@@ -1816,7 +1807,7 @@ static int cg_legacy_mount_controllers(int cg_flags, struct hierarchy *h,
sourcepath = must_make_path(h->mountpoint, h->container_base_path,
container_cgroup, NULL);
if (cg_flags == LXC_AUTO_CGROUP_RO)
if (cgroup_automount_type == LXC_AUTO_CGROUP_RO)
flags |= MS_RDONLY;
ret = mount(sourcepath, cgpath, "cgroup", flags, NULL);
......@@ -1844,7 +1835,7 @@ static int cg_legacy_mount_controllers(int cg_flags, struct hierarchy *h,
* uses-cases are mounting cgroup hierarchies in cgroup namespaces and mounting
* cgroups for the LXC_AUTO_CGROUP_FULL option.
*/
static int __cgroupfs_mount(int cg_flags, struct hierarchy *h,
static int __cgroupfs_mount(int cgroup_automount_type, struct hierarchy *h,
struct lxc_rootfs *rootfs, int dfd_mnt_cgroupfs,
const char *hierarchy_mnt)
{
......@@ -1861,15 +1852,14 @@ static int __cgroupfs_mount(int cg_flags, struct hierarchy *h,
flags |= MOUNT_ATTR_NODEV;
flags |= MOUNT_ATTR_RELATIME;
if ((cg_flags == LXC_AUTO_CGROUP_RO) ||
(cg_flags == LXC_AUTO_CGROUP_FULL_RO))
if ((cgroup_automount_type == LXC_AUTO_CGROUP_RO) ||
(cgroup_automount_type == LXC_AUTO_CGROUP_FULL_RO))
flags |= MOUNT_ATTR_RDONLY;
if (is_unified_hierarchy(h)) {
if (is_unified_hierarchy(h))
fstype = "cgroup2";
} else {
else
fstype = "cgroup";
}
if (can_use_mount_api()) {
fd_fs = fs_prepare(fstype, -EBADF, "", 0, 0);
......@@ -1918,19 +1908,20 @@ static int __cgroupfs_mount(int cg_flags, struct hierarchy *h,
return 0;
}
static inline int cgroupfs_mount(int cg_flags, struct hierarchy *h,
static inline int cgroupfs_mount(int cgroup_automount_type, struct hierarchy *h,
struct lxc_rootfs *rootfs,
int dfd_mnt_cgroupfs, const char *hierarchy_mnt)
{
return __cgroupfs_mount(cg_flags, h, rootfs, dfd_mnt_cgroupfs, hierarchy_mnt);
return __cgroupfs_mount(cgroup_automount_type, h, rootfs,
dfd_mnt_cgroupfs, hierarchy_mnt);
}
static inline int cgroupfs_bind_mount(int cg_flags, struct hierarchy *h,
static inline int cgroupfs_bind_mount(int cgroup_automount_type, struct hierarchy *h,
struct lxc_rootfs *rootfs,
int dfd_mnt_cgroupfs,
const char *hierarchy_mnt)
{
switch (cg_flags) {
switch (cgroup_automount_type) {
case LXC_AUTO_CGROUP_FULL_RO:
break;
case LXC_AUTO_CGROUP_FULL_RW:
......@@ -1941,7 +1932,8 @@ static inline int cgroupfs_bind_mount(int cg_flags, struct hierarchy *h,
return 0;
}
return __cgroupfs_mount(cg_flags, h, rootfs, dfd_mnt_cgroupfs, hierarchy_mnt);
return __cgroupfs_mount(cgroup_automount_type, h, rootfs,
dfd_mnt_cgroupfs, hierarchy_mnt);
}
__cgfsng_ops static bool cgfsng_mount(struct cgroup_ops *ops,
......@@ -1949,6 +1941,7 @@ __cgfsng_ops static bool cgfsng_mount(struct cgroup_ops *ops,
{
__do_close int dfd_mnt_tmpfs = -EBADF, fd_fs = -EBADF;
__do_free char *cgroup_root = NULL;
int cgroup_automount_type;
bool in_cgroup_ns = false, wants_force_mount = false;
struct lxc_conf *conf = handler->conf;
struct lxc_rootfs *rootfs = &conf->rootfs;
......@@ -1994,6 +1987,7 @@ __cgfsng_ops static bool cgfsng_mount(struct cgroup_ops *ops,
default:
return log_error_errno(false, EINVAL, "Invalid cgroup mount options specified");
}
cgroup_automount_type = cg_flags;
if (!wants_force_mount) {
wants_force_mount = !lxc_wants_cap(CAP_SYS_ADMIN, conf);
......@@ -2012,18 +2006,8 @@ __cgfsng_ops static bool cgfsng_mount(struct cgroup_ops *ops,
wants_force_mount = true;
}
if (cgns_supported() && container_uses_namespace(handler, CLONE_NEWCGROUP)) {
if (cgns_supported() && container_uses_namespace(handler, CLONE_NEWCGROUP))
in_cgroup_ns = true;
/*
* When cgroup namespaces are supported and used by the
* container the LXC_AUTO_CGROUP_MIXED and
* LXC_AUTO_CGROUP_FULL_MIXED auto mount options don't apply
* since the parent directory of the container's cgroup is not
* accessible to the container.
*/
cg_flags &= ~LXC_AUTO_CGROUP_MIXED;
cg_flags &= ~LXC_AUTO_CGROUP_FULL_MIXED;
}
if (in_cgroup_ns && !wants_force_mount)
return log_trace(true, "Mounting cgroups not requested or needed");
......@@ -2067,7 +2051,7 @@ __cgfsng_ops static bool cgfsng_mount(struct cgroup_ops *ops,
* 11. cgroup-full:ro:force -> Not supported.
* 12. cgroup-full:mixed:force -> Not supported.
*/
ret = cgroupfs_mount(cg_flags, ops->unified, rootfs, dfd_mnt_unified, "");
ret = cgroupfs_mount(cgroup_automount_type, ops->unified, rootfs, dfd_mnt_unified, "");
if (ret < 0)
return syserrno(false, "Failed to force mount cgroup filesystem in cgroup namespace");
......@@ -2160,7 +2144,7 @@ __cgfsng_ops static bool cgfsng_mount(struct cgroup_ops *ops,
* will not have CAP_SYS_ADMIN after it has started we
* need to mount the cgroups manually.
*/
ret = cgroupfs_mount(cg_flags, h, rootfs, dfd_mnt_tmpfs, controller);
ret = cgroupfs_mount(cgroup_automount_type, h, rootfs, dfd_mnt_tmpfs, controller);
if (ret < 0)
return false;
......@@ -2168,11 +2152,11 @@ __cgfsng_ops static bool cgfsng_mount(struct cgroup_ops *ops,
}
/* Here is where the ancient kernel section begins. */
ret = cgroupfs_bind_mount(cg_flags, h, rootfs, dfd_mnt_tmpfs, controller);
ret = cgroupfs_bind_mount(cgroup_automount_type, h, rootfs, dfd_mnt_tmpfs, controller);
if (ret < 0)
return false;
if (!cg_mount_needs_subdirs(cg_flags))
if (!cg_mount_needs_subdirs(cgroup_automount_type))
continue;
if (!cgroup_root)
......@@ -2184,7 +2168,7 @@ __cgfsng_ops static bool cgfsng_mount(struct cgroup_ops *ops,
if (ret < 0 && (errno != EEXIST))
return false;
ret = cg_legacy_mount_controllers(cg_flags, h, controllerpath, path2, ops->container_cgroup);
ret = cg_legacy_mount_controllers(cgroup_automount_type, h, controllerpath, path2, ops->container_cgroup);
if (ret < 0)
return false;
}
......@@ -2780,18 +2764,21 @@ static int device_cgroup_rule_parse(struct device_item *device, const char *key,
char temp[50];
if (strequal("devices.allow", key))
device->allow = 1;
device->allow = 1; /* allow the device */
else
device->allow = 0;
device->allow = 0; /* deny the device */
if (strequal(val, "a")) {
/* global rule */
device->type = 'a';
device->major = -1;
device->minor = -1;
device->global_rule = device->allow
? LXC_BPF_DEVICE_CGROUP_DENYLIST
: LXC_BPF_DEVICE_CGROUP_ALLOWLIST;
if (device->allow) /* allow all devices */
device->global_rule = LXC_BPF_DEVICE_CGROUP_DENYLIST;
else /* deny all devices */
device->global_rule = LXC_BPF_DEVICE_CGROUP_ALLOWLIST;
device->allow = -1;
return 0;
}
......@@ -3109,7 +3096,6 @@ static int bpf_device_cgroup_prepare(struct cgroup_ops *ops,
struct lxc_conf *conf, const char *key,
const char *val)
{
#ifdef HAVE_STRUCT_BPF_CGROUP_DEV_CTX
struct device_item device_item = {};
int ret;
......@@ -3120,10 +3106,9 @@ static int bpf_device_cgroup_prepare(struct cgroup_ops *ops,
if (ret < 0)
return log_error_errno(-1, EINVAL, "Failed to parse device string %s=%s", key, val);
ret = bpf_list_add_device(conf, &device_item);
ret = bpf_list_add_device(&conf->devices, &device_item);
if (ret < 0)
return -1;
#endif
return 0;
}
......@@ -3177,13 +3162,8 @@ __cgfsng_ops static bool cgfsng_setup_limits(struct cgroup_ops *ops,
__cgfsng_ops static bool cgfsng_devices_activate(struct cgroup_ops *ops, struct lxc_handler *handler)
{
#ifdef HAVE_STRUCT_BPF_CGROUP_DEV_CTX
__do_bpf_program_free struct bpf_program *prog = NULL;
int ret;
struct lxc_conf *conf;
struct hierarchy *unified;
struct lxc_list *it;
struct bpf_program *prog_old;
if (!ops)
return ret_set_errno(false, ENOENT);
......@@ -3203,51 +3183,7 @@ __cgfsng_ops static bool cgfsng_devices_activate(struct cgroup_ops *ops, struct
!unified->container_full_path || lxc_list_empty(&conf->devices))
return true;
prog = bpf_program_new(BPF_PROG_TYPE_CGROUP_DEVICE);
if (!prog)
return log_error_errno(false, ENOMEM, "Failed to create new bpf program");
ret = bpf_program_init(prog);
if (ret)
return log_error_errno(false, ENOMEM, "Failed to initialize bpf program");
lxc_list_for_each(it, &conf->devices) {
struct device_item *cur = it->elem;
ret = bpf_program_append_device(prog, cur);
if (ret)
return log_error_errno(false, ENOMEM, "Failed to add new rule to bpf device program: type %c, major %d, minor %d, access %s, allow %d, global_rule %d",
cur->type,
cur->major,
cur->minor,
cur->access,
cur->allow,
cur->global_rule);
TRACE("Added rule to bpf device program: type %c, major %d, minor %d, access %s, allow %d, global_rule %d",
cur->type,
cur->major,
cur->minor,
cur->access,
cur->allow,
cur->global_rule);
}
ret = bpf_program_finalize(prog);
if (ret)
return log_error_errno(false, ENOMEM, "Failed to finalize bpf program");
ret = bpf_program_cgroup_attach(prog, BPF_CGROUP_DEVICE,
unified->container_limit_path,
BPF_F_ALLOW_MULTI);
if (ret)
return log_error_errno(false, ENOMEM, "Failed to attach bpf program");
/* Replace old bpf program. */
prog_old = move_ptr(ops->cgroup2_devices);
ops->cgroup2_devices = move_ptr(prog);
prog = move_ptr(prog_old);
#endif
return true;
return bpf_cgroup_devices_attach(ops, &conf->devices);
}
static bool __cgfsng_delegate_controllers(struct cgroup_ops *ops, const char *cgroup)
......
......@@ -14,6 +14,7 @@
#include "conf.h"
#include "config.h"
#include "initutils.h"
#include "memory_utils.h"
#include "log.h"
#include "start.h"
#include "string_utils.h"
......@@ -68,14 +69,9 @@ void cgroup_exit(struct cgroup_ops *ops)
free(ops->cgroup_pattern);
free(ops->monitor_cgroup);
{
if (ops->container_cgroup != ops->container_limit_cgroup)
free(ops->container_limit_cgroup);
free(ops->container_cgroup);
}
free_equal(ops->container_cgroup, ops->container_limit_cgroup);
if (ops->cgroup2_devices)
bpf_program_free(ops->cgroup2_devices);
bpf_device_program_free(ops);
if (ops->dfd_mnt_cgroupfs_host >= 0)
close(ops->dfd_mnt_cgroupfs_host);
......@@ -92,32 +88,15 @@ void cgroup_exit(struct cgroup_ops *ops)
free((*it)->mountpoint);
free((*it)->container_base_path);
{
free((*it)->container_full_path);
if ((*it)->container_full_path != (*it)->container_limit_path)
free((*it)->monitor_full_path);
}
{
if ((*it)->cgfd_limit >= 0 && (*it)->cgfd_con != (*it)->cgfd_limit)
close((*it)->cgfd_limit);
free_equal((*it)->container_full_path,
(*it)->container_limit_path);
if ((*it)->cgfd_con >= 0)
close((*it)->cgfd_con);
}
close_equal((*it)->cgfd_con, (*it)->cgfd_limit);
if ((*it)->cgfd_mon >= 0)
close((*it)->cgfd_mon);
{
if ((*it)->dfd_base >= 0 && (*it)->dfd_mnt != (*it)->dfd_base)
close((*it)->dfd_base);
if ((*it)->dfd_mnt >= 0)
close((*it)->dfd_mnt);
}
close_equal((*it)->dfd_base, (*it)->dfd_mnt);
free(*it);
}
......
......@@ -62,9 +62,6 @@ typedef enum {
* - The full path to the container's limiting cgroup. May simply point to
* container_full_path.
*
* @monitor_full_path
* - The full path to the monitor's cgroup.
*
* @version
* - legacy hierarchy
* If the hierarchy is a legacy hierarchy this will be set to
......@@ -84,7 +81,6 @@ struct hierarchy {
char *container_base_path;
char *container_full_path;
char *container_limit_path;
char *monitor_full_path;
int version;
/* cgroup2 only */
......@@ -101,7 +97,7 @@ struct hierarchy {
*/
int cgfd_limit;
/* File descriptor for the monitor's cgroup @monitor_full_path. */
/* File descriptor for the monitor's cgroup. */
int cgfd_mon;
/* File descriptor for the controller's mountpoint @mountpoint. */
......
......@@ -17,14 +17,11 @@
#include "cgroup2_devices.h"
#include "config.h"
#include "file_utils.h"
#include "log.h"
#include "macro.h"
#include "memory_utils.h"
#ifdef HAVE_STRUCT_BPF_CGROUP_DEV_CTX
#include <linux/bpf.h>
#include <linux/filter.h>
lxc_log_define(cgroup2_devices, cgroup);
#define BPF_LOG_BUF_SIZE (1 << 23) /* 8MB */
......@@ -63,20 +60,6 @@ static int bpf_program_add_instructions(struct bpf_program *prog,
return 0;
}
void bpf_program_free(struct bpf_program *prog)
{
if (!prog)
return;
(void)bpf_program_cgroup_detach(prog);
if (prog->kernel_fd >= 0)
close(prog->kernel_fd);
free(prog->instructions);
free(prog->attached_path);
free(prog);
}
/* Memory load, dst_reg = *(uint *) (src_reg + off16) */
#define BPF_LDX_MEM(SIZE, DST, SRC, OFF) \
((struct bpf_insn){.code = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM, \
......@@ -185,6 +168,7 @@ struct bpf_program *bpf_program_new(uint32_t prog_type)
prog->prog_type = prog_type;
prog->kernel_fd = -EBADF;
prog->fd_cgroup = -EBADF;
/*
* By default a allowlist is used unless the user tells us otherwise.
*/
......@@ -360,118 +344,108 @@ static int bpf_program_load_kernel(struct bpf_program *prog)
return 0;
}
int bpf_program_cgroup_attach(struct bpf_program *prog, int type,
const char *path, uint32_t flags)
static int bpf_program_cgroup_attach(struct bpf_program *prog, int type,
int fd_cgroup, __u32 flags)
{
__do_close int fd = -EBADF;
__do_free char *copy = NULL;
union bpf_attr *attr;
__do_close int fd_attach = -EBADF;
int ret;
union bpf_attr *attr;
if (!path || !prog)
return ret_set_errno(-1, EINVAL);
if (prog->fd_cgroup >= 0 || prog->kernel_fd >= 0)
return ret_errno(EBUSY);
if (flags & ~(BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI))
return log_error_errno(-1, EINVAL, "Invalid flags for bpf program");
if (fd_cgroup < 0)
return ret_errno(EBADF);
if (prog->attached_path) {
if (prog->attached_type != type)
return log_error_errno(-1, EBUSY, "Wrong type for bpf program");
if (flags & ~(BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI | BPF_F_REPLACE))
return syserrno_set(-EINVAL, "Invalid flags for bpf program");
if (prog->attached_flags != flags)
return log_error_errno(-1, EBUSY, "Wrong flags for bpf program");
/*
* Don't allow the bpf program to be overwritten for now. If we ever
* allow this we need to verify that the attach_flags of the current
* bpf program and the attach_flags of the new program match.
*/
if (flags & BPF_F_ALLOW_OVERRIDE)
INFO("Allowing to override bpf program");
if (flags != BPF_F_ALLOW_OVERRIDE)
return true;
}
/* Leave the caller's fd alone. */
fd_attach = dup_cloexec(fd_cgroup);
if (fd_attach < 0)
return -errno;
ret = bpf_program_load_kernel(prog);
if (ret < 0)
return log_error_errno(-1, ret, "Failed to load bpf program");
copy = strdup(path);
if (!copy)
return log_error_errno(-1, ENOMEM, "Failed to duplicate cgroup path %s", path);
fd = open(path, O_DIRECTORY | O_RDONLY | O_CLOEXEC);
if (fd < 0)
return log_error_errno(-1, errno, "Failed to open cgroup path %s", path);
return syserrno(-errno, "Failed to load bpf program");
attr = &(union bpf_attr){
.attach_type = type,
.target_fd = fd,
.target_fd = fd_attach,
.attach_bpf_fd = prog->kernel_fd,
.attach_flags = flags,
};
ret = bpf(BPF_PROG_ATTACH, attr, sizeof(*attr));
if (ret < 0)
return log_error_errno(-1, errno, "Failed to attach bpf program");
return syserrno(-errno, "Failed to attach bpf program");
free_move_ptr(prog->attached_path, copy);
prog->attached_type = type;
prog->attached_flags = flags;
prog->fd_cgroup = move_fd(fd_attach);
prog->attached_type = type;
prog->attached_flags = flags;
TRACE("Loaded and attached bpf program to cgroup %s", prog->attached_path);
TRACE("Attached bpf program to cgroup %d", prog->fd_cgroup);
return 0;
}
int bpf_program_cgroup_detach(struct bpf_program *prog)
{
__do_close int fd = -EBADF;
__do_close int fd_cgroup = -EBADF, fd_kernel = -EBADF;
int ret;
union bpf_attr *attr;
if (!prog)
return 0;
if (!prog->attached_path)
return 0;
/* Ensure that these fds are wiped. */
fd_cgroup = move_fd(prog->fd_cgroup);
fd_kernel = move_fd(prog->kernel_fd);
fd = open(prog->attached_path, O_DIRECTORY | O_RDONLY | O_CLOEXEC);
if (fd < 0) {
if (errno != ENOENT)
return log_error_errno(-1, errno, "Failed to open attach cgroup %s",
prog->attached_path);
} else {
union bpf_attr *attr;
if (fd_cgroup < 0 || fd_kernel < 0)
return 0;
attr = &(union bpf_attr){
.attach_type = prog->attached_type,
.target_fd = fd,
.attach_bpf_fd = prog->kernel_fd,
};
attr = &(union bpf_attr){
.attach_type = prog->attached_type,
.target_fd = fd_cgroup,
.attach_bpf_fd = fd_kernel,
};
ret = bpf(BPF_PROG_DETACH, attr, sizeof(*attr));
if (ret < 0)
return log_error_errno(-1, errno, "Failed to detach bpf program from cgroup %s",
prog->attached_path);
}
ret = bpf(BPF_PROG_DETACH, attr, sizeof(*attr));
if (ret < 0)
return syserrno(-errno, "Failed to detach bpf program from cgroup %d", fd_cgroup);
TRACE("Detached bpf program from cgroup %s", prog->attached_path);
free_disarm(prog->attached_path);
TRACE("Detached bpf program from cgroup %d", fd_cgroup);
return 0;
return 0;
}
void bpf_device_program_free(struct cgroup_ops *ops)
{
if (ops->cgroup2_devices) {
(void)bpf_program_cgroup_detach(ops->cgroup2_devices);
(void)bpf_program_free(ops->cgroup2_devices);
bpf_program_free(ops->cgroup2_devices);
ops->cgroup2_devices = NULL;
}
}
int bpf_list_add_device(struct lxc_conf *conf, struct device_item *device)
int bpf_list_add_device(struct lxc_list *devices, struct device_item *device)
{
__do_free struct lxc_list *list_elem = NULL;
__do_free struct device_item *new_device = NULL;
struct lxc_list *it;
if (!conf || !device)
if (!devices || !device)
return ret_errno(EINVAL);
lxc_list_for_each(it, &conf->devices) {
lxc_list_for_each(it, devices) {
struct device_item *cur = it->elem;
if (cur->global_rule > LXC_BPF_DEVICE_CGROUP_LOCAL_RULE &&
......@@ -522,7 +496,7 @@ int bpf_list_add_device(struct lxc_conf *conf, struct device_item *device)
return log_error_errno(-1, ENOMEM, "Failed to allocate new device item");
lxc_list_add_elem(list_elem, move_ptr(new_device));
lxc_list_add_tail(&conf->devices, move_ptr(list_elem));
lxc_list_add_tail(devices, move_ptr(list_elem));
return 0;
}
......@@ -558,4 +532,150 @@ bool bpf_devices_cgroup_supported(void)
return log_trace(true, "The bpf device cgroup is supported");
}
#endif
/*
 * Build a BPF_PROG_TYPE_CGROUP_DEVICE program from the device_item rules in
 * @devices.  The program is constructed and finalized but not yet loaded
 * into the kernel or attached to any cgroup.
 *
 * Returns an owned struct bpf_program on success (caller must release it,
 * e.g. via __do_bpf_program_free), or NULL with errno set on failure.
 */
static struct bpf_program *__bpf_cgroup_devices(struct lxc_list *devices)
{
	__do_bpf_program_free struct bpf_program *prog = NULL;
	int ret;
	struct lxc_list *it;

	prog = bpf_program_new(BPF_PROG_TYPE_CGROUP_DEVICE);
	if (!prog)
		return syserrno(NULL, "Failed to create new bpf program");

	ret = bpf_program_init(prog);
	if (ret)
		return syserrno(NULL, "Failed to initialize bpf program");

	/*
	 * Pick the program's default verdict (deny-all allowlist vs
	 * allow-all denylist) from any global rule present in @devices.
	 */
	bpf_device_set_type(prog, devices);
	TRACE("Device bpf %s all devices by default",
	      bpf_device_block_all(prog) ? "blocks" : "allows");

	lxc_list_for_each(it, devices) {
		struct device_item *cur = it->elem;

		/*
		 * Skip rules that are redundant given the default verdict
		 * (e.g. an individual deny rule when everything is already
		 * blocked) or that are global rules already handled above.
		 */
		if (!bpf_device_add(prog, cur)) {
			TRACE("Skipping rule: type %c, major %d, minor %d, access %s, allow %d",
			      cur->type, cur->major, cur->minor, cur->access, cur->allow);
			continue;
		}

		ret = bpf_program_append_device(prog, cur);
		if (ret)
			return syserrno(NULL, "Failed adding rule: type %c, major %d, minor %d, access %s, allow %d",
					cur->type, cur->major, cur->minor, cur->access, cur->allow);

		TRACE("Added rule to bpf device program: type %c, major %d, minor %d, access %s, allow %d",
		      cur->type, cur->major, cur->minor, cur->access, cur->allow);
	}

	/* Emit the program epilogue; no rules may be appended after this. */
	ret = bpf_program_finalize(prog);
	if (ret)
		return syserrno(NULL, "Failed to finalize bpf program");

	/* Transfer ownership to the caller, disarming the cleanup handler. */
	return move_ptr(prog);
}
/*
 * Build a device-controller bpf program from @devices and attach it to the
 * container's unified-hierarchy limit cgroup (ops->unified->cgfd_limit)
 * with BPF_F_ALLOW_MULTI.
 *
 * On success the new program replaces ops->cgroup2_devices; the previous
 * program (if any) is swapped into the local cleanup variable and released
 * when this function returns.  Returns true on success, false with errno
 * set on failure.
 */
bool bpf_cgroup_devices_attach(struct cgroup_ops *ops, struct lxc_list *devices)
{
	__do_bpf_program_free struct bpf_program *prog = NULL;
	int ret;

	prog = __bpf_cgroup_devices(devices);
	if (!prog)
		return syserrno(false, "Failed to create bpf program");

	ret = bpf_program_cgroup_attach(prog, BPF_CGROUP_DEVICE,
					ops->unified->cgfd_limit,
					BPF_F_ALLOW_MULTI);
	if (ret)
		return syserrno(false, "Failed to attach bpf program");

	/*
	 * Replace old bpf program.  After the swap @prog holds the old
	 * program, which __do_bpf_program_free then detaches and frees.
	 */
	swap(prog, ops->cgroup2_devices);

	return log_trace(true, "Attached bpf program");
}
/*
 * Add the device rule @new to @devices and update the live device-controller
 * bpf program on the container's unified cgroup accordingly.
 *
 * If no program is currently attached this degenerates to
 * bpf_cgroup_devices_attach().  Otherwise a fresh program is built and, when
 * the kernel supports BPF_F_REPLACE, atomically swapped in for the old one;
 * on kernels without BPF_F_REPLACE the new program is appended via
 * BPF_F_ALLOW_MULTI instead.  Kernel support is probed once and cached in
 * the static @can_use_bpf_replace (-1 = unknown, 1 = supported, 0 = not).
 *
 * Returns true on success, false with errno set on failure.
 */
bool bpf_cgroup_devices_update(struct cgroup_ops *ops,
			       struct device_item *new,
			       struct lxc_list *devices)
{
	__do_bpf_program_free struct bpf_program *prog = NULL;
	static int can_use_bpf_replace = -1;
	struct bpf_program *prog_old;
	union bpf_attr *attr;
	int ret;

	if (!ops)
		return ret_set_errno(false, EINVAL);

	/* Live updates only make sense on a pure cgroup2 layout. */
	if (!pure_unified_layout(ops))
		return ret_set_errno(false, EINVAL);

	if (ops->unified->cgfd_limit < 0)
		return ret_set_errno(false, EBADF);

	ret = bpf_list_add_device(devices, new);
	if (ret < 0)
		return false;

	/* No previous device program attached. */
	prog_old = ops->cgroup2_devices;
	if (!prog_old)
		return bpf_cgroup_devices_attach(ops, devices);

	prog = __bpf_cgroup_devices(devices);
	if (!prog)
		return syserrno(false, "Failed to create bpf program");

	ret = bpf_program_load_kernel(prog);
	if (ret < 0)
		return syserrno(false, "Failed to load bpf program");

	attr = &(union bpf_attr){
		.attach_type	= prog_old->attached_type,
		.target_fd	= prog_old->fd_cgroup,
		.attach_bpf_fd	= prog->kernel_fd,
	};

	switch (can_use_bpf_replace) {
	case 1:
		/* Kernel known to support atomic replacement. */
		attr->replace_bpf_fd = prog_old->kernel_fd;
		attr->attach_flags = BPF_F_REPLACE | BPF_F_ALLOW_MULTI;

		ret = bpf(BPF_PROG_ATTACH, attr, sizeof(*attr));
		break;
	case -1:
		/* First call: probe BPF_F_REPLACE support and cache it. */
		attr->replace_bpf_fd = prog_old->kernel_fd;
		attr->attach_flags = BPF_F_REPLACE | BPF_F_ALLOW_MULTI;

		/*
		 * NOTE(review): on this probe path a successful replace
		 * leaves @ret at the value bpf_list_add_device() returned
		 * (0), which is what the error check below relies on.
		 */
		can_use_bpf_replace = !bpf(BPF_PROG_ATTACH, attr, sizeof(*attr));
		if (can_use_bpf_replace > 0)
			break;

		__fallthrough;
	case 0:
		/* No BPF_F_REPLACE: append alongside the old program. */
		attr->attach_flags = BPF_F_ALLOW_MULTI;
		attr->replace_bpf_fd = 0;

		ret = bpf(BPF_PROG_ATTACH, attr, sizeof(*attr));
		break;
	}
	if (ret < 0)
		return syserrno(false, "Failed to update bpf program");

	if (can_use_bpf_replace > 0) {
		/* The old program was automatically detached by the kernel. */
		close_prot_errno_disarm(prog_old->kernel_fd);
		/* The new bpf program now owns the cgroup fd. */
		prog->fd_cgroup = move_fd(prog_old->fd_cgroup);
		TRACE("Replaced existing bpf program");
	} else {
		TRACE("Appended bpf program");
	}
	prog->attached_type  = prog_old->attached_type;
	prog->attached_flags = attr->attach_flags;
	/* Publish the new program; the old one is freed via cleanup. */
	swap(prog, ops->cgroup2_devices);

	return true;
}
......@@ -19,117 +19,94 @@
#include "compiler.h"
#include "conf.h"
#include "config.h"
#include "list.h"
#include "macro.h"
#include "memory_utils.h"
#include "syscall_numbers.h"
#ifdef HAVE_STRUCT_BPF_CGROUP_DEV_CTX
#include <linux/bpf.h>
#include <linux/filter.h>
#endif
#include "include/bpf.h"
#include "include/bpf_common.h"
#ifndef HAVE_BPF
union bpf_attr;
static inline int missing_bpf(int cmd, union bpf_attr *attr, size_t size)
static inline int bpf_lxc(int cmd, union bpf_attr *attr, size_t size)
{
return syscall(__NR_bpf, cmd, attr, size);
}
#define bpf missing_bpf
#define bpf bpf_lxc
#endif /* HAVE_BPF */
struct bpf_program {
int device_list_type;
int kernel_fd;
uint32_t prog_type;
__u32 prog_type;
size_t n_instructions;
#ifdef HAVE_STRUCT_BPF_CGROUP_DEV_CTX
struct bpf_insn *instructions;
#endif /* HAVE_STRUCT_BPF_CGROUP_DEV_CTX */
char *attached_path;
int fd_cgroup;
int attached_type;
uint32_t attached_flags;
__u32 attached_flags;
};
#ifdef HAVE_STRUCT_BPF_CGROUP_DEV_CTX
__hidden extern struct bpf_program *bpf_program_new(uint32_t prog_type);
__hidden extern int bpf_program_init(struct bpf_program *prog);
__hidden extern int bpf_program_append_device(struct bpf_program *prog, struct device_item *device);
__hidden extern int bpf_program_finalize(struct bpf_program *prog);
__hidden extern int bpf_program_cgroup_attach(struct bpf_program *prog, int type, const char *path,
uint32_t flags);
__hidden extern int bpf_program_cgroup_detach(struct bpf_program *prog);
__hidden extern void bpf_program_free(struct bpf_program *prog);
__hidden extern void bpf_device_program_free(struct cgroup_ops *ops);
__hidden extern bool bpf_devices_cgroup_supported(void);
__hidden extern int bpf_list_add_device(struct lxc_conf *conf, struct device_item *device);
#else /* !HAVE_STRUCT_BPF_CGROUP_DEV_CTX */
static inline struct bpf_program *bpf_program_new(uint32_t prog_type)
static inline bool bpf_device_block_all(const struct bpf_program *prog)
{
errno = ENOSYS;
return NULL;
/* LXC_BPF_DEVICE_CGROUP_ALLOWLIST -> allowlist (deny all) */
return prog->device_list_type == LXC_BPF_DEVICE_CGROUP_ALLOWLIST;
}
static inline int bpf_program_init(struct bpf_program *prog)
static inline bool bpf_device_add(const struct bpf_program *prog,
struct device_item *device)
{
errno = ENOSYS;
return -1;
}
if (device->global_rule > LXC_BPF_DEVICE_CGROUP_LOCAL_RULE)
return false;
static inline int bpf_program_append_device(struct bpf_program *prog, char type,
int major, int minor,
const char *access, int allow)
{
errno = ENOSYS;
return -1;
}
/* We're blocking all devices so skip individual deny rules. */
if (bpf_device_block_all(prog) && !device->allow)
return false;
static inline int bpf_program_finalize(struct bpf_program *prog)
{
errno = ENOSYS;
return -1;
/* We're allowing all devices so skip individual allow rules. */
if (!bpf_device_block_all(prog) && device->allow)
return false;
return true;
}
static inline int bpf_program_cgroup_attach(struct bpf_program *prog, int type,
const char *path, uint32_t flags)
static inline void bpf_device_set_type(struct bpf_program *prog,
struct lxc_list *devices)
{
errno = ENOSYS;
return -1;
}
struct lxc_list *it;
static inline int bpf_program_cgroup_detach(struct bpf_program *prog)
{
errno = ENOSYS;
return -1;
}
lxc_list_for_each (it, devices) {
struct device_item *cur = it->elem;
static inline void bpf_program_free(struct bpf_program *prog)
{
if (cur->global_rule > LXC_BPF_DEVICE_CGROUP_LOCAL_RULE)
prog->device_list_type = cur->global_rule;
}
}
static inline void bpf_device_program_free(struct cgroup_ops *ops)
{
}
__hidden extern struct bpf_program *bpf_program_new(__u32 prog_type);
__hidden extern int bpf_program_init(struct bpf_program *prog);
__hidden extern int bpf_program_append_device(struct bpf_program *prog, struct device_item *device);
__hidden extern int bpf_program_finalize(struct bpf_program *prog);
__hidden extern int bpf_program_cgroup_detach(struct bpf_program *prog);
__hidden extern void bpf_device_program_free(struct cgroup_ops *ops);
__hidden extern bool bpf_devices_cgroup_supported(void);
/* Stub for builds without struct bpf_cgroup_dev_ctx: bpf-based device
 * management is never supported. */
static inline bool bpf_devices_cgroup_supported(void)
{
return false;
}
__hidden extern int bpf_list_add_device(struct lxc_list *devices,
struct device_item *device);
__hidden extern bool bpf_cgroup_devices_attach(struct cgroup_ops *ops,
struct lxc_list *devices);
__hidden extern bool bpf_cgroup_devices_update(struct cgroup_ops *ops,
struct device_item *new,
struct lxc_list *devices);
static inline int bpf_list_add_device(struct lxc_conf *conf,
struct device_item *device)
static inline void bpf_program_free(struct bpf_program *prog)
{
errno = ENOSYS;
return -1;
if (prog) {
(void)bpf_program_cgroup_detach(prog);
free(prog->instructions);
free(prog);
}
}
#endif /* !HAVE_STRUCT_BPF_CGROUP_DEV_CTX */
/* Register bpf_program_free() as a scope-exit cleanup handler so pointers
 * annotated with __do_bpf_program_free are released automatically. */
define_cleanup_function(struct bpf_program *, bpf_program_free);
#define __do_bpf_program_free call_cleaner(bpf_program_free)
......
......@@ -1168,7 +1168,6 @@ static int lxc_cmd_add_state_client_callback(__owns int fd, struct lxc_cmd_req *
int lxc_cmd_add_bpf_device_cgroup(const char *name, const char *lxcpath,
struct device_item *device)
{
#ifdef HAVE_STRUCT_BPF_CGROUP_DEV_CTX
int stopped = 0;
struct lxc_cmd_rr cmd = {
.req = {
......@@ -1188,25 +1187,16 @@ int lxc_cmd_add_bpf_device_cgroup(const char *name, const char *lxcpath,
return log_error_errno(-1, errno, "Failed to add new bpf device cgroup rule");
return 0;
#else
return ret_set_errno(-1, ENOSYS);
#endif
}
static int lxc_cmd_add_bpf_device_cgroup_callback(int fd, struct lxc_cmd_req *req,
struct lxc_handler *handler,
struct lxc_epoll_descr *descr)
{
#ifdef HAVE_STRUCT_BPF_CGROUP_DEV_CTX
__do_bpf_program_free struct bpf_program *devices = NULL;
struct lxc_cmd_rsp rsp = {0};
struct lxc_conf *conf = handler->conf;
struct cgroup_ops *cgroup_ops = handler->cgroup_ops;
struct hierarchy *unified = cgroup_ops->unified;
int ret;
struct lxc_list *it;
struct lxc_cmd_rsp rsp = {};
struct device_item *device;
struct bpf_program *devices_old;
struct lxc_conf *conf;
if (req->datalen <= 0)
return LXC_CMD_REAP_CLIENT_FD;
......@@ -1216,58 +1206,19 @@ static int lxc_cmd_add_bpf_device_cgroup_callback(int fd, struct lxc_cmd_req *re
if (!req->data)
return LXC_CMD_REAP_CLIENT_FD;
device = (struct device_item *)req->data;
rsp.ret = -1;
if (!unified)
goto respond;
ret = bpf_list_add_device(conf, device);
if (ret < 0)
goto respond;
devices = bpf_program_new(BPF_PROG_TYPE_CGROUP_DEVICE);
if (!devices)
goto respond;
ret = bpf_program_init(devices);
if (ret)
goto respond;
lxc_list_for_each(it, &conf->devices) {
struct device_item *cur = it->elem;
ret = bpf_program_append_device(devices, cur);
if (ret)
goto respond;
}
ret = bpf_program_finalize(devices);
if (ret)
goto respond;
ret = bpf_program_cgroup_attach(devices, BPF_CGROUP_DEVICE,
unified->container_full_path,
BPF_F_ALLOW_MULTI);
if (ret)
goto respond;
/* Replace old bpf program. */
devices_old = move_ptr(cgroup_ops->cgroup2_devices);
cgroup_ops->cgroup2_devices = move_ptr(devices);
devices = move_ptr(devices_old);
rsp.ret = 0;
device = (struct device_item *)req->data;
conf = handler->conf;
if (!bpf_cgroup_devices_update(handler->cgroup_ops, device, &conf->devices))
rsp.ret = -1;
else
rsp.ret = 0;
respond:
ret = lxc_cmd_rsp_send(fd, &rsp);
if (ret < 0)
return LXC_CMD_REAP_CLIENT_FD;
return 0;
#else
return ret_set_errno(-1, ENOSYS);
#endif
}
int lxc_cmd_console_log(const char *name, const char *lxcpath,
......
......@@ -19,7 +19,9 @@
#endif
#endif
/* Annotation for intentional switch-case fallthrough: use the compiler
 * attribute where available (GCC >= 7), otherwise degrade to a comment. */
#ifndef __fallthrough
#if __GNUC__ >= 7
#define __fallthrough __attribute__((__fallthrough__))
#else
#define __fallthrough /* fall through */
#endif
......
......@@ -269,11 +269,11 @@ struct lxc_state_client {
lxc_state_t states[MAX_STATE];
};
enum {
LXC_BPF_DEVICE_CGROUP_LOCAL_RULE = -1,
LXC_BPF_DEVICE_CGROUP_ALLOWLIST = 0,
LXC_BPF_DEVICE_CGROUP_DENYLIST = 1,
};
/*
 * lxc_bpf_devices_rule_t - classification of a device cgroup rule.
 * LOCAL_RULE marks a per-device entry; ALLOWLIST/DENYLIST select the global
 * base policy (allowlist implies a deny-all base program — see
 * bpf_device_block_all()).
 */
typedef enum lxc_bpf_devices_rule_t {
LXC_BPF_DEVICE_CGROUP_LOCAL_RULE = -1,
LXC_BPF_DEVICE_CGROUP_ALLOWLIST = 0,
LXC_BPF_DEVICE_CGROUP_DENYLIST = 1,
} lxc_bpf_devices_rule_t;
struct device_item {
char type;
......
......@@ -2040,8 +2040,8 @@ static bool do_lxcapi_reboot2(struct lxc_container *c, int timeout)
else
killret = kill(pid, rebootsignal);
if (killret < 0)
return log_warn(false, "Failed to send signal %d to pid %d", rebootsignal, pid);
TRACE("Sent signal %d to pid %d", rebootsignal, pid);
return log_warn(false, "Failed to send signal %d to pidfd(%d)/pid(%d)", rebootsignal, pidfd, pid);
TRACE("Sent signal %d to pidfd(%d)/pid(%d)", rebootsignal, pidfd, pid);
if (timeout == 0)
return true;
......
......@@ -694,4 +694,11 @@ enum {
#define MAX_FILENO ~0U
/* Exchange the values of two same-typed lvalues via a GNU typeof() temporary.
 * Each argument is expanded more than once, so avoid side-effecting
 * expressions; only pass plain variables. */
#define swap(a, b) \
do { \
typeof(a) __tmp = (a); \
(a) = (b); \
(b) = __tmp; \
} while (0)
#endif /* __LXC_MACRO_H */
......@@ -95,4 +95,21 @@ static inline void *memdup(const void *data, size_t len)
(a) = move_fd((b)); \
})
/*
 * close_equal() - close two file descriptors that may alias each other.
 *
 * Closes @a unless it is invalid or the very same descriptor as @b, then
 * closes @b if valid, and finally resets both variables to -EBADF so a stale
 * descriptor cannot be reused. Arguments are evaluated more than once; only
 * pass plain int variables.
 *
 * Bug fix: the second guard read "if (close >= 0)", an (always-true) ordered
 * comparison of the close() function pointer against zero, so close(b) ran
 * even for invalid @b. It must test @b itself, mirroring the @a guard.
 */
#define close_equal(a, b)             \
	({                            \
		if (a >= 0 && a != b) \
			close(a);     \
		if (b >= 0)           \
			close(b);     \
		a = b = -EBADF;       \
	})
/*
 * free_equal() - free two pointers that may refer to the same allocation.
 *
 * Frees @a only when it differs from @b (avoiding a double free when both
 * alias one block), always frees @b, then NULLs both variables so stale
 * pointers cannot be used afterwards. Arguments are evaluated more than
 * once; only pass plain pointer variables.
 */
#define free_equal(a, b)         \
	({                       \
		if (a != b)      \
			free(a); \
		free(b);         \
		a = NULL;        \
		b = NULL;        \
	})
#endif /* __LXC_MEMORY_UTILS_H */
......@@ -1858,19 +1858,34 @@ bool multiply_overflow(int64_t base, uint64_t mult, int64_t *res)
int print_r(int fd, const char *path)
{
__do_close int dfd = -EBADF;
__do_close int dfd = -EBADF, dfd_dup = -EBADF;
__do_closedir DIR *dir = NULL;
int ret = 0;
struct dirent *direntp;
struct stat st;
if (is_empty_string(path))
dfd = dup(fd);
else
dfd = openat(fd, path, O_CLOEXEC | O_DIRECTORY);
if (is_empty_string(path)) {
char buf[LXC_PROC_SELF_FD_LEN];
ret = strnprintf(buf, sizeof(buf), "/proc/self/fd/%d", fd);
if (ret < 0)
return ret_errno(EIO);
/*
* O_PATH file descriptors can't be used so we need to re-open
* just in case.
*/
dfd = openat(-EBADF, buf, O_CLOEXEC | O_DIRECTORY, 0);
} else {
dfd = openat(fd, path, O_CLOEXEC | O_DIRECTORY, 0);
}
if (dfd < 0)
return -1;
dfd_dup = dup_cloexec(dfd);
if (dfd_dup < 0)
return -1;
dir = fdopendir(dfd);
if (!dir)
return -1;
......@@ -1882,26 +1897,29 @@ int print_r(int fd, const char *path)
!strcmp(direntp->d_name, ".."))
continue;
ret = fstatat(dfd, direntp->d_name, &st, AT_SYMLINK_NOFOLLOW);
ret = fstatat(dfd_dup, direntp->d_name, &st, AT_SYMLINK_NOFOLLOW);
if (ret < 0 && errno != ENOENT)
break;
ret = 0;
if (S_ISDIR(st.st_mode))
ret = print_r(dfd, direntp->d_name);
ret = print_r(dfd_dup, direntp->d_name);
else
INFO("mode(%o):uid(%d):gid(%d) -> %s/%s\n",
(st.st_mode & ~S_IFMT), st.st_uid, st.st_gid, path,
INFO("mode(%o):uid(%d):gid(%d) -> %d/%s\n",
(st.st_mode & ~S_IFMT), st.st_uid, st.st_gid, dfd_dup,
direntp->d_name);
if (ret < 0 && errno != ENOENT)
break;
}
ret = fstatat(fd, path, &st, AT_SYMLINK_NOFOLLOW);
if (is_empty_string(path))
ret = fstatat(fd, "", &st, AT_NO_AUTOMOUNT | AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH);
else
ret = fstatat(fd, path, &st, AT_NO_AUTOMOUNT | AT_SYMLINK_NOFOLLOW);
if (ret)
return -1;
else
INFO("mode(%o):uid(%d):gid(%d) -> %s",
(st.st_mode & ~S_IFMT), st.st_uid, st.st_gid, path);
(st.st_mode & ~S_IFMT), st.st_uid, st.st_gid, maybe_empty(path));
return ret;
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment