Commit 3bc449ed by Serge Hallyn

lxccontainer: update locking comment

Update the LOCKING comment. Take mem_lock in want_daemonize. Convert lxcapi_destroy to not use privlock/slock by hand. Fix a coverity-found potential dereference of NULL c->lxc_conf. api_cgroup_get_item() and api_cgroup_set_item(): use disklock, not memlock, since the values are set through the cgroup fs on the running container.

Signed-off-by: Serge Hallyn <serge.hallyn@ubuntu.com>
Acked-by: Stéphane Graber <stgraber@ubuntu.com>
parent 73e608b2
...@@ -145,11 +145,11 @@ void remove_partial(struct lxc_container *c, int fd) ...@@ -145,11 +145,11 @@ void remove_partial(struct lxc_container *c, int fd)
} }
/* LOCKING /* LOCKING
* 1. c->privlock protects the struct lxc_container from multiple threads. * 1. container_mem_lock(c) protects the struct lxc_container from multiple threads.
* 2. c->slock protects the on-disk container data * 2. container_disk_lock(c) protects the on-disk container data - in particular the
* 3. thread_mutex protects process data (ex: fd table) from multiple threads * container configuration file.
* slock is an flock, which does not exclude threads. Therefore slock should * The container_disk_lock also takes the container_mem_lock.
* always be wrapped inside privlock. * 3. thread_mutex protects process data (ex: fd table) from multiple threads.
* NOTHING mutexes two independent programs with their own struct * NOTHING mutexes two independent programs with their own struct
* lxc_container for the same c->name, between API calls. For instance, * lxc_container for the same c->name, between API calls. For instance,
* c->config_read(); c->start(); Between those calls, data on disk * c->config_read(); c->start(); Between those calls, data on disk
...@@ -160,7 +160,7 @@ void remove_partial(struct lxc_container *c, int fd) ...@@ -160,7 +160,7 @@ void remove_partial(struct lxc_container *c, int fd)
* due to hung callers. So I prefer to keep the locks only within our own * due to hung callers. So I prefer to keep the locks only within our own
* functions, not across functions. * functions, not across functions.
* *
* If you're going to fork while holding a lxccontainer, increment * If you're going to clone while holding a lxccontainer, increment
* c->numthreads (under privlock) before forking. When deleting, * c->numthreads (under privlock) before forking. When deleting,
* decrement numthreads under privlock, then if it hits 0 you can delete. * decrement numthreads under privlock, then if it hits 0 you can delete.
* Do not ever use a lxccontainer whose numthreads you did not bump. * Do not ever use a lxccontainer whose numthreads you did not bump.
...@@ -406,7 +406,12 @@ static void lxcapi_want_daemonize(struct lxc_container *c) ...@@ -406,7 +406,12 @@ static void lxcapi_want_daemonize(struct lxc_container *c)
{ {
if (!c) if (!c)
return; return;
if (!container_mem_lock(c)) {
ERROR("Error getting mem lock");
return;
}
c->daemonize = 1; c->daemonize = 1;
container_mem_unlock(c);
} }
static bool lxcapi_wait(struct lxc_container *c, const char *state, int timeout) static bool lxcapi_wait(struct lxc_container *c, const char *state, int timeout)
...@@ -1218,12 +1223,8 @@ static bool lxcapi_destroy(struct lxc_container *c) ...@@ -1218,12 +1223,8 @@ static bool lxcapi_destroy(struct lxc_container *c)
if (!c || !lxcapi_is_defined(c)) if (!c || !lxcapi_is_defined(c))
return false; return false;
if (lxclock(c->privlock, 0)) if (container_disk_lock(c))
return false;
if (lxclock(c->slock, 0)) {
lxcunlock(c->privlock);
return false; return false;
}
if (!is_stopped(c)) { if (!is_stopped(c)) {
// we should queue some sort of error - in c->error_string? // we should queue some sort of error - in c->error_string?
...@@ -1231,7 +1232,7 @@ static bool lxcapi_destroy(struct lxc_container *c) ...@@ -1231,7 +1232,7 @@ static bool lxcapi_destroy(struct lxc_container *c)
goto out; goto out;
} }
if (c->lxc_conf->rootfs.path && c->lxc_conf->rootfs.mount) if (c->lxc_conf && c->lxc_conf->rootfs.path && c->lxc_conf->rootfs.mount)
r = bdev_init(c->lxc_conf->rootfs.path, c->lxc_conf->rootfs.mount, NULL); r = bdev_init(c->lxc_conf->rootfs.path, c->lxc_conf->rootfs.mount, NULL);
if (r) { if (r) {
if (r->ops->destroy(r) < 0) { if (r->ops->destroy(r) < 0) {
...@@ -1250,8 +1251,7 @@ static bool lxcapi_destroy(struct lxc_container *c) ...@@ -1250,8 +1251,7 @@ static bool lxcapi_destroy(struct lxc_container *c)
ret = true; ret = true;
out: out:
lxcunlock(c->privlock); container_disk_unlock(c);
lxcunlock(c->slock);
return ret; return ret;
} }
...@@ -1374,42 +1374,38 @@ err: ...@@ -1374,42 +1374,38 @@ err:
static bool lxcapi_set_cgroup_item(struct lxc_container *c, const char *subsys, const char *value) static bool lxcapi_set_cgroup_item(struct lxc_container *c, const char *subsys, const char *value)
{ {
int ret; int ret;
bool b = false;
if (!c) if (!c)
return false; return false;
if (container_mem_lock(c)) if (is_stopped(c))
return false; return false;
if (is_stopped(c)) if (container_disk_lock(c))
goto err; return false;
ret = lxc_cgroup_set(c->name, subsys, value, c->config_path); ret = lxc_cgroup_set(c->name, subsys, value, c->config_path) == 0;
if (!ret)
b = true; container_disk_unlock(c);
err: return ret == 0;
container_mem_unlock(c);
return b;
} }
static int lxcapi_get_cgroup_item(struct lxc_container *c, const char *subsys, char *retv, int inlen) static int lxcapi_get_cgroup_item(struct lxc_container *c, const char *subsys, char *retv, int inlen)
{ {
int ret = -1; int ret;
if (!c || !c->lxc_conf) if (!c || !c->lxc_conf)
return -1; return -1;
if (container_mem_lock(c)) if (is_stopped(c))
return -1; return -1;
if (is_stopped(c)) if (container_disk_lock(c))
goto out; return -1;
ret = lxc_cgroup_get(c->name, subsys, retv, inlen, c->config_path); ret = lxc_cgroup_get(c->name, subsys, retv, inlen, c->config_path);
out: container_disk_unlock(c);
container_mem_unlock(c);
return ret; return ret;
} }
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment