Unverified commit bccc67cf by Christian Brauner, committed by Stéphane Graber

commands: rename to lxc_cmd_add_state_client()

The new wait commands API is not yet stable so this change is ok. Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
parent 72e9f5b1
......@@ -87,7 +87,7 @@ static const char *lxc_cmd_str(lxc_cmd_t cmd)
[LXC_CMD_GET_CONFIG_ITEM] = "get_config_item",
[LXC_CMD_GET_NAME] = "get_name",
[LXC_CMD_GET_LXCPATH] = "get_lxcpath",
[LXC_CMD_STATE_SERVER] = "state_server",
[LXC_CMD_ADD_STATE_CLIENT] = "add_state_client",
};
if (cmd >= LXC_CMD_MAX)
......@@ -232,7 +232,7 @@ static int lxc_cmd(const char *name, struct lxc_cmd_rr *cmd, int *stopped,
bool stay_connected = false;
if (cmd->req.cmd == LXC_CMD_CONSOLE ||
cmd->req.cmd == LXC_CMD_STATE_SERVER)
cmd->req.cmd == LXC_CMD_ADD_STATE_CLIENT)
stay_connected = true;
*stopped = 0;
......@@ -813,14 +813,14 @@ static int lxc_cmd_get_lxcpath_callback(int fd, struct lxc_cmd_req *req,
}
/*
* lxc_cmd_state_server: register a client fd in the handler list
* lxc_cmd_add_state_client: register a client fd in the handler list
*
* @name : name of container to connect to
* @lxcpath : the lxcpath in which the container is running
*
 * Returns the container state on success, a negative value on failure.
*/
int lxc_cmd_state_server(const char *name, const char *lxcpath,
int lxc_cmd_add_state_client(const char *name, const char *lxcpath,
lxc_state_t states[MAX_STATE])
{
int stopped;
......@@ -829,14 +829,14 @@ int lxc_cmd_state_server(const char *name, const char *lxcpath,
struct lxc_msg msg = {0};
struct lxc_cmd_rr cmd = {
.req = {
.cmd = LXC_CMD_STATE_SERVER,
.cmd = LXC_CMD_ADD_STATE_CLIENT,
.data = states,
.datalen = (sizeof(lxc_state_t) * MAX_STATE)
},
};
/* Lock the whole lxc_cmd_state_server_callback() call to ensure that
* lxc_set_state() doesn't cause us to miss a state.
/* Lock the whole lxc_cmd_add_state_client_callback() call to ensure
* that lxc_set_state() doesn't cause us to miss a state.
*/
process_lock();
/* Check if already in requested state. */
......@@ -912,7 +912,7 @@ again:
return msg.value;
}
static int lxc_cmd_state_server_callback(int fd, struct lxc_cmd_req *req,
static int lxc_cmd_add_state_client_callback(int fd, struct lxc_cmd_req *req,
struct lxc_handler *handler)
{
struct lxc_cmd_rsp rsp = {0};
......@@ -967,7 +967,7 @@ static int lxc_cmd_process(int fd, struct lxc_cmd_req *req,
[LXC_CMD_GET_CONFIG_ITEM] = lxc_cmd_get_config_item_callback,
[LXC_CMD_GET_NAME] = lxc_cmd_get_name_callback,
[LXC_CMD_GET_LXCPATH] = lxc_cmd_get_lxcpath_callback,
[LXC_CMD_STATE_SERVER] = lxc_cmd_state_server_callback,
[LXC_CMD_ADD_STATE_CLIENT] = lxc_cmd_add_state_client_callback,
};
if (req->cmd >= LXC_CMD_MAX) {
......
......@@ -43,7 +43,7 @@ typedef enum {
LXC_CMD_GET_CONFIG_ITEM,
LXC_CMD_GET_NAME,
LXC_CMD_GET_LXCPATH,
LXC_CMD_STATE_SERVER,
LXC_CMD_ADD_STATE_CLIENT,
LXC_CMD_MAX,
} lxc_cmd_t;
......@@ -85,7 +85,7 @@ extern char *lxc_cmd_get_lxcpath(const char *hashed_sock);
extern pid_t lxc_cmd_get_init_pid(const char *name, const char *lxcpath);
extern int lxc_cmd_get_state(const char *name, const char *lxcpath);
extern int lxc_cmd_stop(const char *name, const char *lxcpath);
extern int lxc_cmd_state_server(const char *name, const char *lxcpath,
extern int lxc_cmd_add_state_client(const char *name, const char *lxcpath,
lxc_state_t states[MAX_STATE]);
struct lxc_epoll_descr;
......
......@@ -338,7 +338,7 @@ static int lxc_serve_state_clients(const char *name,
process_lock();
/* Only set state under process lock held so that we don't cause
* lxc_cmd_state_server() to miss a state.
* lxc_cmd_add_state_client() to miss a state.
*/
handler->state = state;
TRACE("set container state to %s", lxc_state2str(state));
......
......@@ -114,7 +114,7 @@ extern int lxc_wait(const char *lxcname, const char *states, int timeout,
if (fillwaitedstates(states, s))
return -1;
state = lxc_cmd_state_server(lxcname, lxcpath, s);
state = lxc_cmd_add_state_client(lxcname, lxcpath, s);
if (state < 0) {
SYSERROR("failed to receive state from monitor");
return -1;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment