diff --git a/LICENSE.txt b/LICENSE.txt index d0d57461e7..c8e90f27f2 100644 --- a/LICENSE.txt +++ b/LICENSE.txt @@ -256,3 +256,26 @@ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/st hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jt/jquery.jstree.js hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/TERMINAL + +======= +For hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/cJSON.[ch]: + +Copyright (c) 2009-2017 Dave Gamble and cJSON contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt index 7e8b19f3e6..677429bb99 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt @@ -16,12 +16,15 @@ cmake_minimum_required(VERSION 3.1 FATAL_ERROR) -list(APPEND CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/../../../../../hadoop-common-project/hadoop-common) +set(HADOOP_COMMON_PATH ${CMAKE_SOURCE_DIR}/../../../../../hadoop-common-project/hadoop-common) +list(APPEND CMAKE_MODULE_PATH ${HADOOP_COMMON_PATH}) include(HadoopCommon) # Set gtest path set(GTEST_SRC_DIR ${CMAKE_SOURCE_DIR}/../../../../../hadoop-common-project/hadoop-common/src/main/native/gtest) +set(HADOOP_COMMON_SEC_PATH ${HADOOP_COMMON_PATH}/src/main/native/src/org/apache/hadoop/security) + # determine if container-executor.conf.dir is an absolute # path in case the OS we're compiling on doesn't have # a hook in get_executable. 
We'll use this define @@ -115,6 +118,7 @@ include_directories( main/native/container-executor main/native/container-executor/impl main/native/oom-listener/impl + ${HADOOP_COMMON_SEC_PATH} ) # add gtest as system library to suppress gcc warnings include_directories(SYSTEM ${GTEST_SRC_DIR}/include) @@ -129,6 +133,7 @@ add_library(container main/native/container-executor/impl/configuration.c main/native/container-executor/impl/container-executor.c main/native/container-executor/impl/get_executable.c + main/native/container-executor/impl/utils/file-utils.c main/native/container-executor/impl/utils/string-utils.c main/native/container-executor/impl/utils/path-utils.c main/native/container-executor/impl/modules/cgroups/cgroups-operations.c @@ -138,6 +143,14 @@ add_library(container main/native/container-executor/impl/modules/devices/devices-module.c main/native/container-executor/impl/utils/docker-util.c main/native/container-executor/impl/utils/mount-utils.c + main/native/container-executor/impl/utils/cJSON/cJSON.c + main/native/container-executor/impl/runc/runc.c + main/native/container-executor/impl/runc/runc_base_ctx.c + main/native/container-executor/impl/runc/runc_launch_cmd.c + main/native/container-executor/impl/runc/runc_reap.c + main/native/container-executor/impl/runc/runc_write_config.c + ${HADOOP_COMMON_SEC_PATH}/hadoop_user_info.c + ${HADOOP_COMMON_SEC_PATH}/hadoop_group_info.c ) add_executable(container-executor @@ -146,6 +159,7 @@ add_executable(container-executor target_link_libraries(container-executor container + crypto ) output_directory(container-executor target/usr/local/bin) @@ -155,7 +169,9 @@ add_executable(test-container-executor main/native/container-executor/test/test-container-executor.c ) target_link_libraries(test-container-executor - container ${EXTRA_LIBS} + container + ${EXTRA_LIBS} + crypto ) output_directory(test-container-executor target/usr/local/bin) @@ -173,8 +189,15 @@ add_executable(cetest main/native/container-executor/test/modules/fpga/test-fpga-module.cc main/native/container-executor/test/modules/devices/test-devices-module.cc main/native/container-executor/test/test_util.cc - main/native/container-executor/test/utils/test_docker_util.cc) -target_link_libraries(cetest gtest container) + main/native/container-executor/test/utils/test_docker_util.cc + main/native/container-executor/test/utils/test_runc_util.cc +) +target_link_libraries(cetest + gtest + container + crypto +) + output_directory(cetest test) # CGroup OOM listener diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c index 72e0cf11fb..3de736529a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c @@ -21,6 +21,7 @@ #include "utils/docker-util.h" #include "utils/path-utils.h" #include "utils/string-utils.h" +#include "runc/runc.h" #include "util.h" #include "config.h" @@ -78,6 +79,7 @@ static const int DEFAULT_DOCKER_SUPPORT_ENABLED = 0; static const int DEFAULT_TC_SUPPORT_ENABLED = 0; static const int DEFAULT_MOUNT_CGROUP_SUPPORT_ENABLED = 0; static const int DEFAULT_YARN_SYSFS_SUPPORT_ENABLED = 0; 
+static const int DEFAULT_RUNC_SUPPORT_ENABLED = 0; static const char* PROC_PATH = "/proc"; @@ -191,7 +193,7 @@ int check_executor_permissions(char *executable_file) { /** * Change the effective user id to limit damage. */ -static int change_effective_user(uid_t user, gid_t group) { +int change_effective_user(uid_t user, gid_t group) { if (geteuid() == user) { return 0; } @@ -211,6 +213,10 @@ static int change_effective_user(uid_t user, gid_t group) { return 0; } +int change_effective_user_to_nm() { + return change_effective_user(nm_uid, nm_gid); +} + #ifdef __linux /** * Write the pid of the current process to the cgroup file. @@ -408,7 +414,7 @@ static int wait_and_get_exit_code(pid_t pid) { * the exit code file. * Returns the exit code of the container process. */ -static int wait_and_write_exit_code(pid_t pid, const char* exit_code_file) { +int wait_and_write_exit_code(pid_t pid, const char* exit_code_file) { int exit_code = -1; exit_code = wait_and_get_exit_code(pid); @@ -510,6 +516,12 @@ int is_yarn_sysfs_support_enabled() { DEFAULT_YARN_SYSFS_SUPPORT_ENABLED, &executor_cfg); } +int is_runc_support_enabled() { + return is_feature_enabled(RUNC_SUPPORT_ENABLED_KEY, + DEFAULT_RUNC_SUPPORT_ENABLED, &executor_cfg) + || runc_module_enabled(&CFG); +} + /** * Utility function to concatenate argB to argA using the concat_pattern. */ @@ -642,6 +654,20 @@ char *get_tmp_directory(const char *work_dir) { return concatenate("%s/%s", "tmp dir", 2, work_dir, TMP_DIR); } +/** + * Get the private /tmp directory under the working directory + */ +char *get_privatetmp_directory(const char *work_dir) { + return concatenate("%s/%s", "private /tmp dir", 2, work_dir, ROOT_TMP_DIR); +} + +/** + * Get the private /var/tmp directory under the working directory + */ +char *get_private_var_tmp_directory(const char *work_dir) { + return concatenate("%s/%s", "private /var/tmp dir", 2, work_dir, ROOT_VAR_TMP_DIR); +} + /** * Ensure that the given path and all of the parent directories are created * with the desired permissions.
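A minimal usage sketch of the three path helpers above. The work_dir value and the wrapper function are hypothetical, the suffixes come from the TMP_DIR, ROOT_TMP_DIR and ROOT_VAR_TMP_DIR defines added to container-executor.h, and the caller owns the returned strings; this assumes the helpers are visible to the caller (in the patch they live in container-executor.c):

    #include <stdio.h>
    #include <stdlib.h>
    #include "container-executor.h"

    static void show_container_tmp_paths(void) {
      /* hypothetical container work directory, for illustration only */
      const char *work_dir = "/grid/nm/usercache/alice/appcache/app_1/container_1";
      char *tmp = get_tmp_directory(work_dir);                      /* .../container_1/tmp */
      char *priv_tmp = get_privatetmp_directory(work_dir);          /* .../container_1/private_slash_tmp */
      char *priv_var_tmp = get_private_var_tmp_directory(work_dir); /* .../container_1/private_var_slash_tmp */
      printf("%s\n%s\n%s\n", tmp, priv_tmp, priv_var_tmp);
      free(tmp);
      free(priv_tmp);
      free(priv_var_tmp);
    }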
@@ -810,17 +836,51 @@ static int create_container_directories(const char* user, const char *app_id, return result; } - result = COULD_NOT_CREATE_TMP_DIRECTORIES; // also make the tmp directory char *tmp_dir = get_tmp_directory(work_dir); + char *private_tmp_dir = get_privatetmp_directory(work_dir); + char *private_var_tmp_dir = get_private_var_tmp_directory(work_dir); - if (tmp_dir == NULL) { + if (tmp_dir == NULL || private_tmp_dir == NULL || private_var_tmp_dir == NULL) { return OUT_OF_MEMORY; } - if (mkdirs(tmp_dir, perms) == 0) { - result = 0; + + if (mkdirs(tmp_dir, perms) != 0) { + fprintf(ERRORFILE, "Could not create tmp_dir: %s\n", tmp_dir); + result = COULD_NOT_CREATE_TMP_DIRECTORIES; + goto cleanup; } + + if (mkdirs(private_tmp_dir, perms) != 0) { + fprintf(ERRORFILE, "Could not create private_tmp_dir: %s\n", private_tmp_dir); + result = COULD_NOT_CREATE_TMP_DIRECTORIES; + goto cleanup; + } + + // clear group sticky bit on private_tmp_dir + if (chmod(private_tmp_dir, perms) != 0) { + fprintf(ERRORFILE, "Could not chmod private_tmp_dir: %s\n", private_tmp_dir); + result = COULD_NOT_CREATE_TMP_DIRECTORIES; + goto cleanup; + } + + if (mkdirs(private_var_tmp_dir, perms) != 0) { + fprintf(ERRORFILE, "Could not create private_var_tmp_dir: %s\n", private_var_tmp_dir); + result = COULD_NOT_CREATE_TMP_DIRECTORIES; + goto cleanup; + } + + // clear group sticky bit on private_var_tmp_dir + if (chmod(private_var_tmp_dir, perms) != 0) { + fprintf(ERRORFILE, "Could not chmod private_var_tmp_dir: %s\n", private_var_tmp_dir); + result = COULD_NOT_CREATE_TMP_DIRECTORIES; + goto cleanup; + } + +cleanup: free(tmp_dir); + free(private_tmp_dir); + free(private_var_tmp_dir); return result; } @@ -1051,6 +1111,36 @@ static int open_file_as_nm(const char* filename) { return result; } +/** + * Check the pidfile as the node manager. File should not exist. + * Returns 0 if the file does not exist and -1 if it exists or cannot be checked. + */ +int check_pidfile_as_nm(const char* pidfile) { + int result = 0; + uid_t user = geteuid(); + gid_t group = getegid(); + if (change_effective_user(nm_uid, nm_gid) != 0) { + return -1; + } + + struct stat statbuf; + if (stat(pidfile, &statbuf) == 0) { + fprintf(ERRORFILE, "pid file already exists: %s\n", pidfile); + result = -1; + } + + if (errno != ENOENT) { + fprintf(ERRORFILE, "Error accessing %s : %s\n", pidfile, + strerror(errno)); + result = -1; + } + + if (change_effective_user(user, group)) { + result = -1; + } + return result; +} + /** * Copy a file from a fd to a given filename. * The new file must not exist and it is created with permissions perm.
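The mkdir-then-chmod pairs above appear to exist because the mode passed to mkdir(2) is filtered through the process umask and a new directory can also inherit the setgid bit from its parent; a follow-up chmod pins the exact mode, which is what the "clear group sticky bit" comments refer to. A small standalone sketch of that pattern (path and mode are hypothetical, not taken from the patch):

    #include <sys/stat.h>
    #include <errno.h>
    #include <stdio.h>

    /* Create a directory whose final mode is exactly `mode`, regardless of
     * the umask or bits inherited from the parent directory. */
    static int make_dir_with_exact_mode(const char *path, mode_t mode) {
      if (mkdir(path, mode) != 0 && errno != EEXIST) {
        perror(path);
        return -1;
      }
      /* mkdir applied (mode & ~umask) and may have inherited setgid;
       * chmod sets the exact mode we asked for. */
      if (chmod(path, mode) != 0) {
        perror(path);
        return -1;
      }
      return 0;
    }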
@@ -1863,6 +1953,61 @@ int create_yarn_sysfs(const char* user, const char *app_id, return result; } +int setup_container_paths(const char* user, const char* app_id, + const char *container_id, const char *work_dir, const char *script_name, + const char *cred_file, int https, const char *keystore_file, const char *truststore_file, + char* const* local_dirs, char* const* log_dirs) { + char *script_file_dest = NULL; + char *cred_file_dest = NULL; + char *keystore_file_dest = NULL; + char *truststore_file_dest = NULL; + int container_file_source = -1; + int cred_file_source = -1; + int keystore_file_source = -1; + int truststore_file_source = -1; + + int result = initialize_user(user, local_dirs); + if (result != 0) { + return result; + } + + int rc = create_script_paths( + work_dir, script_name, cred_file, https, keystore_file, truststore_file, &script_file_dest, &cred_file_dest, + &keystore_file_dest, &truststore_file_dest, &container_file_source, &cred_file_source, &keystore_file_source, &truststore_file_source); + + if (rc != 0) { + fputs("Could not create script path\n", ERRORFILE); + goto cleanup; + } + + rc = create_log_dirs(app_id, log_dirs); + if (rc != 0) { + fputs("Could not create log files and directories\n", ERRORFILE); + goto cleanup; + } + + rc = create_local_dirs(user, app_id, container_id, + work_dir, script_name, cred_file, https, keystore_file, truststore_file, local_dirs, log_dirs, + 1, script_file_dest, cred_file_dest, keystore_file_dest, truststore_file_dest, + container_file_source, cred_file_source, keystore_file_source, truststore_file_source); + + if (rc != 0) { + fputs("Could not create local files and directories\n", ERRORFILE); + goto cleanup; + } + + rc = create_yarn_sysfs(user, app_id, container_id, work_dir, local_dirs); + if (rc != 0) { + fputs("Could not create user yarn sysfs directory\n", ERRORFILE); + goto cleanup; + } + +cleanup: + free(script_file_dest); + free(cred_file_dest); + return rc; +} + int launch_docker_container_as_user(const char * user, const char *app_id, const char *container_id, const char *work_dir, const char *script_name, const char *cred_file, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.h b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.h index 757bd16c63..8219a67550 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.h +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.h @@ -52,7 +52,9 @@ enum operations { REMOVE_DOCKER_CONTAINER = 13, INSPECT_DOCKER_CONTAINER = 14, RUN_AS_USER_SYNC_YARN_SYSFS = 15, - EXEC_CONTAINER = 16 + EXEC_CONTAINER = 16, + RUN_RUNC_CONTAINER = 17, + REAP_RUNC_LAYER_MOUNTS = 18 }; #define NM_GROUP_KEY "yarn.nodemanager.linux-container-executor.group" @@ -72,10 +74,14 @@ enum operations { #define TC_SUPPORT_ENABLED_KEY "feature.tc.enabled" #define MOUNT_CGROUP_SUPPORT_ENABLED_KEY "feature.mount-cgroup.enabled" #define YARN_SYSFS_SUPPORT_ENABLED_KEY "feature.yarn.sysfs.enabled" +#define RUNC_SUPPORT_ENABLED_KEY "feature.runc.enabled" #define TMP_DIR "tmp" +#define ROOT_TMP_DIR "private_slash_tmp" +#define ROOT_VAR_TMP_DIR "private_var_slash_tmp" #define COMMAND_FILE_SECTION "command-execution" extern struct passwd 
*user_detail; +extern struct section executor_cfg; //function used to load the configurations present in the secure config void read_executor_config(const char* file_name); @@ -175,6 +181,9 @@ int delete_as_user(const char *user, // assumed to be an absolute path. int list_as_user(const char *target_dir); +// Check the pidfile as the node manager. File should not exist. +int check_pidfile_as_nm(const char* filename); + // set the uid and gid of the node manager. This is used when doing some // priviledged operations for setting the effective uid and gid. void set_nm_uid(uid_t user, gid_t group); @@ -244,6 +253,10 @@ int create_directory_for_user(const char* path); int change_user(uid_t user, gid_t group); +int change_effective_user(uid_t user, gid_t group); + +int change_effective_user_to_nm(); + int mount_cgroup(const char *pair, const char *hierarchy); int check_dir(const char* npath, mode_t st_mode, mode_t desired, @@ -255,6 +268,14 @@ int create_validate_dir(const char* npath, mode_t perm, const char* path, /** Check if a feature is enabled in the specified configuration. */ int is_feature_enabled(const char* feature_key, int default_value, struct section *cfg); +char* get_exit_code_file(const char* pid_file); + +int wait_and_write_exit_code(pid_t pid, const char* exit_code_file); + +int setup_container_paths(const char* user, const char* app_id, + const char *container_id, const char* work_dir, const char* script_path, + const char *cred_path, int https, const char *keystore_file, const char *truststore_file, + char * const* local_dirs, char* const* log_dirs); /** Check if tc (traffic control) support is enabled in configuration. */ int is_tc_support_enabled(); @@ -341,3 +362,9 @@ int remove_docker_container(char **argv, int argc); * Check if terminal feature is enabled */ int is_terminal_support_enabled(); + + +/** + * Check if runC feature is enabled + */ +int is_runc_support_enabled(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c index ce3e21ee54..01c054a665 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c @@ -26,6 +26,8 @@ #include "modules/cgroups/cgroups-operations.h" #include "modules/devices/devices-module.h" #include "utils/string-utils.h" +#include "runc/runc.h" +#include "runc/runc_reap.h" #include #include @@ -35,45 +37,33 @@ #include static void display_usage(FILE *stream) { + const char* disabled = "[DISABLED]"; + const char* enabled = " "; + + fputs("Usage: container-executor --checksetup\n" + " container-executor --mount-cgroups " + "...\n", stream); + + const char* de = is_tc_support_enabled() ? 
enabled : disabled; fprintf(stream, - "Usage: container-executor --checksetup\n" - " container-executor --mount-cgroups " - "\n" ); + "%s container-executor --tc-modify-state \n" + "%s container-executor --tc-read-state \n" + "%s container-executor --tc-read-stats \n", + de, de, de); - if(is_tc_support_enabled()) { - fprintf(stream, - " container-executor --tc-modify-state \n" - " container-executor --tc-read-state \n" - " container-executor --tc-read-stats \n" ); - } else { - fprintf(stream, - "[DISABLED] container-executor --tc-modify-state \n" - "[DISABLED] container-executor --tc-read-state \n" - "[DISABLED] container-executor --tc-read-stats \n"); - } + de = is_terminal_support_enabled() ? enabled : disabled; + fprintf(stream, "%s container-executor --exec-container \n", de); - if(is_docker_support_enabled()) { - fprintf(stream, - " container-executor --run-docker \n" - " container-executor --remove-docker-container [hierarchy] " - "\n" - " container-executor --inspect-docker-container \n"); - } else { - fprintf(stream, - "[DISABLED] container-executor --run-docker \n" - "[DISABLED] container-executor --remove-docker-container [hierarchy] " - "\n" - "[DISABLED] container-executor --inspect-docker-container " - " ... \n"); - } + de = is_docker_support_enabled() ? enabled : disabled; + fprintf(stream, "%s container-executor --run-docker \n", de); + fprintf(stream, "%s container-executor --remove-docker-container [hierarchy] \n", de); + fprintf(stream, "%s container-executor --inspect-docker-container \n", de); - if (is_terminal_support_enabled()) { - fprintf(stream, - " container-executor --exec-container \n"); - } else { - fprintf(stream, - "[DISABLED] container-executor --exec-container \n"); - } + de = is_runc_support_enabled() ? enabled : disabled; + fprintf(stream, + "%s container-executor --run-runc-container \n", de); + fprintf(stream, + "%s container-executor --reap-runc-layer-mounts \n", de); fprintf(stream, " container-executor \n" @@ -85,27 +75,21 @@ static void display_usage(FILE *stream) { INITIALIZE_CONTAINER, LAUNCH_CONTAINER); if(is_tc_support_enabled()) { - fprintf(stream, "optional-tc-command-file\n"); + fputs("optional-tc-command-file\n", stream); } else { - fprintf(stream, "\n"); + fputs("\n", stream); } - if(is_docker_support_enabled()) { - fprintf(stream, - " launch docker container: %2d appid containerid workdir " + de = is_docker_support_enabled() ? 
enabled : disabled; + fprintf(stream, + "%11s launch docker container: %2d appid containerid workdir " "container-script tokens pidfile nm-local-dirs nm-log-dirs " - "docker-command-file resources ", LAUNCH_DOCKER_CONTAINER); - } else { - fprintf(stream, - "[DISABLED] launch docker container: %2d appid containerid workdir " - "container-script tokens pidfile nm-local-dirs nm-log-dirs " - "docker-command-file resources ", LAUNCH_DOCKER_CONTAINER); - } + "docker-command-file resources ", de, LAUNCH_DOCKER_CONTAINER); if(is_tc_support_enabled()) { - fprintf(stream, "optional-tc-command-file\n"); + fputs("optional-tc-command-file\n", stream); } else { - fprintf(stream, "\n"); + fputs("\n", stream); } fprintf(stream, @@ -244,7 +228,7 @@ static void display_feature_disabled_message(const char* name) { fprintf(ERRORFILE, "Feature disabled: %s\n", name); } -/* Use to store parsed input parmeters for various operations */ +/* Use to store parsed input parameters for various operations */ static struct { char *cgroups_hierarchy; char *traffic_control_command_file; @@ -267,6 +251,7 @@ static struct { const char *target_dir; int container_pid; int signal; + int runc_layer_count; const char *command_file; } cmd_input; @@ -435,6 +420,44 @@ static int validate_arguments(int argc, char **argv , int *operation) { } } + if (strcmp("--run-runc-container", argv[1]) == 0) { + if (is_runc_support_enabled()) { + if (argc != 3) { + display_usage(stdout); + return INVALID_ARGUMENT_NUMBER; + } + optind++; + cmd_input.command_file = argv[optind++]; + *operation = RUN_RUNC_CONTAINER; + return 0; + } else { + display_feature_disabled_message("runc"); + return FEATURE_DISABLED; + } + } + + if (strcmp("--reap-runc-layer-mounts", argv[1]) == 0) { + if (is_runc_support_enabled()) { + if (argc != 3) { + display_usage(stdout); + return INVALID_ARGUMENT_NUMBER; + } + optind++; + const char* valstr = argv[optind++]; + if (sscanf(valstr, "%d", &cmd_input.runc_layer_count) != 1 + || cmd_input.runc_layer_count < 0) { + fprintf(ERRORFILE, "Bad runc layer count: %s\n", valstr); + return INVALID_COMMAND_PROVIDED; + } + *operation = REAP_RUNC_LAYER_MOUNTS; + return 0; + } else { + display_feature_disabled_message("runc"); + return FEATURE_DISABLED; + } + } + + /* Now we have to validate 'run as user' operations that don't use a 'long option' - we should fix this at some point. The validation/argument parsing here is extensive enough that it done in a separate function */ @@ -786,6 +809,16 @@ int main(int argc, char **argv) { exit_code = FEATURE_DISABLED; } break; + case RUN_RUNC_CONTAINER: + exit_code = run_runc_container(cmd_input.command_file); + break; + case REAP_RUNC_LAYER_MOUNTS: + exit_code = reap_runc_layer_mounts(cmd_input.runc_layer_count); + break; + default: + fprintf(ERRORFILE, "Unexpected operation code: %d\n", operation); + exit_code = INVALID_COMMAND_PROVIDED; + break; } if (exit_code) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/runc/runc.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/runc/runc.c new file mode 100644 index 0000000000..cde6d8add0 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/runc/runc.c @@ -0,0 +1,910 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "../modules/common/module-configs.h" +// TODO: Figure out how to address new openssl dependency for container-executor +#include + +// workaround for building on RHEL6 but running on RHEL7 +#ifndef LOOP_CTL_GET_FREE +#define LOOP_CTL_GET_FREE 0x4C82 +#endif + +#include "utils/string-utils.h" +#include "util.h" +#include "configuration.h" +#include "container-executor.h" + +#include "runc.h" +#include "runc_base_ctx.h" +#include "runc_config.h" +#include "runc_launch_cmd.h" +#include "runc_reap.h" +#include "runc_write_config.h" + +#define NUM_ROOTFS_UNMOUNT_ATTEMPTS 40 +#define MAX_ROOTFS_UNMOUNT_BACKOFF_MSEC 1000 + +// NOTE: Update init_runc_overlay_desc and destroy_runc_overlay_desc +// when this is changed. +typedef struct runc_overlay_desc_struct { + char* top_path; // top-level directory + char* mount_path; // overlay mount point under top_path + char* upper_path; // root path of upper layer under top_path + char* work_path; // overlay work path under top_path +} runc_overlay_desc; + +// NOTE: Update init_runc_mount_context and destroy_runc_mount_context +// when this is changed. +typedef struct runc_mount_context_struct { + char* src_path; // path to raw layer data + char* layer_path; // path under layer database for this layer + char* mount_path; // mount point of filesystem under layer_path + int fd; // opened file descriptor or -1 +} runc_mount_ctx; + +// NOTE: Update init_runc_launch_cmd_ctx and destroy_runc_launch_cmd_ctx +// when this is changed. 
+typedef struct runc_launch_cmd_context_struct { + runc_base_ctx base_ctx; // run root and layer lock + runc_overlay_desc upper; // writable upper layer descriptor + runc_mount_ctx* layers; // layer mount info + unsigned int num_layers; // number of layer mount contexts +} runc_launch_cmd_ctx; + +int runc_module_enabled(const struct configuration *conf) { + struct section *section = get_configuration_section(CONTAINER_EXECUTOR_CFG_RUNC_SECTION, conf); + if (section != NULL) { + return module_enabled(section, CONTAINER_EXECUTOR_CFG_RUNC_SECTION); + } + return 0; +} + +static void init_runc_overlay_desc(runc_overlay_desc* desc) { + memset(desc, 0, sizeof(*desc)); +} + +static void destroy_runc_overlay_desc(runc_overlay_desc* desc) { + if (desc != NULL) { + free(desc->top_path); + free(desc->mount_path); + free(desc->upper_path); + free(desc->work_path); + } +} + +static void init_runc_mount_ctx(runc_mount_ctx* ctx) { + memset(ctx, 0, sizeof(*ctx)); + ctx->fd = -1; +} + +static void destroy_runc_mount_ctx(runc_mount_ctx* ctx) { + if (ctx != NULL) { + free(ctx->src_path); + free(ctx->layer_path); + free(ctx->mount_path); + if (ctx->fd != -1) { + close(ctx->fd); + } + } +} + +static void init_runc_launch_cmd_ctx(runc_launch_cmd_ctx* ctx) { + memset(ctx, 0, sizeof(*ctx)); + init_runc_base_ctx(&ctx->base_ctx); + init_runc_overlay_desc(&ctx->upper); +} + +static void destroy_runc_launch_cmd_ctx(runc_launch_cmd_ctx* ctx) { + if (ctx != NULL) { + if (ctx->layers != NULL) { + for (unsigned int i = 0; i < ctx->num_layers; ++i) { + destroy_runc_mount_ctx(&ctx->layers[i]); + } + free(ctx->layers); + } + destroy_runc_overlay_desc(&ctx->upper); + destroy_runc_base_ctx(&ctx->base_ctx); + } +} + +static runc_launch_cmd_ctx* alloc_runc_launch_cmd_ctx() { + runc_launch_cmd_ctx* ctx = malloc(sizeof(*ctx)); + if (ctx != NULL) { + init_runc_launch_cmd_ctx(ctx); + } + return ctx; +} + +static void free_runc_launch_cmd_ctx(runc_launch_cmd_ctx* ctx) { + if (ctx != NULL) { + destroy_runc_launch_cmd_ctx(ctx); + free(ctx); + } +} + +static runc_launch_cmd_ctx* setup_runc_launch_cmd_ctx() { + runc_launch_cmd_ctx* ctx = alloc_runc_launch_cmd_ctx(); + if (ctx == NULL) { + fputs("Cannot allocate memory\n", ERRORFILE); + return NULL; + } + + if (!open_runc_base_ctx(&ctx->base_ctx)) { + free_runc_launch_cmd_ctx(ctx); + return NULL; + } + + return ctx; +} + +/** + * Compute a digest of a layer based on the layer's pathname. + * Returns the malloc'd digest hexstring or NULL if there was an error. + */ +static char* compute_layer_hash(const char* path) { + char* digest = NULL; + EVP_MD_CTX* mdctx = EVP_MD_CTX_create(); + if (mdctx == NULL) { + fputs("Unable to create EVP MD context\n", ERRORFILE); + goto cleanup; + } + + if (!EVP_DigestInit_ex(mdctx, EVP_sha256(), NULL)) { + fputs("Unable to initialize SHA256 digester\n", ERRORFILE); + goto cleanup; + } + + if (!EVP_DigestUpdate(mdctx, path, strlen(path))) { + fputs("Unable to compute layer path digest\n", ERRORFILE); + goto cleanup; + } + + unsigned char raw_digest[EVP_MAX_MD_SIZE]; + unsigned int raw_digest_len = 0; + if (!EVP_DigestFinal_ex(mdctx, raw_digest, &raw_digest_len)) { + fputs("Unable to compute layer path digest\n", ERRORFILE); + goto cleanup; + } + + digest = to_hexstring(raw_digest, raw_digest_len); + +cleanup: + if (mdctx != NULL) { + EVP_MD_CTX_destroy(mdctx); + } + return digest; +} + +/** + * Open the specified path which is expected to be a mount point. 
+ * + * Returns a valid file descriptor when the path exists and is a mount point + * or -1 if the path does not exist or is not a mount point. + * + * NOTE: The corresponding read lock must be acquired. + */ +static int open_mountpoint(const char* path) { + int fd = open(path, O_RDONLY | O_CLOEXEC); + if (fd == -1) { + if (errno != ENOENT) { + fprintf(ERRORFILE, "Error accessing mount point at %s : %s\n", path, + strerror(errno)); + } + return fd; + } + + struct stat mstat, pstat; + if (fstat(fd, &mstat) == -1) { + fprintf(ERRORFILE, "Error accessing mount point at %s : %s\n", path, + strerror(errno)); + goto close_fail; + } + if (!S_ISDIR(mstat.st_mode)) { + fprintf(ERRORFILE, "Mount point %s is not a directory\n", path); + goto close_fail; + } + + if (fstatat(fd, "..", &pstat, 0) == -1) { + fprintf(ERRORFILE, "Error accessing mount point parent of %s : %s\n", path, + strerror(errno)); + goto close_fail; + } + + // If the parent directory's device matches the child directory's device + // then we didn't cross a device boundary in the filesystem and therefore + // this is likely not a mount point. + // TODO: This assumption works for loopback mounts but would not work for + // bind mounts or some other situations. Worst case would need to + // walk the mount table and otherwise replicate the mountpoint(1) cmd. + if (mstat.st_dev == pstat.st_dev) { + goto close_fail; + } + + return fd; + +close_fail: + close(fd); + return -1; +} + +static bool init_overlay_descriptor(runc_overlay_desc* desc, + const char* run_root, const char* container_id) { + if (asprintf(&desc->top_path, "%s/%s", run_root, container_id) == -1) { + return false; + } + if (asprintf(&desc->mount_path, "%s/rootfs", desc->top_path) == -1) { + return false; + } + if (asprintf(&desc->upper_path, "%s/upper", desc->top_path) == -1) { + return false; + } + if (asprintf(&desc->work_path, "%s/work", desc->top_path) == -1) { + return false; + } + return true; +} + +static bool init_layer_mount_ctx(runc_mount_ctx* ctx, const rlc_layer_spec* spec, + const char* run_root) { + char* hash = compute_layer_hash(spec->path); + if (hash == NULL) { + return false; + } + + ctx->layer_path = get_runc_layer_path(run_root, hash); + free(hash); + if (ctx->layer_path == NULL) { + return false; + } + + ctx->mount_path = get_runc_layer_mount_path(ctx->layer_path); + if (ctx->mount_path == NULL) { + return false; + } + + ctx->fd = open(spec->path, O_RDONLY | O_CLOEXEC); + if (ctx->fd == -1) { + fprintf(ERRORFILE, "Error opening layer image at %s : %s\n", spec->path, + strerror(errno)); + return false; + } + + ctx->src_path = strdup(spec->path); + return ctx->src_path != NULL; +} + +/** + * Initialize the layers mount contexts and open each layer image as the user + * to validate the user should be allowed to access the image composed of + * these layers. + */ +static bool init_layer_mount_ctxs(runc_launch_cmd_ctx* ctx, + const rlc_layer_spec* layer_specs, unsigned int num_layers) { + ctx->layers = malloc(num_layers * sizeof(*ctx->layers)); + if (ctx->layers == NULL) { + fputs("Unable to allocate memory\n", ERRORFILE); + return false; + } + + for (unsigned int i = 0; i < num_layers; ++i) { + init_runc_mount_ctx(&ctx->layers[i]); + } + ctx->num_layers = num_layers; + + for (unsigned int i = 0; i < num_layers; ++i) { + if (!init_layer_mount_ctx(&ctx->layers[i], &layer_specs[i], + ctx->base_ctx.run_root)) { + return false; + } + } + + return true; +} + +/** + * Allocate a loop device and associate it with a file descriptor.
+ * Returns the file descriptor of the opened loop device or -1 on error. + */ +static int allocate_and_open_loop_device(char** loopdev_name_out, int src_fd) { + *loopdev_name_out = NULL; + int loopctl = open("/dev/loop-control", O_RDWR); + if (loopctl == -1) { + fprintf(ERRORFILE, "Error opening /dev/loop-control : %s\n", + strerror(errno)); + return -1; + } + + char* loopdev_name = NULL; + int loop_fd = -1; + while (true) { + int loop_num = ioctl(loopctl, LOOP_CTL_GET_FREE); + if (loop_num < 0) { + fprintf(ERRORFILE, "Error allocating a new loop device: %s\n", + strerror(errno)); + goto fail; + } + + if (asprintf(&loopdev_name, "/dev/loop%d", loop_num) == -1) { + fputs("Unable to allocate memory\n", ERRORFILE); + goto fail; + } + loop_fd = open(loopdev_name, O_RDWR | O_CLOEXEC); + if (loop_fd == -1) { + fprintf(ERRORFILE, "Unable to open loop device at %s : %s\n", + loopdev_name, strerror(errno)); + goto fail; + } + + if (ioctl(loop_fd, LOOP_SET_FD, src_fd) != -1) { + break; + } + + // EBUSY indicates another process stole this loop device + if (errno != EBUSY) { + fprintf(ERRORFILE, "Error setting loop source file: %s\n", + strerror(errno)); + goto fail; + } + + close(loop_fd); + loop_fd = -1; + free(loopdev_name); + loopdev_name = NULL; + } + + struct loop_info64 loop_info; + memset(&loop_info, 0, sizeof(loop_info)); + loop_info.lo_flags = LO_FLAGS_READ_ONLY | LO_FLAGS_AUTOCLEAR; + if (ioctl(loop_fd, LOOP_SET_STATUS64, &loop_info) == -1) { + fprintf(ERRORFILE, "Error setting loop flags: %s\n", strerror(errno)); + goto fail; + } + + close(loopctl); + *loopdev_name_out = loopdev_name; + return loop_fd; + +fail: + if (loop_fd != -1) { + close(loop_fd); + } + close(loopctl); + free(loopdev_name); + return -1; +} + +/** + * Mount a filesystem with the specified arguments, see the mount(2) manpage. + * If the mount fails an error message is printed to ERRORFILE. + * Returns true for success or false on failure. + */ +static bool do_mount(const char* src, const char* target, + const char* fs_type, unsigned long mount_flags, const char* mount_options) { + if (mount(src, target, fs_type, mount_flags, mount_options) == -1) { + const char* nullstr = "NULL"; + src = (src != NULL) ? src : nullstr; + fs_type = (fs_type != NULL) ? fs_type : nullstr; + mount_options = (mount_options != NULL) ? mount_options : nullstr; + fprintf(ERRORFILE, "Error mounting %s at %s type %s with options %s : %s\n", + src, target, fs_type, mount_options, strerror(errno)); + return false; + } + return true; +} + +/** + * Mount a filesystem and return a file descriptor opened to the mount point. + * The mount point directory will be created if necessary. + * Returns a file descriptor to the mount point or -1 if there was an error. 
+ */ +static int mount_and_open(const char* src, const char* target, + const char* fs_type, unsigned long mount_flags, const char* mount_options) { + if (mkdir(target, S_IRWXU) == -1 && errno != EEXIST) { + fprintf(ERRORFILE, "Error creating mountpoint directory at %s : %s\n", + target, strerror(errno)); + return -1; + } + + if (!do_mount(src, target, fs_type, mount_flags, mount_options)) { + return -1; + } + + return open_mountpoint(target); +} + +static int mount_layer_and_open(const runc_mount_ctx* layer) { + if (mkdir(layer->layer_path, S_IRWXU) == -1) { + if (errno != EEXIST) { + fprintf(ERRORFILE, "Error creating layer directory at %s : %s\n", + layer->layer_path, strerror(errno)); + return -1; + } + } + + char *loopdev_name = NULL; + int loopfd = allocate_and_open_loop_device(&loopdev_name, layer->fd); + if (loopfd == -1) { + return -1; + } + + int mount_fd = mount_and_open(loopdev_name, layer->mount_path, "squashfs", + MS_RDONLY, NULL); + + // If the mount worked then the mount holds the loop device open. If the mount + // failed then the loop device is no longer needed, so close it either way. + close(loopfd); + + free(loopdev_name); + return mount_fd; +} + +static bool do_mount_layers_with_lock(runc_launch_cmd_ctx* ctx) { + bool have_write_lock = false; + for (unsigned int i = 0; i < ctx->num_layers; ++i) { + int layer_mount_fd = open_mountpoint(ctx->layers[i].mount_path); + if (layer_mount_fd != -1) { + // Touch layer directory to show this existing layer was recently used. + if (utimes(ctx->layers[i].layer_path, NULL) == -1) { + // Error is not critical to container launch so just print a warning. + fprintf(ERRORFILE, "Error updating timestamps of %s : %s\n", + ctx->layers[i].layer_path, strerror(errno)); + } + } else { + if (!have_write_lock) { + if (!acquire_runc_layers_write_lock(&ctx->base_ctx)) { + return false; + } + have_write_lock = true; + // Try to open the mount point again in case another process created it + // while we were waiting for the write lock. + layer_mount_fd = open_mountpoint(ctx->layers[i].mount_path); + } + if (layer_mount_fd == -1) { + layer_mount_fd = mount_layer_and_open(&ctx->layers[i]); + + if (layer_mount_fd == -1) { + fprintf(ERRORFILE, "Unable to mount layer data from %s\n", + ctx->layers[i].src_path); + return false; + } + } + } + + // Now that the layer is mounted we can start tracking the open mount point + // for the layer rather than the descriptor to the layer image. + // The mount point references the underlying image, so we no longer need + // a direct reference to the layer image. + close(ctx->layers[i].fd); + ctx->layers[i].fd = layer_mount_fd; + } + + return true; +} + +static bool mount_layers(runc_launch_cmd_ctx* ctx) { + if (!acquire_runc_layers_read_lock(&ctx->base_ctx)) { + return false; + } + + bool result = do_mount_layers_with_lock(ctx); + + if (!release_runc_layers_lock(&ctx->base_ctx)) { + return false; + } + + return result; +} + +static char* build_overlay_options(runc_mount_ctx* layers, + unsigned int num_layers, const runc_overlay_desc* upper) { + char* result = NULL; + const int sb_incr = 16*1024; + strbuf sb; + if (!strbuf_init(&sb, sb_incr)) { + fputs("Unable to allocate memory\n", ERRORFILE); + goto cleanup; + } + + if (!strbuf_append_fmt(&sb, sb_incr, "upperdir=%s,workdir=%s,lowerdir=", + upper->upper_path, upper->work_path)) { + goto cleanup; + } + + // Overlay expects the base layer to be the last layer listed, but the + // OCI image manifest specifies the base layer first. 
+ bool need_separator = false; + for (int i = num_layers - 1; i >= 0; --i) { + char* fmt = need_separator ? ":%s" : "%s"; + if (!strbuf_append_fmt(&sb, sb_incr, fmt, layers[i].mount_path)) { + goto cleanup; + } + need_separator = true; + } + + result = strbuf_detach_buffer(&sb); + +cleanup: + strbuf_destroy(&sb); + return result; +} + +static bool create_overlay_dirs(runc_overlay_desc* od) { + if (mkdir(od->top_path, S_IRWXU) != 0) { + fprintf(ERRORFILE, "Error creating %s : %s\n", od->top_path, + strerror(errno)); + return false; + } + + if (mkdir(od->mount_path, S_IRWXU) != 0) { + fprintf(ERRORFILE, "Error creating %s : %s\n", od->mount_path, + strerror(errno)); + return false; + } + + mode_t upper_mode = S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH; + if (mkdir(od->upper_path, upper_mode) != 0) { + fprintf(ERRORFILE, "Error creating %s : %s\n", od->upper_path, + strerror(errno)); + return false; + } + + if (mkdir(od->work_path, S_IRWXU) != 0) { + fprintf(ERRORFILE, "Error creating %s : %s\n", od->work_path, + strerror(errno)); + return false; + } + + return true; +} + +static bool mount_container_rootfs(runc_launch_cmd_ctx* ctx) { + if (!create_overlay_dirs(&ctx->upper)) { + return false; + } + + if (!mount_layers(ctx)) { + return false; + } + + char* overlay_opts = build_overlay_options(ctx->layers, ctx->num_layers, + &ctx->upper); + if (overlay_opts == NULL) { + return false; + } + + bool mount_ok = do_mount("overlay", ctx->upper.mount_path, "overlay", 0, + overlay_opts); + free(overlay_opts); + if (!mount_ok) { + return false; + } + + // It would be tempting to close the layer file descriptors here since the + // overlay should also be holding references to all the layers. However + // overlay somehow does NOT hold a hard reference to underlying filesystems, + // so the layer file descriptors need to be kept open in order to prevent + // other containers from unmounting shared layers when they cleanup. + + return true; +} + +static bool unmount_and_remove(const char* path) { + if (umount(path) == -1 && errno != EINVAL && errno != ENOENT) { + if (errno == EBUSY) { + // Layer is in use by another container. 
+ return false; + } + fprintf(ERRORFILE, "Error unmounting %s : %s\n", path, strerror(errno)); + return false; + } + if (rmdir(path) == -1 && errno != ENOENT) { + fprintf(ERRORFILE, "Error removing mount directory %s : %s\n", path, + strerror(errno)); + return false; + } + return true; +} + +static bool unmount_and_remove_with_retry(const char* path, int max_attempts, + long max_backoff_msec) { + long backoff_msec = 1; + for (int i = 0; i < max_attempts - 1; ++i) { + if (unmount_and_remove(path)) { + return true; + } + struct timespec ts; + memset(&ts, 0, sizeof(ts)); + ts.tv_sec = backoff_msec / 1000; + ts.tv_nsec = (backoff_msec % 1000) * 1000 * 1000; + nanosleep(&ts, NULL); + backoff_msec *= 2; + if (backoff_msec > max_backoff_msec) { + backoff_msec = max_backoff_msec; + } + } + + return unmount_and_remove(path); +} + +static bool rmdir_recursive_fd(int fd) { + int dirfd = dup(fd); + if (dirfd == -1) { + fputs("Unable to duplicate file descriptor\n", ERRORFILE); + return false; + } + + DIR* dir = fdopendir(dirfd); + if (dir == NULL) { + fprintf(ERRORFILE, "Error deleting directory: %s\n", strerror(errno)); + return false; + } + + bool result = false; + struct dirent* de; + while ((de = readdir(dir)) != NULL) { + if (strcmp(".", de->d_name) == 0 || strcmp("..", de->d_name) == 0) { + continue; + } + + struct stat statbuf; + if (fstatat(dirfd, de->d_name, &statbuf, AT_SYMLINK_NOFOLLOW) == -1) { + if (errno == ENOENT) { + continue; + } + fprintf(ERRORFILE, "Error accessing %s : %s\n", de->d_name, + strerror(errno)); + goto cleanup; + } + + int rmflags = 0; + if (S_ISDIR(statbuf.st_mode)) { + rmflags = AT_REMOVEDIR; + int de_fd = openat(dirfd, de->d_name, O_RDONLY | O_NOFOLLOW); + if (de_fd == -1) { + if (errno == ENOENT) { + continue; + } + fprintf(ERRORFILE, "Error opening %s for delete: %s\n", de->d_name, + strerror(errno)); + goto cleanup; + } + bool ok = rmdir_recursive_fd(de_fd); + close(de_fd); + if (!ok) { + goto cleanup; + } + } + + if (unlinkat(dirfd, de->d_name, rmflags) == -1 && errno != ENOENT) { + fprintf(ERRORFILE, "Error removing %s : %s\n", de->d_name, + strerror(errno)); + goto cleanup; + } + } + + result = true; + +cleanup: + closedir(dir); + return result; +} + +static bool rmdir_recursive(const char* path) { + int fd = open(path, O_RDONLY | O_NOFOLLOW); + if (fd == -1) { + if (errno == ENOENT) { + return true; + } + fprintf(ERRORFILE, "Error opening %s for delete: %s\n", path, + strerror(errno)); + return false; + } + bool result = rmdir_recursive_fd(fd); + close(fd); + if (rmdir(path) == -1) { + fprintf(ERRORFILE, "Error deleting %s : %s\n", path, strerror(errno)); + result = false; + } + return result; +} + +static void close_layer_fds(runc_launch_cmd_ctx* ctx) { + for (unsigned int i = 0; i < ctx->num_layers; ++i) { + if (ctx->layers[i].fd != -1) { + close(ctx->layers[i].fd); + ctx->layers[i].fd = -1; + } + } +} + +/** + * Unmounts the container rootfs directory and MAY unmount layers on the host + * based on the specified number of total layer mounts on the host specified. + */ +static void cleanup_container_mounts(runc_launch_cmd_ctx* ctx, + int num_reap_layers_keep) { + unmount_and_remove_with_retry(ctx->upper.mount_path, + NUM_ROOTFS_UNMOUNT_ATTEMPTS, MAX_ROOTFS_UNMOUNT_BACKOFF_MSEC); + rmdir_recursive(ctx->upper.top_path); + reap_runc_layer_mounts_with_ctx(&ctx->base_ctx, num_reap_layers_keep); +} + +/** + * Unmounts the container rootfs directory and MAY unmount layers on the host + * based on the specified number of total layer mounts on the host specified. 
+ * + * IMPORTANT NOTE: This method may perform the unmount in a background process + * and can return before that has completed! + */ +static void background_cleanup_container_mounts(runc_launch_cmd_ctx* ctx, + int num_reap_layers_keep) { + pid_t child_pid = fork(); + if (child_pid == -1) { + fprintf(ERRORFILE, "Error forking child process: %s\n", strerror(errno)); + // try to clean it up in the foreground process + child_pid = 0; + } + if (child_pid == 0) { + cleanup_container_mounts(ctx, num_reap_layers_keep); + } +} + +static void exec_runc(const char* container_id, const char* runc_config_path, + const char* pid_file_path) { + char* runc_path = get_configuration_value(RUNC_BINARY_KEY, CONTAINER_EXECUTOR_CFG_RUNC_SECTION, get_cfg()); + if (runc_path == NULL) { + runc_path = strdup(DEFAULT_RUNC_BINARY); + if (runc_path == NULL) { + fputs("Unable to allocate memory\n", ERRORFILE); + exit(1); + } + } + + char* dir_end = strrchr(runc_config_path, '/'); + if (dir_end == NULL) { + fprintf(ERRORFILE, "Error getting bundle path from config path %s\n", + runc_config_path); + exit(1); + } + char* bundle_path = strndup(runc_config_path, dir_end - runc_config_path); + + const char* const runc_args[] = { + runc_path, "run", + "--pid-file", pid_file_path, + "-b", bundle_path, + container_id, + NULL + }; + const char* const runc_env[] = { NULL }; + + if (execve(runc_path, (char* const*)runc_args, (char* const*)runc_env) == -1) { + char* errstr = strerror(errno); + fputs("Failed to exec:", ERRORFILE); + for (const char* const* argp = runc_args; *argp != NULL; ++argp) { + fprintf(ERRORFILE, " %s", *argp); + } + fprintf(ERRORFILE, " : %s\n", errstr); + } + + exit(1); +} + +int run_runc_container(const char* command_file) { + int rc = 0; + char* exit_code_file = NULL; + char* runc_config_path = NULL; + runc_launch_cmd* rlc = NULL; + runc_launch_cmd_ctx* ctx = setup_runc_launch_cmd_ctx(); + if (ctx == NULL) { + rc = ERROR_RUNC_SETUP_FAILED; + goto cleanup; + } + + rlc = parse_runc_launch_cmd(command_file); + if (rlc == NULL) { + rc = ERROR_RUNC_SETUP_FAILED; + goto cleanup; + } + + rc = set_user(rlc->run_as_user); + if (rc != 0) { + goto cleanup; + } + + exit_code_file = get_exit_code_file(rlc->pid_file); + if (exit_code_file == NULL) { + rc = OUT_OF_MEMORY; + goto cleanup; + } + + const char* work_dir = rlc->config.process.cwd->valuestring; + rc = setup_container_paths(rlc->username, rlc->app_id, rlc->container_id, + work_dir, rlc->script_path, rlc->cred_path, rlc->https, rlc->keystore_path, + rlc->truststore_path, rlc->local_dirs, rlc->log_dirs); + if (rc != 0) { + goto cleanup; + } + + rc = ERROR_RUNC_RUN_FAILED; + if (!is_valid_runc_launch_cmd(rlc)) { + goto cleanup; + } + + if (!init_layer_mount_ctxs(ctx, rlc->layers, rlc->num_layers)) { + goto cleanup; + } + + if (!init_overlay_descriptor(&ctx->upper, ctx->base_ctx.run_root, + rlc->container_id)) { + goto cleanup; + } + + runc_config_path = write_runc_runc_config(rlc, ctx->upper.mount_path); + if (runc_config_path == NULL) { + goto cleanup; + } + + if (seteuid(0) != 0) { + fputs("Unable to become root\n", ERRORFILE); + goto cleanup; + } + + if (!mount_container_rootfs(ctx)) { + goto umount_and_cleanup; + } + + pid_t child_pid = fork(); + if (child_pid == 0) { + exec_runc(rlc->container_id, runc_config_path, rlc->pid_file); + exit(1); // just in case exec_runc returns somehow + } else if (child_pid == -1) { + fprintf(ERRORFILE, "Error cannot fork: %s\n", strerror(errno)); + rc = OUT_OF_MEMORY; + goto umount_and_cleanup; + } + + rc = 
wait_and_write_exit_code(child_pid, exit_code_file); + +umount_and_cleanup: + // Container is no longer running, so layer references are no longer desired. + close_layer_fds(ctx); + + // Cleanup mounts in a background process to keep it off the critical path. + background_cleanup_container_mounts(ctx, rlc->num_reap_layers_keep); + +cleanup: + free(exit_code_file); + free(runc_config_path); + free_runc_launch_cmd(rlc); + free_runc_launch_cmd_ctx(ctx); + return rc; +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/runc/runc.h b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/runc/runc.h new file mode 100644 index 0000000000..b3e86e8b9a --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/runc/runc.h @@ -0,0 +1,33 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef RUNC_RUNC_H +#define RUNC_RUNC_H + +#include + +/** + * Check to see if runC is enabled. + */ +int runc_module_enabled(const struct configuration *conf); + +/** + * Run a container via runC. + */ +int run_runc_container(const char* command_file); + +#endif /* RUNC_RUNC_H */ diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/runc/runc_base_ctx.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/runc/runc_base_ctx.c new file mode 100644 index 0000000000..f4a198580c --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/runc/runc_base_ctx.c @@ -0,0 +1,307 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "configuration.h" +#include "container-executor.h" +#include "util.h" + +#include "runc_base_ctx.h" +#include "runc_config.h" + +#define LAYER_MOUNT_SUFFIX "/mnt" +#define LAYER_MOUNT_SUFFIX_LEN (sizeof(LAYER_MOUNT_SUFFIX) -1) + +/** + * Get the path to the runtime layers directory. + * + * Returns the heap-allocated path to the layers directory or NULL on error. + */ +char* get_runc_layers_path(const char* run_root) { + char* layers_path = NULL; + if (asprintf(&layers_path, "%s/layers", run_root) == -1) { + layers_path = NULL; + } + return layers_path; +} + +/** + * Get the path to a layer directory. + * + * Returns the heap-allocated path to the layer directory or NULL on error. + */ +char* get_runc_layer_path(const char* run_root, const char* layer_name) { + char* layer_path = NULL; + if (asprintf(&layer_path, "%s/layers/%s", run_root, layer_name) == -1) { + layer_path = NULL; + } + return layer_path; +} + +/** + * Get the path to a layer's mountpoint. + * + * Returns the heap-allocated path to the layer's mountpoint or NULL on error. + */ +char* get_runc_layer_mount_path(const char* layer_path) { + char* mount_path = NULL; + if (asprintf(&mount_path, "%s" LAYER_MOUNT_SUFFIX, layer_path) == -1) { + mount_path = NULL; + } + return mount_path; +} + +/** + * Get the layer path from a layer's mountpoint. + * + * Returns the heap-allocated path to the layer directory or NULL on error. + */ +char* get_runc_layer_path_from_mount_path(const char* mount_path) { + size_t mount_path_len = strlen(mount_path); + if (mount_path_len <= LAYER_MOUNT_SUFFIX_LEN) { + return NULL; + } + size_t layer_path_len = mount_path_len - LAYER_MOUNT_SUFFIX_LEN; + const char* suffix = mount_path + layer_path_len; + if (strcmp(suffix, LAYER_MOUNT_SUFFIX)) { + return NULL; + } + return strndup(mount_path, layer_path_len); +} + +/** + * Creates the run root directory and layers directory structure + * underneath if necessary. + * Returns the malloc'd run root path or NULL if there was an error. + */ +static char* setup_runc_run_root_directories() { + char* layers_path = NULL; + char* run_root = get_configuration_value(RUNC_RUN_ROOT_KEY, + CONTAINER_EXECUTOR_CFG_RUNC_SECTION, get_cfg()); + if (run_root == NULL) { + run_root = strdup(DEFAULT_RUNC_ROOT); + if (run_root == NULL) { + goto mem_fail; + } + } + + if (mkdir(run_root, S_IRWXU) != 0 && errno != EEXIST) { + fprintf(ERRORFILE, "Error creating runC run root at %s : %s\n", run_root, + strerror(errno)); + goto fail; + } + + layers_path = get_runc_layers_path(run_root); + if (layers_path == NULL) { + goto mem_fail; + } + + if (mkdir(layers_path, S_IRWXU) != 0 && errno != EEXIST) { + fprintf(ERRORFILE, "Error creating layers directory at %s : %s\n", + layers_path, strerror(errno)); + goto fail; + } + + free(layers_path); + return run_root; + +fail: + free(layers_path); + free(run_root); + return NULL; + +mem_fail: + fputs("Cannot allocate memory\n", ERRORFILE); + goto fail; +} + + +/** + * Initialize an uninitialized runC base context. + */ +void init_runc_base_ctx(runc_base_ctx* ctx) { + memset(ctx, 0, sizeof(*ctx)); + ctx->layers_lock_fd = -1; + ctx->layers_lock_state = F_UNLCK; +} + +/** + * Releases the resources underneath a runC base context but does NOT free the + * structure itself. This is particularly useful for stack-allocated contexts + * or structures that embed the context. + * free_runc_base_ctx should be used for heap-allocated contexts. 
+ */ +void destroy_runc_base_ctx(runc_base_ctx* ctx) { + if (ctx != NULL) { + free(ctx->run_root); + if (ctx->layers_lock_fd != -1) { + close(ctx->layers_lock_fd); + } + } +} + +/** + * Allocates and initializes a runC base context. + * + * Returns a pointer to the allocated and initialized context or NULL on error. + */ +runc_base_ctx* alloc_runc_base_ctx() { + runc_base_ctx* ctx = malloc(sizeof(*ctx)); + if (ctx != NULL) { + init_runc_base_ctx(ctx); + } + return ctx; +} + +/** + * Free a runC base context and all memory associated with it. + */ +void free_runc_base_ctx(runc_base_ctx* ctx) { + destroy_runc_base_ctx(ctx); + free(ctx); +} + +/** + * Opens the base context for use. This will create the container runtime + * root directory and layer lock files, if necessary. + * + * Returns true on success or false if there was an error. + */ +bool open_runc_base_ctx(runc_base_ctx* ctx) { + ctx->run_root = setup_runc_run_root_directories(); + if (ctx->run_root == NULL) { + return false; + } + + char* lock_path = get_runc_layer_path(ctx->run_root, "lock"); + if (lock_path == NULL) { + fputs("Cannot allocate memory\n", ERRORFILE); + return false; + } + + bool result = true; + ctx->layers_lock_fd = open(lock_path, O_RDWR | O_CREAT | O_CLOEXEC, S_IRWXU); + if (ctx->layers_lock_fd == -1) { + fprintf(ERRORFILE, "Cannot open lock file %s : %s\n", lock_path, + strerror(errno)); + result = false; + } + + free(lock_path); + return result; +} + +/** + * Allocates and opens a base context. + * + * Returns a pointer to the context or NULL on error. + */ +runc_base_ctx* setup_runc_base_ctx() { + runc_base_ctx* ctx = alloc_runc_base_ctx(); + if (ctx != NULL) { + if (!open_runc_base_ctx(ctx)) { + free_runc_base_ctx(ctx); + ctx = NULL; + } + } + return ctx; +} + + +static bool do_lock_cmd(int fd, int lock_cmd) { + struct flock fl; + memset(&fl, 0, sizeof(fl)); + fl.l_type = lock_cmd; + fl.l_whence = SEEK_SET; + fl.l_start = 0; + fl.l_len = 0; + while (true) { + int rc = fcntl(fd, F_SETLKW, &fl); + if (rc == 0) { + return true; + } + if (errno != EINTR) { + fprintf(ERRORFILE, "Error updating lock: %s\n", strerror(errno)); + return false; + } + } +} + +/** + * Acquire the layer read lock. + * + * Returns true on success or false on error. + */ +bool acquire_runc_layers_read_lock(runc_base_ctx* ctx) { + if (ctx->layers_lock_state == F_RDLCK) { + return true; + } + if (do_lock_cmd(ctx->layers_lock_fd, F_RDLCK)) { + ctx->layers_lock_state = F_RDLCK; + return true; + } + return false; +} + +/** + * Acquire the layer write lock. + * + * Returns true on success or false on error. + */ +bool acquire_runc_layers_write_lock(runc_base_ctx* ctx) { + if (ctx->layers_lock_state == F_WRLCK) { + return true; + } + if (ctx->layers_lock_state == F_RDLCK) { + // Release before trying to acquire write lock, otherwise two processes + // attempting to upgrade from read lock to a write lock can deadlock. + if (!release_runc_layers_lock(ctx)) { + return false; + } + } + if (do_lock_cmd(ctx->layers_lock_fd, F_WRLCK)) { + ctx->layers_lock_state = F_WRLCK; + return true; + } + return false; +} + +/** + * Release the layer lock. + * + * Returns true on success or false on error.
+ */ +bool release_runc_layers_lock(runc_base_ctx* ctx) { + if (ctx->layers_lock_state == F_UNLCK) { + return true; + } + if (do_lock_cmd(ctx->layers_lock_fd, F_UNLCK)) { + ctx->layers_lock_state = F_UNLCK; + return true; + } + return false; +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/runc/runc_base_ctx.h b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/runc/runc_base_ctx.h new file mode 100644 index 0000000000..eb2e825f98 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/runc/runc_base_ctx.h @@ -0,0 +1,123 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef RUNC_RUNC_BASE_CTX_H +#define RUNC_RUNC_BASE_CTX_H + +#include + +// Length of layer basename, equal to the hexstring length of SHA256 +#define LAYER_NAME_LENGTH 64 + +// NOTE: Update init_runc_base_ctx and destroy_runc_base_ctx when this is changed. +typedef struct runc_base_ctx_struct { + char* run_root; // root directory of filesystem database + int layers_lock_fd; // file descriptor for layers lock file + int layers_lock_state; // lock state: F_RDLCK, F_WRLCK, or F_UNLCK +} runc_base_ctx; + + +/** + * Allocates and initializes a runC base context. + * + * Returns a pointer to the allocated and initialized context or NULL on error. + */ +runc_base_ctx* alloc_runc_base_ctx(); + +/** + * Free a runC base context and all memory assruncated with it. + */ +void free_runc_base_ctx(runc_base_ctx* ctx); + +/** + * Initialize an uninitialized runC base context. + */ +void init_runc_base_ctx(runc_base_ctx* ctx); + +/** + * Releases the resources underneath a runC base context but does NOT free the + * structure itself. This is particularly useful for stack-allocated contexts + * or structures that embed the context. + * free_runc_base_ctx should be used for heap-allocated contexts. + */ +void destroy_runc_base_ctx(runc_base_ctx* ctx); + +/** + * Opens the base context for use. This will create the container runtime + * root directory and layer lock files, if necessary. + * + * Returns true on success or false if there was an error. + */ +bool open_runc_base_ctx(runc_base_ctx* ctx); + +/** + * Allocates and opens a base context. + * + * Returns a pointer to the context or NULL on error. + */ +runc_base_ctx* setup_runc_base_ctx(); + +/** + * Acquire the layers read lock. + * + * Returns true on success or false on error. + */ +bool acquire_runc_layers_read_lock(runc_base_ctx* ctx); + +/** + * Acquire the layers write lock. + * + * Returns true on success or false on error. 
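+ *
+ * Upgrading from the read lock releases the read lock first (two holders
+ * upgrading simultaneously would otherwise deadlock), so the layers
+ * directory may change before the write lock is granted.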
+ */ +bool acquire_runc_layers_write_lock(runc_base_ctx* ctx); + +/** + * Release the layers lock. + * + * Returns true on success or false on error. + */ +bool release_runc_layers_lock(runc_base_ctx* ctx); + +/** + * Get the path to the runtime layers directory. + * + * Returns the heap-allocated path to the layers directory or NULL on error. + */ +char* get_runc_layers_path(const char* run_root); + +/** + * Get the path to a layer directory. + * + * Returns the heap-allocated path to the layer directory or NULL on error. + */ +char* get_runc_layer_path(const char* run_root, const char* layer_name); + +/** + * Get the path to a layer's mountpoint. + * + * Returns the heap-allocated path to the layer's mountpoint or NULL on error. + */ +char* get_runc_layer_mount_path(const char* layer_path); + +/** + * Get the layer path from a layer's mountpoint. + * + * Returns the heap-allocated path to the layer directory or NULL on error. + */ +char* get_runc_layer_path_from_mount_path(const char* mount_path); + +#endif /* RUNC_RUNC_BASE_CTX_H */ diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/runc/runc_config.h b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/runc/runc_config.h new file mode 100644 index 0000000000..cc0415ee5b --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/runc/runc_config.h @@ -0,0 +1,33 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef RUNC_RUNC_CONFIG_H +#define RUNC_RUNC_CONFIG_H + +// Section for all runC config keys +#define CONTAINER_EXECUTOR_CFG_RUNC_SECTION "runc" + +// Configuration for top-level directory of runtime database +// Ideally this should be configured to a tmpfs or other RAM-based filesystem. 
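+//
+// An illustrative container-executor.cfg snippet using the keys defined below
+// (the values shown are the compiled-in defaults):
+//   [runc]
+//     runc.run-root=/run/yarn-container-executor
+//     runc.binary=/usr/bin/runc
+//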
+#define RUNC_RUN_ROOT_KEY "runc.run-root" +#define DEFAULT_RUNC_ROOT "/run/yarn-container-executor" + +// Configuration for the path to the runC executable on the host +#define RUNC_BINARY_KEY "runc.binary" +#define DEFAULT_RUNC_BINARY "/usr/bin/runc" + +#endif /* RUNC_RUNC_CONFIG_H */ diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/runc/runc_launch_cmd.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/runc/runc_launch_cmd.c new file mode 100644 index 0000000000..5d336ffd2f --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/runc/runc_launch_cmd.c @@ -0,0 +1,765 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include +#include +#include +#include +#include +#include +#include + +#include "util.h" +#include "utils/cJSON/cJSON.h" +#include "utils/file-utils.h" +#include "utils/string-utils.h" +#include "utils/mount-utils.h" + +#include "configuration.h" +#include "container-executor.h" +#include "runc_config.h" +#include "runc_launch_cmd.h" + +#define SQUASHFS_MEDIA_TYPE "application/vnd.squashfs" + +static void free_rlc_layers(rlc_layer_spec* layers, unsigned int num_layers) { + for (unsigned int i = 0; i < num_layers; ++i) { + free(layers[i].media_type); + free(layers[i].path); + } + free(layers); +} + +/** + * Free a NULL-terminated array of pointers + */ +static void free_ntarray(char** parray) { + if (parray != NULL) { + for (char** p = parray; *p != NULL; ++p) { + free(*p); + } + free(parray); + } +} + +/** + * Free a runC launch command structure and all memory assruncated with it. 
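+ *
+ * Passing NULL is a no-op. A partially-populated command (for example, one
+ * abandoned partway through parsing) is also safe to free, since unset
+ * fields in the calloc'd structure are NULL.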
+ */ +void free_runc_launch_cmd(runc_launch_cmd* rlc) { + if (rlc != NULL) { + free(rlc->run_as_user); + free(rlc->username); + free(rlc->app_id); + free(rlc->container_id); + free(rlc->pid_file); + free(rlc->script_path); + free(rlc->cred_path); + free_ntarray(rlc->local_dirs); + free_ntarray(rlc->log_dirs); + free_rlc_layers(rlc->layers, rlc->num_layers); + cJSON_Delete(rlc->config.hostname); + cJSON_Delete(rlc->config.linux_config); + cJSON_Delete(rlc->config.mounts); + cJSON_Delete(rlc->config.process.args); + cJSON_Delete(rlc->config.process.cwd); + cJSON_Delete(rlc->config.process.env); + free(rlc); + } +} + +static cJSON* parse_json_file(const char* filename) { + char* data = read_file_to_string_as_nm_user(filename); + if (data == NULL) { + fprintf(ERRORFILE, "Cannot read command file %s\n", filename); + return NULL; + } + + const char* parse_error_location = NULL; + cJSON* json = cJSON_ParseWithOpts(data, &parse_error_location, 1); + if (json == NULL) { + fprintf(ERRORFILE, "Error parsing command file %s at byte offset %ld\n", + filename, parse_error_location - data); + } + + free(data); + return json; +} + +static char** parse_dir_list(const cJSON* dirs_json) { + if (!cJSON_IsArray(dirs_json)) { + return NULL; + } + + int num_dirs = cJSON_GetArraySize(dirs_json); + if (num_dirs <= 0) { + return NULL; + } + + char** dirs = calloc(num_dirs + 1, sizeof(*dirs)); // +1 for terminating NULL + int i = 0; + const cJSON* e; + cJSON_ArrayForEach(e, dirs_json) { + if (!cJSON_IsString(e)) { + free_ntarray(dirs); + return NULL; + } + dirs[i++] = strdup(e->valuestring); + } + + return dirs; +} + +static bool parse_runc_launch_cmd_layer(rlc_layer_spec* layer_out, + const cJSON* layer_json) { + if (!cJSON_IsObject(layer_json)) { + fputs("runC launch command layer is not an object\n", ERRORFILE); + return false; + } + + const cJSON* media_type_json = cJSON_GetObjectItemCaseSensitive(layer_json, + "mediaType"); + if (!cJSON_IsString(media_type_json)) { + fputs("Bad/Missing media type for runC launch command layer\n", ERRORFILE); + return false; + } + + const cJSON* path_json = cJSON_GetObjectItemCaseSensitive(layer_json, "path"); + if (!cJSON_IsString(path_json)) { + fputs("Bad/Missing path for runC launch command layer\n", ERRORFILE); + return false; + } + + layer_out->media_type = strdup(media_type_json->valuestring); + layer_out->path = strdup(path_json->valuestring); + return true; +} + +static rlc_layer_spec* parse_runc_launch_cmd_layers(unsigned int* num_layers_out, + const cJSON* layers_json) { + if (!cJSON_IsArray(layers_json)) { + fputs("Bad/Missing runC launch command layers\n", ERRORFILE); + return NULL; + } + + unsigned int num_layers = (unsigned int) cJSON_GetArraySize(layers_json); + if (num_layers <= 0) { + return NULL; + } + + rlc_layer_spec* layers = calloc(num_layers, sizeof(*layers)); + if (layers == NULL) { + fprintf(ERRORFILE, "Cannot allocate memory for %d layers\n", + num_layers + 1); + return NULL; + } + + unsigned int layer_index = 0; + const cJSON* e; + cJSON_ArrayForEach(e, layers_json) { + if (layer_index >= num_layers) { + fputs("Iterating past end of layer array\n", ERRORFILE); + free_rlc_layers(layers, layer_index); + return NULL; + } + + if (!parse_runc_launch_cmd_layer(&layers[layer_index], e)) { + free_rlc_layers(layers, layer_index); + return NULL; + } + + ++layer_index; + } + + *num_layers_out = layer_index; + return layers; +} + +static int parse_json_int(cJSON* json) { + if (!cJSON_IsNumber(json)) { + fputs("Bad/Missing runC int\n", ERRORFILE); + return -1; + } 
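+  // valueint is cJSON's integer view of the parsed number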
+ return json->valueint; +} + +static int parse_runc_launch_cmd_runc_config(runc_config* rc, cJSON* rc_json) { + if (!cJSON_IsObject(rc_json)) { + fputs("Bad/Missing runC runtime config in launch command\n", ERRORFILE); + return -1; + } + rc->hostname = cJSON_DetachItemFromObjectCaseSensitive(rc_json, "hostname"); + rc->linux_config = cJSON_DetachItemFromObjectCaseSensitive(rc_json, "linux"); + rc->mounts = cJSON_DetachItemFromObjectCaseSensitive(rc_json, "mounts"); + + cJSON* process_json = cJSON_GetObjectItemCaseSensitive(rc_json, "process"); + if (!cJSON_IsObject(process_json)) { + fputs("Bad/Missing process section in runC config\n", ERRORFILE); + return -1; + } + rc->process.args = cJSON_DetachItemFromObjectCaseSensitive( + process_json, "args"); + rc->process.cwd = cJSON_DetachItemFromObjectCaseSensitive( + process_json, "cwd"); + rc->process.env = cJSON_DetachItemFromObjectCaseSensitive( + process_json, "env"); + + return 0; +} + +static bool is_valid_layer_media_type(char* media_type) { + if (media_type == NULL) { + return false; + } + + if (strcmp(SQUASHFS_MEDIA_TYPE, media_type)) { + fprintf(ERRORFILE, "Unrecognized layer media type: %s\n", media_type); + return false; + } + + return true; +} + +static bool is_valid_runc_launch_cmd_layers(rlc_layer_spec* layers, + unsigned int num_layers) { + if (layers == NULL) { + return false; + } + + for (unsigned int i = 0; i < num_layers; ++i) { + if (!is_valid_layer_media_type(layers[i].media_type)) { + return false; + } + if (layers[i].path == NULL) { + return false; + } + } + + return true; +} + +static bool is_valid_runc_config_linux_resources(const cJSON* rclr) { + if (!cJSON_IsObject(rclr)) { + fputs("runC config linux resources missing or not an object\n", ERRORFILE); + return false; + } + + bool all_sections_ok = true; + const cJSON* e; + cJSON_ArrayForEach(e, rclr) { + if (strcmp("blockIO", e->string) == 0) { + // block I/O settings allowed + } else if (strcmp("cpu", e->string) == 0) { + // cpu settings allowed + } else { + fprintf(ERRORFILE, + "Unrecognized runC config linux resources element: %s\n", e->string); + all_sections_ok = false; + } + } + + return all_sections_ok; +} + +static bool is_valid_runc_config_linux_seccomp(const cJSON* rcls) { + if (!cJSON_IsObject(rcls)) { + fputs("runC config linux seccomp missing or not an object\n", ERRORFILE); + return false; + } + + bool all_sections_ok = true; + const cJSON* e; + cJSON_ArrayForEach(e, rcls) { + if (strcmp("defaultAction", e->string) == 0) { + // defaultAction allowed + } else if (strcmp("architectures", e->string) == 0) { + // architecture settings allowed + } else if (strcmp("flags", e->string) == 0) { + // flags allowed + } else if (strcmp("syscalls", e->string) == 0) { + // syscalls allowed + } else { + fprintf(ERRORFILE, + "Unrecognized runC config linux seccomp element: %s\n", e->string); + all_sections_ok = false; + } + } + + return all_sections_ok; + +} + +static bool is_valid_runc_config_linux(const cJSON* rcl) { + if (!cJSON_IsObject(rcl)) { + fputs("runC config linux section missing or not an object\n", ERRORFILE); + return false; + } + + bool all_sections_ok = true; + const cJSON* e; + cJSON_ArrayForEach(e, rcl) { + if (strcmp("cgroupsPath", e->string) == 0) { + if (!cJSON_IsString(e)) { + all_sections_ok = false; + } + } else if (strcmp("resources", e->string) == 0) { + all_sections_ok &= is_valid_runc_config_linux_resources(e); + } else if (strcmp("seccomp", e->string) == 0) { + all_sections_ok &= is_valid_runc_config_linux_seccomp(e); + } else { + 
fprintf(ERRORFILE, "Unrecognized runC config linux element: %s\n", + e->string); + all_sections_ok = false; + } + } + + return all_sections_ok; +} + +static bool is_valid_mount_type(const char *type) { + if (strcmp("bind", type)) { + fprintf(ERRORFILE, "Invalid runC mount type '%s'\n", type); + return false; + } + return true; +} + +static mount_options* get_mount_options(const cJSON* mo) { + if (!cJSON_IsArray(mo)) { + fputs("runC config mount options not an array\n", ERRORFILE); + return NULL; + } + + unsigned int num_options = cJSON_GetArraySize(mo); + + mount_options *options = (mount_options *) calloc(1, sizeof(*options)); + char **options_array = (char **) calloc(num_options + 1, sizeof(char*)); + + options->num_opts = num_options; + options->opts = options_array; + + bool has_rbind = false; + bool has_rprivate = false; + int i = 0; + const cJSON* e; + cJSON_ArrayForEach(e, mo) { + if (!cJSON_IsString(e)) { + fputs("runC config mount option is not a string\n", ERRORFILE); + free_mount_options(options); + return NULL; + } + if (strcmp("rbind", e->valuestring) == 0) { + has_rbind = true; + } else if (strcmp("rprivate", e->valuestring) == 0) { + has_rprivate = true; + } else if (strcmp("rw", e->valuestring) == 0) { + options->rw = 1; + } else if (strcmp("ro", e->valuestring) == 0) { + options->rw = 0; + } + + options->opts[i] = strdup(e->valuestring); + i++; + } + options->opts[i] = NULL; + + if (!has_rbind) { + fputs("runC config mount options missing rbind\n", ERRORFILE); + free_mount_options(options); + return NULL; + } + if (!has_rprivate) { + fputs("runC config mount options missing rprivate\n", ERRORFILE); + free_mount_options(options); + return NULL; + } + + return options; +} + +static int get_runc_mounts(mount* mounts, const cJSON* rcm) { + if (!cJSON_IsArray(rcm)) { + fputs("runC config mount entry is not an object\n", ERRORFILE); + return INVALID_MOUNT; + } + + bool has_type = false; + const cJSON *e; + const cJSON *mount; + int i = 0; + int ret = 0; + cJSON_ArrayForEach(mount, rcm) { + cJSON_ArrayForEach(e, mount) { + if (strcmp("type", e->string) == 0) { + if (!cJSON_IsString(e) || !is_valid_mount_type(e->valuestring)) { + ret = INVALID_MOUNT; + goto free_and_exit; + } + has_type = true; + } else if (strcmp("source", e->string) == 0) { + if (!cJSON_IsString(e)) { + ret = INVALID_MOUNT; + goto free_and_exit; + } + mounts[i].src = strdup(e->valuestring); + } else if (strcmp("destination", e->string) == 0) { + if (!cJSON_IsString(e)) { + ret = INVALID_MOUNT; + goto free_and_exit; + } + mounts[i].dest = strdup(e->valuestring); + } else if (strcmp("options", e->string) == 0) { + if (!cJSON_IsArray(e)) { + ret = INVALID_MOUNT; + goto free_and_exit; + } + mounts[i].options = get_mount_options(e); + } else { + fprintf(ERRORFILE, "Unrecognized runC config mount parameter: %s\n", + e->string); + ret = INVALID_MOUNT; + goto free_and_exit; + } + } + + if (!has_type) { + fputs("runC config mount missing mount type\n", ERRORFILE); + ret = INVALID_MOUNT; + goto free_and_exit; + } + + if (mounts[i].src == NULL) { + fputs("runC config mount missing source\n", ERRORFILE); + ret = INVALID_MOUNT; + goto free_and_exit; + } + + if (mounts[i].dest == NULL) { + fputs("runC config mount missing destination\n", ERRORFILE); + ret = INVALID_MOUNT; + goto free_and_exit; + } + + if (mounts[i].options == NULL) { + fputs("runC config mount missing mount options\n", ERRORFILE); + ret = INVALID_MOUNT; + goto free_and_exit; + } + + i++; + } + +free_and_exit: + return ret; +} + +static bool 
is_valid_runc_config_mounts(const cJSON* rcm) { + mount *mounts = NULL; + unsigned int num_mounts = 0; + int ret = 0; + bool all_mounts_ok = true; + char **permitted_ro_mounts = NULL; + char **permitted_rw_mounts = NULL; + + if (rcm == NULL) { + return true; // OK to have no extra mounts + } + if (!cJSON_IsArray(rcm)) { + fputs("runC config mounts is not an array\n", ERRORFILE); + return false; + } + + permitted_ro_mounts = get_configuration_values_delimiter("runc.allowed.ro-mounts", + CONTAINER_EXECUTOR_CFG_RUNC_SECTION, get_cfg(), ","); + permitted_rw_mounts = get_configuration_values_delimiter("runc.allowed.rw-mounts", + CONTAINER_EXECUTOR_CFG_RUNC_SECTION, get_cfg(), ","); + + num_mounts = cJSON_GetArraySize(rcm); + + mounts = (mount *) calloc(num_mounts, sizeof(*mounts)); + if (mounts == NULL) { + fprintf(ERRORFILE, "Unable to allocate %ld bytes\n", num_mounts * sizeof(*mounts)); + all_mounts_ok = false; + goto free_and_exit; + } + + ret = get_runc_mounts(mounts, rcm); + if (ret != 0) { + all_mounts_ok = false; + goto free_and_exit; + } + + ret = validate_mounts(permitted_ro_mounts, permitted_rw_mounts, mounts, num_mounts); + if (ret != 0) { + all_mounts_ok = false; + goto free_and_exit; + } + +free_and_exit: + free_values(permitted_ro_mounts); + free_values(permitted_rw_mounts); + free_mounts(mounts, num_mounts); + return all_mounts_ok; +} + +static bool is_valid_runc_config_process(const runc_config_process* rcp) { + if (rcp == NULL) { + return false; + } + + if (!cJSON_IsArray(rcp->args)) { + fputs("runC config process args is missing or not an array\n", ERRORFILE); + return false; + } + + const cJSON* e; + cJSON_ArrayForEach(e, rcp->args) { + if (!cJSON_IsString(e)) { + fputs("runC config process args has a non-string in array\n", ERRORFILE); + return false; + } + } + + if (!cJSON_IsString(rcp->cwd)) { + fputs("Bad/Missing runC config process cwd\n", ERRORFILE); + return false; + } + + if (!cJSON_IsArray(rcp->env)) { + fputs("runC config process env is missing or not an array\n", ERRORFILE); + return false; + } + cJSON_ArrayForEach(e, rcp->env) { + if (!cJSON_IsString(e)) { + fputs("runC config process env has a non-string in array\n", ERRORFILE); + return false; + } + } + + return true; +} + +static bool is_valid_runc_config(const runc_config* rc) { + bool is_valid = true; + if (rc->hostname != NULL && !cJSON_IsString(rc->hostname)) { + fputs("runC config hostname is not a string\n", ERRORFILE); + is_valid = false; + } + + is_valid &= is_valid_runc_config_linux(rc->linux_config); + is_valid &= is_valid_runc_config_mounts(rc->mounts); + is_valid &= is_valid_runc_config_process(&rc->process); + return is_valid; +} + +bool is_valid_runc_launch_cmd(const runc_launch_cmd* rlc) { + if (rlc == NULL) { + return false; + } + + if (rlc->run_as_user == NULL) { + fputs("runC command has bad/missing runAsUser\n", ERRORFILE); + return false; + } + + if (rlc->username == NULL) { + fputs("runC command has bad/missing username\n", ERRORFILE); + return false; + } + + if (rlc->app_id == NULL) { + fputs("runC command has bad/missing application ID\n", ERRORFILE); + return false; + } + + if (rlc->container_id == NULL) { + fputs("runC command has bad/missing container ID\n", ERRORFILE); + return false; + } + if (!validate_container_id(rlc->container_id)) { + fprintf(ERRORFILE, "Bad container id in runC command: %s\n", + rlc->container_id); + return false; + } + + if (rlc->pid_file == NULL) { + fputs("runC command has bad/missing pid file\n", ERRORFILE); + return false; + } + if 
(check_pidfile_as_nm(rlc->pid_file) != 0) { + fprintf(ERRORFILE, "Bad pidfile %s : %s\n", rlc->pid_file, + strerror(errno)); + return false; + } + + if (rlc->script_path == NULL) { + fputs("runC command has bad/missing container script path\n", ERRORFILE); + return false; + } + + if (rlc->cred_path == NULL) { + fputs("runC command has bad/missing container credentials path\n", + ERRORFILE); + return false; + } + + if (rlc->local_dirs == NULL) { + fputs("runC command has bad/missing local directories\n", ERRORFILE); + return false; + } + + if (rlc->log_dirs == NULL) { + fputs("runC command has bad/missing log directories\n", ERRORFILE); + return false; + } + + if (!is_valid_runc_launch_cmd_layers(rlc->layers, rlc->num_layers)) { + return false; + } + + if (rlc->num_reap_layers_keep < 0) { + fprintf(ERRORFILE, "Bad number of layers to preserve: %d\n", + rlc->num_reap_layers_keep); + return false; + } + + return is_valid_runc_config(&rlc->config); +} + +/** + * Read, parse, and validate a runC container launch command. + * + * Returns a pointer to the launch command or NULL on error. + */ +runc_launch_cmd* parse_runc_launch_cmd(const char* command_filename) { + int ret = 0; + runc_launch_cmd* rlc = NULL; + cJSON* rlc_json = NULL; + + rlc_json = parse_json_file(command_filename); + if (rlc_json == NULL) { + goto cleanup; + } + + rlc = calloc(1, sizeof(*rlc)); + if (rlc == NULL) { + fprintf(ERRORFILE, "Unable to allocate %ld bytes\n", sizeof(*rlc)); + goto cleanup; + } + + char* run_as_user = cJSON_GetStringValue(cJSON_GetObjectItemCaseSensitive( + rlc_json, "runAsUser")); + if (run_as_user== NULL) { + goto fail_and_exit; + } + rlc->run_as_user= strdup(run_as_user); + + char* username = cJSON_GetStringValue(cJSON_GetObjectItemCaseSensitive( + rlc_json, "username")); + if (username == NULL) { + goto fail_and_exit; + } + rlc->username = strdup(username); + + char* app_id = cJSON_GetStringValue(cJSON_GetObjectItemCaseSensitive( + rlc_json, "applicationId")); + if (app_id == NULL) { + goto fail_and_exit; + } + rlc->app_id = strdup(app_id); + + char* container_id = cJSON_GetStringValue(cJSON_GetObjectItemCaseSensitive( + rlc_json, "containerId")); + if (container_id == NULL) { + goto fail_and_exit; + } + rlc->container_id = strdup(container_id); + + char* pid_file = cJSON_GetStringValue(cJSON_GetObjectItemCaseSensitive( + rlc_json, "pidFile")); + if (pid_file == NULL) { + goto fail_and_exit; + } + rlc->pid_file = strdup(pid_file); + + char* script_path = cJSON_GetStringValue(cJSON_GetObjectItemCaseSensitive( + rlc_json, "containerScriptPath")); + if (script_path == NULL) { + goto fail_and_exit; + } + rlc->script_path = strdup(script_path); + + char* cred_path = cJSON_GetStringValue(cJSON_GetObjectItemCaseSensitive( + rlc_json, "containerCredentialsPath")); + if (cred_path == NULL) { + goto fail_and_exit; + } + rlc->cred_path = strdup(cred_path); + + rlc->https = parse_json_int(cJSON_GetObjectItemCaseSensitive(rlc_json, "https")); + + char* keystore_path = cJSON_GetStringValue(cJSON_GetObjectItemCaseSensitive( + rlc_json, "keystorePath")); + if (keystore_path != NULL) { + rlc->keystore_path = strdup(keystore_path); + } + + char* truststore_path = cJSON_GetStringValue(cJSON_GetObjectItemCaseSensitive( + rlc_json, "truststorePath")); + if (truststore_path != NULL) { + rlc->truststore_path = strdup(truststore_path); + } + + char **local_dirs = parse_dir_list(cJSON_GetObjectItemCaseSensitive( + rlc_json, "localDirs")); + if (local_dirs == NULL) { + goto fail_and_exit; + } + rlc->local_dirs = local_dirs; 
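+  // parse_dir_list returns a NULL-terminated copy of the array; NULL here
+  // means the JSON array was missing, empty, or held a non-string entry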
+ + char **log_dirs = parse_dir_list(cJSON_GetObjectItemCaseSensitive( + rlc_json, "logDirs")); + if (log_dirs == NULL) { + goto fail_and_exit; + } + rlc->log_dirs = log_dirs; + + rlc_layer_spec* layers = parse_runc_launch_cmd_layers(&rlc->num_layers, + cJSON_GetObjectItemCaseSensitive(rlc_json,"layers")); + if (layers == NULL) { + goto fail_and_exit; + } + rlc->layers = layers; + + rlc->num_reap_layers_keep = parse_json_int( + cJSON_GetObjectItemCaseSensitive(rlc_json, "reapLayerKeepCount")); + + ret = parse_runc_launch_cmd_runc_config(&rlc->config, + cJSON_GetObjectItemCaseSensitive(rlc_json, "ociRuntimeConfig")); + if (ret < 0) { + goto fail_and_exit; + } + +cleanup: + cJSON_Delete(rlc_json); + return rlc; + +fail_and_exit: + cJSON_Delete(rlc_json); + free_runc_launch_cmd(rlc); + return NULL; +} + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/runc/runc_launch_cmd.h b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/runc/runc_launch_cmd.h new file mode 100644 index 0000000000..cb0d45fa65 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/runc/runc_launch_cmd.h @@ -0,0 +1,85 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef RUNC_RUNC_LAUNCH_CMD_H +#define RUNC_RUNC_LAUNCH_CMD_H + +#include "utils/cJSON/cJSON.h" + +// NOTE: Update free_runc_launch_cmd when this is changed. +typedef struct runc_launch_cmd_layer_spec { + char* media_type; // MIME type for layer data + char* path; // local filesystem location of layer data +} rlc_layer_spec; + +// NOTE: Update free_runc_launch_cmd when this is changed. +typedef struct runc_config_process_struct { + cJSON* args; // execve-style command and arguments + cJSON* cwd; // working dir for container + cJSON* env; // execve-style environment +} runc_config_process; + +// NOTE: Update free_runc_launch_cmd when this is changed. +typedef struct runc_config_struct { + cJSON* hostname; // hostname for the container + cJSON* linux_config; // Linux section of the runC config + cJSON* mounts; // bind-mounts for the container + runc_config_process process; // process config for the container +} runc_config; + +// NOTE: Update free_runc_launch_cmd when this is changed. 
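+// The fields below mirror the JSON command file written by the NodeManager and
+// parsed by parse_runc_launch_cmd. An abbreviated, illustrative example (all
+// values are placeholders):
+//
+//   {
+//     "runAsUser": "nobody",
+//     "username": "alice",
+//     "applicationId": "application_1234567890123_0001",
+//     "containerId": "container_1234567890123_0001_01_000001",
+//     "pidFile": "/path/to/container.pid",
+//     "containerScriptPath": "/path/to/launch_container.sh",
+//     "containerCredentialsPath": "/path/to/container.tokens",
+//     "https": 0,
+//     "localDirs": ["/path/to/local-dir"],
+//     "logDirs": ["/path/to/log-dir"],
+//     "layers": [{ "mediaType": "application/vnd.squashfs", "path": "/path/to/layer.sqsh" }],
+//     "reapLayerKeepCount": 100,
+//     "ociRuntimeConfig": { "linux": { ... }, "mounts": [ ... ],
+//                           "process": { "args": [ ... ], "cwd": "...", "env": [ ... ] } }
+//   }
+//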
+typedef struct runc_launch_cmd_struct { + char* run_as_user; // user name of the runAs user + char* username; // user name of the container user + char* app_id; // YARN application ID + char* container_id; // YARN container ID + char* pid_file; // pid file path to create + char* script_path; // path to container launch script + char* cred_path; // path to container credentials file + int https; // whether or not https is enabled + char* keystore_path; // path to keystore file + char* truststore_path; // path to truststore file + char** local_dirs; // NULL-terminated array of local dirs + char** log_dirs; // NULL-terminated array of log dirs + rlc_layer_spec* layers; // array of layers + unsigned int num_layers; // number of entries in the layers array + int num_reap_layers_keep; // number of total layer mounts to preserve + runc_config config; // runC config for the container +} runc_launch_cmd; + + +/** + * Free a runC launch command structure and all memory assruncated with it. + */ +void free_runc_launch_cmd(runc_launch_cmd* rlc); + + +/** + * + * Valildate runC container launch command. + * Returns true on valid and false on invalid. + */ +bool is_valid_runc_launch_cmd(const runc_launch_cmd* rlc); + +/** + * Read, parse, and validate a runC container launch command. + * + * Returns a pointer to the launch command or NULL on error. + */ +runc_launch_cmd* parse_runc_launch_cmd(const char* command_filename); + +#endif /* RUNC_RUNC_LAUNCH_CMD_H */ diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/runc/runc_reap.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/runc/runc_reap.c new file mode 100644 index 0000000000..b67c60492f --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/runc/runc_reap.c @@ -0,0 +1,622 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "container-executor.h" + +#include "runc_base_ctx.h" +#include "runc_reap.h" + +#include "util.h" + +#define DEV_LOOP_PREFIX "/dev/loop" +#define DEV_LOOP_PREFIX_LEN (sizeof(DEV_LOOP_PREFIX) - 1) +#define DELETED_SUFFIX " (deleted)\n" +#define DELETED_SUFFIX_LEN (sizeof(DELETED_SUFFIX) - 1) + +// The size of the buffer to use when reading the mount table. This should be +// large enough so ideally the mount table is read all at once. +// Otherwise the mount table could change in-between underlying read() calls +// and result in a table with missing or corrupted entries. 
+#define MOUNT_TABLE_BUFFER_SIZE (1024*1024) + +// NOTE: Update destroy_dent_stat when this is updated. +typedef struct dent_stat_struct { + char* basename; // basename of directory entry + struct timespec mtime; // modification time +} dent_stat; + +// NOTE: Update init_dent_stats and destroy_dent_stats when this is changed. +typedef struct dent_stats_array_struct { + dent_stat* stats; // array of dent_stat structures + size_t capacity; // capacity of the stats array + size_t length; // number of valid entries in the stats array +} dent_stats_array; + + +/** + * Releases the resources assruncated with a dent_stat structure but + * does NOT free the structure itself. This is particularly useful for + * stack-allocated structures or other structures that embed this structure. + */ +static void destroy_dent_stat(dent_stat* ds) { + if (ds != NULL) { + free(ds->basename); + ds->basename = NULL; + } +} + +/** + * Initialize an uninitialized dent_stats_array with the specified + * number of entries as its initial capacity. + * + * Returns true on success or false on error. + */ +static bool init_dent_stats(dent_stats_array* dsa, size_t initial_size) { + memset(dsa, 0, sizeof(*dsa)); + dsa->stats = malloc(sizeof(*dsa->stats) * initial_size); + if (dsa->stats == NULL) { + return false; + } + dsa->capacity = initial_size; + dsa->length = 0; + return true; +} + +/** + * Allocates and initializes a dent_stats_array with the specified + * number of entries as its initial capacity. + * + * Returns a pointer to the dent_stats_array or NULL on error. + */ +static dent_stats_array* alloc_dent_stats(size_t initial_size) { + dent_stats_array* dsa = malloc(sizeof(*dsa)); + if (dsa != NULL) { + if (!init_dent_stats(dsa, initial_size)) { + free(dsa); + dsa = NULL; + } + } + return dsa; +} + +/** + * Grows the capacity of a dent_stats_array to the new specified number of + * elements. + * + * Returns true on success or false on error. + */ +static bool realloc_dent_stats(dent_stats_array* dsa, size_t new_size) { + if (new_size < dsa->length) { + // New capacity would result in a truncation. + return false; + } + + dent_stat* new_stats = realloc(dsa->stats, new_size * sizeof(*dsa->stats)); + if (new_stats == NULL) { + return false; + } + + dsa->stats = new_stats; + dsa->capacity = new_size; + return true; +} + +/** + * Append a new dent_stat entry to a dent_stats_array, reallocating the + * array if necessary with the specified increase in capacity. + * + * Returns true on success or false on error. + */ +static bool append_dent_stat(dent_stats_array* dsa, size_t stats_size_incr, + const char* basename, const struct timespec* mtime) { + if (dsa->length == dsa->capacity) { + if (!realloc_dent_stats(dsa, dsa->capacity + stats_size_incr)) { + return false; + } + } + + char* ds_name = strdup(basename); + if (ds_name == NULL) { + return false; + } + + dent_stat* ds = &dsa->stats[dsa->length++]; + ds->basename = ds_name; + ds->mtime = *mtime; + return true; +} + +/** + * Releases the resources assruncated with a dent_stats_array structure but + * does NOT free the structure itself. This is particularly useful for + * stack-allocated contexts or other structures that embed this structure. + */ +static void destroy_dent_stats(dent_stats_array* dsa) { + if (dsa != NULL ) { + for (size_t i = 0; i < dsa->length; ++i) { + destroy_dent_stat(&dsa->stats[i]); + } + free(dsa->stats); + dsa->capacity = 0; + dsa->length = 0; + } +} + +/** + * Frees a dent_stats_array structure and all memory assruncted with it. 
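+ * Passing NULL is a no-op.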
+ */ +static void free_dent_stats(dent_stats_array* dsa) { + destroy_dent_stats(dsa); + free(dsa); +} + +/** + * Get the array of dent_stats for the layers directory. + * Only directory entries that look like layers will be returned. + * + * Returns the array of dent_stats or NULL on error. + */ +static dent_stats_array* get_dent_stats(int layers_fd) { + DIR* layers_dir = NULL; + // number of stat buffers to allocate each time we run out + const size_t stats_size_incr = 8192; + dent_stats_array* dsa = alloc_dent_stats(stats_size_incr); + if (dsa == NULL) { + return NULL; + } + + int dir_fd = dup(layers_fd); + if (dir_fd == -1) { + fprintf(ERRORFILE, "Unable to duplicate layer dir fd: %s\n", + strerror(errno)); + goto fail; + } + + layers_dir = fdopendir(dir_fd); + if (layers_dir == NULL) { + fprintf(ERRORFILE, "Cannot open layers directory: %s\n", strerror(errno)); + goto fail; + } + + struct dirent* de; + while ((de = readdir(layers_dir)) != NULL) { + // skip entries that don't look like layers + if (strlen(de->d_name) != LAYER_NAME_LENGTH) { + continue; + } + + struct stat statbuf; + if (fstatat(layers_fd, de->d_name, &statbuf, AT_SYMLINK_NOFOLLOW) == -1) { + if (errno == ENOENT) { + continue; + } + fprintf(ERRORFILE, "Error getting stats for layer %s : %s\n", de->d_name, + strerror(errno)); + goto fail; + } + + if (!append_dent_stat(dsa, stats_size_incr, de->d_name, + &statbuf.st_mtim)) { + fputs("Unable to allocate memory\n", ERRORFILE); + goto fail; + } + } + +cleanup: + if (layers_dir != NULL) { + closedir(layers_dir); + } + return dsa; + +fail: + free_dent_stats(dsa); + dsa = NULL; + goto cleanup; +} + +/** + * Umount a layer and remove the directories assruncated with the layer mount. + * + * Returns true on success or false on error. + */ +static bool unmount_layer(const char* layer_dir_path) { + char* mount_path = get_runc_layer_mount_path(layer_dir_path); + if (mount_path == NULL) { + fputs("Unable to allocate memory\n", ERRORFILE); + return false; + } + + bool result = false; + if (umount(mount_path) == -1) { + if (errno == EBUSY) { + // Layer is in use by another container. + goto cleanup; + } else if (errno != ENOENT && errno != EINVAL) { + fprintf(ERRORFILE, "Error unmounting %s : %s\n", mount_path, + strerror(errno)); + goto cleanup; + } + } else { + // unmount was successful so report success even if directory removals + // fail after this. + result = true; + } + + if (rmdir(mount_path) == -1 && errno != ENOENT) { + fprintf(ERRORFILE, "Error removing %s : %s\n", mount_path, + strerror(errno)); + goto cleanup; + } + + if (rmdir(layer_dir_path) == -1 && errno != ENOENT) { + fprintf(ERRORFILE, "Error removing %s : %s\n", layer_dir_path, + strerror(errno)); + goto cleanup; + } + + result = true; + +cleanup: + free(mount_path); + return result; +} + +/** + * Order directory entries by increasing modification time. 
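+ * Used as the qsort() comparator when selecting the least-recently-used
+ * layers to unmount first.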
+ */ +static int compare_dent_stats_mtime(const void* va, const void* vb) { + const dent_stat* a = (const dent_stat*)va; + const dent_stat* b = (const dent_stat*)vb; + if (a->mtime.tv_sec < b->mtime.tv_sec) { + return -1; + } else if (a->mtime.tv_sec > b->mtime.tv_sec) { + return 1; + } + return a->mtime.tv_nsec - b->mtime.tv_nsec; +} + +static bool do_reap_layer_mounts_with_lock(runc_base_ctx* ctx, + int layers_fd, int num_preserve) { + dent_stats_array* dsa = get_dent_stats(layers_fd); + if (dsa == NULL) { + return false; + } + + qsort(&dsa->stats[0], dsa->length, sizeof(*dsa->stats), + compare_dent_stats_mtime); + + bool result = false; + size_t num_remain = dsa->length; + if (num_remain <= num_preserve) { + result = true; + goto cleanup; + } + + if (!acquire_runc_layers_write_lock(ctx)) { + fputs("Unable to acquire layer write lock\n", ERRORFILE); + goto cleanup; + } + + for (size_t i = 0; i < dsa->length && num_remain > num_preserve; ++i) { + char* layer_dir_path = get_runc_layer_path(ctx->run_root, + dsa->stats[i].basename); + if (layer_dir_path == NULL) { + fputs("Unable to allocate memory\n", ERRORFILE); + goto cleanup; + } + if (unmount_layer(layer_dir_path)) { + --num_remain; + printf("Unmounted layer %s\n", dsa->stats[i].basename); + } + free(layer_dir_path); + } + + result = true; + +cleanup: + free_dent_stats(dsa); + return result; +} + +/** + * Determine if the specified loopback device is assruncated with a file that + * has been deleted. + * + * Returns true if the loopback file is deleted or false otherwise or on error. + */ +bool is_loop_file_deleted(const char* loopdev) { + bool result = false; + FILE* f = NULL; + char* path = NULL; + char* linebuf = NULL; + + // locate the numeric part of the loop device + const char* loop_num_str = loopdev + DEV_LOOP_PREFIX_LEN; + + if (asprintf(&path, "/sys/devices/virtual/block/loop%s/loop/backing_file", + loop_num_str) == -1) { + return false; + } + + f = fopen(path, "r"); + if (f == NULL) { + goto cleanup; + } + + size_t linebuf_len = 0; + ssize_t len = getline(&linebuf, &linebuf_len, f); + if (len <= DELETED_SUFFIX_LEN) { + goto cleanup; + } + + result = !strcmp(DELETED_SUFFIX, linebuf + len - DELETED_SUFFIX_LEN); + +cleanup: + if (f != NULL) { + fclose(f); + } + free(linebuf); + free(path); + return result; +} + +static bool copy_mntent(struct mntent* dest, const struct mntent* src) { + memset(dest, 0, sizeof(*dest)); + if (src->mnt_fsname != NULL) { + dest->mnt_fsname = strdup(src->mnt_fsname); + if (dest->mnt_fsname == NULL) { + return false; + } + } + if (src->mnt_dir != NULL) { + dest->mnt_dir = strdup(src->mnt_dir); + if (dest->mnt_dir == NULL) { + return false; + } + } + if (src->mnt_type != NULL) { + dest->mnt_type = strdup(src->mnt_type); + if (dest->mnt_type == NULL) { + return false; + } + } + if (src->mnt_opts != NULL) { + dest->mnt_opts = strdup(src->mnt_opts); + if (dest->mnt_opts == NULL) { + return false; + } + } + dest->mnt_freq = src->mnt_freq; + dest->mnt_passno = src->mnt_passno; + return true; +} + +static void free_mntent_array(struct mntent* entries, size_t num_entries) { + if (entries != NULL) { + for (size_t i = 0; i < num_entries; ++i) { + struct mntent* me = entries + i; + free(me->mnt_fsname); + free(me->mnt_dir); + free(me->mnt_type); + free(me->mnt_opts); + } + free(entries); + } +} + +/** + * Get the array of mount table entries that are layer mounts. + * + * Returns the heap-allocated array of mount entries or NULL on error. 
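+ * Only loopback-backed entries whose mount point falls under layers_path are
+ * included.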
+ * The num_entries argument is updated to the number of elements in the array. + */ +static struct mntent* get_layer_mounts(size_t* num_entries_out, + const char* layers_path) { + const size_t layers_path_len = strlen(layers_path); + char* read_buffer = NULL; + FILE* f = NULL; + const size_t num_entries_per_alloc = 8192; + size_t num_entries = 0; + size_t entries_capacity = num_entries_per_alloc; + struct mntent* entries = malloc(sizeof(*entries) * entries_capacity); + if (entries == NULL) { + fputs("Unable to allocate memory\n", ERRORFILE); + goto fail; + } + + read_buffer = malloc(MOUNT_TABLE_BUFFER_SIZE); + if (read_buffer == NULL) { + fprintf(ERRORFILE, "Unable to allocate read buffer of %d bytes\n", + MOUNT_TABLE_BUFFER_SIZE); + goto fail; + } + + f = fopen("/proc/mounts", "r"); + if (f == NULL) { + fprintf(ERRORFILE, "Unable to open /proc/mounts : %s\n", strerror(errno)); + goto fail; + } + + if (setvbuf(f, read_buffer, _IOFBF, MOUNT_TABLE_BUFFER_SIZE) != 0) { + fprintf(ERRORFILE, "Unable to set mount table buffer to %d\n", + MOUNT_TABLE_BUFFER_SIZE); + goto fail; + } + + struct mntent* me; + while ((me = getmntent(f)) != NULL) { + // Skip mounts that are not loopback mounts + if (strncmp(me->mnt_fsname, DEV_LOOP_PREFIX, DEV_LOOP_PREFIX_LEN)) { + continue; + } + + // skip destinations that are not under the layers mount area + if (strncmp(layers_path, me->mnt_dir, layers_path_len)) { + continue; + } + + if (num_entries == entries_capacity) { + entries_capacity += num_entries_per_alloc; + entries = realloc(entries, sizeof(*entries) * entries_capacity); + if (entries == NULL) { + fputs("Unable to allocate memory\n", ERRORFILE); + goto fail; + } + } + + if (!copy_mntent(entries + num_entries, me)) { + goto fail; + } + ++num_entries; + } + +cleanup: + if (f != NULL) { + fclose(f); + } + free(read_buffer); + *num_entries_out = num_entries; + return entries; + +fail: + free_mntent_array(entries, num_entries); + entries = NULL; + num_entries = 0; + goto cleanup; +} + +/** + * Search for layer mounts that correspond with deleted files and unmount them. + */ +static bool reap_deleted_mounts_with_lock(runc_base_ctx* ctx) { + const char* layers_path = get_runc_layers_path(ctx->run_root); + if (layers_path == NULL) { + fputs("Unable to allocate memory\n", ERRORFILE); + return false; + } + + bool result = false; + size_t num_mnt_entries = 0; + struct mntent* mnt_entries = get_layer_mounts(&num_mnt_entries, layers_path); + if (mnt_entries == NULL) { + fputs("Error parsing mount table\n", ERRORFILE); + goto cleanup; + } + + bool have_write_lock = false; + for (size_t i = 0; i < num_mnt_entries; ++i) { + const struct mntent* me = mnt_entries + i; + if (is_loop_file_deleted(me->mnt_fsname)) { + if (!have_write_lock) { + if (!acquire_runc_layers_write_lock(ctx)) { + goto cleanup; + } + have_write_lock = true; + } + + char* layer_dir = get_runc_layer_path_from_mount_path(me->mnt_dir); + if (layer_dir != NULL) { + if (unmount_layer(layer_dir)) { + printf("Unmounted layer %s (deleted)\n", basename(layer_dir)); + } + free(layer_dir); + } + } + } + + result = true; + +cleanup: + free_mntent_array(mnt_entries, num_mnt_entries); + return result; +} + +/** + * Equivalent to reap_runc_layer_mounts but avoids the need to re-create the + * runC base context. 
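+ *
+ * A minimal usage sketch (illustrative; the keep count of 100 is arbitrary):
+ *
+ *   runc_base_ctx* ctx = setup_runc_base_ctx();
+ *   if (ctx != NULL) {
+ *     int rc = reap_runc_layer_mounts_with_ctx(ctx, 100);
+ *     // rc is 0 on success, otherwise an errorcodes value
+ *     free_runc_base_ctx(ctx);
+ *   }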
+ */ +int reap_runc_layer_mounts_with_ctx(runc_base_ctx* ctx, int num_preserve) { + int rc = ERROR_RUNC_REAP_LAYER_MOUNTS_FAILED; + int layers_fd = -1; + char* layers_path = get_runc_layers_path(ctx->run_root); + if (layers_path == NULL) { + fputs("Unable to allocate memory\n", ERRORFILE); + rc = OUT_OF_MEMORY; + goto cleanup; + } + + layers_fd = open(layers_path, O_RDONLY | O_NOFOLLOW); + if (layers_fd == -1) { + fprintf(ERRORFILE, "Unable to open layers directory at %s : %s\n", + layers_path, strerror(errno)); + goto cleanup; + } + + if (!acquire_runc_layers_read_lock(ctx)) { + fputs("Unable to obtain layer lock\n", ERRORFILE); + goto cleanup; + } + + bool reap_deleted_ok = reap_deleted_mounts_with_lock(ctx); + bool reap_layers_ok = do_reap_layer_mounts_with_lock(ctx, layers_fd, + num_preserve); + if (reap_deleted_ok && reap_layers_ok) { + rc = 0; + } + + release_runc_layers_lock(ctx); + +cleanup: + if (layers_fd != -1) { + close(layers_fd); + } + free(layers_path); + return rc; +} + +/** + * Attempt to trim the number of layer mounts to the specified target number to + * preserve. Layers are unmounted in a least-recently-used fashion. Layers that + * are still in use by containers are preserved, so the number of layers mounts + * after trimming may exceed the target number. + * + * Returns 0 on success or a non-zero error code on failure. + */ +int reap_runc_layer_mounts(int num_preserve) { + int rc = ERROR_RUNC_REAP_LAYER_MOUNTS_FAILED; + runc_base_ctx* ctx = setup_runc_base_ctx(); + if (ctx == NULL) { + return rc; + } + + rc = reap_runc_layer_mounts_with_ctx(ctx, num_preserve); + free_runc_base_ctx(ctx); + return rc; +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/runc/runc_reap.h b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/runc/runc_reap.h new file mode 100644 index 0000000000..74f4901e95 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/runc/runc_reap.h @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef RUNC_RUNC_REAP_H +#define RUNC_RUNC_REAP_H + +#include "runc_base_ctx.h" + +/** + * Attempt to trim the number of layer mounts to the specified target number to + * preserve. Layers are unmounted in a least-recently-used fashion. Layers that + * are still in use by containers are preserved, so the number of layers mounts + * after trimming may exceed the target number. + * + * Returns 0 on success or a non-zero error code on failure. 
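+ * Non-zero results can be translated to a message with get_error_message().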
+ */ +int reap_runc_layer_mounts(int num_preserve); + +/** + * Equivalent to reap_runc_layer_mounts but avoids the need to re-create the + * runC base context. + */ +int reap_runc_layer_mounts_with_ctx(runc_base_ctx* ctx, int num_preserve); + +#endif /* RUNC_RUNC_REAP_H */ diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/runc/runc_write_config.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/runc/runc_write_config.c new file mode 100644 index 0000000000..c82cdc5e5d --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/runc/runc_write_config.c @@ -0,0 +1,497 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include +#include +#include +#include +#include +#include + +#include "hadoop_user_info.h" + +#include "container-executor.h" +#include "utils/cJSON/cJSON.h" +#include "utils/file-utils.h" +#include "util.h" + +#include "runc_launch_cmd.h" +#include "runc_write_config.h" + +#define RUNC_CONFIG_FILENAME "config.json" +#define STARTING_JSON_BUFFER_SIZE (128*1024) + + +static cJSON* build_runc_config_root(const char* rootfs_path) { + cJSON* root = cJSON_CreateObject(); + if (cJSON_AddStringToObject(root, "path", rootfs_path) == NULL) { + goto fail; + } + if (cJSON_AddTrueToObject(root, "readonly") == NULL) { + goto fail; + } + return root; + +fail: + cJSON_Delete(root); + return NULL; +} + +static cJSON* build_runc_config_process_user(const char* username) { + cJSON* user_json = cJSON_CreateObject(); + struct hadoop_user_info* hui = hadoop_user_info_alloc(); + if (hui == NULL) { + return NULL; + } + + int rc = hadoop_user_info_fetch(hui, username); + if (rc != 0) { + fprintf(ERRORFILE, "Error looking up user %s : %s\n", username, + strerror(rc)); + goto fail; + } + + if (cJSON_AddNumberToObject(user_json, "uid", hui->pwd.pw_uid) == NULL) { + goto fail; + } + if (cJSON_AddNumberToObject(user_json, "gid", hui->pwd.pw_gid) == NULL) { + goto fail; + } + + rc = hadoop_user_info_getgroups(hui); + if (rc != 0) { + fprintf(ERRORFILE, "Error getting groups for user %s : %s\n", username, + strerror(rc)); + goto fail; + } + + if (hui->num_gids > 1) { + cJSON* garray = cJSON_AddArrayToObject(user_json, "additionalGids"); + if (garray == NULL) { + goto fail; + } + + // first gid entry is the primary group which is accounted for above + for (int i = 1; i < hui->num_gids; ++i) { + cJSON* g = cJSON_CreateNumber(hui->gids[i]); + if (g == NULL) { + goto fail; + } + cJSON_AddItemToArray(garray, g); + } + } + + return user_json; + +fail: + hadoop_user_info_free(hui); + cJSON_Delete(user_json); + return NULL; 
+} + +static cJSON* build_runc_config_process(const runc_launch_cmd* rlc) { + cJSON* process = cJSON_CreateObject(); + if (process == NULL) { + return NULL; + } + + cJSON_AddItemReferenceToObject(process, "args", rlc->config.process.args); + cJSON_AddItemReferenceToObject(process, "cwd", rlc->config.process.cwd); + cJSON_AddItemReferenceToObject(process, "env", rlc->config.process.env); + if (cJSON_AddTrueToObject(process, "noNewPrivileges") == NULL) { + goto fail; + } + + cJSON* user_json = build_runc_config_process_user(rlc->run_as_user); + if (user_json == NULL) { + goto fail; + } + cJSON_AddItemToObjectCS(process, "user", user_json); + + return process; + +fail: + cJSON_Delete(process); + return NULL; +} + +static bool add_mount_opts(cJSON* mount_json, va_list opts) { + const char* opt = va_arg(opts, const char*); + if (opt == NULL) { + return true; + } + + cJSON* opts_array = cJSON_AddArrayToObject(mount_json, "options"); + if (opts_array == NULL) { + return false; + } + + do { + cJSON* opt_json = cJSON_CreateString(opt); + if (opt_json == NULL) { + return false; + } + cJSON_AddItemToArray(opts_array, opt_json); + opt = va_arg(opts, const char*); + } while (opt != NULL); + + return true; +} + +static bool add_mount_json(cJSON* mounts_array, const char* src, + const char* dest, const char* fstype, ...) { + bool result = false; + cJSON* m = cJSON_CreateObject(); + if (cJSON_AddStringToObject(m, "source", src) == NULL) { + goto cleanup; + } + if (cJSON_AddStringToObject(m, "destination", dest) == NULL) { + goto cleanup; + } + if (cJSON_AddStringToObject(m, "type", fstype) == NULL) { + goto cleanup; + } + + va_list vargs; + va_start(vargs, fstype); + result = add_mount_opts(m, vargs); + va_end(vargs); + + if (result) { + cJSON_AddItemToArray(mounts_array, m); + } + +cleanup: + if (!result) { + cJSON_Delete(m); + } + return result; +} + +static bool add_std_mounts_json(cJSON* mounts_array) { + bool result = true; + result &= add_mount_json(mounts_array, "proc", "/proc", "proc", NULL); + result &= add_mount_json(mounts_array, "tmpfs", "/dev", "tmpfs", + "nosuid", "strictatime", "mode=755", "size=65536k", NULL); + result &= add_mount_json(mounts_array, "devpts", "/dev/pts", "devpts", + "nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620", "gid=5", + NULL); + result &= add_mount_json(mounts_array, "shm", "/dev/shm", "tmpfs", + "nosuid", "noexec", "nodev", "mode=1777", "size=8g", NULL); + result &= add_mount_json(mounts_array, "mqueue", "/dev/mqueue", "mqueue", + "nosuid", "noexec", "nodev", NULL); + result &= add_mount_json(mounts_array, "sysfs", "/sys", "sysfs", + "nosuid", "noexec", "nodev", "ro", NULL); + result &= add_mount_json(mounts_array, "cgroup", "/sys/fs/cgroup", "cgroup", + "nosuid", "noexec", "nodev", "relatime", "ro", NULL); + return result; +} + +static cJSON* build_runc_config_mounts(const runc_launch_cmd* rlc) { + cJSON* mjson = cJSON_CreateArray(); + if (!add_std_mounts_json(mjson)) { + goto fail; + } + + cJSON* e; + cJSON_ArrayForEach(e, rlc->config.mounts) { + cJSON_AddItemReferenceToArray(mjson, e); + } + + return mjson; + +fail: + cJSON_Delete(mjson); + return NULL; +} + +static cJSON* get_default_linux_devices_json() { + cJSON* devs = cJSON_CreateArray(); + if (devs == NULL) { + return NULL; + } + + cJSON* o = cJSON_CreateObject(); + if (o == NULL) { + goto fail; + } + cJSON_AddItemToArray(devs, o); + + if (cJSON_AddStringToObject(o, "access", "rwm") == NULL) { + goto fail; + } + + if (cJSON_AddFalseToObject(o, "allow") == NULL) { + goto fail; + } + + return 
devs; + +fail: + cJSON_Delete(devs); + return NULL; +} + +static bool add_linux_cgroups_json(cJSON* ljson, const runc_launch_cmd* rlc) { + cJSON* cj = cJSON_GetObjectItemCaseSensitive(rlc->config.linux_config, + "cgroupsPath"); + if (cj != NULL) { + cJSON_AddItemReferenceToObject(ljson, "cgroupsPath", cj); + } + return true; +} + +static bool add_linux_resources_json(cJSON* ljson, const runc_launch_cmd* rlc) { + cJSON* robj = cJSON_AddObjectToObject(ljson, "resources"); + if (robj == NULL) { + return false; + } + + cJSON* devs = get_default_linux_devices_json(); + if (devs == NULL) { + return false; + } + cJSON_AddItemToObjectCS(robj, "devices", devs); + + const cJSON* rlc_rsrc = cJSON_GetObjectItemCaseSensitive( + rlc->config.linux_config, "resources"); + cJSON* e; + cJSON_ArrayForEach(e, rlc_rsrc) { + if (strcmp("devices", e->string) == 0) { + cJSON* dev_e; + cJSON_ArrayForEach(dev_e, e) { + cJSON_AddItemReferenceToArray(devs, dev_e); + } + } else { + cJSON_AddItemReferenceToObject(robj, e->string, e); + } + } + + return true; +} + +static bool add_linux_namespace_json(cJSON* ljson, const char* ns_type) { + cJSON* ns = cJSON_CreateObject(); + if (ns == NULL) { + return false; + } + cJSON_AddItemToArray(ljson, ns); + return (cJSON_AddStringToObject(ns, "type", ns_type) != NULL); +} + +static bool add_linux_namespaces_json(cJSON* ljson) { + cJSON* ns_array = cJSON_AddArrayToObject(ljson, "namespaces"); + if (ns_array == NULL) { + return false; + } + bool result = add_linux_namespace_json(ns_array, "pid"); + result &= add_linux_namespace_json(ns_array, "ipc"); + result &= add_linux_namespace_json(ns_array, "uts"); + result &= add_linux_namespace_json(ns_array, "mount"); + return result; +} + +static const char* runc_masked_paths[] = { + "/proc/kcore", + "/proc/latency_stats", + "/proc/timer_list", + "/proc/timer_stats", + "/proc/sched_debug", + "/proc/scsi", + "/sys/firmware" +}; + +static bool add_linux_masked_paths_json(cJSON* ljson) { + size_t num_paths = sizeof(runc_masked_paths) / sizeof(runc_masked_paths[0]); + cJSON* paths = cJSON_CreateStringArray(runc_masked_paths, num_paths); + if (paths == NULL) { + return false; + } + cJSON_AddItemToObject(ljson, "maskedPaths", paths); + return true; +} + +static const char* runc_readonly_paths[] = { + "/proc/asound", + "/proc/bus", + "/proc/fs", + "/proc/irq", + "/proc/sys", + "/proc/sysrq-trigger" +}; + +static bool add_linux_readonly_paths_json(cJSON* ljson) { + size_t num_paths = sizeof(runc_readonly_paths) / sizeof(runc_readonly_paths[0]); + cJSON* paths = cJSON_CreateStringArray(runc_readonly_paths, num_paths); + if (paths == NULL) { + return false; + } + cJSON_AddItemToObject(ljson, "readonlyPaths", paths); + return true; +} + +static bool add_linux_seccomp_json(cJSON* ljson, const runc_launch_cmd* rlc) { + cJSON* sj = cJSON_GetObjectItemCaseSensitive(rlc->config.linux_config, + "seccomp"); + if (sj != NULL) { + cJSON_AddItemReferenceToObject(ljson, "seccomp", sj); + } + return true; +} + +static cJSON* build_runc_config_linux(const runc_launch_cmd* rlc) { + cJSON* ljson = cJSON_CreateObject(); + if (ljson == NULL) { + return NULL; + } + + if (!add_linux_cgroups_json(ljson, rlc)) { + goto fail; + } + + if (!add_linux_resources_json(ljson, rlc)) { + goto fail; + } + + if (!add_linux_namespaces_json(ljson)) { + goto fail; + } + + if (!add_linux_masked_paths_json(ljson)) { + goto fail; + } + + if (!add_linux_readonly_paths_json(ljson)) { + goto fail; + } + + if (!add_linux_seccomp_json(ljson, rlc)) { + goto fail; + } + + return ljson; + 
+fail: + cJSON_Delete(ljson); + return NULL; +} + +static char* build_runc_config(const runc_launch_cmd* rlc, + const char* rootfs_path) { + char* json_data = NULL; + + cJSON* rcj = build_runc_config_json(rlc, rootfs_path); + if (rcj == NULL) { + return NULL; + } + + json_data = cJSON_PrintBuffered(rcj, STARTING_JSON_BUFFER_SIZE, false); + cJSON_Delete(rcj); + + return json_data; +} + +cJSON* build_runc_config_json(const runc_launch_cmd* rlc, + const char* rootfs_path) { + cJSON* rcj = cJSON_CreateObject(); + if (rcj == NULL) { + goto fail; + } + + if (cJSON_AddStringToObject(rcj, "ociVersion", "1.0.0") == NULL) { + goto fail; + } + + struct utsname uts; + uname(&uts); + if (cJSON_AddStringToObject(rcj, "hostname", uts.nodename) == NULL) { + goto fail; + } + + cJSON* item = build_runc_config_root(rootfs_path); + if (item == NULL) { + goto fail; + } + cJSON_AddItemToObjectCS(rcj, "root", item); + + item = build_runc_config_process(rlc); + if (item == NULL) { + goto fail; + } + cJSON_AddItemToObjectCS(rcj, "process", item); + + item = build_runc_config_mounts(rlc); + if (item == NULL) { + goto fail; + } + cJSON_AddItemToObjectCS(rcj, "mounts", item); + + item = build_runc_config_linux(rlc); + if (item == NULL) { + goto fail; + } + cJSON_AddItemToObjectCS(rcj, "linux", item); + return rcj; + +fail: + cJSON_Delete(rcj); + return NULL; +} + +static char* get_runc_config_path(const char* pid_file) { + char* dir_end = strrchr(pid_file, '/'); + if (dir_end == NULL) { + fprintf(ERRORFILE, "Error pid file %s has no parent directory\n", pid_file); + return NULL; + } + + int dir_len = (dir_end + 1) - pid_file; // include trailing slash + char* config_path = malloc(dir_len + strlen(RUNC_CONFIG_FILENAME) + 1); + if (config_path == NULL) { + return NULL; + } + + char* cp = stpncpy(config_path, pid_file, dir_len); + stpcpy(cp, RUNC_CONFIG_FILENAME); + return config_path; +} + +/** + * Creates the runC runtime configuration file for a container. + * + * Returns the path to the written configuration file or NULL on error. + */ +char* write_runc_runc_config(const runc_launch_cmd* rlc, + const char* rootfs_path) { + char* config_data = build_runc_config(rlc, rootfs_path); + if (config_data == NULL) { + return NULL; + } + + char* runc_config_path = get_runc_config_path(rlc->pid_file); + if (runc_config_path == NULL) { + fputs("Unable to generate runc config path\n", ERRORFILE); + free(config_data); + return NULL; + } + + bool write_ok = write_file_as_nm(runc_config_path, config_data, + strlen(config_data)); + free(config_data); + if (!write_ok) { + free(runc_config_path); + return NULL; + } + + return runc_config_path; +}
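(For orientation only, not part of the patch: a minimal sketch of how the writer above could be driven from a launch path. The wrapper name launch_with_runc and the include locations for runc_launch_cmd are assumptions; the real call site lives in runc.c and is not shown in this hunk.)

    #include <stdlib.h>
    #include "utils/cJSON/cJSON.h"        /* cJSON* used by the declarations below */
    #include "runc/runc_launch_cmd.h"     /* assumed home of runc_launch_cmd */
    #include "runc/runc_write_config.h"
    #include "util.h"                     /* ERROR_RUNC_SETUP_FAILED */

    static int launch_with_runc(const runc_launch_cmd* rlc, const char* rootfs_path) {
      /* Serialize the OCI config.json next to the container's pid file. */
      char* config_path = write_runc_runc_config(rlc, rootfs_path);
      if (config_path == NULL) {
        return ERROR_RUNC_SETUP_FAILED;
      }
      /* ... invoke "runc run" against the bundle that now holds config_path ... */
      free(config_path);
      return 0;
    }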
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/runc/runc_write_config.h b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/runc/runc_write_config.h new file mode 100644 index 0000000000..032af40846 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/runc/runc_write_config.h @@ -0,0 +1,36 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef RUNC_RUNC_WRITE_CONFIG_H +#define RUNC_RUNC_WRITE_CONFIG_H + +/** + * Creates a runC runtime configuration JSON. + * + * Returns the config JSON or NULL on error. + */ +cJSON* build_runc_config_json(const runc_launch_cmd* rlc, + const char* rootfs_path); + +/** + * Creates the runC runtime configuration file for a container. + * + * Returns the path to the written configuration file or NULL on error. + */ +char* write_runc_runc_config(const runc_launch_cmd* rlc, const char* rootfs_path); + +#endif /* RUNC_RUNC_WRITE_CONFIG_H */ diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/util.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/util.c index c0b73d39cd..9567ccc001 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/util.c +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/util.c @@ -323,6 +323,12 @@ const char *get_error_message(const int error_code) { return "Invalid docker runtime"; case DOCKER_SERVICE_MODE_DISABLED: return "Docker service mode disabled"; + case ERROR_RUNC_SETUP_FAILED: + return "runC setup failed"; + case ERROR_RUNC_RUN_FAILED: + return "runC run failed"; + case ERROR_RUNC_REAP_LAYER_MOUNTS_FAILED: + return "runC reap layer mounts failed"; default: return "Unknown error code"; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/util.h b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/util.h index dcc00a90db..b984a2337a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/util.h +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/util.h @@ -100,7 +100,10 @@ enum errorcodes { INVALID_DOCKER_IMAGE_TRUST = 72, INVALID_DOCKER_TMPFS_MOUNT = 73, INVALID_DOCKER_RUNTIME = 74, - DOCKER_SERVICE_MODE_DISABLED = 75 + DOCKER_SERVICE_MODE_DISABLED = 75, + ERROR_RUNC_SETUP_FAILED = 76, + ERROR_RUNC_RUN_FAILED = 77, + ERROR_RUNC_REAP_LAYER_MOUNTS_FAILED = 78 }; /* Macros for min/max. */
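(Aside, not part of the patch: the three new runC error codes are surfaced through the existing get_error_message() lookup shown in the util.c hunk above, so callers can report them the same way as the Docker codes. A minimal fragment; the call site is hypothetical and assumes the usual container-executor context where util.h and ERRORFILE are already in scope.)

    /* Illustrative only: report one of the new codes through the existing lookup. */
    fprintf(ERRORFILE, "runC failure: %s\n",
            get_error_message(ERROR_RUNC_RUN_FAILED));   /* prints "runC run failed" */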
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/cJSON/cJSON.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/cJSON/cJSON.c new file mode 100644 index 0000000000..5da278ee2b --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/cJSON/cJSON.c @@ -0,0 +1,2932 @@ +/* + Copyright (c) 2009-2017 Dave Gamble and cJSON contributors + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + THE SOFTWARE. +*/ + +/* cJSON */ +/* JSON parser in C. */ + +/* disable warnings about old C89 functions in MSVC */ +#if !defined(_CRT_SECURE_NO_DEPRECATE) && defined(_MSC_VER) +#define _CRT_SECURE_NO_DEPRECATE +#endif + +#ifdef __GNUC__ +#pragma GCC visibility push(default) +#endif +#if defined(_MSC_VER) +#pragma warning (push) +/* disable warning about single line comments in system headers */ +#pragma warning (disable : 4001) +#endif + +#include <string.h> +#include <stdio.h> +#include <math.h> +#include <stdlib.h> +#include <limits.h> +#include <ctype.h> + +#ifdef ENABLE_LOCALES +#include <locale.h> +#endif + +#if defined(_MSC_VER) +#pragma warning (pop) +#endif +#ifdef __GNUC__ +#pragma GCC visibility pop +#endif + +#include "cJSON.h" + +/* define our own boolean type */ +#define true ((cJSON_bool)1) +#define false ((cJSON_bool)0) + +typedef struct { + const unsigned char *json; + size_t position; +} error; +static error global_error = { NULL, 0 }; + +CJSON_PUBLIC(const char *) cJSON_GetErrorPtr(void) +{ + return (const char*) (global_error.json + global_error.position); +} + +CJSON_PUBLIC(char *) cJSON_GetStringValue(cJSON *item) { + if (!cJSON_IsString(item)) { + return NULL; + } + + return item->valuestring; +} + +/* This is a safeguard to prevent copy-pasters from using incompatible C and header files */ +#if (CJSON_VERSION_MAJOR != 1) || (CJSON_VERSION_MINOR != 7) || (CJSON_VERSION_PATCH != 8) + #error cJSON.h and cJSON.c have different versions. Make sure that both have the same.
+#endif + +CJSON_PUBLIC(const char*) cJSON_Version(void) +{ + static char version[15]; + sprintf(version, "%i.%i.%i", CJSON_VERSION_MAJOR, CJSON_VERSION_MINOR, CJSON_VERSION_PATCH); + + return version; +} + +/* Case insensitive string comparison, doesn't consider two NULL pointers equal though */ +static int case_insensitive_strcmp(const unsigned char *string1, const unsigned char *string2) +{ + if ((string1 == NULL) || (string2 == NULL)) + { + return 1; + } + + if (string1 == string2) + { + return 0; + } + + for(; tolower(*string1) == tolower(*string2); (void)string1++, string2++) + { + if (*string1 == '\0') + { + return 0; + } + } + + return tolower(*string1) - tolower(*string2); +} + +typedef struct internal_hooks +{ + void *(CJSON_CDECL *allocate)(size_t size); + void (CJSON_CDECL *deallocate)(void *pointer); + void *(CJSON_CDECL *reallocate)(void *pointer, size_t size); +} internal_hooks; + +#if defined(_MSC_VER) +/* work around MSVC error C2322: '...' address of dillimport '...' is not static */ +static void * CJSON_CDECL internal_malloc(size_t size) +{ + return malloc(size); +} +static void CJSON_CDECL internal_free(void *pointer) +{ + free(pointer); +} +static void * CJSON_CDECL internal_realloc(void *pointer, size_t size) +{ + return realloc(pointer, size); +} +#else +#define internal_malloc malloc +#define internal_free free +#define internal_realloc realloc +#endif + +static internal_hooks global_hooks = { internal_malloc, internal_free, internal_realloc }; + +static unsigned char* cJSON_strdup(const unsigned char* string, const internal_hooks * const hooks) +{ + size_t length = 0; + unsigned char *copy = NULL; + + if (string == NULL) + { + return NULL; + } + + length = strlen((const char*)string) + sizeof(""); + copy = (unsigned char*)hooks->allocate(length); + if (copy == NULL) + { + return NULL; + } + memcpy(copy, string, length); + + return copy; +} + +CJSON_PUBLIC(void) cJSON_InitHooks(cJSON_Hooks* hooks) +{ + if (hooks == NULL) + { + /* Reset hooks */ + global_hooks.allocate = malloc; + global_hooks.deallocate = free; + global_hooks.reallocate = realloc; + return; + } + + global_hooks.allocate = malloc; + if (hooks->malloc_fn != NULL) + { + global_hooks.allocate = hooks->malloc_fn; + } + + global_hooks.deallocate = free; + if (hooks->free_fn != NULL) + { + global_hooks.deallocate = hooks->free_fn; + } + + /* use realloc only if both free and malloc are used */ + global_hooks.reallocate = NULL; + if ((global_hooks.allocate == malloc) && (global_hooks.deallocate == free)) + { + global_hooks.reallocate = realloc; + } +} + +/* Internal constructor. */ +static cJSON *cJSON_New_Item(const internal_hooks * const hooks) +{ + cJSON* node = (cJSON*)hooks->allocate(sizeof(cJSON)); + if (node) + { + memset(node, '\0', sizeof(cJSON)); + } + + return node; +} + +/* Delete a cJSON structure. 
*/ +CJSON_PUBLIC(void) cJSON_Delete(cJSON *item) +{ + cJSON *next = NULL; + while (item != NULL) + { + next = item->next; + if (!(item->type & cJSON_IsReference) && (item->child != NULL)) + { + cJSON_Delete(item->child); + } + if (!(item->type & cJSON_IsReference) && (item->valuestring != NULL)) + { + global_hooks.deallocate(item->valuestring); + } + if (!(item->type & cJSON_StringIsConst) && (item->string != NULL)) + { + global_hooks.deallocate(item->string); + } + global_hooks.deallocate(item); + item = next; + } +} + +/* get the decimal point character of the current locale */ +static unsigned char get_decimal_point(void) +{ +#ifdef ENABLE_LOCALES + struct lconv *lconv = localeconv(); + return (unsigned char) lconv->decimal_point[0]; +#else + return '.'; +#endif +} + +typedef struct +{ + const unsigned char *content; + size_t length; + size_t offset; + size_t depth; /* How deeply nested (in arrays/objects) is the input at the current offset. */ + internal_hooks hooks; +} parse_buffer; + +/* check if the given size is left to read in a given parse buffer (starting with 1) */ +#define can_read(buffer, size) ((buffer != NULL) && (((buffer)->offset + size) <= (buffer)->length)) +/* check if the buffer can be accessed at the given index (starting with 0) */ +#define can_access_at_index(buffer, index) ((buffer != NULL) && (((buffer)->offset + index) < (buffer)->length)) +#define cannot_access_at_index(buffer, index) (!can_access_at_index(buffer, index)) +/* get a pointer to the buffer at the position */ +#define buffer_at_offset(buffer) ((buffer)->content + (buffer)->offset) + +/* Parse the input text to generate a number, and populate the result into item. */ +static cJSON_bool parse_number(cJSON * const item, parse_buffer * const input_buffer) +{ + double number = 0; + unsigned char *after_end = NULL; + unsigned char number_c_string[64]; + unsigned char decimal_point = get_decimal_point(); + size_t i = 0; + + if ((input_buffer == NULL) || (input_buffer->content == NULL)) + { + return false; + } + + /* copy the number into a temporary buffer and replace '.' 
with the decimal point + * of the current locale (for strtod) + * This also takes care of '\0' not necessarily being available for marking the end of the input */ + for (i = 0; (i < (sizeof(number_c_string) - 1)) && can_access_at_index(input_buffer, i); i++) + { + switch (buffer_at_offset(input_buffer)[i]) + { + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': + case '+': + case '-': + case 'e': + case 'E': + number_c_string[i] = buffer_at_offset(input_buffer)[i]; + break; + + case '.': + number_c_string[i] = decimal_point; + break; + + default: + goto loop_end; + } + } +loop_end: + number_c_string[i] = '\0'; + + number = strtod((const char*)number_c_string, (char**)&after_end); + if (number_c_string == after_end) + { + return false; /* parse_error */ + } + + item->valuedouble = number; + + /* use saturation in case of overflow */ + if (number >= INT_MAX) + { + item->valueint = INT_MAX; + } + else if (number <= INT_MIN) + { + item->valueint = INT_MIN; + } + else + { + item->valueint = (int)number; + } + + item->type = cJSON_Number; + + input_buffer->offset += (size_t)(after_end - number_c_string); + return true; +} + +/* don't ask me, but the original cJSON_SetNumberValue returns an integer or double */ +CJSON_PUBLIC(double) cJSON_SetNumberHelper(cJSON *object, double number) +{ + if (number >= INT_MAX) + { + object->valueint = INT_MAX; + } + else if (number <= INT_MIN) + { + object->valueint = INT_MIN; + } + else + { + object->valueint = (int)number; + } + + return object->valuedouble = number; +} + +typedef struct +{ + unsigned char *buffer; + size_t length; + size_t offset; + size_t depth; /* current nesting depth (for formatted printing) */ + cJSON_bool noalloc; + cJSON_bool format; /* is this print a formatted print */ + internal_hooks hooks; +} printbuffer; + +/* realloc printbuffer if necessary to have at least "needed" bytes more */ +static unsigned char* ensure(printbuffer * const p, size_t needed) +{ + unsigned char *newbuffer = NULL; + size_t newsize = 0; + + if ((p == NULL) || (p->buffer == NULL)) + { + return NULL; + } + + if ((p->length > 0) && (p->offset >= p->length)) + { + /* make sure that offset is valid */ + return NULL; + } + + if (needed > INT_MAX) + { + /* sizes bigger than INT_MAX are currently not supported */ + return NULL; + } + + needed += p->offset + 1; + if (needed <= p->length) + { + return p->buffer + p->offset; + } + + if (p->noalloc) { + return NULL; + } + + /* calculate new buffer size */ + if (needed > (INT_MAX / 2)) + { + /* overflow of int, use INT_MAX if possible */ + if (needed <= INT_MAX) + { + newsize = INT_MAX; + } + else + { + return NULL; + } + } + else + { + newsize = needed * 2; + } + + if (p->hooks.reallocate != NULL) + { + /* reallocate with realloc if available */ + newbuffer = (unsigned char*)p->hooks.reallocate(p->buffer, newsize); + if (newbuffer == NULL) + { + p->hooks.deallocate(p->buffer); + p->length = 0; + p->buffer = NULL; + + return NULL; + } + } + else + { + /* otherwise reallocate manually */ + newbuffer = (unsigned char*)p->hooks.allocate(newsize); + if (!newbuffer) + { + p->hooks.deallocate(p->buffer); + p->length = 0; + p->buffer = NULL; + + return NULL; + } + if (newbuffer) + { + memcpy(newbuffer, p->buffer, p->offset + 1); + } + p->hooks.deallocate(p->buffer); + } + p->length = newsize; + p->buffer = newbuffer; + + return newbuffer + p->offset; +} + +/* calculate the new length of the string in a printbuffer and update the offset */ +static void 
update_offset(printbuffer * const buffer) +{ + const unsigned char *buffer_pointer = NULL; + if ((buffer == NULL) || (buffer->buffer == NULL)) + { + return; + } + buffer_pointer = buffer->buffer + buffer->offset; + + buffer->offset += strlen((const char*)buffer_pointer); +} + +/* Render the number nicely from the given item into a string. */ +static cJSON_bool print_number(const cJSON * const item, printbuffer * const output_buffer) +{ + unsigned char *output_pointer = NULL; + double d = item->valuedouble; + int length = 0; + size_t i = 0; + unsigned char number_buffer[26]; /* temporary buffer to print the number into */ + unsigned char decimal_point = get_decimal_point(); + double test; + + if (output_buffer == NULL) + { + return false; + } + + /* This checks for NaN and Infinity */ + if ((d * 0) != 0) + { + length = sprintf((char*)number_buffer, "null"); + } + else + { + /* Try 15 decimal places of precision to avoid nonsignificant nonzero digits */ + length = sprintf((char*)number_buffer, "%1.15g", d); + + /* Check whether the original double can be recovered */ + if ((sscanf((char*)number_buffer, "%lg", &test) != 1) || ((double)test != d)) + { + /* If not, print with 17 decimal places of precision */ + length = sprintf((char*)number_buffer, "%1.17g", d); + } + } + + /* sprintf failed or buffer overrun occured */ + if ((length < 0) || (length > (int)(sizeof(number_buffer) - 1))) + { + return false; + } + + /* reserve appropriate space in the output */ + output_pointer = ensure(output_buffer, (size_t)length + sizeof("")); + if (output_pointer == NULL) + { + return false; + } + + /* copy the printed number to the output and replace locale + * dependent decimal point with '.' */ + for (i = 0; i < ((size_t)length); i++) + { + if (number_buffer[i] == decimal_point) + { + output_pointer[i] = '.'; + continue; + } + + output_pointer[i] = number_buffer[i]; + } + output_pointer[i] = '\0'; + + output_buffer->offset += (size_t)length; + + return true; +} + +/* parse 4 digit hexadecimal number */ +static unsigned parse_hex4(const unsigned char * const input) +{ + unsigned int h = 0; + size_t i = 0; + + for (i = 0; i < 4; i++) + { + /* parse digit */ + if ((input[i] >= '0') && (input[i] <= '9')) + { + h += (unsigned int) input[i] - '0'; + } + else if ((input[i] >= 'A') && (input[i] <= 'F')) + { + h += (unsigned int) 10 + input[i] - 'A'; + } + else if ((input[i] >= 'a') && (input[i] <= 'f')) + { + h += (unsigned int) 10 + input[i] - 'a'; + } + else /* invalid */ + { + return 0; + } + + if (i < 3) + { + /* shift left to make place for the next nibble */ + h = h << 4; + } + } + + return h; +} + +/* converts a UTF-16 literal to UTF-8 + * A literal can be one or two sequences of the form \uXXXX */ +static unsigned char utf16_literal_to_utf8(const unsigned char * const input_pointer, const unsigned char * const input_end, unsigned char **output_pointer) +{ + long unsigned int codepoint = 0; + unsigned int first_code = 0; + const unsigned char *first_sequence = input_pointer; + unsigned char utf8_length = 0; + unsigned char utf8_position = 0; + unsigned char sequence_length = 0; + unsigned char first_byte_mark = 0; + + if ((input_end - first_sequence) < 6) + { + /* input ends unexpectedly */ + goto fail; + } + + /* get the first utf16 sequence */ + first_code = parse_hex4(first_sequence + 2); + + /* check that the code is valid */ + if (((first_code >= 0xDC00) && (first_code <= 0xDFFF))) + { + goto fail; + } + + /* UTF16 surrogate pair */ + if ((first_code >= 0xD800) && (first_code <= 0xDBFF)) + { + const 
unsigned char *second_sequence = first_sequence + 6; + unsigned int second_code = 0; + sequence_length = 12; /* \uXXXX\uXXXX */ + + if ((input_end - second_sequence) < 6) + { + /* input ends unexpectedly */ + goto fail; + } + + if ((second_sequence[0] != '\\') || (second_sequence[1] != 'u')) + { + /* missing second half of the surrogate pair */ + goto fail; + } + + /* get the second utf16 sequence */ + second_code = parse_hex4(second_sequence + 2); + /* check that the code is valid */ + if ((second_code < 0xDC00) || (second_code > 0xDFFF)) + { + /* invalid second half of the surrogate pair */ + goto fail; + } + + + /* calculate the unicode codepoint from the surrogate pair */ + codepoint = 0x10000 + (((first_code & 0x3FF) << 10) | (second_code & 0x3FF)); + } + else + { + sequence_length = 6; /* \uXXXX */ + codepoint = first_code; + } + + /* encode as UTF-8 + * takes at maximum 4 bytes to encode: + * 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx */ + if (codepoint < 0x80) + { + /* normal ascii, encoding 0xxxxxxx */ + utf8_length = 1; + } + else if (codepoint < 0x800) + { + /* two bytes, encoding 110xxxxx 10xxxxxx */ + utf8_length = 2; + first_byte_mark = 0xC0; /* 11000000 */ + } + else if (codepoint < 0x10000) + { + /* three bytes, encoding 1110xxxx 10xxxxxx 10xxxxxx */ + utf8_length = 3; + first_byte_mark = 0xE0; /* 11100000 */ + } + else if (codepoint <= 0x10FFFF) + { + /* four bytes, encoding 1110xxxx 10xxxxxx 10xxxxxx 10xxxxxx */ + utf8_length = 4; + first_byte_mark = 0xF0; /* 11110000 */ + } + else + { + /* invalid unicode codepoint */ + goto fail; + } + + /* encode as utf8 */ + for (utf8_position = (unsigned char)(utf8_length - 1); utf8_position > 0; utf8_position--) + { + /* 10xxxxxx */ + (*output_pointer)[utf8_position] = (unsigned char)((codepoint | 0x80) & 0xBF); + codepoint >>= 6; + } + /* encode first byte */ + if (utf8_length > 1) + { + (*output_pointer)[0] = (unsigned char)((codepoint | first_byte_mark) & 0xFF); + } + else + { + (*output_pointer)[0] = (unsigned char)(codepoint & 0x7F); + } + + *output_pointer += utf8_length; + + return sequence_length; + +fail: + return 0; +} + +/* Parse the input text into an unescaped cinput, and populate item. 
*/ +static cJSON_bool parse_string(cJSON * const item, parse_buffer * const input_buffer) +{ + const unsigned char *input_pointer = buffer_at_offset(input_buffer) + 1; + const unsigned char *input_end = buffer_at_offset(input_buffer) + 1; + unsigned char *output_pointer = NULL; + unsigned char *output = NULL; + + /* not a string */ + if (buffer_at_offset(input_buffer)[0] != '\"') + { + goto fail; + } + + { + /* calculate approximate size of the output (overestimate) */ + size_t allocation_length = 0; + size_t skipped_bytes = 0; + while (((size_t)(input_end - input_buffer->content) < input_buffer->length) && (*input_end != '\"')) + { + /* is escape sequence */ + if (input_end[0] == '\\') + { + if ((size_t)(input_end + 1 - input_buffer->content) >= input_buffer->length) + { + /* prevent buffer overflow when last input character is a backslash */ + goto fail; + } + skipped_bytes++; + input_end++; + } + input_end++; + } + if (((size_t)(input_end - input_buffer->content) >= input_buffer->length) || (*input_end != '\"')) + { + goto fail; /* string ended unexpectedly */ + } + + /* This is at most how much we need for the output */ + allocation_length = (size_t) (input_end - buffer_at_offset(input_buffer)) - skipped_bytes; + output = (unsigned char*)input_buffer->hooks.allocate(allocation_length + sizeof("")); + if (output == NULL) + { + goto fail; /* allocation failure */ + } + } + + output_pointer = output; + /* loop through the string literal */ + while (input_pointer < input_end) + { + if (*input_pointer != '\\') + { + *output_pointer++ = *input_pointer++; + } + /* escape sequence */ + else + { + unsigned char sequence_length = 2; + if ((input_end - input_pointer) < 1) + { + goto fail; + } + + switch (input_pointer[1]) + { + case 'b': + *output_pointer++ = '\b'; + break; + case 'f': + *output_pointer++ = '\f'; + break; + case 'n': + *output_pointer++ = '\n'; + break; + case 'r': + *output_pointer++ = '\r'; + break; + case 't': + *output_pointer++ = '\t'; + break; + case '\"': + case '\\': + case '/': + *output_pointer++ = input_pointer[1]; + break; + + /* UTF-16 literal */ + case 'u': + sequence_length = utf16_literal_to_utf8(input_pointer, input_end, &output_pointer); + if (sequence_length == 0) + { + /* failed to convert UTF16-literal to UTF-8 */ + goto fail; + } + break; + + default: + goto fail; + } + input_pointer += sequence_length; + } + } + + /* zero terminate the output */ + *output_pointer = '\0'; + + item->type = cJSON_String; + item->valuestring = (char*)output; + + input_buffer->offset = (size_t) (input_end - input_buffer->content); + input_buffer->offset++; + + return true; + +fail: + if (output != NULL) + { + input_buffer->hooks.deallocate(output); + } + + if (input_pointer != NULL) + { + input_buffer->offset = (size_t)(input_pointer - input_buffer->content); + } + + return false; +} + +/* Render the cstring provided to an escaped version that can be printed. 
*/ +static cJSON_bool print_string_ptr(const unsigned char * const input, printbuffer * const output_buffer) +{ + const unsigned char *input_pointer = NULL; + unsigned char *output = NULL; + unsigned char *output_pointer = NULL; + size_t output_length = 0; + /* numbers of additional characters needed for escaping */ + size_t escape_characters = 0; + + if (output_buffer == NULL) + { + return false; + } + + /* empty string */ + if (input == NULL) + { + output = ensure(output_buffer, sizeof("\"\"")); + if (output == NULL) + { + return false; + } + strcpy((char*)output, "\"\""); + + return true; + } + + /* set "flag" to 1 if something needs to be escaped */ + for (input_pointer = input; *input_pointer; input_pointer++) + { + switch (*input_pointer) + { + case '\"': + case '\\': + case '\b': + case '\f': + case '\n': + case '\r': + case '\t': + /* one character escape sequence */ + escape_characters++; + break; + default: + if (*input_pointer < 32) + { + /* UTF-16 escape sequence uXXXX */ + escape_characters += 5; + } + break; + } + } + output_length = (size_t)(input_pointer - input) + escape_characters; + + output = ensure(output_buffer, output_length + sizeof("\"\"")); + if (output == NULL) + { + return false; + } + + /* no characters have to be escaped */ + if (escape_characters == 0) + { + output[0] = '\"'; + memcpy(output + 1, input, output_length); + output[output_length + 1] = '\"'; + output[output_length + 2] = '\0'; + + return true; + } + + output[0] = '\"'; + output_pointer = output + 1; + /* copy the string */ + for (input_pointer = input; *input_pointer != '\0'; (void)input_pointer++, output_pointer++) + { + if ((*input_pointer > 31) && (*input_pointer != '\"') && (*input_pointer != '\\')) + { + /* normal character, copy */ + *output_pointer = *input_pointer; + } + else + { + /* character needs to be escaped */ + *output_pointer++ = '\\'; + switch (*input_pointer) + { + case '\\': + *output_pointer = '\\'; + break; + case '\"': + *output_pointer = '\"'; + break; + case '\b': + *output_pointer = 'b'; + break; + case '\f': + *output_pointer = 'f'; + break; + case '\n': + *output_pointer = 'n'; + break; + case '\r': + *output_pointer = 'r'; + break; + case '\t': + *output_pointer = 't'; + break; + default: + /* escape and print as unicode codepoint */ + sprintf((char*)output_pointer, "u%04x", *input_pointer); + output_pointer += 4; + break; + } + } + } + output[output_length + 1] = '\"'; + output[output_length + 2] = '\0'; + + return true; +} + +/* Invoke print_string_ptr (which is useful) on an item. */ +static cJSON_bool print_string(const cJSON * const item, printbuffer * const p) +{ + return print_string_ptr((unsigned char*)item->valuestring, p); +} + +/* Predeclare these prototypes. 
*/ +static cJSON_bool parse_value(cJSON * const item, parse_buffer * const input_buffer); +static cJSON_bool print_value(const cJSON * const item, printbuffer * const output_buffer); +static cJSON_bool parse_array(cJSON * const item, parse_buffer * const input_buffer); +static cJSON_bool print_array(const cJSON * const item, printbuffer * const output_buffer); +static cJSON_bool parse_object(cJSON * const item, parse_buffer * const input_buffer); +static cJSON_bool print_object(const cJSON * const item, printbuffer * const output_buffer); + +/* Utility to jump whitespace and cr/lf */ +static parse_buffer *buffer_skip_whitespace(parse_buffer * const buffer) +{ + if ((buffer == NULL) || (buffer->content == NULL)) + { + return NULL; + } + + while (can_access_at_index(buffer, 0) && (buffer_at_offset(buffer)[0] <= 32)) + { + buffer->offset++; + } + + if (buffer->offset == buffer->length) + { + buffer->offset--; + } + + return buffer; +} + +/* skip the UTF-8 BOM (byte order mark) if it is at the beginning of a buffer */ +static parse_buffer *skip_utf8_bom(parse_buffer * const buffer) +{ + if ((buffer == NULL) || (buffer->content == NULL) || (buffer->offset != 0)) + { + return NULL; + } + + if (can_access_at_index(buffer, 4) && (strncmp((const char*)buffer_at_offset(buffer), "\xEF\xBB\xBF", 3) == 0)) + { + buffer->offset += 3; + } + + return buffer; +} + +/* Parse an object - create a new root, and populate. */ +CJSON_PUBLIC(cJSON *) cJSON_ParseWithOpts(const char *value, const char **return_parse_end, cJSON_bool require_null_terminated) +{ + parse_buffer buffer = { 0, 0, 0, 0, { 0, 0, 0 } }; + cJSON *item = NULL; + + /* reset error position */ + global_error.json = NULL; + global_error.position = 0; + + if (value == NULL) + { + goto fail; + } + + buffer.content = (const unsigned char*)value; + buffer.length = strlen((const char*)value) + sizeof(""); + buffer.offset = 0; + buffer.hooks = global_hooks; + + item = cJSON_New_Item(&global_hooks); + if (item == NULL) /* memory fail */ + { + goto fail; + } + + if (!parse_value(item, buffer_skip_whitespace(skip_utf8_bom(&buffer)))) + { + /* parse failure. ep is set. */ + goto fail; + } + + /* if we require null-terminated JSON without appended garbage, skip and then check for a null terminator */ + if (require_null_terminated) + { + buffer_skip_whitespace(&buffer); + if ((buffer.offset >= buffer.length) || buffer_at_offset(&buffer)[0] != '\0') + { + goto fail; + } + } + if (return_parse_end) + { + *return_parse_end = (const char*)buffer_at_offset(&buffer); + } + + return item; + +fail: + if (item != NULL) + { + cJSON_Delete(item); + } + + if (value != NULL) + { + error local_error; + local_error.json = (const unsigned char*)value; + local_error.position = 0; + + if (buffer.offset < buffer.length) + { + local_error.position = buffer.offset; + } + else if (buffer.length > 0) + { + local_error.position = buffer.length - 1; + } + + if (return_parse_end != NULL) + { + *return_parse_end = (const char*)local_error.json + local_error.position; + } + + global_error = local_error; + } + + return NULL; +} + +/* Default options for cJSON_Parse */ +CJSON_PUBLIC(cJSON *) cJSON_Parse(const char *value) +{ + return cJSON_ParseWithOpts(value, 0, 0); +} + +#define cjson_min(a, b) ((a < b) ? 
a : b) + +static unsigned char *print(const cJSON * const item, cJSON_bool format, const internal_hooks * const hooks) +{ + static const size_t default_buffer_size = 256; + printbuffer buffer[1]; + unsigned char *printed = NULL; + + memset(buffer, 0, sizeof(buffer)); + + /* create buffer */ + buffer->buffer = (unsigned char*) hooks->allocate(default_buffer_size); + buffer->length = default_buffer_size; + buffer->format = format; + buffer->hooks = *hooks; + if (buffer->buffer == NULL) + { + goto fail; + } + + /* print the value */ + if (!print_value(item, buffer)) + { + goto fail; + } + update_offset(buffer); + + /* check if reallocate is available */ + if (hooks->reallocate != NULL) + { + printed = (unsigned char*) hooks->reallocate(buffer->buffer, buffer->offset + 1); + if (printed == NULL) { + goto fail; + } + buffer->buffer = NULL; + } + else /* otherwise copy the JSON over to a new buffer */ + { + printed = (unsigned char*) hooks->allocate(buffer->offset + 1); + if (printed == NULL) + { + goto fail; + } + memcpy(printed, buffer->buffer, cjson_min(buffer->length, buffer->offset + 1)); + printed[buffer->offset] = '\0'; /* just to be sure */ + + /* free the buffer */ + hooks->deallocate(buffer->buffer); + } + + return printed; + +fail: + if (buffer->buffer != NULL) + { + hooks->deallocate(buffer->buffer); + } + + if (printed != NULL) + { + hooks->deallocate(printed); + } + + return NULL; +} + +/* Render a cJSON item/entity/structure to text. */ +CJSON_PUBLIC(char *) cJSON_Print(const cJSON *item) +{ + return (char*)print(item, true, &global_hooks); +} + +CJSON_PUBLIC(char *) cJSON_PrintUnformatted(const cJSON *item) +{ + return (char*)print(item, false, &global_hooks); +} + +CJSON_PUBLIC(char *) cJSON_PrintBuffered(const cJSON *item, int prebuffer, cJSON_bool fmt) +{ + printbuffer p = { 0, 0, 0, 0, 0, 0, { 0, 0, 0 } }; + + if (prebuffer < 0) + { + return NULL; + } + + p.buffer = (unsigned char*)global_hooks.allocate((size_t)prebuffer); + if (!p.buffer) + { + return NULL; + } + + p.length = (size_t)prebuffer; + p.offset = 0; + p.noalloc = false; + p.format = fmt; + p.hooks = global_hooks; + + if (!print_value(item, &p)) + { + global_hooks.deallocate(p.buffer); + return NULL; + } + + return (char*)p.buffer; +} + +CJSON_PUBLIC(cJSON_bool) cJSON_PrintPreallocated(cJSON *item, char *buf, const int len, const cJSON_bool fmt) +{ + printbuffer p = { 0, 0, 0, 0, 0, 0, { 0, 0, 0 } }; + + if ((len < 0) || (buf == NULL)) + { + return false; + } + + p.buffer = (unsigned char*)buf; + p.length = (size_t)len; + p.offset = 0; + p.noalloc = true; + p.format = fmt; + p.hooks = global_hooks; + + return print_value(item, &p); +} + +/* Parser core - when encountering text, process appropriately. 
*/ +static cJSON_bool parse_value(cJSON * const item, parse_buffer * const input_buffer) +{ + if ((input_buffer == NULL) || (input_buffer->content == NULL)) + { + return false; /* no input */ + } + + /* parse the different types of values */ + /* null */ + if (can_read(input_buffer, 4) && (strncmp((const char*)buffer_at_offset(input_buffer), "null", 4) == 0)) + { + item->type = cJSON_NULL; + input_buffer->offset += 4; + return true; + } + /* false */ + if (can_read(input_buffer, 5) && (strncmp((const char*)buffer_at_offset(input_buffer), "false", 5) == 0)) + { + item->type = cJSON_False; + input_buffer->offset += 5; + return true; + } + /* true */ + if (can_read(input_buffer, 4) && (strncmp((const char*)buffer_at_offset(input_buffer), "true", 4) == 0)) + { + item->type = cJSON_True; + item->valueint = 1; + input_buffer->offset += 4; + return true; + } + /* string */ + if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == '\"')) + { + return parse_string(item, input_buffer); + } + /* number */ + if (can_access_at_index(input_buffer, 0) && ((buffer_at_offset(input_buffer)[0] == '-') || ((buffer_at_offset(input_buffer)[0] >= '0') && (buffer_at_offset(input_buffer)[0] <= '9')))) + { + return parse_number(item, input_buffer); + } + /* array */ + if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == '[')) + { + return parse_array(item, input_buffer); + } + /* object */ + if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == '{')) + { + return parse_object(item, input_buffer); + } + + return false; +} + +/* Render a value to text. */ +static cJSON_bool print_value(const cJSON * const item, printbuffer * const output_buffer) +{ + unsigned char *output = NULL; + + if ((item == NULL) || (output_buffer == NULL)) + { + return false; + } + + switch ((item->type) & 0xFF) + { + case cJSON_NULL: + output = ensure(output_buffer, 5); + if (output == NULL) + { + return false; + } + strcpy((char*)output, "null"); + return true; + + case cJSON_False: + output = ensure(output_buffer, 6); + if (output == NULL) + { + return false; + } + strcpy((char*)output, "false"); + return true; + + case cJSON_True: + output = ensure(output_buffer, 5); + if (output == NULL) + { + return false; + } + strcpy((char*)output, "true"); + return true; + + case cJSON_Number: + return print_number(item, output_buffer); + + case cJSON_Raw: + { + size_t raw_length = 0; + if (item->valuestring == NULL) + { + return false; + } + + raw_length = strlen(item->valuestring) + sizeof(""); + output = ensure(output_buffer, raw_length); + if (output == NULL) + { + return false; + } + memcpy(output, item->valuestring, raw_length); + return true; + } + + case cJSON_String: + return print_string(item, output_buffer); + + case cJSON_Array: + return print_array(item, output_buffer); + + case cJSON_Object: + return print_object(item, output_buffer); + + default: + return false; + } +} + +/* Build an array from input text. 
*/ +static cJSON_bool parse_array(cJSON * const item, parse_buffer * const input_buffer) +{ + cJSON *head = NULL; /* head of the linked list */ + cJSON *current_item = NULL; + + if (input_buffer->depth >= CJSON_NESTING_LIMIT) + { + return false; /* to deeply nested */ + } + input_buffer->depth++; + + if (buffer_at_offset(input_buffer)[0] != '[') + { + /* not an array */ + goto fail; + } + + input_buffer->offset++; + buffer_skip_whitespace(input_buffer); + if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == ']')) + { + /* empty array */ + goto success; + } + + /* check if we skipped to the end of the buffer */ + if (cannot_access_at_index(input_buffer, 0)) + { + input_buffer->offset--; + goto fail; + } + + /* step back to character in front of the first element */ + input_buffer->offset--; + /* loop through the comma separated array elements */ + do + { + /* allocate next item */ + cJSON *new_item = cJSON_New_Item(&(input_buffer->hooks)); + if (new_item == NULL) + { + goto fail; /* allocation failure */ + } + + /* attach next item to list */ + if (head == NULL) + { + /* start the linked list */ + current_item = head = new_item; + } + else + { + /* add to the end and advance */ + current_item->next = new_item; + new_item->prev = current_item; + current_item = new_item; + } + + /* parse next value */ + input_buffer->offset++; + buffer_skip_whitespace(input_buffer); + if (!parse_value(current_item, input_buffer)) + { + goto fail; /* failed to parse value */ + } + buffer_skip_whitespace(input_buffer); + } + while (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == ',')); + + if (cannot_access_at_index(input_buffer, 0) || buffer_at_offset(input_buffer)[0] != ']') + { + goto fail; /* expected end of array */ + } + +success: + input_buffer->depth--; + + item->type = cJSON_Array; + item->child = head; + + input_buffer->offset++; + + return true; + +fail: + if (head != NULL) + { + cJSON_Delete(head); + } + + return false; +} + +/* Render an array to text */ +static cJSON_bool print_array(const cJSON * const item, printbuffer * const output_buffer) +{ + unsigned char *output_pointer = NULL; + size_t length = 0; + cJSON *current_element = item->child; + + if (output_buffer == NULL) + { + return false; + } + + /* Compose the output array. */ + /* opening square bracket */ + output_pointer = ensure(output_buffer, 1); + if (output_pointer == NULL) + { + return false; + } + + *output_pointer = '['; + output_buffer->offset++; + output_buffer->depth++; + + while (current_element != NULL) + { + if (!print_value(current_element, output_buffer)) + { + return false; + } + update_offset(output_buffer); + if (current_element->next) + { + length = (size_t) (output_buffer->format ? 2 : 1); + output_pointer = ensure(output_buffer, length + 1); + if (output_pointer == NULL) + { + return false; + } + *output_pointer++ = ','; + if(output_buffer->format) + { + *output_pointer++ = ' '; + } + *output_pointer = '\0'; + output_buffer->offset += length; + } + current_element = current_element->next; + } + + output_pointer = ensure(output_buffer, 2); + if (output_pointer == NULL) + { + return false; + } + *output_pointer++ = ']'; + *output_pointer = '\0'; + output_buffer->depth--; + + return true; +} + +/* Build an object from the text. 
*/ +static cJSON_bool parse_object(cJSON * const item, parse_buffer * const input_buffer) +{ + cJSON *head = NULL; /* linked list head */ + cJSON *current_item = NULL; + + if (input_buffer->depth >= CJSON_NESTING_LIMIT) + { + return false; /* to deeply nested */ + } + input_buffer->depth++; + + if (cannot_access_at_index(input_buffer, 0) || (buffer_at_offset(input_buffer)[0] != '{')) + { + goto fail; /* not an object */ + } + + input_buffer->offset++; + buffer_skip_whitespace(input_buffer); + if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == '}')) + { + goto success; /* empty object */ + } + + /* check if we skipped to the end of the buffer */ + if (cannot_access_at_index(input_buffer, 0)) + { + input_buffer->offset--; + goto fail; + } + + /* step back to character in front of the first element */ + input_buffer->offset--; + /* loop through the comma separated array elements */ + do + { + /* allocate next item */ + cJSON *new_item = cJSON_New_Item(&(input_buffer->hooks)); + if (new_item == NULL) + { + goto fail; /* allocation failure */ + } + + /* attach next item to list */ + if (head == NULL) + { + /* start the linked list */ + current_item = head = new_item; + } + else + { + /* add to the end and advance */ + current_item->next = new_item; + new_item->prev = current_item; + current_item = new_item; + } + + /* parse the name of the child */ + input_buffer->offset++; + buffer_skip_whitespace(input_buffer); + if (!parse_string(current_item, input_buffer)) + { + goto fail; /* faile to parse name */ + } + buffer_skip_whitespace(input_buffer); + + /* swap valuestring and string, because we parsed the name */ + current_item->string = current_item->valuestring; + current_item->valuestring = NULL; + + if (cannot_access_at_index(input_buffer, 0) || (buffer_at_offset(input_buffer)[0] != ':')) + { + goto fail; /* invalid object */ + } + + /* parse the value */ + input_buffer->offset++; + buffer_skip_whitespace(input_buffer); + if (!parse_value(current_item, input_buffer)) + { + goto fail; /* failed to parse value */ + } + buffer_skip_whitespace(input_buffer); + } + while (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == ',')); + + if (cannot_access_at_index(input_buffer, 0) || (buffer_at_offset(input_buffer)[0] != '}')) + { + goto fail; /* expected end of object */ + } + +success: + input_buffer->depth--; + + item->type = cJSON_Object; + item->child = head; + + input_buffer->offset++; + return true; + +fail: + if (head != NULL) + { + cJSON_Delete(head); + } + + return false; +} + +/* Render an object to text. */ +static cJSON_bool print_object(const cJSON * const item, printbuffer * const output_buffer) +{ + unsigned char *output_pointer = NULL; + size_t length = 0; + cJSON *current_item = item->child; + + if (output_buffer == NULL) + { + return false; + } + + /* Compose the output: */ + length = (size_t) (output_buffer->format ? 
2 : 1); /* fmt: {\n */ + output_pointer = ensure(output_buffer, length + 1); + if (output_pointer == NULL) + { + return false; + } + + *output_pointer++ = '{'; + output_buffer->depth++; + if (output_buffer->format) + { + *output_pointer++ = '\n'; + } + output_buffer->offset += length; + + while (current_item) + { + if (output_buffer->format) + { + size_t i; + output_pointer = ensure(output_buffer, output_buffer->depth); + if (output_pointer == NULL) + { + return false; + } + for (i = 0; i < output_buffer->depth; i++) + { + *output_pointer++ = '\t'; + } + output_buffer->offset += output_buffer->depth; + } + + /* print key */ + if (!print_string_ptr((unsigned char*)current_item->string, output_buffer)) + { + return false; + } + update_offset(output_buffer); + + length = (size_t) (output_buffer->format ? 2 : 1); + output_pointer = ensure(output_buffer, length); + if (output_pointer == NULL) + { + return false; + } + *output_pointer++ = ':'; + if (output_buffer->format) + { + *output_pointer++ = '\t'; + } + output_buffer->offset += length; + + /* print value */ + if (!print_value(current_item, output_buffer)) + { + return false; + } + update_offset(output_buffer); + + /* print comma if not last */ + length = (size_t) ((output_buffer->format ? 1 : 0) + (current_item->next ? 1 : 0)); + output_pointer = ensure(output_buffer, length + 1); + if (output_pointer == NULL) + { + return false; + } + if (current_item->next) + { + *output_pointer++ = ','; + } + + if (output_buffer->format) + { + *output_pointer++ = '\n'; + } + *output_pointer = '\0'; + output_buffer->offset += length; + + current_item = current_item->next; + } + + output_pointer = ensure(output_buffer, output_buffer->format ? (output_buffer->depth + 1) : 2); + if (output_pointer == NULL) + { + return false; + } + if (output_buffer->format) + { + size_t i; + for (i = 0; i < (output_buffer->depth - 1); i++) + { + *output_pointer++ = '\t'; + } + } + *output_pointer++ = '}'; + *output_pointer = '\0'; + output_buffer->depth--; + + return true; +} + +/* Get Array size/item / object item. */ +CJSON_PUBLIC(int) cJSON_GetArraySize(const cJSON *array) +{ + cJSON *child = NULL; + size_t size = 0; + + if (array == NULL) + { + return 0; + } + + child = array->child; + + while(child != NULL) + { + size++; + child = child->next; + } + + /* FIXME: Can overflow here. 
Cannot be fixed without breaking the API */ + + return (int)size; +} + +static cJSON* get_array_item(const cJSON *array, size_t index) +{ + cJSON *current_child = NULL; + + if (array == NULL) + { + return NULL; + } + + current_child = array->child; + while ((current_child != NULL) && (index > 0)) + { + index--; + current_child = current_child->next; + } + + return current_child; +} + +CJSON_PUBLIC(cJSON *) cJSON_GetArrayItem(const cJSON *array, int index) +{ + if (index < 0) + { + return NULL; + } + + return get_array_item(array, (size_t)index); +} + +static cJSON *get_object_item(const cJSON * const object, const char * const name, const cJSON_bool case_sensitive) +{ + cJSON *current_element = NULL; + + if ((object == NULL) || (name == NULL)) + { + return NULL; + } + + current_element = object->child; + if (case_sensitive) + { + while ((current_element != NULL) && (strcmp(name, current_element->string) != 0)) + { + current_element = current_element->next; + } + } + else + { + while ((current_element != NULL) && (case_insensitive_strcmp((const unsigned char*)name, (const unsigned char*)(current_element->string)) != 0)) + { + current_element = current_element->next; + } + } + + return current_element; +} + +CJSON_PUBLIC(cJSON *) cJSON_GetObjectItem(const cJSON * const object, const char * const string) +{ + return get_object_item(object, string, false); +} + +CJSON_PUBLIC(cJSON *) cJSON_GetObjectItemCaseSensitive(const cJSON * const object, const char * const string) +{ + return get_object_item(object, string, true); +} + +CJSON_PUBLIC(cJSON_bool) cJSON_HasObjectItem(const cJSON *object, const char *string) +{ + return cJSON_GetObjectItem(object, string) ? 1 : 0; +} + +/* Utility for array list handling. */ +static void suffix_object(cJSON *prev, cJSON *item) +{ + prev->next = item; + item->prev = prev; +} + +/* Utility for handling references. */ +static cJSON *create_reference(const cJSON *item, const internal_hooks * const hooks) +{ + cJSON *reference = NULL; + if (item == NULL) + { + return NULL; + } + + reference = cJSON_New_Item(hooks); + if (reference == NULL) + { + return NULL; + } + + memcpy(reference, item, sizeof(cJSON)); + reference->string = NULL; + reference->type |= cJSON_IsReference; + reference->next = reference->prev = NULL; + return reference; +} + +static cJSON_bool add_item_to_array(cJSON *array, cJSON *item) +{ + cJSON *child = NULL; + + if ((item == NULL) || (array == NULL)) + { + return false; + } + + child = array->child; + + if (child == NULL) + { + /* list is empty, start new one */ + array->child = item; + } + else + { + /* append to the end */ + while (child->next) + { + child = child->next; + } + suffix_object(child, item); + } + + return true; +} + +/* Add item to array/object. 
*/ +CJSON_PUBLIC(void) cJSON_AddItemToArray(cJSON *array, cJSON *item) +{ + add_item_to_array(array, item); +} + +#if defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ > 5)))) + #pragma GCC diagnostic push +#endif +#ifdef __GNUC__ +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif +/* helper function to cast away const */ +static void* cast_away_const(const void* string) +{ + return (void*)string; +} +#if defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ > 5)))) + #pragma GCC diagnostic pop +#endif + + +static cJSON_bool add_item_to_object(cJSON * const object, const char * const string, cJSON * const item, const internal_hooks * const hooks, const cJSON_bool constant_key) +{ + char *new_key = NULL; + int new_type = cJSON_Invalid; + + if ((object == NULL) || (string == NULL) || (item == NULL)) + { + return false; + } + + if (constant_key) + { + new_key = (char*)cast_away_const(string); + new_type = item->type | cJSON_StringIsConst; + } + else + { + new_key = (char*)cJSON_strdup((const unsigned char*)string, hooks); + if (new_key == NULL) + { + return false; + } + + new_type = item->type & ~cJSON_StringIsConst; + } + + if (!(item->type & cJSON_StringIsConst) && (item->string != NULL)) + { + hooks->deallocate(item->string); + } + + item->string = new_key; + item->type = new_type; + + return add_item_to_array(object, item); +} + +CJSON_PUBLIC(void) cJSON_AddItemToObject(cJSON *object, const char *string, cJSON *item) +{ + add_item_to_object(object, string, item, &global_hooks, false); +} + +/* Add an item to an object with constant string as key */ +CJSON_PUBLIC(void) cJSON_AddItemToObjectCS(cJSON *object, const char *string, cJSON *item) +{ + add_item_to_object(object, string, item, &global_hooks, true); +} + +CJSON_PUBLIC(void) cJSON_AddItemReferenceToArray(cJSON *array, cJSON *item) +{ + if (array == NULL) + { + return; + } + + add_item_to_array(array, create_reference(item, &global_hooks)); +} + +CJSON_PUBLIC(void) cJSON_AddItemReferenceToObject(cJSON *object, const char *string, cJSON *item) +{ + if ((object == NULL) || (string == NULL)) + { + return; + } + + add_item_to_object(object, string, create_reference(item, &global_hooks), &global_hooks, false); +} + +CJSON_PUBLIC(cJSON*) cJSON_AddNullToObject(cJSON * const object, const char * const name) +{ + cJSON *null = cJSON_CreateNull(); + if (add_item_to_object(object, name, null, &global_hooks, false)) + { + return null; + } + + cJSON_Delete(null); + return NULL; +} + +CJSON_PUBLIC(cJSON*) cJSON_AddTrueToObject(cJSON * const object, const char * const name) +{ + cJSON *true_item = cJSON_CreateTrue(); + if (add_item_to_object(object, name, true_item, &global_hooks, false)) + { + return true_item; + } + + cJSON_Delete(true_item); + return NULL; +} + +CJSON_PUBLIC(cJSON*) cJSON_AddFalseToObject(cJSON * const object, const char * const name) +{ + cJSON *false_item = cJSON_CreateFalse(); + if (add_item_to_object(object, name, false_item, &global_hooks, false)) + { + return false_item; + } + + cJSON_Delete(false_item); + return NULL; +} + +CJSON_PUBLIC(cJSON*) cJSON_AddBoolToObject(cJSON * const object, const char * const name, const cJSON_bool boolean) +{ + cJSON *bool_item = cJSON_CreateBool(boolean); + if (add_item_to_object(object, name, bool_item, &global_hooks, false)) + { + return bool_item; + } + + cJSON_Delete(bool_item); + return NULL; +} + +CJSON_PUBLIC(cJSON*) cJSON_AddNumberToObject(cJSON * const object, const char * const name, 
const double number) +{ + cJSON *number_item = cJSON_CreateNumber(number); + if (add_item_to_object(object, name, number_item, &global_hooks, false)) + { + return number_item; + } + + cJSON_Delete(number_item); + return NULL; +} + +CJSON_PUBLIC(cJSON*) cJSON_AddStringToObject(cJSON * const object, const char * const name, const char * const string) +{ + cJSON *string_item = cJSON_CreateString(string); + if (add_item_to_object(object, name, string_item, &global_hooks, false)) + { + return string_item; + } + + cJSON_Delete(string_item); + return NULL; +} + +CJSON_PUBLIC(cJSON*) cJSON_AddRawToObject(cJSON * const object, const char * const name, const char * const raw) +{ + cJSON *raw_item = cJSON_CreateRaw(raw); + if (add_item_to_object(object, name, raw_item, &global_hooks, false)) + { + return raw_item; + } + + cJSON_Delete(raw_item); + return NULL; +} + +CJSON_PUBLIC(cJSON*) cJSON_AddObjectToObject(cJSON * const object, const char * const name) +{ + cJSON *object_item = cJSON_CreateObject(); + if (add_item_to_object(object, name, object_item, &global_hooks, false)) + { + return object_item; + } + + cJSON_Delete(object_item); + return NULL; +} + +CJSON_PUBLIC(cJSON*) cJSON_AddArrayToObject(cJSON * const object, const char * const name) +{ + cJSON *array = cJSON_CreateArray(); + if (add_item_to_object(object, name, array, &global_hooks, false)) + { + return array; + } + + cJSON_Delete(array); + return NULL; +} + +CJSON_PUBLIC(cJSON *) cJSON_DetachItemViaPointer(cJSON *parent, cJSON * const item) +{ + if ((parent == NULL) || (item == NULL)) + { + return NULL; + } + + if (item->prev != NULL) + { + /* not the first element */ + item->prev->next = item->next; + } + if (item->next != NULL) + { + /* not the last element */ + item->next->prev = item->prev; + } + + if (item == parent->child) + { + /* first element */ + parent->child = item->next; + } + /* make sure the detached item doesn't point anywhere anymore */ + item->prev = NULL; + item->next = NULL; + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromArray(cJSON *array, int which) +{ + if (which < 0) + { + return NULL; + } + + return cJSON_DetachItemViaPointer(array, get_array_item(array, (size_t)which)); +} + +CJSON_PUBLIC(void) cJSON_DeleteItemFromArray(cJSON *array, int which) +{ + cJSON_Delete(cJSON_DetachItemFromArray(array, which)); +} + +CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromObject(cJSON *object, const char *string) +{ + cJSON *to_detach = cJSON_GetObjectItem(object, string); + + return cJSON_DetachItemViaPointer(object, to_detach); +} + +CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromObjectCaseSensitive(cJSON *object, const char *string) +{ + cJSON *to_detach = cJSON_GetObjectItemCaseSensitive(object, string); + + return cJSON_DetachItemViaPointer(object, to_detach); +} + +CJSON_PUBLIC(void) cJSON_DeleteItemFromObject(cJSON *object, const char *string) +{ + cJSON_Delete(cJSON_DetachItemFromObject(object, string)); +} + +CJSON_PUBLIC(void) cJSON_DeleteItemFromObjectCaseSensitive(cJSON *object, const char *string) +{ + cJSON_Delete(cJSON_DetachItemFromObjectCaseSensitive(object, string)); +} + +/* Replace array/object items with new ones. 
*/ +CJSON_PUBLIC(void) cJSON_InsertItemInArray(cJSON *array, int which, cJSON *newitem) +{ + cJSON *after_inserted = NULL; + + if (which < 0) + { + return; + } + + after_inserted = get_array_item(array, (size_t)which); + if (after_inserted == NULL) + { + add_item_to_array(array, newitem); + return; + } + + newitem->next = after_inserted; + newitem->prev = after_inserted->prev; + after_inserted->prev = newitem; + if (after_inserted == array->child) + { + array->child = newitem; + } + else + { + newitem->prev->next = newitem; + } +} + +CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemViaPointer(cJSON * const parent, cJSON * const item, cJSON * replacement) +{ + if ((parent == NULL) || (replacement == NULL) || (item == NULL)) + { + return false; + } + + if (replacement == item) + { + return true; + } + + replacement->next = item->next; + replacement->prev = item->prev; + + if (replacement->next != NULL) + { + replacement->next->prev = replacement; + } + if (replacement->prev != NULL) + { + replacement->prev->next = replacement; + } + if (parent->child == item) + { + parent->child = replacement; + } + + item->next = NULL; + item->prev = NULL; + cJSON_Delete(item); + + return true; +} + +CJSON_PUBLIC(void) cJSON_ReplaceItemInArray(cJSON *array, int which, cJSON *newitem) +{ + if (which < 0) + { + return; + } + + cJSON_ReplaceItemViaPointer(array, get_array_item(array, (size_t)which), newitem); +} + +static cJSON_bool replace_item_in_object(cJSON *object, const char *string, cJSON *replacement, cJSON_bool case_sensitive) +{ + if ((replacement == NULL) || (string == NULL)) + { + return false; + } + + /* replace the name in the replacement */ + if (!(replacement->type & cJSON_StringIsConst) && (replacement->string != NULL)) + { + cJSON_free(replacement->string); + } + replacement->string = (char*)cJSON_strdup((const unsigned char*)string, &global_hooks); + replacement->type &= ~cJSON_StringIsConst; + + cJSON_ReplaceItemViaPointer(object, get_object_item(object, string, case_sensitive), replacement); + + return true; +} + +CJSON_PUBLIC(void) cJSON_ReplaceItemInObject(cJSON *object, const char *string, cJSON *newitem) +{ + replace_item_in_object(object, string, newitem, false); +} + +CJSON_PUBLIC(void) cJSON_ReplaceItemInObjectCaseSensitive(cJSON *object, const char *string, cJSON *newitem) +{ + replace_item_in_object(object, string, newitem, true); +} + +/* Create basic types: */ +CJSON_PUBLIC(cJSON *) cJSON_CreateNull(void) +{ + cJSON *item = cJSON_New_Item(&global_hooks); + if(item) + { + item->type = cJSON_NULL; + } + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateTrue(void) +{ + cJSON *item = cJSON_New_Item(&global_hooks); + if(item) + { + item->type = cJSON_True; + } + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateFalse(void) +{ + cJSON *item = cJSON_New_Item(&global_hooks); + if(item) + { + item->type = cJSON_False; + } + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateBool(cJSON_bool b) +{ + cJSON *item = cJSON_New_Item(&global_hooks); + if(item) + { + item->type = b ? 
cJSON_True : cJSON_False; + } + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateNumber(double num) +{ + cJSON *item = cJSON_New_Item(&global_hooks); + if(item) + { + item->type = cJSON_Number; + item->valuedouble = num; + + /* use saturation in case of overflow */ + if (num >= INT_MAX) + { + item->valueint = INT_MAX; + } + else if (num <= INT_MIN) + { + item->valueint = INT_MIN; + } + else + { + item->valueint = (int)num; + } + } + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateString(const char *string) +{ + cJSON *item = cJSON_New_Item(&global_hooks); + if(item) + { + item->type = cJSON_String; + item->valuestring = (char*)cJSON_strdup((const unsigned char*)string, &global_hooks); + if(!item->valuestring) + { + cJSON_Delete(item); + return NULL; + } + } + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateStringReference(const char *string) +{ + cJSON *item = cJSON_New_Item(&global_hooks); + if (item != NULL) + { + item->type = cJSON_String | cJSON_IsReference; + item->valuestring = (char*)cast_away_const(string); + } + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateObjectReference(const cJSON *child) +{ + cJSON *item = cJSON_New_Item(&global_hooks); + if (item != NULL) { + item->type = cJSON_Object | cJSON_IsReference; + item->child = (cJSON*)cast_away_const(child); + } + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateArrayReference(const cJSON *child) { + cJSON *item = cJSON_New_Item(&global_hooks); + if (item != NULL) { + item->type = cJSON_Array | cJSON_IsReference; + item->child = (cJSON*)cast_away_const(child); + } + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateRaw(const char *raw) +{ + cJSON *item = cJSON_New_Item(&global_hooks); + if(item) + { + item->type = cJSON_Raw; + item->valuestring = (char*)cJSON_strdup((const unsigned char*)raw, &global_hooks); + if(!item->valuestring) + { + cJSON_Delete(item); + return NULL; + } + } + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateArray(void) +{ + cJSON *item = cJSON_New_Item(&global_hooks); + if(item) + { + item->type=cJSON_Array; + } + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateObject(void) +{ + cJSON *item = cJSON_New_Item(&global_hooks); + if (item) + { + item->type = cJSON_Object; + } + + return item; +} + +/* Create Arrays: */ +CJSON_PUBLIC(cJSON *) cJSON_CreateIntArray(const int *numbers, int count) +{ + size_t i = 0; + cJSON *n = NULL; + cJSON *p = NULL; + cJSON *a = NULL; + + if ((count < 0) || (numbers == NULL)) + { + return NULL; + } + + a = cJSON_CreateArray(); + for(i = 0; a && (i < (size_t)count); i++) + { + n = cJSON_CreateNumber(numbers[i]); + if (!n) + { + cJSON_Delete(a); + return NULL; + } + if(!i) + { + a->child = n; + } + else + { + suffix_object(p, n); + } + p = n; + } + + return a; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateFloatArray(const float *numbers, int count) +{ + size_t i = 0; + cJSON *n = NULL; + cJSON *p = NULL; + cJSON *a = NULL; + + if ((count < 0) || (numbers == NULL)) + { + return NULL; + } + + a = cJSON_CreateArray(); + + for(i = 0; a && (i < (size_t)count); i++) + { + n = cJSON_CreateNumber((double)numbers[i]); + if(!n) + { + cJSON_Delete(a); + return NULL; + } + if(!i) + { + a->child = n; + } + else + { + suffix_object(p, n); + } + p = n; + } + + return a; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateDoubleArray(const double *numbers, int count) +{ + size_t i = 0; + cJSON *n = NULL; + cJSON *p = NULL; + cJSON *a = NULL; + + if ((count < 0) || (numbers == NULL)) + { + return NULL; + } + + a = cJSON_CreateArray(); + + for(i = 0;a && (i < 
(size_t)count); i++) + { + n = cJSON_CreateNumber(numbers[i]); + if(!n) + { + cJSON_Delete(a); + return NULL; + } + if(!i) + { + a->child = n; + } + else + { + suffix_object(p, n); + } + p = n; + } + + return a; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateStringArray(const char **strings, int count) +{ + size_t i = 0; + cJSON *n = NULL; + cJSON *p = NULL; + cJSON *a = NULL; + + if ((count < 0) || (strings == NULL)) + { + return NULL; + } + + a = cJSON_CreateArray(); + + for (i = 0; a && (i < (size_t)count); i++) + { + n = cJSON_CreateString(strings[i]); + if(!n) + { + cJSON_Delete(a); + return NULL; + } + if(!i) + { + a->child = n; + } + else + { + suffix_object(p,n); + } + p = n; + } + + return a; +} + +/* Duplication */ +CJSON_PUBLIC(cJSON *) cJSON_Duplicate(const cJSON *item, cJSON_bool recurse) +{ + cJSON *newitem = NULL; + cJSON *child = NULL; + cJSON *next = NULL; + cJSON *newchild = NULL; + + /* Bail on bad ptr */ + if (!item) + { + goto fail; + } + /* Create new item */ + newitem = cJSON_New_Item(&global_hooks); + if (!newitem) + { + goto fail; + } + /* Copy over all vars */ + newitem->type = item->type & (~cJSON_IsReference); + newitem->valueint = item->valueint; + newitem->valuedouble = item->valuedouble; + if (item->valuestring) + { + newitem->valuestring = (char*)cJSON_strdup((unsigned char*)item->valuestring, &global_hooks); + if (!newitem->valuestring) + { + goto fail; + } + } + if (item->string) + { + newitem->string = (item->type&cJSON_StringIsConst) ? item->string : (char*)cJSON_strdup((unsigned char*)item->string, &global_hooks); + if (!newitem->string) + { + goto fail; + } + } + /* If non-recursive, then we're done! */ + if (!recurse) + { + return newitem; + } + /* Walk the ->next chain for the child. */ + child = item->child; + while (child != NULL) + { + newchild = cJSON_Duplicate(child, true); /* Duplicate (with recurse) each item in the ->next chain */ + if (!newchild) + { + goto fail; + } + if (next != NULL) + { + /* If newitem->child already set, then crosswire ->prev and ->next and move on */ + next->next = newchild; + newchild->prev = next; + next = newchild; + } + else + { + /* Set newitem->child and move to it */ + newitem->child = newchild; + next = newchild; + } + child = child->next; + } + + return newitem; + +fail: + if (newitem != NULL) + { + cJSON_Delete(newitem); + } + + return NULL; +} + +CJSON_PUBLIC(void) cJSON_Minify(char *json) +{ + unsigned char *into = (unsigned char*)json; + + if (json == NULL) + { + return; + } + + while (*json) + { + if (*json == ' ') + { + json++; + } + else if (*json == '\t') + { + /* Whitespace characters. */ + json++; + } + else if (*json == '\r') + { + json++; + } + else if (*json=='\n') + { + json++; + } + else if ((*json == '/') && (json[1] == '/')) + { + /* double-slash comments, to end of line. */ + while (*json && (*json != '\n')) + { + json++; + } + } + else if ((*json == '/') && (json[1] == '*')) + { + /* multiline comments. */ + while (*json && !((*json == '*') && (json[1] == '/'))) + { + json++; + } + json += 2; + } + else if (*json == '\"') + { + /* string literals, which are \" sensitive. */ + *into++ = (unsigned char)*json++; + while (*json && (*json != '\"')) + { + if (*json == '\\') + { + *into++ = (unsigned char)*json++; + } + *into++ = (unsigned char)*json++; + } + *into++ = (unsigned char)*json++; + } + else + { + /* All other characters. */ + *into++ = (unsigned char)*json++; + } + } + + /* and null-terminate. 
*/ + *into = '\0'; +} + +CJSON_PUBLIC(cJSON_bool) cJSON_IsInvalid(const cJSON * const item) +{ + if (item == NULL) + { + return false; + } + + return (item->type & 0xFF) == cJSON_Invalid; +} + +CJSON_PUBLIC(cJSON_bool) cJSON_IsFalse(const cJSON * const item) +{ + if (item == NULL) + { + return false; + } + + return (item->type & 0xFF) == cJSON_False; +} + +CJSON_PUBLIC(cJSON_bool) cJSON_IsTrue(const cJSON * const item) +{ + if (item == NULL) + { + return false; + } + + return (item->type & 0xff) == cJSON_True; +} + + +CJSON_PUBLIC(cJSON_bool) cJSON_IsBool(const cJSON * const item) +{ + if (item == NULL) + { + return false; + } + + return (item->type & (cJSON_True | cJSON_False)) != 0; +} +CJSON_PUBLIC(cJSON_bool) cJSON_IsNull(const cJSON * const item) +{ + if (item == NULL) + { + return false; + } + + return (item->type & 0xFF) == cJSON_NULL; +} + +CJSON_PUBLIC(cJSON_bool) cJSON_IsNumber(const cJSON * const item) +{ + if (item == NULL) + { + return false; + } + + return (item->type & 0xFF) == cJSON_Number; +} + +CJSON_PUBLIC(cJSON_bool) cJSON_IsString(const cJSON * const item) +{ + if (item == NULL) + { + return false; + } + + return (item->type & 0xFF) == cJSON_String; +} + +CJSON_PUBLIC(cJSON_bool) cJSON_IsArray(const cJSON * const item) +{ + if (item == NULL) + { + return false; + } + + return (item->type & 0xFF) == cJSON_Array; +} + +CJSON_PUBLIC(cJSON_bool) cJSON_IsObject(const cJSON * const item) +{ + if (item == NULL) + { + return false; + } + + return (item->type & 0xFF) == cJSON_Object; +} + +CJSON_PUBLIC(cJSON_bool) cJSON_IsRaw(const cJSON * const item) +{ + if (item == NULL) + { + return false; + } + + return (item->type & 0xFF) == cJSON_Raw; +} + +CJSON_PUBLIC(cJSON_bool) cJSON_Compare(const cJSON * const a, const cJSON * const b, const cJSON_bool case_sensitive) +{ + if ((a == NULL) || (b == NULL) || ((a->type & 0xFF) != (b->type & 0xFF)) || cJSON_IsInvalid(a)) + { + return false; + } + + /* check if type is valid */ + switch (a->type & 0xFF) + { + case cJSON_False: + case cJSON_True: + case cJSON_NULL: + case cJSON_Number: + case cJSON_String: + case cJSON_Raw: + case cJSON_Array: + case cJSON_Object: + break; + + default: + return false; + } + + /* identical objects are equal */ + if (a == b) + { + return true; + } + + switch (a->type & 0xFF) + { + /* in these cases and equal type is enough */ + case cJSON_False: + case cJSON_True: + case cJSON_NULL: + return true; + + case cJSON_Number: + if (a->valuedouble == b->valuedouble) + { + return true; + } + return false; + + case cJSON_String: + case cJSON_Raw: + if ((a->valuestring == NULL) || (b->valuestring == NULL)) + { + return false; + } + if (strcmp(a->valuestring, b->valuestring) == 0) + { + return true; + } + + return false; + + case cJSON_Array: + { + cJSON *a_element = a->child; + cJSON *b_element = b->child; + + for (; (a_element != NULL) && (b_element != NULL);) + { + if (!cJSON_Compare(a_element, b_element, case_sensitive)) + { + return false; + } + + a_element = a_element->next; + b_element = b_element->next; + } + + /* one of the arrays is longer than the other */ + if (a_element != b_element) { + return false; + } + + return true; + } + + case cJSON_Object: + { + cJSON *a_element = NULL; + cJSON *b_element = NULL; + cJSON_ArrayForEach(a_element, a) + { + /* TODO This has O(n^2) runtime, which is horrible! 
*/ + b_element = get_object_item(b, a_element->string, case_sensitive); + if (b_element == NULL) + { + return false; + } + + if (!cJSON_Compare(a_element, b_element, case_sensitive)) + { + return false; + } + } + + /* doing this twice, once on a and b to prevent true comparison if a subset of b + * TODO: Do this the proper way, this is just a fix for now */ + cJSON_ArrayForEach(b_element, b) + { + a_element = get_object_item(a, b_element->string, case_sensitive); + if (a_element == NULL) + { + return false; + } + + if (!cJSON_Compare(b_element, a_element, case_sensitive)) + { + return false; + } + } + + return true; + } + + default: + return false; + } +} + +CJSON_PUBLIC(void *) cJSON_malloc(size_t size) +{ + return global_hooks.allocate(size); +} + +CJSON_PUBLIC(void) cJSON_free(void *object) +{ + global_hooks.deallocate(object); +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/cJSON/cJSON.h b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/cJSON/cJSON.h new file mode 100644 index 0000000000..8d45390219 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/cJSON/cJSON.h @@ -0,0 +1,285 @@ +/* + Copyright (c) 2009-2017 Dave Gamble and cJSON contributors + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + THE SOFTWARE. +*/ + +#ifndef cJSON__h +#define cJSON__h + +#ifdef __cplusplus +extern "C" +{ +#endif + +#if !defined(__WINDOWS__) && (defined(WIN32) || defined(WIN64) || defined(_MSC_VER) || defined(_WIN32)) +#define __WINDOWS__ +#endif + +#ifdef __WINDOWS__ + +/* When compiling for windows, we specify a specific calling convention to avoid issues where we are being called from a project with a different default calling convention. 
For windows you have 3 define options: + +CJSON_HIDE_SYMBOLS - Define this in the case where you don't want to ever dllexport symbols +CJSON_EXPORT_SYMBOLS - Define this on library build when you want to dllexport symbols (default) +CJSON_IMPORT_SYMBOLS - Define this if you want to dllimport symbol + +For *nix builds that support visibility attribute, you can define similar behavior by + +setting default visibility to hidden by adding +-fvisibility=hidden (for gcc) +or +-xldscope=hidden (for sun cc) +to CFLAGS + +then using the CJSON_API_VISIBILITY flag to "export" the same symbols the way CJSON_EXPORT_SYMBOLS does + +*/ + +#define CJSON_CDECL __cdecl +#define CJSON_STDCALL __stdcall + +/* export symbols by default, this is necessary for copy pasting the C and header file */ +#if !defined(CJSON_HIDE_SYMBOLS) && !defined(CJSON_IMPORT_SYMBOLS) && !defined(CJSON_EXPORT_SYMBOLS) +#define CJSON_EXPORT_SYMBOLS +#endif + +#if defined(CJSON_HIDE_SYMBOLS) +#define CJSON_PUBLIC(type) type CJSON_STDCALL +#elif defined(CJSON_EXPORT_SYMBOLS) +#define CJSON_PUBLIC(type) __declspec(dllexport) type CJSON_STDCALL +#elif defined(CJSON_IMPORT_SYMBOLS) +#define CJSON_PUBLIC(type) __declspec(dllimport) type CJSON_STDCALL +#endif +#else /* !__WINDOWS__ */ +#define CJSON_CDECL +#define CJSON_STDCALL + +#if (defined(__GNUC__) || defined(__SUNPRO_CC) || defined (__SUNPRO_C)) && defined(CJSON_API_VISIBILITY) +#define CJSON_PUBLIC(type) __attribute__((visibility("default"))) type +#else +#define CJSON_PUBLIC(type) type +#endif +#endif + +/* project version */ +#define CJSON_VERSION_MAJOR 1 +#define CJSON_VERSION_MINOR 7 +#define CJSON_VERSION_PATCH 8 + +#include + +/* cJSON Types: */ +#define cJSON_Invalid (0) +#define cJSON_False (1 << 0) +#define cJSON_True (1 << 1) +#define cJSON_NULL (1 << 2) +#define cJSON_Number (1 << 3) +#define cJSON_String (1 << 4) +#define cJSON_Array (1 << 5) +#define cJSON_Object (1 << 6) +#define cJSON_Raw (1 << 7) /* raw json */ + +#define cJSON_IsReference 256 +#define cJSON_StringIsConst 512 + +/* The cJSON structure: */ +typedef struct cJSON +{ + /* next/prev allow you to walk array/object chains. Alternatively, use GetArraySize/GetArrayItem/GetObjectItem */ + struct cJSON *next; + struct cJSON *prev; + /* An array or object item will have a child pointer pointing to a chain of the items in the array/object. */ + struct cJSON *child; + + /* The type of the item, as above. */ + int type; + + /* The item's string, if type==cJSON_String and type == cJSON_Raw */ + char *valuestring; + /* writing to valueint is DEPRECATED, use cJSON_SetNumberValue instead */ + int valueint; + /* The item's number, if type==cJSON_Number */ + double valuedouble; + + /* The item's name string, if this item is the child of, or is in the list of subitems of an object. */ + char *string; +} cJSON; + +typedef struct cJSON_Hooks +{ + /* malloc/free are CDECL on Windows regardless of the default calling convention of the compiler, so ensure the hooks allow passing those functions directly. */ + void *(CJSON_CDECL *malloc_fn)(size_t sz); + void (CJSON_CDECL *free_fn)(void *ptr); +} cJSON_Hooks; + +typedef int cJSON_bool; + +/* Limits how deeply nested arrays/objects can be before cJSON rejects to parse them. + * This is to prevent stack overflows. 
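 * Because the default below is wrapped in #ifndef, the limit can also be overridden
 * at build time, e.g. -DCJSON_NESTING_LIMIT=64 (an illustrative value), without
 * editing this header.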
*/ +#ifndef CJSON_NESTING_LIMIT +#define CJSON_NESTING_LIMIT 1000 +#endif + +/* returns the version of cJSON as a string */ +CJSON_PUBLIC(const char*) cJSON_Version(void); + +/* Supply malloc, realloc and free functions to cJSON */ +CJSON_PUBLIC(void) cJSON_InitHooks(cJSON_Hooks* hooks); + +/* Memory Management: the caller is always responsible to free the results from all variants of cJSON_Parse (with cJSON_Delete) and cJSON_Print (with stdlib free, cJSON_Hooks.free_fn, or cJSON_free as appropriate). The exception is cJSON_PrintPreallocated, where the caller has full responsibility of the buffer. */ +/* Supply a block of JSON, and this returns a cJSON object you can interrogate. */ +CJSON_PUBLIC(cJSON *) cJSON_Parse(const char *value); +/* ParseWithOpts allows you to require (and check) that the JSON is null terminated, and to retrieve the pointer to the final byte parsed. */ +/* If you supply a ptr in return_parse_end and parsing fails, then return_parse_end will contain a pointer to the error so will match cJSON_GetErrorPtr(). */ +CJSON_PUBLIC(cJSON *) cJSON_ParseWithOpts(const char *value, const char **return_parse_end, cJSON_bool require_null_terminated); + +/* Render a cJSON entity to text for transfer/storage. */ +CJSON_PUBLIC(char *) cJSON_Print(const cJSON *item); +/* Render a cJSON entity to text for transfer/storage without any formatting. */ +CJSON_PUBLIC(char *) cJSON_PrintUnformatted(const cJSON *item); +/* Render a cJSON entity to text using a buffered strategy. prebuffer is a guess at the final size. guessing well reduces reallocation. fmt=0 gives unformatted, =1 gives formatted */ +CJSON_PUBLIC(char *) cJSON_PrintBuffered(const cJSON *item, int prebuffer, cJSON_bool fmt); +/* Render a cJSON entity to text using a buffer already allocated in memory with given length. Returns 1 on success and 0 on failure. */ +/* NOTE: cJSON is not always 100% accurate in estimating how much memory it will use, so to be safe allocate 5 bytes more than you actually need */ +CJSON_PUBLIC(cJSON_bool) cJSON_PrintPreallocated(cJSON *item, char *buffer, const int length, const cJSON_bool format); +/* Delete a cJSON entity and all subentities. */ +CJSON_PUBLIC(void) cJSON_Delete(cJSON *c); + +/* Returns the number of items in an array (or object). */ +CJSON_PUBLIC(int) cJSON_GetArraySize(const cJSON *array); +/* Retrieve item number "index" from array "array". Returns NULL if unsuccessful. */ +CJSON_PUBLIC(cJSON *) cJSON_GetArrayItem(const cJSON *array, int index); +/* Get item "string" from object. Case insensitive. */ +CJSON_PUBLIC(cJSON *) cJSON_GetObjectItem(const cJSON * const object, const char * const string); +CJSON_PUBLIC(cJSON *) cJSON_GetObjectItemCaseSensitive(const cJSON * const object, const char * const string); +CJSON_PUBLIC(cJSON_bool) cJSON_HasObjectItem(const cJSON *object, const char *string); +/* For analysing failed parses. This returns a pointer to the parse error. You'll probably need to look a few chars back to make sense of it. Defined when cJSON_Parse() returns 0. 0 when cJSON_Parse() succeeds. 
*/ +CJSON_PUBLIC(const char *) cJSON_GetErrorPtr(void); + +/* Check if the item is a string and return its valuestring */ +CJSON_PUBLIC(char *) cJSON_GetStringValue(cJSON *item); + +/* These functions check the type of an item */ +CJSON_PUBLIC(cJSON_bool) cJSON_IsInvalid(const cJSON * const item); +CJSON_PUBLIC(cJSON_bool) cJSON_IsFalse(const cJSON * const item); +CJSON_PUBLIC(cJSON_bool) cJSON_IsTrue(const cJSON * const item); +CJSON_PUBLIC(cJSON_bool) cJSON_IsBool(const cJSON * const item); +CJSON_PUBLIC(cJSON_bool) cJSON_IsNull(const cJSON * const item); +CJSON_PUBLIC(cJSON_bool) cJSON_IsNumber(const cJSON * const item); +CJSON_PUBLIC(cJSON_bool) cJSON_IsString(const cJSON * const item); +CJSON_PUBLIC(cJSON_bool) cJSON_IsArray(const cJSON * const item); +CJSON_PUBLIC(cJSON_bool) cJSON_IsObject(const cJSON * const item); +CJSON_PUBLIC(cJSON_bool) cJSON_IsRaw(const cJSON * const item); + +/* These calls create a cJSON item of the appropriate type. */ +CJSON_PUBLIC(cJSON *) cJSON_CreateNull(void); +CJSON_PUBLIC(cJSON *) cJSON_CreateTrue(void); +CJSON_PUBLIC(cJSON *) cJSON_CreateFalse(void); +CJSON_PUBLIC(cJSON *) cJSON_CreateBool(cJSON_bool boolean); +CJSON_PUBLIC(cJSON *) cJSON_CreateNumber(double num); +CJSON_PUBLIC(cJSON *) cJSON_CreateString(const char *string); +/* raw json */ +CJSON_PUBLIC(cJSON *) cJSON_CreateRaw(const char *raw); +CJSON_PUBLIC(cJSON *) cJSON_CreateArray(void); +CJSON_PUBLIC(cJSON *) cJSON_CreateObject(void); + +/* Create a string where valuestring references a string so + * it will not be freed by cJSON_Delete */ +CJSON_PUBLIC(cJSON *) cJSON_CreateStringReference(const char *string); +/* Create an object/arrray that only references it's elements so + * they will not be freed by cJSON_Delete */ +CJSON_PUBLIC(cJSON *) cJSON_CreateObjectReference(const cJSON *child); +CJSON_PUBLIC(cJSON *) cJSON_CreateArrayReference(const cJSON *child); + +/* These utilities create an Array of count items. */ +CJSON_PUBLIC(cJSON *) cJSON_CreateIntArray(const int *numbers, int count); +CJSON_PUBLIC(cJSON *) cJSON_CreateFloatArray(const float *numbers, int count); +CJSON_PUBLIC(cJSON *) cJSON_CreateDoubleArray(const double *numbers, int count); +CJSON_PUBLIC(cJSON *) cJSON_CreateStringArray(const char **strings, int count); + +/* Append item to the specified array/object. */ +CJSON_PUBLIC(void) cJSON_AddItemToArray(cJSON *array, cJSON *item); +CJSON_PUBLIC(void) cJSON_AddItemToObject(cJSON *object, const char *string, cJSON *item); +/* Use this when string is definitely const (i.e. a literal, or as good as), and will definitely survive the cJSON object. + * WARNING: When this function was used, make sure to always check that (item->type & cJSON_StringIsConst) is zero before + * writing to `item->string` */ +CJSON_PUBLIC(void) cJSON_AddItemToObjectCS(cJSON *object, const char *string, cJSON *item); +/* Append reference to item to the specified array/object. Use this when you want to add an existing cJSON to a new cJSON, but don't want to corrupt your existing cJSON. */ +CJSON_PUBLIC(void) cJSON_AddItemReferenceToArray(cJSON *array, cJSON *item); +CJSON_PUBLIC(void) cJSON_AddItemReferenceToObject(cJSON *object, const char *string, cJSON *item); + +/* Remove/Detatch items from Arrays/Objects. 
*/ +CJSON_PUBLIC(cJSON *) cJSON_DetachItemViaPointer(cJSON *parent, cJSON * const item); +CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromArray(cJSON *array, int which); +CJSON_PUBLIC(void) cJSON_DeleteItemFromArray(cJSON *array, int which); +CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromObject(cJSON *object, const char *string); +CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromObjectCaseSensitive(cJSON *object, const char *string); +CJSON_PUBLIC(void) cJSON_DeleteItemFromObject(cJSON *object, const char *string); +CJSON_PUBLIC(void) cJSON_DeleteItemFromObjectCaseSensitive(cJSON *object, const char *string); + +/* Update array items. */ +CJSON_PUBLIC(void) cJSON_InsertItemInArray(cJSON *array, int which, cJSON *newitem); /* Shifts pre-existing items to the right. */ +CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemViaPointer(cJSON * const parent, cJSON * const item, cJSON * replacement); +CJSON_PUBLIC(void) cJSON_ReplaceItemInArray(cJSON *array, int which, cJSON *newitem); +CJSON_PUBLIC(void) cJSON_ReplaceItemInObject(cJSON *object,const char *string,cJSON *newitem); +CJSON_PUBLIC(void) cJSON_ReplaceItemInObjectCaseSensitive(cJSON *object,const char *string,cJSON *newitem); + +/* Duplicate a cJSON item */ +CJSON_PUBLIC(cJSON *) cJSON_Duplicate(const cJSON *item, cJSON_bool recurse); +/* Duplicate will create a new, identical cJSON item to the one you pass, in new memory that will +need to be released. With recurse!=0, it will duplicate any children connected to the item. +The item->next and ->prev pointers are always zero on return from Duplicate. */ +/* Recursively compare two cJSON items for equality. If either a or b is NULL or invalid, they will be considered unequal. + * case_sensitive determines if object keys are treated case sensitive (1) or case insensitive (0) */ +CJSON_PUBLIC(cJSON_bool) cJSON_Compare(const cJSON * const a, const cJSON * const b, const cJSON_bool case_sensitive); + + +CJSON_PUBLIC(void) cJSON_Minify(char *json); + +/* Helper functions for creating and adding items to an object at the same time. + * They return the added item or NULL on failure. */ +CJSON_PUBLIC(cJSON*) cJSON_AddNullToObject(cJSON * const object, const char * const name); +CJSON_PUBLIC(cJSON*) cJSON_AddTrueToObject(cJSON * const object, const char * const name); +CJSON_PUBLIC(cJSON*) cJSON_AddFalseToObject(cJSON * const object, const char * const name); +CJSON_PUBLIC(cJSON*) cJSON_AddBoolToObject(cJSON * const object, const char * const name, const cJSON_bool boolean); +CJSON_PUBLIC(cJSON*) cJSON_AddNumberToObject(cJSON * const object, const char * const name, const double number); +CJSON_PUBLIC(cJSON*) cJSON_AddStringToObject(cJSON * const object, const char * const name, const char * const string); +CJSON_PUBLIC(cJSON*) cJSON_AddRawToObject(cJSON * const object, const char * const name, const char * const raw); +CJSON_PUBLIC(cJSON*) cJSON_AddObjectToObject(cJSON * const object, const char * const name); +CJSON_PUBLIC(cJSON*) cJSON_AddArrayToObject(cJSON * const object, const char * const name); + +/* When assigning an integer value, it needs to be propagated to valuedouble too. */ +#define cJSON_SetIntValue(object, number) ((object) ? (object)->valueint = (object)->valuedouble = (number) : (number)) +/* helper for the cJSON_SetNumberValue macro */ +CJSON_PUBLIC(double) cJSON_SetNumberHelper(cJSON *object, double number); +#define cJSON_SetNumberValue(object, number) ((object != NULL) ? 
cJSON_SetNumberHelper(object, (double)number) : (number)) + +/* Macro for iterating over an array or object */ +#define cJSON_ArrayForEach(element, array) for(element = (array != NULL) ? (array)->child : NULL; element != NULL; element = element->next) + +/* malloc/free objects using the malloc/free functions that have been set with cJSON_InitHooks */ +CJSON_PUBLIC(void *) cJSON_malloc(size_t size); +CJSON_PUBLIC(void) cJSON_free(void *object); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/file-utils.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/file-utils.c new file mode 100644 index 0000000000..484b4a639c --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/file-utils.c @@ -0,0 +1,173 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#define FILE_BUFFER_INCREMENT (128*1024) + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "container-executor.h" +#include "file-utils.h" +#include "util.h" + +/** + * Read the contents of the specified file into an allocated buffer and return + * the contents as a NUL-terminated string. NOTE: The file contents must not + * contain a NUL character or the result will appear to be truncated. + * + * Returns a pointer to the allocated, NUL-terminated string or NULL on error. 
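 *
 * Illustrative call (editorial sketch; the path is hypothetical):
 *
 *   char* contents = read_file_to_string("/var/tmp/example.json");
 *   if (contents != NULL) {
 *     ...parse or log the contents...
 *     free(contents);
 *   }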
+ */
+char* read_file_to_string(const char* filename) {
+  char* buff = NULL;
+  int rc = -1;
+  int fd = open(filename, O_RDONLY);
+  if (fd < 0) {
+    fprintf(ERRORFILE, "Error opening %s : %s\n", filename, strerror(errno));
+    goto cleanup;
+  }
+
+  struct stat filestat;
+  if (fstat(fd, &filestat) != 0) {
+    fprintf(ERRORFILE, "Error examining %s : %s\n", filename, strerror(errno));
+    goto cleanup;
+  }
+
+  size_t buff_size = FILE_BUFFER_INCREMENT;
+  if (S_ISREG(filestat.st_mode)) {
+    buff_size = filestat.st_size + 1;  // +1 for terminating NUL
+  }
+  buff = malloc(buff_size);
+  if (buff == NULL) {
+    fprintf(ERRORFILE, "Unable to allocate %ld bytes\n", buff_size);
+    goto cleanup;
+  }
+
+  int bytes_left = buff_size;
+  char* cp = buff;
+  int bytes_read;
+  while ((bytes_read = read(fd, cp, bytes_left)) > 0) {
+    cp += bytes_read;
+    bytes_left -= bytes_read;
+    if (bytes_left == 0) {
+      buff_size += FILE_BUFFER_INCREMENT;
+      bytes_left += FILE_BUFFER_INCREMENT;
+      buff = realloc(buff, buff_size);
+      if (buff == NULL) {
+        fprintf(ERRORFILE, "Unable to allocate %ld bytes\n", buff_size);
+        goto cleanup;
+      }
+    }
+  }
+  if (bytes_read < 0) {
+    // read() returned an error; bytes_left itself can never go negative here.
+    fprintf(ERRORFILE, "Error reading %s : %s\n", filename, strerror(errno));
+    goto cleanup;
+  }
+
+  *cp = '\0';
+  rc = 0;
+
+cleanup:
+  if (fd != -1) {
+    close(fd);
+  }
+  if (rc != 0) {
+    free(buff);
+    buff = NULL;
+  }
+  return buff;
+}
+
+/**
+ * Read a file to a string as the YARN nodemanager user and returns the
+ * result as a string. See read_file_to_string for more details.
+ *
+ * Returns a pointer to the allocated, NUL-terminated string or NULL on error.
+ */
+char* read_file_to_string_as_nm_user(const char* filename) {
+  uid_t user = geteuid();
+  gid_t group = getegid();
+  if (change_effective_user_to_nm() != 0) {
+    fputs("Cannot change to nm user\n", ERRORFILE);
+    return NULL;
+  }
+
+  char* buff = read_file_to_string(filename);
+  if (change_effective_user(user, group) != 0) {
+    fputs("Cannot revert to previous user\n", ERRORFILE);
+    free(buff);
+    return NULL;
+  }
+  return buff;
+}
+
+/**
+ * Write a sequence of bytes to a new file as the YARN nodemanager user.
+ *
+ * Returns true on success or false on error.
+ */ +bool write_file_as_nm(const char* path, const void* data, size_t count) { + bool result = false; + int fd = -1; + uid_t orig_user = geteuid(); + gid_t orig_group = getegid(); + if (change_effective_user_to_nm() != 0) { + fputs("Error changing to NM user and group\n", ERRORFILE); + return false; + } + + fd = open(path, O_CREAT | O_EXCL | O_WRONLY, S_IRUSR | S_IWUSR); + if (fd == -1) { + fprintf(ERRORFILE, "Error creating %s : %s\n", path, strerror(errno)); + goto cleanup; + } + + const uint8_t* bp = (const uint8_t*)data; + while (count > 0) { + ssize_t bytes_written = write(fd, bp, count); + if (bytes_written == -1) { + fprintf(ERRORFILE, "Error writing to %s : %s\n", path, strerror(errno)); + goto cleanup; + } + bp += bytes_written; + count -= bytes_written; + } + + result = true; + +cleanup: + if (fd != -1) { + if (close(fd) == -1) { + fprintf(ERRORFILE, "Error writing to %s : %s\n", path, strerror(errno)); + result = false; + } + } + + if (change_effective_user(orig_user, orig_group) != 0) { + fputs("Cannot restore original user/group\n", ERRORFILE); + result = false; + } + + return result; +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/file-utils.h b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/file-utils.h new file mode 100644 index 0000000000..abfc069db3 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/file-utils.h @@ -0,0 +1,47 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef UTILS_FILE_UTILS_H +#define UTILS_FILE_UTILS_H + +#include + +/** + * Read the contents of the specified file into an allocated buffer and return + * the contents as a NUL-terminated string. NOTE: The file contents must not + * contain a NUL character or the result will appear to be truncated. + * + * Returns a pointer to the allocated, NUL-terminated string or NULL on error. + */ +char* read_file_to_string(const char* filename); + +/** + * Read a file to a string as the YARN nodemanager user and returns the + * result as a string. See read_file_to_string for more details. + * + * Returns a pointer to the allocated, NUL-terminated string or NULL on error. + */ +char* read_file_to_string_as_nm_user(const char* filename); + +/** + * Write a sequence of bytes to a new file as the YARN nodemanager user. + * + * Returns true on success or false on error. 
+ */ +bool write_file_as_nm(const char* path, const void* data, size_t count); + +#endif /* UTILS_FILE_UTILS_H */ diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/string-utils.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/string-utils.c index 68857a9721..62d54a9ea6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/string-utils.c +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/string-utils.c @@ -22,11 +22,14 @@ #include #include #include +#include #include #include #include #include +#include "string-utils.h" + /* * if all chars in the input str are numbers * return true/false @@ -189,3 +192,172 @@ int str_ends_with(const char *s, const char *suffix) { size_t suffix_len = strlen(suffix); return suffix_len <= slen && !strcmp(s + slen - suffix_len, suffix); } + +/* Returns the corresponding hexadecimal character for a nibble. */ +static char nibble_to_hex(unsigned char nib) { + return nib < 10 ? '0' + nib : 'a' + nib - 10; +} + +/** + * Converts a sequence of bytes into a hexadecimal string. + * + * Returns a pointer to the allocated string on success or NULL on error. + */ +char* to_hexstring(unsigned char* bytes, unsigned int len) { + char* hexstr = malloc(len * 2 + 1); + if (hexstr == NULL) { + return NULL; + } + unsigned char* src = bytes; + char* dest = hexstr; + for (unsigned int i = 0; i < len; ++i) { + unsigned char val = *src++; + *dest++ = nibble_to_hex((val >> 4) & 0xF); + *dest++ = nibble_to_hex(val & 0xF); + } + *dest = '\0'; + return hexstr; +} + +/** + * Initialize an uninitialized strbuf with the specified initial capacity. + * + * Returns true on success or false if memory could not be allocated. + */ +bool strbuf_init(strbuf* sb, size_t initial_capacity) { + memset(sb, 0, sizeof(*sb)); + char* new_buffer = malloc(initial_capacity); + if (new_buffer == NULL) { + return false; + } + sb->buffer = new_buffer; + sb->capacity = initial_capacity; + sb->length = 0; + return true; +} + +/** + * Allocate and initialize a strbuf with the specified initial capacity. + * + * Returns a pointer to the allocated and initialized strbuf or NULL on error. + */ +strbuf* strbuf_alloc(size_t initial_capacity) { + strbuf* sb = malloc(sizeof(*sb)); + if (sb != NULL) { + if (!strbuf_init(sb, initial_capacity)) { + free(sb); + sb = NULL; + } + } + return sb; +} + +/** + * Detach the underlying character buffer from a string buffer. + * + * Returns the heap-allocated, NULL-terminated character buffer. + * NOTE: The caller is responsible for freeing the result. + */ +char* strbuf_detach_buffer(strbuf* sb) { + char* result = NULL; + if (sb != NULL) { + result = sb->buffer; + sb->buffer = NULL; + sb->length = 0; + sb->capacity = 0; + } + return result; +} + +/** + * Release memory associated with a strbuf but not the strbuf structure itself. + * Useful for stack-allocated strbuf objects or structures that embed a strbuf. + * Use strbuf_free for heap-allocated string buffers. + */ +void strbuf_destroy(strbuf* sb) { + if (sb != NULL) { + free(sb->buffer); + sb->buffer = NULL; + sb->capacity = 0; + sb->length = 0; + } +} + +/** + * Free a strbuf and all memory associated with it. 
+ */ +void strbuf_free(strbuf* sb) { + if (sb != NULL) { + strbuf_destroy(sb); + free(sb); + } +} + +/** + * Resize a strbuf to the specified new capacity. + * + * Returns true on success or false if there was an error. + */ +bool strbuf_realloc(strbuf* sb, size_t new_capacity) { + if (new_capacity < sb->length + 1) { + // New capacity would result in a truncation of the existing string. + return false; + } + + char* new_buffer = realloc(sb->buffer, new_capacity); + if (!new_buffer) { + return false; + } + + sb->buffer = new_buffer; + sb->capacity = new_capacity; + return true; +} + +/** + * Append a formatted string to the current contents of a strbuf. + * + * Returns true on success or false if there was an error. + */ +bool strbuf_append_fmt(strbuf* sb, size_t realloc_extra, + const char* fmt, ...) { + if (sb->length > sb->capacity) { + return false; + } + + if (sb->length == sb->capacity) { + size_t incr = (realloc_extra == 0) ? 1024 : realloc_extra; + if (!strbuf_realloc(sb, sb->capacity + incr)) { + return false; + } + } + + size_t remain = sb->capacity - sb->length; + va_list vargs; + va_start(vargs, fmt); + int needed = vsnprintf(sb->buffer + sb->length, remain, fmt, vargs); + va_end(vargs); + if (needed == -1) { + return false; + } + + needed += 1; // vsnprintf result does NOT include terminating NUL + if (needed > remain) { + // result was truncated so need to realloc and reprint + size_t new_size = sb->length + needed + realloc_extra; + if (!strbuf_realloc(sb, new_size)) { + return false; + } + remain = sb->capacity - sb->length; + va_start(vargs, fmt); + needed = vsnprintf(sb->buffer + sb->length, remain, fmt, vargs); + va_end(vargs); + if (needed == -1) { + return false; + } + needed += 1; // vsnprintf result does NOT include terminating NUL + } + + sb->length += needed - 1; // length does not include terminating NUL + return true; +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/string-utils.h b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/string-utils.h index 995cdf3d73..25c3a8209c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/string-utils.h +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/string-utils.h @@ -23,6 +23,16 @@ #ifndef _UTILS_STRING_UTILS_H_ #define _UTILS_STRING_UTILS_H_ +#include +#include + +typedef struct strbuf_struct { + char* buffer; // points to beginning of the string + size_t length; // strlen of buffer (sans trailing NUL) + size_t capacity; // capacity of the buffer +} strbuf; + + /* * Get numbers split by comma from a input string * return false/true @@ -44,4 +54,61 @@ char *make_string(const char *fmt, ...); * return 1 if succeeded */ int str_ends_with(const char *s, const char *suffix); + +/** + * Converts a sequence of bytes into a hexadecimal string. + * + * Returns a pointer to the allocated string on success or NULL on error. + */ +char* to_hexstring(unsigned char* bytes, unsigned int len); + +/** + * Allocate and initialize a strbuf with the specified initial capacity. + * + * Returns a pointer to the allocated and initialized strbuf or NULL on error. + */ +strbuf* strbuf_alloc(size_t initial_capacity); + +/** + * Initialize an uninitialized strbuf with the specified initial capacity. 
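 *
 * Typical lifecycle of the strbuf API declared below (editorial sketch):
 *
 *   strbuf sb;
 *   if (strbuf_init(&sb, 64)) {
 *     strbuf_append_fmt(&sb, 0, "user=%s uid=%d", "nobody", 99);
 *     char* owned = strbuf_detach_buffer(&sb);
 *     ...use owned, then free(owned)...
 *     strbuf_destroy(&sb);
 *   }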
+ * + * Returns true on success or false if memory could not be allocated. + */ +bool strbuf_init(strbuf* sb, size_t initial_capacity); + +/** + * Resize a strbuf to the specified new capacity. + * + * Returns true on success or false if there was an error. + */ +bool strbuf_realloc(strbuf* sb, size_t new_capacity); + +/** + * Detach the underlying character buffer from a string buffer. + * + * Returns the heap-allocated, NULL-terminated character buffer. + * NOTE: The caller is responsible for freeing the result. + */ +char* strbuf_detach_buffer(strbuf* sb); + +/** + * Releases the memory underneath a string buffer but does NOT free the + * strbuf structure itself. This is particularly useful for stack-allocated + * strbuf objects or structures that embed a strbuf structure. + * strbuf_free should be used for heap-allocated string buffers. + */ +void strbuf_destroy(strbuf* sb); + +/** + * Free a strbuf and all memory associated with it. + */ +void strbuf_free(strbuf* sb); + +/** + * Append a formatted string to the current contents of a strbuf. + * + * Returns true on success or false if there was an error. + */ +bool strbuf_append_fmt(strbuf* sb, size_t realloc_extra, const char* fmt, ...); + #endif diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test_main.cc b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test_main.cc index 44c9b1bc5c..91fc3bfb45 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test_main.cc +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test_main.cc @@ -17,16 +17,86 @@ */ #include -#include
#include +#include extern "C" { #include "util.h" +#include "container-executor.h" +} + +#define TMPDIR "/tmp" +#define TEST_ROOT TMPDIR "/test-container-executor" + +int write_config_file(const char *file_name, int banned) { + FILE *file; + file = fopen(file_name, "w"); + if (file == NULL) { + printf("Failed to open %s.\n", file_name); + return EXIT_FAILURE; + } + if (banned != 0) { + fprintf(file, "banned.users=bannedUser\n"); + fprintf(file, "min.user.id=500\n"); + } else { + fprintf(file, "min.user.id=0\n"); + } + fprintf(file, "allowed.system.users=allowedUser,daemon\n"); + fprintf(file, "feature.yarn.sysfs.enabled=1\n"); + fclose(file); + return 0; } int main(int argc, char **argv) { ERRORFILE = stderr; LOGFILE = stdout; + + printf("\nMaking test dir\n"); + if (mkdirs(TEST_ROOT, 0755) != 0) { + exit(1); + } + if (chmod(TEST_ROOT, 0755) != 0) { // in case of umask + exit(1); + } + + // We need a valid config before the test really starts for the check_user + // and set_user calls + printf("\nCreating test.cfg\n"); + if (write_config_file(TEST_ROOT "/test.cfg", 1) != 0) { + exit(1); + } + printf("\nLoading test.cfg\n"); + read_executor_config(TEST_ROOT "/test.cfg"); + + printf("\nDetermining user details\n"); + char* username = strdup(getpwuid(getuid())->pw_name); + struct passwd *username_info = check_user(username); + printf("\nSetting NM UID\n"); + set_nm_uid(username_info->pw_uid, username_info->pw_gid); + + // Make sure that username owns all the files now + printf("\nEnsuring ownership of test dir\n"); + if (chown(TEST_ROOT, username_info->pw_uid, username_info->pw_gid) != 0) { + exit(1); + } + if (chown(TEST_ROOT "/test.cfg", + username_info->pw_uid, username_info->pw_gid) != 0) { + exit(1); + } + + printf("\nSetting effective user\n"); + if (set_user(username)) { + exit(1); + } + testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); + int rc = RUN_ALL_TESTS(); + + printf("Attempting to clean up from any previous runs\n"); + // clean up any junk from previous run + if (system("chmod -R u=rwx " TEST_ROOT "; rm -fr " TEST_ROOT)) { + exit(1); + + return rc; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test-string-utils.cc b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test-string-utils.cc index 138e32ac92..a70a70479e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test-string-utils.cc +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test-string-utils.cc @@ -29,6 +29,7 @@ #include #include + #include extern "C" { #include "utils/string-utils.h" @@ -36,97 +37,230 @@ namespace ContainerExecutor { - class TestStringUtils : public ::testing::Test { - protected: - virtual void SetUp() { + class TestStringUtils : public ::testing::Test { + protected: + virtual void SetUp() { - } + } - virtual void TearDown() { + virtual void TearDown() { - } - }; + } + }; - TEST_F(TestStringUtils, test_get_numbers_split_by_comma) { - const char* input = ",1,2,3,-1,,1,,0,"; - int* numbers; - size_t n_numbers; - int rc = get_numbers_split_by_comma(input, &numbers, &n_numbers); + TEST_F(TestStringUtils, test_get_numbers_split_by_comma) { + const char* input = ",1,2,3,-1,,1,,0,"; + int* numbers; + size_t n_numbers; + int rc = 
get_numbers_split_by_comma(input, &numbers, &n_numbers); - std::cout << "Testing input=" << input << "\n"; - ASSERT_EQ(0, rc) << "Should succeeded\n"; - ASSERT_EQ(6, n_numbers); - ASSERT_EQ(1, numbers[0]); - ASSERT_EQ(-1, numbers[3]); - ASSERT_EQ(0, numbers[5]); - free(numbers); + std::cout << "Testing input=" << input << "\n"; + ASSERT_EQ(0, rc) << "Should succeeded\n"; + ASSERT_EQ(6, n_numbers); + ASSERT_EQ(1, numbers[0]); + ASSERT_EQ(-1, numbers[3]); + ASSERT_EQ(0, numbers[5]); + free(numbers); - input = "3"; - rc = get_numbers_split_by_comma(input, &numbers, &n_numbers); - std::cout << "Testing input=" << input << "\n"; - ASSERT_EQ(0, rc) << "Should succeeded\n"; - ASSERT_EQ(1, n_numbers); - ASSERT_EQ(3, numbers[0]); - free(numbers); + input = "3"; + rc = get_numbers_split_by_comma(input, &numbers, &n_numbers); + std::cout << "Testing input=" << input << "\n"; + ASSERT_EQ(0, rc) << "Should succeeded\n"; + ASSERT_EQ(1, n_numbers); + ASSERT_EQ(3, numbers[0]); + free(numbers); - input = ""; - rc = get_numbers_split_by_comma(input, &numbers, &n_numbers); - std::cout << "Testing input=" << input << "\n"; - ASSERT_EQ(0, rc) << "Should succeeded\n"; - ASSERT_EQ(0, n_numbers); - free(numbers); + input = ""; + rc = get_numbers_split_by_comma(input, &numbers, &n_numbers); + std::cout << "Testing input=" << input << "\n"; + ASSERT_EQ(0, rc) << "Should succeeded\n"; + ASSERT_EQ(0, n_numbers); + free(numbers); - input = ",,"; - rc = get_numbers_split_by_comma(input, &numbers, &n_numbers); - std::cout << "Testing input=" << input << "\n"; - ASSERT_EQ(0, rc) << "Should succeeded\n"; - ASSERT_EQ(0, n_numbers); - free(numbers); + input = ",,"; + rc = get_numbers_split_by_comma(input, &numbers, &n_numbers); + std::cout << "Testing input=" << input << "\n"; + ASSERT_EQ(0, rc) << "Should succeeded\n"; + ASSERT_EQ(0, n_numbers); + free(numbers); - input = "1,2,aa,bb"; - rc = get_numbers_split_by_comma(input, &numbers, &n_numbers); - std::cout << "Testing input=" << input << "\n"; - ASSERT_TRUE(0 != rc) << "Should failed\n"; - free(numbers); + input = "1,2,aa,bb"; + rc = get_numbers_split_by_comma(input, &numbers, &n_numbers); + std::cout << "Testing input=" << input << "\n"; + ASSERT_TRUE(0 != rc) << "Should failed\n"; + free(numbers); - input = "1,2,3,-12312312312312312312321311231231231"; - rc = get_numbers_split_by_comma(input, &numbers, &n_numbers); - std::cout << "Testing input=" << input << "\n"; - ASSERT_TRUE(0 != rc) << "Should failed\n"; - free(numbers); - } + input = "1,2,3,-12312312312312312312321311231231231"; + rc = get_numbers_split_by_comma(input, &numbers, &n_numbers); + std::cout << "Testing input=" << input << "\n"; + ASSERT_TRUE(0 != rc) << "Should failed\n"; + free(numbers); + } - TEST_F(TestStringUtils, test_validate_container_id) { + TEST_F(TestStringUtils, test_validate_container_id) { - const char *good_input[] = { - "container_e134_1499953498516_50875_01_000007", - "container_1499953498516_50875_01_000007", - "container_e1_12312_11111_02_000001" - }; + const char *good_input[] = { + "container_e134_1499953498516_50875_01_000007", + "container_1499953498516_50875_01_000007", + "container_e1_12312_11111_02_000001" + }; - const char *bad_input[] = { - "CONTAINER", - "container_e1_12312_11111_02_000001 | /tmp/file" - "container_e1_12312_11111_02_000001 || # /tmp/file", - "container_e1_12312_11111_02_000001 # /tmp/file", - "container_e1_12312_11111_02_000001' || touch /tmp/file #", - "ubuntu || touch /tmp/file #", - "''''''''" - }; + const char *bad_input[] = { + "CONTAINER", + 
"container_e1_12312_11111_02_000001 | /tmp/file" + "container_e1_12312_11111_02_000001 || # /tmp/file", + "container_e1_12312_11111_02_000001 # /tmp/file", + "container_e1_12312_11111_02_000001' || touch /tmp/file #", + "ubuntu || touch /tmp/file #", + "''''''''" + }; - int good_input_size = sizeof(good_input) / sizeof(char *); - int i = 0; - for (i = 0; i < good_input_size; i++) { - int op = validate_container_id(good_input[i]); - ASSERT_EQ(1, op); - } + int good_input_size = sizeof(good_input) / sizeof(char *); + int i = 0; + for (i = 0; i < good_input_size; i++) { + int op = validate_container_id(good_input[i]); + ASSERT_EQ(1, op); + } - int bad_input_size = sizeof(bad_input) / sizeof(char *); - int j = 0; - for (j = 0; j < bad_input_size; j++) { - int op = validate_container_id(bad_input[j]); - ASSERT_EQ(0, op); - } - } + int bad_input_size = sizeof(bad_input) / sizeof(char *); + int j = 0; + for (j = 0; j < bad_input_size; j++) { + int op = validate_container_id(bad_input[j]); + ASSERT_EQ(0, op); + } + } + + TEST_F(TestStringUtils, test_to_hexstring) { + const char* input = "hello"; + char* digest = NULL; + unsigned char raw_digest[EVP_MAX_MD_SIZE]; + unsigned int raw_digest_len = 0; + int rc = 0; + + EVP_MD_CTX* mdctx = EVP_MD_CTX_create(); + ASSERT_NE(nullptr, mdctx) << "Unable to create EVP MD context\n"; + + rc = EVP_DigestInit_ex(mdctx, EVP_sha256(), NULL); + ASSERT_EQ(1, rc) << "Unable to initialize SHA256 digester\n"; + + rc = EVP_DigestFinal_ex(mdctx, raw_digest, &raw_digest_len); + ASSERT_EQ(1, rc) << "Unable to compute digest\n"; + + rc = EVP_DigestUpdate(mdctx, input, strlen(input)); + ASSERT_EQ(1, rc) << "Unable to compute digest\n"; + + digest = to_hexstring(raw_digest, raw_digest_len); + + ASSERT_STREQ("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + digest) << "Digest is not equal to expected hash\n"; + + EVP_MD_CTX_destroy(mdctx); + free(digest); + } + + TEST_F(TestStringUtils, test_strbuf_on_stack) { + const int sb_incr = 16; + strbuf sb; + bool rc; + + rc = strbuf_init(&sb, sb_incr); + ASSERT_EQ(true, rc) << "Unable to init strbuf\n"; + + rc = strbuf_append_fmt(&sb, sb_incr, "%s%s%s", "hello", "foo", "bar"); + ASSERT_EQ(true, rc) << "Unable to append format to strbuf\n"; + + ASSERT_STREQ("hellofoobar", sb.buffer); + + rc = strbuf_append_fmt(&sb, sb_incr, "%s%s%s", "some longer strings", + " that will cause the strbuf", " to have to realloc"); + ASSERT_EQ(true, rc) << "Unable to append format to strbuf\n"; + + ASSERT_STREQ("hellofoobarsome longer strings that will cause the strbuf to have to realloc", sb.buffer); + + strbuf_destroy(&sb); + } + + TEST_F(TestStringUtils, test_strbuf_in_heap) { + const int sb_incr = 16; + strbuf *sb = NULL; + bool rc; + + sb = strbuf_alloc(sb_incr); + ASSERT_NE(nullptr, sb) << "Unable to init strbuf\n"; + + rc = strbuf_append_fmt(sb, sb_incr, "%s%s%s", "hello", "foo", "bar"); + ASSERT_EQ(true, rc) << "Unable to append format to strbuf"; + + ASSERT_STREQ("hellofoobar", sb->buffer); + + rc = strbuf_append_fmt(sb, sb_incr, "%s%s%s", "some longer strings", + " that will cause the strbuf", " to have to realloc"); + ASSERT_EQ(true, rc) << "Unable to append format to strbuf\n"; + + ASSERT_STREQ("hellofoobarsome longer strings that will cause the strbuf to have to realloc", sb->buffer); + + strbuf_free(sb); + } + + TEST_F(TestStringUtils, test_strbuf_detach) { + const int sb_incr = 16; + strbuf sb; + char *buf; + bool rc; + + rc = strbuf_init(&sb, sb_incr); + ASSERT_EQ(true, rc) << "Unable to init strbuf\n"; + + rc = 
strbuf_append_fmt(&sb, sb_incr, "%s%s%s", "hello", "foo", "bar"); + ASSERT_EQ(true, rc) << "Unable to append format to strbuf\n"; + + ASSERT_STREQ("hellofoobar", sb.buffer); + + rc = strbuf_append_fmt(&sb, sb_incr, "%s%s%s", "some longer strings", + " that will cause the strbuf", " to have to realloc"); + ASSERT_EQ(true, rc) << "Unable to append format to strbuf\n"; + + ASSERT_STREQ("hellofoobarsome longer strings that will cause the strbuf to have to realloc", sb.buffer); + + buf = strbuf_detach_buffer(&sb); + ASSERT_NE(nullptr, buf) << "Unable to detach char buf from strbuf\n"; + + + rc = strbuf_append_fmt(&sb, sb_incr, "%s%s%s", "Buffer detached", + " so this should allocate", " a new buffer in strbuf"); + ASSERT_EQ(true, rc) << "Unable to append format to strbuf\n"; + + ASSERT_STREQ("Buffer detached so this should allocate a new buffer in strbuf", sb.buffer); + ASSERT_STREQ("hellofoobarsome longer strings that will cause the strbuf to have to realloc", buf); + + free(buf); + strbuf_destroy(&sb); + } + + TEST_F(TestStringUtils, test_strbuf_realloc) { + const int sb_incr = 5; + strbuf sb; + char buf[] = "1234567890"; + bool rc; + + int len = strlen(buf); + + rc = strbuf_init(&sb, sb_incr); + ASSERT_EQ(true, rc) << "Unable to init strbuf\n"; + ASSERT_NE(nullptr, sb.buffer) << "Unable to init strbuf buffer\n"; + ASSERT_EQ(5, sb.capacity) << "Unable to init strbuf capacity\n"; + ASSERT_EQ(0, sb.length) << "Unable to init strbuf length\n"; + + rc = strbuf_append_fmt(&sb, sb_incr, "%s", buf); + ASSERT_EQ(true, rc) << "Unable to append format to strbuf\n"; + ASSERT_NE(nullptr, sb.buffer) << "Unable to append strbuf buffer\n"; + ASSERT_EQ(len + sb_incr + 1, sb.capacity) << "Unable to update strbuf capacity\n"; + ASSERT_EQ(len, sb.length) << "Unable to update strbuf length\n"; + + rc = strbuf_realloc(&sb, 10); + ASSERT_EQ(false, rc) << "realloc to smaller capacity succeeded and has truncated existing string\n"; + + strbuf_destroy(&sb); + } } // namespace ContainerExecutor diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_runc_util.cc b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_runc_util.cc new file mode 100644 index 0000000000..f2fdd9e74d --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_runc_util.cc @@ -0,0 +1,752 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include <algorithm> +#include <fstream> +#include <iostream> +#include <sstream> +#include <vector> +#include "errno.h" +#include "configuration.h" +#include <gtest/gtest.h> +#include <pwd.h> +#include <sys/stat.h> +#include <unistd.h> + +extern "C" { + #include "container-executor.h" + #include "runc/runc_launch_cmd.h" + #include "runc/runc_write_config.h" + #include "utils/mount-utils.h" +} + +#define STARTING_JSON_BUFFER_SIZE (128*1024) + +namespace ContainerExecutor { + + class TestRunc : public ::testing::Test { + protected: + virtual void SetUp() { + container_executor_cfg_file = "container-executor.cfg"; + std::string container_executor_cfg_contents = "[runc]\n " + "runc.allowed.rw-mounts=/opt,/var,/usr/bin/cut,/usr/bin/awk\n " + "runc.allowed.ro-mounts=/etc/passwd"; + + int ret = setup_container_executor_cfg(container_executor_cfg_contents); + ASSERT_EQ(ret, 0) << "Container executor cfg setup failed\n"; + } + + virtual void TearDown() { + remove(runc_config_file); + remove(container_executor_cfg_file.c_str()); + delete_ce_file(); + free_executor_configurations(); + } + + const char *runc_config_file = "runc-config.json"; + std::string container_executor_cfg_file; + + + void write_file(const std::string fname, const std::string contents) { + std::ofstream config_file; + config_file.open(fname.c_str()); + config_file << contents; + config_file.close(); + } + + int create_ce_file() { + int ret = 0; + const char *fname = HADOOP_CONF_DIR "/" CONF_FILENAME; + struct stat buffer; + + if (stat(fname, &buffer) == 0) { + return ret; + } + + if (strcmp("../etc/hadoop/container-executor.cfg", fname) == 0) { + ret = mkdir("../etc", 0755); + if (ret == 0 || errno == EEXIST) { + ret = mkdir("../etc/hadoop", 0755); + if (ret == 0 || errno == EEXIST) { + write_file("../etc/hadoop/container-executor.cfg", ""); + } else { + std::cerr << "Could not create ../etc/hadoop, " << strerror(errno) << std::endl; + } + } else { + std::cerr << "Could not create ../etc, " << strerror(errno) << std::endl; + } + } else { + // Don't want to create directories all over. Make a simple attempt to + // write the file.
+ write_file(fname, ""); + } + + if (stat(fname, &buffer) != 0) { + std::cerr << "Could not create " << fname << strerror(errno) << std::endl; + ret = 1; + } + + return ret; + } + + void delete_ce_file() { + const char *fname = HADOOP_CONF_DIR "/" CONF_FILENAME; + if (strcmp("../etc/hadoop/container-executor.cfg", fname) == 0) { + struct stat buffer; + if (stat(fname, &buffer) == 0) { + remove("../etc/hadoop/container-executor.cfg"); + rmdir("../etc/hadoop"); + rmdir("../etc"); + } + } + } + + void write_container_executor_cfg(const std::string contents) { + write_file(container_executor_cfg_file, contents); + } + + void write_config_file(const std::string contents) { + write_file(runc_config_file, contents); + } + + int setup_container_executor_cfg(const std::string container_executor_cfg_contents) { + write_container_executor_cfg(container_executor_cfg_contents); + read_executor_config(container_executor_cfg_file.c_str()); + + return create_ce_file(); + } + + static cJSON* build_layer_array(const rlc_layer_spec *layers_array, int num_layers) { + cJSON* layer_json_array = cJSON_CreateArray(); + if (layer_json_array == NULL) { return NULL; } + + int i; + + for (i = 0; i < num_layers; i++) { + const rlc_layer_spec *layer = &layers_array[i]; + cJSON* layer_json = cJSON_CreateObject(); + if (layer_json == NULL) { goto fail; } + cJSON_AddStringToObject(layer_json, "mediaType", layer->media_type); + cJSON_AddStringToObject(layer_json, "path", layer->path); + cJSON_AddItemToArray(layer_json_array, layer_json); + } + return layer_json_array; + +fail: + cJSON_Delete(layer_json_array); + return NULL; + } + + static cJSON* build_string_array(char const* const* dirs_array) { + int i; + cJSON* dirs_json_array = cJSON_CreateArray(); + if (dirs_json_array == NULL) { return NULL; } + + for (i = 0; dirs_array[i] != NULL; i++) { + const char *dir = dirs_array[i]; + cJSON* layer_json = cJSON_CreateString(dir); + if (layer_json == NULL) { goto fail; } + cJSON_AddItemToArray(dirs_json_array, layer_json); + } + + return dirs_json_array; + +fail: + cJSON_Delete(dirs_json_array); + return NULL; + } + + static bool build_process_struct(runc_config_process *const process, char const* const* args, char const* const cwd, + char const* const* env) { + cJSON *args_array = build_string_array(args); + if (args_array == NULL) { return false; } + cJSON *cwd_string = cJSON_CreateString(cwd); + if (cwd_string == NULL) { return false; } + cJSON *env_array = build_string_array(env); + if (env_array == NULL) { return false; } + + process->args = args_array; + process->cwd = cwd_string; + process->env = env_array; + + return true; + } + + static cJSON* build_process_json(runc_config_process const* const process) { + cJSON* process_json = cJSON_CreateObject(); + if (process_json == NULL) { return NULL; } + + cJSON_AddItemReferenceToObject(process_json, "args", process->args); + cJSON_AddItemReferenceToObject(process_json, "cwd", process->cwd); + cJSON_AddItemReferenceToObject(process_json, "env", process->env); + + return process_json; + } + + static cJSON* build_config_json(runc_config const* const config) { + cJSON *config_json = cJSON_CreateObject(); + if (config_json == NULL) { return NULL; } + + runc_config_process const *const process = &config->process; + cJSON *process_json = build_process_json(process); + if (process_json == NULL) { goto fail; } + + + cJSON_AddItemToObject(config_json, "process", process_json); + cJSON_AddItemReferenceToObject(config_json, "linux", config->linux_config); + 
cJSON_AddItemReferenceToObject(config_json, "mounts", config->mounts); + + return config_json; + +fail: + cJSON_Delete(config_json); + return NULL; + } + + static cJSON* build_runc_config_json(runc_launch_cmd const* const rlc) { + cJSON *local_dir_json; + cJSON *log_dir_json; + cJSON *layer_dir_json; + cJSON *runc_config_json = cJSON_CreateObject(); + if (runc_config_json == NULL) { return NULL; } + + runc_config const* const config = &rlc->config; + + cJSON* config_json = build_config_json(config); + if (config_json == NULL) { goto fail; } + + cJSON_AddItemToObject(runc_config_json, "ociRuntimeConfig", config_json); + cJSON_AddStringToObject(runc_config_json, "runAsUser", rlc->run_as_user); + cJSON_AddStringToObject(runc_config_json, "username", rlc->username); + cJSON_AddStringToObject(runc_config_json, "applicationId", rlc->app_id); + cJSON_AddStringToObject(runc_config_json, "containerId", rlc->container_id); + cJSON_AddStringToObject(runc_config_json, "pidFile", rlc->pid_file); + cJSON_AddStringToObject(runc_config_json, "containerScriptPath", rlc->script_path); + cJSON_AddStringToObject(runc_config_json, "containerCredentialsPath", rlc->cred_path); + cJSON_AddNumberToObject(runc_config_json, "https", rlc->https); + + local_dir_json = build_string_array(rlc->local_dirs); + log_dir_json = build_string_array(rlc->log_dirs); + layer_dir_json = build_layer_array(rlc->layers, rlc->num_layers); + + cJSON_AddItemToObject(runc_config_json, "layers", layer_dir_json); + cJSON_AddItemToObject(runc_config_json, "localDirs", local_dir_json); + cJSON_AddItemToObject(runc_config_json, "logDirs", log_dir_json); + + cJSON_AddNumberToObject(runc_config_json, "reapLayerKeepCount", rlc->num_reap_layers_keep); + return runc_config_json; + +fail: + cJSON_Delete(runc_config_json); + return NULL; + } + + static cJSON* build_mount_json(const char *src, const char *dest, const char *input_options[]) { + cJSON* mount_json = cJSON_CreateObject(); + if (mount_json == NULL) { return NULL; } + + cJSON_AddStringToObject(mount_json, "source", src); + cJSON_AddStringToObject(mount_json, "destination", dest); + cJSON_AddStringToObject(mount_json, "type", "bind"); + + cJSON* options_array = build_string_array(input_options); + cJSON_AddItemToObject(mount_json, "options", options_array); + + return mount_json; + } + + + static cJSON* build_mounts_json() { + cJSON* mount_json = NULL; + + cJSON* mounts_json_array = cJSON_CreateArray(); + if (mounts_json_array == NULL) { + return NULL; + } + + const char *options_rw[10] = {"rw", "rprivate", "rbind"}; + const char *options_ro[10] = {"ro", "rprivate", "rbind"}; + + mount_json = build_mount_json("/opt", "/opt", options_rw); + if (mount_json == NULL) { goto fail; } + cJSON_AddItemToArray(mounts_json_array, mount_json); + + mount_json = build_mount_json("/var/", "/var/", options_rw); + if (mount_json == NULL) { goto fail; } + cJSON_AddItemToArray(mounts_json_array, mount_json); + + mount_json = build_mount_json("/etc/passwd", "/etc/passwd", options_ro); + if (mount_json == NULL) { goto fail; } + cJSON_AddItemToArray(mounts_json_array, mount_json); + + return mounts_json_array; + +fail: + cJSON_Delete(mounts_json_array); + return NULL; + + } + + static runc_launch_cmd* build_default_runc_launch_cmd() { + char* run_as_user = strdup(getpwuid(getuid())->pw_name); + char* username = strdup(getpwuid(getuid())->pw_name); + char const* const application_id = "application_1571614753172_3182915"; + char const* const container_id = "container_e14_1571614753172_3182915_01_000001"; + char 
const* const pid_file = "/tmp/container_e14_1571614753172_3182915_01_000001.pid"; + char const* const container_script_path = "/tmp/launch_container.sh"; + char const* const container_credentials_path = "/tmp/container_e14_1571614753172_3182915_01_000001.tokens"; + char const* const log_dirs[] = {"/log1", "/log2", NULL}; + char const* const local_dirs[] = {"/local1", "/local2", NULL}; + char const* const layer_media_type = "application/vnd.squashfs"; + char const* const args[] = {"bash", "/tmp/launch_container.sh", NULL}; + char const* const cwd = "/tmp"; + char const* const env[] = {"HADOOP_PREFIX=/tmp", "PATH=/tmp", NULL}; + char const* const hostname = "hostname"; + int num_reap_layers_keep = 10; + unsigned int num_layers = 2; + int https = 1; + int i; + + runc_launch_cmd *rlc_input = NULL; + rlc_layer_spec *layers_input = NULL; + cJSON *hostname_json = NULL; + cJSON *linux_config_json = NULL; + cJSON *mounts_json = NULL; + + rlc_input = (runc_launch_cmd*) calloc(1, sizeof(*rlc_input)); + if (rlc_input == NULL) { return NULL; } + + runc_config *config = &rlc_input->config; + + rlc_input->run_as_user = run_as_user; + rlc_input->username = username; + rlc_input->app_id = strdup(application_id); + rlc_input->container_id = strdup(container_id); + rlc_input->pid_file = strdup(pid_file); + rlc_input->script_path = strdup(container_script_path); + rlc_input->cred_path = strdup(container_credentials_path); + rlc_input->https = https; + + rlc_input->local_dirs = (char **) calloc(sizeof(local_dirs)/sizeof(local_dirs[0]) + 1, sizeof(*local_dirs)); + for (i = 0; local_dirs[i] != NULL; i++) { + rlc_input->local_dirs[i] = strdup(local_dirs[i]); + } + rlc_input->local_dirs[i] = NULL; + + rlc_input->log_dirs = (char **) calloc(sizeof(log_dirs)/sizeof(log_dirs[0]) + 1, sizeof(*log_dirs)); + for (i = 0; log_dirs[i] != NULL; i++) { + rlc_input->log_dirs[i] = strdup(log_dirs[i]); + } + rlc_input->log_dirs[i] = NULL; + + rlc_input->num_reap_layers_keep = num_reap_layers_keep; + rlc_input->num_layers = num_layers; + + layers_input = (rlc_layer_spec*) calloc(num_layers, sizeof(*layers_input)); + (&layers_input[0])->media_type = strdup(layer_media_type); + (&layers_input[0])->path = strdup("/foo"); + (&layers_input[1])->media_type = strdup(layer_media_type); + (&layers_input[1])->path = strdup("/bar"); + + rlc_input->layers = layers_input; + + build_process_struct(&config->process, args, cwd, env); + + hostname_json = cJSON_CreateString(hostname); + if (hostname_json == NULL) { goto fail; } + config->hostname = hostname_json; + + linux_config_json = cJSON_CreateObject(); + if (linux_config_json == NULL) { goto fail; } + config->linux_config = linux_config_json; + + mounts_json = build_mounts_json(); + if (mounts_json == NULL) { goto fail; } + config->mounts = mounts_json; + + return rlc_input; +fail: + free_runc_launch_cmd(rlc_input); + return NULL; + } + + + static void test_runc_launch_cmd(runc_launch_cmd const* const rlc_input, runc_launch_cmd const* const rlc_parsed) { + unsigned int i; + + ASSERT_STREQ(rlc_parsed->run_as_user, rlc_input->run_as_user); + ASSERT_STREQ(rlc_parsed->username, rlc_input->username); + ASSERT_STREQ(rlc_parsed->app_id, rlc_input->app_id); + ASSERT_STREQ(rlc_parsed->container_id, rlc_input->container_id); + ASSERT_STREQ(rlc_parsed->pid_file, rlc_input->pid_file); + ASSERT_STREQ(rlc_parsed->script_path, rlc_input->script_path); + ASSERT_STREQ(rlc_parsed->cred_path, rlc_input->cred_path); + ASSERT_EQ(rlc_parsed->https, rlc_input->https); + + for (i = 0; rlc_input->local_dirs[i] != 
NULL; i++) { + ASSERT_NE(rlc_input->local_dirs[i], nullptr); + ASSERT_NE(rlc_parsed->local_dirs[i], nullptr); + ASSERT_STREQ(rlc_parsed->local_dirs[i], rlc_input->local_dirs[i]); + } + + for (i = 0; rlc_input->log_dirs[i] != NULL; i++) { + ASSERT_NE(rlc_input->log_dirs[i], nullptr); + ASSERT_NE(rlc_parsed->log_dirs[i], nullptr); + ASSERT_STREQ(rlc_parsed->log_dirs[i], rlc_input->log_dirs[i]); + } + + for (i = 0; i < rlc_input->num_layers; i++) { + rlc_layer_spec *layer_input = &rlc_input->layers[i]; + rlc_layer_spec *layer_parsed = &rlc_parsed->layers[i]; + ASSERT_NE(layer_input, nullptr); + ASSERT_NE(layer_parsed, nullptr); + ASSERT_STREQ(layer_parsed->media_type, layer_input->media_type); + ASSERT_STREQ(layer_parsed->path, layer_input->path); + } + + ASSERT_EQ(rlc_parsed->num_layers, rlc_input->num_layers); + ASSERT_EQ(rlc_parsed->num_reap_layers_keep, rlc_input->num_reap_layers_keep); + } + }; + + static mount* build_mounts(std::string mounts_string, + unsigned int num_mounts) { + + mount* mounts = (mount*) calloc(num_mounts, sizeof(*mounts)); + std::istringstream comma_iss(mounts_string); + std::string comma_token; + int i = 0; + while (std::getline(comma_iss, comma_token, ',')) + { + mount_options *options = (mount_options*) calloc(1, sizeof(*options)); + mounts[i].options = options; + std::istringstream colon_iss(comma_token); + std::string colon_token; + + std::getline(colon_iss, colon_token, ':'); + mounts[i].src = strdup(colon_token.c_str()); + + std::getline(colon_iss, colon_token, ':'); + mounts[i].dest = strdup(colon_token.c_str()); + + std::getline(colon_iss, colon_token, ':'); + std::istringstream plus_iss(colon_token); + std::string plus_token; + + unsigned int num_opts = std::count(colon_token.begin(), colon_token.end(), '+') + 1; + int j = 0; + char **opts = (char**) calloc(num_opts + 1, sizeof(*opts)); + mounts[i].options->opts = opts; + while(std::getline(plus_iss, plus_token, '+')) { + char *mount_option = strdup(plus_token.c_str()); + if (strcmp("rw", mount_option) == 0) { + options->rw = 1; + } else if (strcmp("ro", mount_option) == 0) { + options->rw = 0; + } + options->opts[j] = mount_option; + j++; + } + options->opts[j] = NULL; + options->num_opts = num_opts; + i++; + } + return mounts; + } + + TEST_F(TestRunc, test_parse_runc_launch_cmd_valid) { + runc_launch_cmd *rlc_input = NULL; + runc_launch_cmd *rlc_parsed = NULL; + cJSON *runc_config_json = NULL; + char* json_data = NULL; + int ret = 0; + + rlc_input = build_default_runc_launch_cmd(); + ASSERT_NE(rlc_input, nullptr); + + runc_config_json = build_runc_config_json(rlc_input); + ASSERT_NE(runc_config_json, nullptr); + + json_data = cJSON_PrintBuffered(runc_config_json, STARTING_JSON_BUFFER_SIZE, false); + write_config_file(json_data); + + rlc_parsed = parse_runc_launch_cmd(runc_config_file); + ASSERT_NE(rlc_parsed, nullptr); + + ret = is_valid_runc_launch_cmd(rlc_parsed); + ASSERT_NE(ret, false); + + test_runc_launch_cmd(rlc_input, rlc_parsed); + + cJSON_Delete(runc_config_json); + free_runc_launch_cmd(rlc_input); + free(json_data); + free_runc_launch_cmd(rlc_parsed); + } + + TEST_F(TestRunc, test_parse_runc_launch_cmd_bad_container_id) { + runc_launch_cmd *rlc_input = NULL; + runc_launch_cmd *rlc_parsed = NULL; + cJSON *runc_config_json = NULL; + char* json_data = NULL; + int ret = 0; + + rlc_input = build_default_runc_launch_cmd(); + ASSERT_NE(rlc_input, nullptr); + + free(rlc_input->container_id); + rlc_input->container_id = strdup("foobar"); + + runc_config_json = build_runc_config_json(rlc_input); + 
ASSERT_NE(runc_config_json, nullptr); + + json_data = cJSON_PrintBuffered(runc_config_json, STARTING_JSON_BUFFER_SIZE, false); + write_config_file(json_data); + + rlc_parsed = parse_runc_launch_cmd(runc_config_file); + ASSERT_NE(rlc_parsed, nullptr); + + ret = is_valid_runc_launch_cmd(rlc_parsed); + // An invalid container_id should cause the launch command to be rejected as invalid + ASSERT_EQ(ret, false); + + cJSON_Delete(runc_config_json); + free_runc_launch_cmd(rlc_input); + free(json_data); + free_runc_launch_cmd(rlc_parsed); + } + + TEST_F(TestRunc, test_parse_runc_launch_cmd_existing_pidfile) { + runc_launch_cmd *rlc_input = NULL; + runc_launch_cmd *rlc_parsed = NULL; + cJSON *runc_config_json = NULL; + char* json_data = NULL; + int ret = 0; + + rlc_input = build_default_runc_launch_cmd(); + ASSERT_NE(rlc_input, nullptr); + + free(rlc_input->pid_file); + const char* pid_file = "/tmp/foo"; + rlc_input->pid_file = strdup(pid_file); + write_file(pid_file, ""); + + runc_config_json = build_runc_config_json(rlc_input); + ASSERT_NE(runc_config_json, nullptr); + + json_data = cJSON_PrintBuffered(runc_config_json, STARTING_JSON_BUFFER_SIZE, false); + write_config_file(json_data); + + rlc_parsed = parse_runc_launch_cmd(runc_config_file); + ASSERT_NE(rlc_parsed, nullptr); + + ret = is_valid_runc_launch_cmd(rlc_parsed); + // A pid file that already exists should cause the launch command to be rejected as invalid + ASSERT_EQ(ret, false); + + remove(pid_file); + cJSON_Delete(runc_config_json); + free_runc_launch_cmd(rlc_input); + free(json_data); + free_runc_launch_cmd(rlc_parsed); + } + + TEST_F(TestRunc, test_parse_runc_launch_cmd_invalid_media_type) { + runc_launch_cmd *rlc_input = NULL; + runc_launch_cmd *rlc_parsed = NULL; + cJSON *runc_config_json = NULL; + char* json_data = NULL; + int ret = 0; + + rlc_input = build_default_runc_launch_cmd(); + ASSERT_NE(rlc_input, nullptr); + + free(rlc_input->layers[0].media_type); + rlc_input->layers[0].media_type = strdup("bad media type"); + + runc_config_json = build_runc_config_json(rlc_input); + ASSERT_NE(runc_config_json, nullptr); + + json_data = cJSON_PrintBuffered(runc_config_json, STARTING_JSON_BUFFER_SIZE, false); + write_config_file(json_data); + + rlc_parsed = parse_runc_launch_cmd(runc_config_file); + ASSERT_NE(rlc_parsed, nullptr); + + ret = is_valid_runc_launch_cmd(rlc_parsed); + // A bad layer media type should cause the launch command to be rejected as invalid + ASSERT_EQ(ret, false); + + cJSON_Delete(runc_config_json); + free_runc_launch_cmd(rlc_input); + free(json_data); + free_runc_launch_cmd(rlc_parsed); + } + + TEST_F(TestRunc, test_parse_runc_launch_cmd_invalid_num_reap_layers_keep) { + runc_launch_cmd *rlc_input = NULL; + runc_launch_cmd *rlc_parsed = NULL; + cJSON *runc_config_json = NULL; + char* json_data = NULL; + int ret = 0; + + rlc_input = build_default_runc_launch_cmd(); + ASSERT_NE(rlc_input, nullptr); + + rlc_input->num_reap_layers_keep = -1; + + runc_config_json = build_runc_config_json(rlc_input); + ASSERT_NE(runc_config_json, nullptr); + + json_data = cJSON_PrintBuffered(runc_config_json, STARTING_JSON_BUFFER_SIZE, false); + write_config_file(json_data); + + rlc_parsed = parse_runc_launch_cmd(runc_config_file); + ASSERT_NE(rlc_parsed, nullptr); + + ret = is_valid_runc_launch_cmd(rlc_parsed); + // A negative num_reap_layers_keep value should cause the launch command to be rejected as invalid + ASSERT_EQ(ret, false); + + cJSON_Delete(runc_config_json); + free_runc_launch_cmd(rlc_input); +
free(json_data); + free_runc_launch_cmd(rlc_parsed); + } + + TEST_F(TestRunc, test_parse_runc_launch_cmd_valid_mounts) { + runc_launch_cmd *rlc_input = NULL; + runc_launch_cmd *rlc_parsed = NULL; + cJSON *runc_config_json = NULL; + char* json_data = NULL; + int ret = 0; + + std::vector<std::string> mounts_string_vec; + + mounts_string_vec.push_back(std::string("/var:/var:rw+rbind+rprivate")); + mounts_string_vec.push_back(std::string("/var:/var:rw+rbind+rprivate")); + mounts_string_vec.push_back(std::string("/var/:/var/:rw+rbind+rprivate")); + mounts_string_vec.push_back(std::string("/usr/bin/cut:/usr/bin/cut:rw+rbind+rprivate")); + mounts_string_vec.push_back(std::string( + "/usr/bin/awk:/awk:rw+shared+rbind+rprivate,/etc/passwd:/etc/passwd:ro+rbind+rprivate")); + mounts_string_vec.push_back(std::string( + "/var:/var:ro+rprivate+rbind,/etc/passwd:/etc/passwd:ro+rshared+rbind+rprivate")); + + mount *mounts = NULL; + std::vector<std::string>::const_iterator itr; + + for (itr = mounts_string_vec.begin(); itr != mounts_string_vec.end(); ++itr) { + rlc_input = build_default_runc_launch_cmd(); + ASSERT_NE(rlc_input, nullptr); + + cJSON* mounts_json_array = cJSON_CreateArray(); + ASSERT_NE(mounts_json_array, nullptr); + + unsigned int num_mounts = std::count(itr->begin(), itr->end(), ',') + 1; + mounts = build_mounts(*itr, num_mounts); + for (unsigned int i = 0; i < num_mounts; i++) { + cJSON *mount_json = build_mount_json(mounts[i].src, + mounts[i].dest, + (const char**) mounts[i].options->opts); + ASSERT_NE(mount_json, nullptr); + cJSON_AddItemToArray(mounts_json_array, mount_json); + } + + cJSON_Delete(rlc_input->config.mounts); + rlc_input->config.mounts = mounts_json_array; + + runc_config_json = build_runc_config_json(rlc_input); + ASSERT_NE(runc_config_json, nullptr); + + json_data = cJSON_PrintBuffered(runc_config_json, STARTING_JSON_BUFFER_SIZE, false); + write_config_file(json_data); + + rlc_parsed = parse_runc_launch_cmd(runc_config_file); + ASSERT_NE(rlc_parsed, nullptr); + + ret = is_valid_runc_launch_cmd(rlc_parsed); + ASSERT_NE(ret, false); + + test_runc_launch_cmd(rlc_input, rlc_parsed); + + cJSON_Delete(runc_config_json); + free_mounts(mounts, num_mounts); + free_runc_launch_cmd(rlc_input); + free(json_data); + free_runc_launch_cmd(rlc_parsed); + } + } + + TEST_F(TestRunc, test_parse_runc_launch_cmd_invalid_mounts) { + runc_launch_cmd *rlc_input = NULL; + runc_launch_cmd *rlc_parsed = NULL; + cJSON *runc_config_json = NULL; + char* json_data = NULL; + int ret = 0; + + std::vector<std::string> mounts_string_vec; + + mounts_string_vec.push_back(std::string("/lib:/lib:rw+rbind+rprivate")); + mounts_string_vec.push_back(std::string("/lib:/lib:rw+rbind+rprivate")); + mounts_string_vec.push_back(std::string("/usr/bin/:/usr/bin:rw+rbind+rprivate")); + mounts_string_vec.push_back(std::string("/blah:/blah:rw+rbind+rprivate")); + mounts_string_vec.push_back(std::string("/tmp:/tmp:shared")); + mounts_string_vec.push_back(std::string("/lib:/lib")); + mounts_string_vec.push_back(std::string("/lib:/lib:other")); + + mount *mounts = NULL; + std::vector<std::string>::const_iterator itr; + + for (itr = mounts_string_vec.begin(); itr != mounts_string_vec.end(); ++itr) { + rlc_input = build_default_runc_launch_cmd(); + ASSERT_NE(rlc_input, nullptr); + + cJSON* mounts_json_array = cJSON_CreateArray(); + ASSERT_NE(mounts_json_array, nullptr); + + unsigned int num_mounts = std::count(itr->begin(), itr->end(), ',') + 1; + mounts = build_mounts(*itr, num_mounts); + for (unsigned int i = 0; i < num_mounts; i++) { + cJSON *mount_json =
build_mount_json(mounts[i].src, + mounts[i].dest, + (const char**) mounts[i].options->opts); + ASSERT_NE(mount_json, nullptr); + cJSON_AddItemToArray(mounts_json_array, mount_json); + } + + cJSON_Delete(rlc_input->config.mounts); + rlc_input->config.mounts = mounts_json_array; + + runc_config_json = build_runc_config_json(rlc_input); + ASSERT_NE(runc_config_json, nullptr); + + json_data = cJSON_PrintBuffered(runc_config_json, STARTING_JSON_BUFFER_SIZE, false); + write_config_file(json_data); + + rlc_parsed = parse_runc_launch_cmd(runc_config_file); + ASSERT_NE(rlc_parsed, nullptr); + + ret = is_valid_runc_launch_cmd(rlc_parsed); + // An invalid mount should cause the launch command to be rejected as invalid + ASSERT_EQ(ret, false); + + cJSON_Delete(runc_config_json); + free_mounts(mounts, num_mounts); + free_runc_launch_cmd(rlc_input); + free(json_data); + free_runc_launch_cmd(rlc_parsed); + } + } +} +