HDFS-15944. Prevent truncation by snprintf (#2853)

Gautham B A authored on 2021-04-02 22:07:33 +05:30, committed by GitHub
parent bc7689abf5
commit 70536ba1f9
2 changed files with 32 additions and 7 deletions
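
The pattern applied throughout the patch is the same in every hunk: first call snprintf with a NULL buffer and size 0, which only reports how many characters the formatted path would need, assert that this fits under PATH_MAX, and only then format into the fixed-size buffer. Below is a minimal standalone sketch of that pattern, for illustration only; the directory name is a placeholder, and plain assert() stands in for the test suite's EXPECT_* macros, which are not shown in this diff.

#include <assert.h>
#include <limits.h>
#include <stdio.h>

int main(void)
{
    /* "base" is a hypothetical directory, not a path from the patch. */
    const char *base = "/tmp/fuse-test";
    char path[PATH_MAX];

    /* Probe: with a NULL buffer and size 0, snprintf writes nothing and
     * returns the number of characters the formatted string needs
     * (excluding the terminating NUL), or a negative value on error. */
    int szToWrite = snprintf(NULL, 0, "%s/trunc.txt", base);
    assert(szToWrite >= 0 && szToWrite < PATH_MAX);

    /* Write: the formatted string is known to fit, so this call
     * cannot truncate. */
    int szWritten = snprintf(path, sizeof(path), "%s/trunc.txt", base);
    assert(szWritten >= 0);

    printf("%s\n", path);
    return 0;
}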


@@ -184,7 +184,11 @@ static int testOpenTrunc(const char *base)
 const char * const SAMPLE2 = "this is the second file that we wrote. "
 "It's #2!";
-snprintf(path, sizeof(path), "%s/trunc.txt", base);
+int szToWrite = snprintf(NULL, 0, "%s/trunc.txt", base);
+EXPECT_INT_LT(szToWrite, PATH_MAX);
+int szWritten = snprintf(path, sizeof(path), "%s/trunc.txt", base);
+EXPECT_NONNEGATIVE(szWritten);
 fd = open(path, O_CREAT | O_TRUNC | O_WRONLY, 0644);
 if (fd < 0) {
 err = errno;
@@ -252,7 +256,10 @@ int runFuseWorkloadImpl(const char *root, const char *pcomp,
 EXPECT_NONZERO(S_ISDIR(stBuf.st_mode));
 // mkdir <base>/a
-snprintf(tmp, sizeof(tmp), "%s/a", base);
+int szToWrite = snprintf(NULL, 0, "%s/a", base);
+EXPECT_INT_LT(szToWrite, PATH_MAX);
+int szWritten = snprintf(tmp, sizeof(tmp), "%s/a", base);
+EXPECT_NONNEGATIVE(szWritten);
 RETRY_ON_EINTR_GET_ERRNO(ret, mkdir(tmp, 0755));
 EXPECT_ZERO(ret);
@@ -260,7 +267,10 @@ int runFuseWorkloadImpl(const char *root, const char *pcomp,
 EXPECT_INT_EQ(1, testReadDir(base, expectDirs, DIRS_A_AND_B));
 // mkdir <base>/b
-snprintf(tmp, sizeof(tmp), "%s/b", base);
+szToWrite = snprintf(NULL, 0, "%s/b", base);
+EXPECT_INT_LT(szToWrite, PATH_MAX);
+szWritten = snprintf(tmp, sizeof(tmp), "%s/b", base);
+EXPECT_NONNEGATIVE(szWritten);
 RETRY_ON_EINTR_GET_ERRNO(ret, mkdir(tmp, 0755));
 EXPECT_ZERO(ret);
@@ -268,8 +278,16 @@ int runFuseWorkloadImpl(const char *root, const char *pcomp,
 EXPECT_INT_EQ(2, testReadDir(base, expectDirs, DIRS_A_AND_B));
 // rename a -> c
-snprintf(src, sizeof(src), "%s/a", base);
-snprintf(dst, sizeof(dst), "%s/c", base);
+szToWrite = snprintf(NULL, 0, "%s/a", base);
+EXPECT_INT_LT(szToWrite, PATH_MAX);
+szWritten = snprintf(src, sizeof(src), "%s/a", base);
+EXPECT_NONNEGATIVE(szWritten);
+szToWrite = snprintf(NULL, 0, "%s/c", base);
+EXPECT_INT_LT(szToWrite, PATH_MAX);
+szWritten = snprintf(dst, sizeof(dst), "%s/c", base);
+EXPECT_NONNEGATIVE(szWritten);
 EXPECT_ZERO(rename(src, dst));
 // readdir c and b
@@ -294,7 +312,11 @@ int runFuseWorkloadImpl(const char *root, const char *pcomp,
 // open some files and write to them
 for (i = 0; i < NUM_FILE_CTX; i++) {
-snprintf(tmp, sizeof(tmp), "%s/b/%d", base, i);
+szToWrite = snprintf(NULL, 0, "%s/b/%d", base, i);
+EXPECT_INT_LT(szToWrite, PATH_MAX);
+szWritten = snprintf(tmp, sizeof(tmp), "%s/b/%d", base, i);
+EXPECT_NONNEGATIVE(szWritten);
 ctx[i].path = strdup(tmp);
 if (!ctx[i].path) {
 fprintf(stderr, "FUSE_WORKLOAD: OOM on line %d\n", __LINE__);


@@ -223,7 +223,10 @@ static int doTestHdfsOperations(struct tlhThreadInfo *ti, hdfsFS fs,
 int nFile;
 for (nFile = 0; nFile < 10000; nFile++) {
 char filename[PATH_MAX];
-snprintf(filename, PATH_MAX, "%s/many_files_%d", listDirTest, nFile);
+int szToWrite = snprintf(NULL, 0, "%s/many_files_%d", listDirTest, nFile);
+EXPECT_INT_LT(szToWrite, PATH_MAX);
+int szWritten = snprintf(filename, PATH_MAX, "%s/many_files_%d", listDirTest, nFile);
+EXPECT_NONNEGATIVE(szWritten);
 file = hdfsOpenFile(fs, filename, O_WRONLY, 0, 0, 0);
 EXPECT_NONNULL(file);
 EXPECT_ZERO(hdfsCloseFile(fs, file));
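
For context on what the new assertions guard against (a general note on snprintf semantics, not part of the patch): snprintf never overruns the destination buffer, but when the buffer is too small it silently truncates the output and still returns the full length the string would have needed. A tiny illustration with a deliberately undersized, hypothetical buffer, not taken from the tests:

#include <stdio.h>

int main(void)
{
    char small[8];
    /* "many_files_42" needs 13 characters, but only 7 plus the
     * terminating NUL fit, so the buffer ends up holding "many_fi"
     * while the return value is still 13. */
    int n = snprintf(small, sizeof(small), "many_files_%d", 42);
    printf("returned %d, buffer holds \"%s\"\n", n, small);
    return 0;
}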