HDFS-6912. SharedFileDescriptorFactory should not allocate sparse files (cmccabe)

Colin Patrick Mccabe 2014-09-15 14:47:27 -07:00
parent 2f847b3357
commit 8008f0e819
2 changed files with 33 additions and 2 deletions
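Why the change matters: ftruncate() only extends a file's logical size without writing any data, so the shared file descriptor segment ends up sparse; if the filesystem later runs out of space, touching those unallocated pages through a memory mapping can fail at runtime. Writing real zeros forces the blocks to be allocated up front. A minimal sketch (hypothetical file names, error handling omitted; not part of the commit) comparing the block counts fstat() reports for the two approaches:

/*
 * Hypothetical illustration: ftruncate() leaves the file sparse, while an
 * explicit zero fill reserves storage for every block.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void) {
  char buf[4096];
  struct stat st;
  int fd, i;

  memset(buf, 0, sizeof(buf));

  /* Sparse: 1 MB logical size, but few or no allocated data blocks. */
  fd = open("sparse.tmp", O_CREAT | O_RDWR | O_TRUNC, 0600);
  ftruncate(fd, 1024 * 1024);
  fstat(fd, &st);
  printf("sparse.tmp: size=%lld blocks=%lld\n",
         (long long)st.st_size, (long long)st.st_blocks);
  close(fd);

  /* Zero-filled: every block is written, so storage is reserved now. */
  fd = open("filled.tmp", O_CREAT | O_RDWR | O_TRUNC, 0600);
  for (i = 0; i < 256; i++) {
    write(fd, buf, sizeof(buf));
  }
  fstat(fd, &st);
  printf("filled.tmp: size=%lld blocks=%lld\n",
         (long long)st.st_size, (long long)st.st_blocks);
  close(fd);
  return 0;
}

On most Linux filesystems the sparse file reports zero or near-zero blocks, while the zero-filled file reports roughly 2048 512-byte blocks for the same 1 MB logical size.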

CHANGES.txt

@@ -708,6 +708,9 @@ Release 2.6.0 - UNRELEASED
 HADOOP-11056. OsSecureRandom.setConf() might leak file descriptors (yzhang
 via cmccabe)
+HDFS-6912. SharedFileDescriptorFactory should not allocate sparse files
+(cmccabe)
 BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
 HADOOP-10734. Implement high-performance secure random number sources.

SharedFileDescriptorFactory.c

@@ -37,6 +37,8 @@
 #include <sys/types.h>
 #include <unistd.h>
+#define ZERO_FULLY_BUF_SIZE 8192
 static pthread_mutex_t g_rand_lock = PTHREAD_MUTEX_INITIALIZER;
 JNIEXPORT void JNICALL
@@ -83,6 +85,24 @@ done:
   }
 }
+static int zero_fully(int fd, jint length)
+{
+  char buf[ZERO_FULLY_BUF_SIZE];
+  int res;
+  memset(buf, 0, sizeof(buf));
+  while (length > 0) {
+    res = write(fd, buf,
+        (length > ZERO_FULLY_BUF_SIZE) ? ZERO_FULLY_BUF_SIZE : length);
+    if (res < 0) {
+      if (errno == EINTR) continue;
+      return errno;
+    }
+    length -= res;
+  }
+  return 0;
+}
 JNIEXPORT jobject JNICALL
 Java_org_apache_hadoop_io_nativeio_SharedFileDescriptorFactory_createDescriptor0(
   JNIEnv *env, jclass clazz, jstring jprefix, jstring jpath, jint length)
@@ -136,12 +156,20 @@ Java_org_apache_hadoop_io_nativeio_SharedFileDescriptorFactory_createDescriptor0
     (*env)->Throw(env, jthr);
     goto done;
   }
-  if (ftruncate(fd, length) < 0) {
-    jthr = newIOException(env, "ftruncate(%s, %d) failed: error %d (%s)",
+  ret = zero_fully(fd, length);
+  if (ret) {
+    jthr = newIOException(env, "zero_fully(%s, %d) failed: error %d (%s)",
       path, length, ret, terror(ret));
     (*env)->Throw(env, jthr);
     goto done;
   }
+  if (lseek(fd, 0, SEEK_SET) < 0) {
+    ret = errno;
+    jthr = newIOException(env, "lseek(%s, 0, SEEK_SET) failed: error %d (%s)",
+      path, ret, terror(ret));
+    (*env)->Throw(env, jthr);
+    goto done;
+  }
   jret = fd_create(env, fd); // throws exception on error.
 done: