HDFS-6984. Serialize FileStatus via protobuf.

Chris Douglas · 2017-08-02 12:12:48 -07:00
parent 1a1bf6b7d0 · commit 12e44e7bda
41 changed files with 1,053 additions and 365 deletions

hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml

@@ -323,6 +323,10 @@
     <!-- protobuf generated code -->
     <Class name="~org\.apache\.hadoop\.tracing\.TraceAdminPB.*"/>
   </Match>
+  <Match>
+    <!-- protobuf generated code -->
+    <Class name="~org\.apache\.hadoop\.fs\.FSProto.*"/>
+  </Match>
   <!--
     Manually checked, misses child thread manually syncing on parent's intrinsic lock.
hadoop-common-project/hadoop-common/pom.xml

@@ -393,6 +393,7 @@
                 <include>RefreshUserMappingsProtocol.proto</include>
                 <include>RefreshCallQueueProtocol.proto</include>
                 <include>GenericRefreshProtocol.proto</include>
+                <include>FSProtos.proto</include>
               </includes>
             </source>
           </configuration>

org/apache/hadoop/fs/FileEncryptionInfo.java

@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.fs;
 
+import java.io.Serializable;
+
 import org.apache.commons.codec.binary.Hex;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.crypto.CipherSuite;
@@ -30,7 +32,9 @@
  * an encrypted file.
  */
 @InterfaceAudience.Private
-public class FileEncryptionInfo {
+public class FileEncryptionInfo implements Serializable {
+
+  private static final long serialVersionUID = 0x156abe03;
 
   private final CipherSuite cipherSuite;
   private final CryptoProtocolVersion version;

org/apache/hadoop/fs/FileStatus.java

@@ -23,11 +23,15 @@
 import java.io.InvalidObjectException;
 import java.io.ObjectInputValidation;
 import java.io.Serializable;
+import java.util.Collections;
+import java.util.EnumSet;
+import java.util.Set;
 
+import org.apache.hadoop.fs.FSProtos.FileStatusProto;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.io.Text;
+import org.apache.hadoop.fs.protocolPB.PBHelper;
 import org.apache.hadoop.io.Writable;
 
 /** Interface that represents the client side information for a file.
@@ -50,6 +54,30 @@ public class FileStatus implements Writable, Comparable<Object>,
   private String owner;
   private String group;
   private Path symlink;
+  private Set<AttrFlags> attr;
+
+  private enum AttrFlags {
+    HAS_ACL,
+    HAS_CRYPT,
+    HAS_EC,
+  };
+  private static final Set<AttrFlags> NONE = Collections.<AttrFlags>emptySet();
+  private static Set<AttrFlags> flags(boolean acl, boolean crypt, boolean ec) {
+    if (!(acl || crypt || ec)) {
+      return NONE;
+    }
+    EnumSet<AttrFlags> ret = EnumSet.noneOf(AttrFlags.class);
+    if (acl) {
+      ret.add(AttrFlags.HAS_ACL);
+    }
+    if (crypt) {
+      ret.add(AttrFlags.HAS_CRYPT);
+    }
+    if (ec) {
+      ret.add(AttrFlags.HAS_EC);
+    }
+    return ret;
+  }
 
   public FileStatus() { this(0, false, 0, 0, 0, 0, null, null, null, null); }
 
@@ -79,6 +107,15 @@ public FileStatus(long length, boolean isdir,
                     FsPermission permission, String owner, String group,
                     Path symlink,
                     Path path) {
+    this(length, isdir, block_replication, blocksize, modification_time,
+        access_time, permission, owner, group, symlink, path,
+        false, false, false);
+  }
+
+  public FileStatus(long length, boolean isdir, int block_replication,
+      long blocksize, long modification_time, long access_time,
+      FsPermission permission, String owner, String group, Path symlink,
+      Path path, boolean hasAcl, boolean isEncrypted, boolean isErasureCoded) {
     this.length = length;
     this.isdir = isdir;
     this.block_replication = (short)block_replication;
@@ -89,7 +126,7 @@ public FileStatus(long length, boolean isdir,
       this.permission = permission;
     } else if (isdir) {
       this.permission = FsPermission.getDirDefault();
-    } else if (symlink!=null) {
+    } else if (symlink != null) {
       this.permission = FsPermission.getDefault();
     } else {
       this.permission = FsPermission.getFileDefault();
@@ -98,6 +135,8 @@ public FileStatus(long length, boolean isdir,
     this.group = (group == null) ? "" : group;
     this.symlink = symlink;
     this.path = path;
+    attr = flags(hasAcl, isEncrypted, isErasureCoded);
+
     // The variables isdir and symlink indicate the type:
     // 1. isdir implies directory, in which case symlink must be null.
     // 2. !isdir implies a file or symlink, symlink != null implies a
@@ -213,7 +252,7 @@ public FsPermission getPermission() {
    * @return true if the underlying file or directory has ACLs set.
    */
   public boolean hasAcl() {
-    return permission.getAclBit();
+    return attr.contains(AttrFlags.HAS_ACL);
   }
 
   /**
@@ -222,7 +261,7 @@ public boolean hasAcl() {
    * @return true if the underlying file is encrypted.
    */
   public boolean isEncrypted() {
-    return permission.getEncryptedBit();
+    return attr.contains(AttrFlags.HAS_CRYPT);
   }
 
   /**
@@ -231,7 +270,7 @@ public boolean isEncrypted() {
    * @return true if the underlying file or directory is erasure coded.
    */
   public boolean isErasureCoded() {
-    return permission.getErasureCodedBit();
+    return attr.contains(AttrFlags.HAS_EC);
  }
 
   /**
@@ -305,47 +344,6 @@ public void setSymlink(final Path p) {
     symlink = p;
   }
 
-  //////////////////////////////////////////////////
-  // Writable
-  //////////////////////////////////////////////////
-  @Override
-  public void write(DataOutput out) throws IOException {
-    Text.writeString(out, getPath().toString(), Text.DEFAULT_MAX_LEN);
-    out.writeLong(getLen());
-    out.writeBoolean(isDirectory());
-    out.writeShort(getReplication());
-    out.writeLong(getBlockSize());
-    out.writeLong(getModificationTime());
-    out.writeLong(getAccessTime());
-    getPermission().write(out);
-    Text.writeString(out, getOwner(), Text.DEFAULT_MAX_LEN);
-    Text.writeString(out, getGroup(), Text.DEFAULT_MAX_LEN);
-    out.writeBoolean(isSymlink());
-    if (isSymlink()) {
-      Text.writeString(out, getSymlink().toString(), Text.DEFAULT_MAX_LEN);
-    }
-  }
-
-  @Override
-  public void readFields(DataInput in) throws IOException {
-    String strPath = Text.readString(in, Text.DEFAULT_MAX_LEN);
-    this.path = new Path(strPath);
-    this.length = in.readLong();
-    this.isdir = in.readBoolean();
-    this.block_replication = in.readShort();
-    blocksize = in.readLong();
-    modification_time = in.readLong();
-    access_time = in.readLong();
-    permission.readFields(in);
-    owner = Text.readString(in, Text.DEFAULT_MAX_LEN);
-    group = Text.readString(in, Text.DEFAULT_MAX_LEN);
-    if (in.readBoolean()) {
-      this.symlink = new Path(Text.readString(in, Text.DEFAULT_MAX_LEN));
-    } else {
-      this.symlink = null;
-    }
-  }
-
   /**
    * Compare this FileStatus to another FileStatus
    * @param   o the FileStatus to be compared.
@@ -377,15 +375,12 @@ public int compareTo(Object o) {
    */
   @Override
   public boolean equals(Object o) {
-    if (o == null) {
+    if (!(o instanceof FileStatus)) {
       return false;
     }
     if (this == o) {
       return true;
     }
-    if (!(o instanceof FileStatus)) {
-      return false;
-    }
     FileStatus other = (FileStatus)o;
     return this.getPath().equals(other.getPath());
   }
@@ -420,7 +415,11 @@ public String toString() {
     sb.append("; permission=" + permission);
     sb.append("; isSymlink=" + isSymlink());
     if(isSymlink()) {
-      sb.append("; symlink=" + symlink);
+      try {
+        sb.append("; symlink=" + getSymlink());
+      } catch (IOException e) {
+        throw new RuntimeException("Unexpected exception", e);
+      }
     }
     sb.append("; hasAcl=" + hasAcl());
     sb.append("; isEncrypted=" + isEncrypted());
@@ -429,6 +428,55 @@ public String toString() {
     return sb.toString();
   }
 
+  /**
+   * Read instance encoded as protobuf from stream.
+   * @param in Input stream
+   * @see PBHelper#convert(FileStatus)
+   * @deprecated Use the {@link PBHelper} and protobuf serialization directly.
+   */
+  @Override
+  @Deprecated
+  public void readFields(DataInput in) throws IOException {
+    int size = in.readInt();
+    if (size < 0) {
+      throw new IOException("Can't read FileStatusProto with negative " +
+          "size of " + size);
+    }
+    byte[] buf = new byte[size];
+    in.readFully(buf);
+    FileStatusProto proto = FileStatusProto.parseFrom(buf);
+    FileStatus other = PBHelper.convert(proto);
+    length = other.getLen();
+    isdir = other.isDirectory();
+    block_replication = other.getReplication();
+    blocksize = other.getBlockSize();
+    modification_time = other.getModificationTime();
+    access_time = other.getAccessTime();
+    setPermission(other.getPermission());
+    setOwner(other.getOwner());
+    setGroup(other.getGroup());
+    setSymlink((other.isSymlink() ? other.getSymlink() : null));
+    setPath(other.getPath());
+    attr = flags(other.hasAcl(), other.isEncrypted(), other.isErasureCoded());
+    assert (isDirectory() && getSymlink() == null) || !isDirectory();
+  }
+
+  /**
+   * Write instance encoded as protobuf to stream.
+   * @param out Output stream
+   * @see PBHelper#convert(FileStatus)
+   * @deprecated Use the {@link PBHelper} and protobuf serialization directly.
+   */
+  @Override
+  @Deprecated
+  public void write(DataOutput out) throws IOException {
+    FileStatusProto proto = PBHelper.convert(this);
+    int size = proto.getSerializedSize();
+    out.writeInt(size);
+    out.write(proto.toByteArray());
+  }
+
   @Override
   public void validateObject() throws InvalidObjectException {
     if (null == path) {
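Note that the replacement Writable methods define a simple framed encoding: a 4-byte length prefix followed by the serialized FileStatusProto. A minimal decoding sketch, assuming a DataInput positioned at a record produced by FileStatus#write:

    int size = in.readInt();            // 4-byte length prefix
    byte[] buf = new byte[size];
    in.readFully(buf);                  // raw FileStatusProto bytes
    FileStatus stat = PBHelper.convert(FileStatusProto.parseFrom(buf));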

org/apache/hadoop/fs/LocatedFileStatus.java

@@ -30,6 +30,9 @@
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class LocatedFileStatus extends FileStatus {
+
+  private static final long serialVersionUID = 0x17339920;
+
   private BlockLocation[] locations;
 
@@ -42,14 +45,18 @@ public LocatedFileStatus() {
    * @param stat a file status
    * @param locations a file's block locations
    */
-  public LocatedFileStatus(FileStatus stat, BlockLocation[] locations)
-      throws IOException {
+  public LocatedFileStatus(FileStatus stat, BlockLocation[] locations) {
     this(stat.getLen(), stat.isDirectory(), stat.getReplication(),
         stat.getBlockSize(), stat.getModificationTime(),
-        stat.getAccessTime(), stat.getPermission(), stat.getOwner(),
-        stat.getGroup(), null, stat.getPath(), locations);
+        stat.getAccessTime(), stat.getPermission(),
+        stat.getOwner(), stat.getGroup(), null, stat.getPath(),
+        stat.hasAcl(), stat.isEncrypted(), stat.isErasureCoded(), locations);
     if (stat.isSymlink()) {
-      setSymlink(stat.getSymlink());
+      try {
+        setSymlink(stat.getSymlink());
+      } catch (IOException e) {
+        throw new RuntimeException("Unexpected exception", e);
+      }
     }
   }
 
@@ -69,16 +76,47 @@ public LocatedFileStatus(FileStatus stat, BlockLocation[] locations)
    * @param path the path's qualified name
    * @param locations a file's block locations
    */
+  @Deprecated
   public LocatedFileStatus(long length, boolean isdir,
-          int block_replication,
-          long blocksize, long modification_time, long access_time,
-          FsPermission permission, String owner, String group,
-          Path symlink,
-          Path path,
-          BlockLocation[] locations) {
-    super(length, isdir, block_replication, blocksize, modification_time,
-        access_time, permission, owner, group, symlink, path);
-    this.locations = locations;
+      int block_replication,
+      long blocksize, long modification_time, long access_time,
+      FsPermission permission, String owner, String group,
+      Path symlink, Path path, BlockLocation[] locations) {
+    this(length, isdir, block_replication, blocksize, modification_time,
+        access_time, permission, owner, group, symlink, path,
+        permission.getAclBit(), permission.getEncryptedBit(),
+        permission.getErasureCodedBit(), locations);
+  }
+
+  /**
+   * Constructor.
+   *
+   * @param length a file's length
+   * @param isdir if the path is a directory
+   * @param block_replication the file's replication factor
+   * @param blocksize a file's block size
+   * @param modification_time a file's modification time
+   * @param access_time a file's access time
+   * @param permission a file's permission
+   * @param owner a file's owner
+   * @param group a file's group
+   * @param symlink symlink if the path is a symbolic link
+   * @param path the path's qualified name
+   * @param hasAcl entity has associated ACLs
+   * @param isEncrypted entity is encrypted
+   * @param isErasureCoded entity is erasure coded
+   * @param locations a file's block locations
+   */
+  public LocatedFileStatus(long length, boolean isdir,
+      int block_replication, long blocksize, long modification_time,
+      long access_time, FsPermission permission, String owner, String group,
+      Path symlink, Path path,
+      boolean hasAcl, boolean isEncrypted, boolean isErasureCoded,
+      BlockLocation[] locations) {
+    super(length, isdir, block_replication, blocksize, modification_time,
+        access_time, permission, owner, group, symlink, path,
+        hasAcl, isEncrypted, isErasureCoded);
+    this.locations = locations;
   }
 
   /**
@@ -86,7 +124,7 @@ public LocatedFileStatus(long length, boolean isdir,
    * @return the file's block locations
    */
   public BlockLocation[] getBlockLocations() {
-	  return locations;
+    return locations;
   }
 
   /**

org/apache/hadoop/fs/permission/FsPermission.java

@@ -133,11 +133,13 @@ public void fromShort(short n) {
   }
 
   @Override
+  @Deprecated
   public void write(DataOutput out) throws IOException {
     out.writeShort(toShort());
   }
 
   @Override
+  @Deprecated
   public void readFields(DataInput in) throws IOException {
     fromShort(in.readShort());
   }
@@ -184,6 +186,7 @@ public short toShort() {
    *
    * @return short extended short representation of this permission
    */
+  @Deprecated
   public short toExtendedShort() {
     return toShort();
   }
@@ -299,7 +302,10 @@ public boolean getStickyBit() {
    * Returns true if there is also an ACL (access control list).
    *
    * @return boolean true if there is also an ACL (access control list).
+   * @deprecated Get acl bit from the {@link org.apache.hadoop.fs.FileStatus}
+   * object.
    */
+  @Deprecated
   public boolean getAclBit() {
     // File system subclasses that support the ACL bit would override this.
     return false;
@@ -307,14 +313,20 @@ public boolean getAclBit() {
 
   /**
    * Returns true if the file is encrypted or directory is in an encryption zone
+   * @deprecated Get encryption bit from the
+   * {@link org.apache.hadoop.fs.FileStatus} object.
    */
+  @Deprecated
   public boolean getEncryptedBit() {
     return false;
   }
 
   /**
    * Returns true if the file or directory is erasure coded.
+   * @deprecated Get ec bit from the {@link org.apache.hadoop.fs.FileStatus}
+   * object.
   */
+  @Deprecated
  public boolean getErasureCodedBit() {
    return false;
  }
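These deprecations move attribute reporting from the permission object to FileStatus itself. A minimal before/after sketch of the migration, assuming stat is any FileStatus:

    // deprecated: attribute bits piggybacked on the permission object
    boolean hadAcl = stat.getPermission().getAclBit();
    // preferred: first-class attributes on FileStatus
    boolean hasAcl = stat.hasAcl();
    boolean isEncrypted = stat.isEncrypted();
    boolean isErasureCoded = stat.isErasureCoded();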

org/apache/hadoop/fs/protocolPB/PBHelper.java (new file)

@@ -0,0 +1,131 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs.protocolPB;

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

import java.io.IOException;

import static org.apache.hadoop.fs.FSProtos.*;

/**
 * Utility methods aiding conversion of fs data structures.
 */
public final class PBHelper {

  private PBHelper() {
    // prevent construction
  }

  public static FsPermission convert(FsPermissionProto proto)
      throws IOException {
    return new FsPermission((short)proto.getPerm());
  }

  public static FsPermissionProto convert(FsPermission p) throws IOException {
    FsPermissionProto.Builder bld = FsPermissionProto.newBuilder();
    bld.setPerm(p.toShort());
    return bld.build();
  }

  public static FileStatus convert(FileStatusProto proto) throws IOException {
    final Path path;
    final long length;
    final boolean isdir;
    final short blockReplication;
    final long blocksize;
    final long mtime;
    final long atime;
    final String owner;
    final String group;
    final FsPermission permission;
    final Path symlink;
    switch (proto.getFileType()) {
    case FT_DIR:
      isdir = true;
      symlink = null;
      blocksize = 0;
      length = 0;
      blockReplication = 0;
      break;
    case FT_SYMLINK:
      isdir = false;
      symlink = new Path(proto.getSymlink());
      blocksize = 0;
      length = 0;
      blockReplication = 0;
      break;
    case FT_FILE:
      isdir = false;
      symlink = null;
      blocksize = proto.getBlockSize();
      length = proto.getLength();
      int brep = proto.getBlockReplication();
      if ((brep & 0xffff0000) != 0) {
        throw new IOException(String.format("Block replication 0x%08x " +
            "doesn't fit in 16 bits.", brep));
      }
      blockReplication = (short)brep;
      break;
    default:
      throw new IllegalStateException("Unknown type: " + proto.getFileType());
    }
    path = new Path(proto.getPath());
    mtime = proto.getModificationTime();
    atime = proto.getAccessTime();
    permission = convert(proto.getPermission());
    owner = proto.getOwner();
    group = proto.getGroup();
    int flags = proto.getFlags();
    return new FileStatus(length, isdir, blockReplication, blocksize,
        mtime, atime, permission, owner, group, symlink, path,
        (flags & FileStatusProto.Flags.HAS_ACL_VALUE) != 0,
        (flags & FileStatusProto.Flags.HAS_CRYPT_VALUE) != 0,
        (flags & FileStatusProto.Flags.HAS_EC_VALUE) != 0);
  }

  public static FileStatusProto convert(FileStatus stat) throws IOException {
    FileStatusProto.Builder bld = FileStatusProto.newBuilder();
    bld.setPath(stat.getPath().toString());
    if (stat.isDirectory()) {
      bld.setFileType(FileStatusProto.FileType.FT_DIR);
    } else if (stat.isSymlink()) {
      bld.setFileType(FileStatusProto.FileType.FT_SYMLINK)
         .setSymlink(stat.getSymlink().toString());
    } else {
      bld.setFileType(FileStatusProto.FileType.FT_FILE)
         .setLength(stat.getLen())
         .setBlockReplication(stat.getReplication())
         .setBlockSize(stat.getBlockSize());
    }
    bld.setAccessTime(stat.getAccessTime())
       .setModificationTime(stat.getModificationTime())
       .setOwner(stat.getOwner())
       .setGroup(stat.getGroup())
       .setPermission(convert(stat.getPermission()));
    int flags = 0;
    flags |= stat.hasAcl() ? FileStatusProto.Flags.HAS_ACL_VALUE : 0;
    flags |= stat.isEncrypted() ? FileStatusProto.Flags.HAS_CRYPT_VALUE : 0;
    flags |= stat.isErasureCoded() ? FileStatusProto.Flags.HAS_EC_VALUE : 0;
    bld.setFlags(flags);
    return bld.build();
  }
}
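A minimal round trip through the helper (assuming stat is an existing FileStatus); note that FileStatus#equals compares only paths, while the full field-by-field check lives in TestFSSerialization below:

    FileStatusProto proto = PBHelper.convert(stat);  // FileStatus -> protobuf
    FileStatus copy = PBHelper.convert(proto);       // protobuf -> FileStatus
    assert stat.equals(copy);                        // equality is path-based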

org/apache/hadoop/fs/protocolPB/package-info.java (new file)

@@ -0,0 +1,18 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs.protocolPB;

org/apache/hadoop/io/erasurecode/ECSchema.java

@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.io.erasurecode;
 
+import java.io.Serializable;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
@@ -31,7 +32,10 @@
  */
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
-public final class ECSchema {
+public final class ECSchema implements Serializable {
+
+  private static final long serialVersionUID = 0x10953aa0;
+
   public static final String NUM_DATA_UNITS_KEY = "numDataUnits";
   public static final String NUM_PARITY_UNITS_KEY = "numParityUnits";
   public static final String CODEC_NAME_KEY = "codec";

hadoop-common-project/hadoop-common/src/main/proto/FSProtos.proto (new file)

@@ -0,0 +1,69 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * These .proto interfaces are private and stable.
 * Please see http://wiki.apache.org/hadoop/Compatibility
 * for what changes are allowed for a *stable* .proto interface.
 */

option java_package = "org.apache.hadoop.fs";
option java_outer_classname = "FSProtos";
option java_generic_services = true;
option java_generate_equals_and_hash = true;
package hadoop.fs;

message FsPermissionProto {
  required uint32 perm = 1; // UNIX-style mode bits
}

/*
 * FileStatus encoding. Field IDs match those from HdfsFileStatusProto, but
 * cross-serialization is not an explicitly supported use case. Unlike HDFS,
 * most fields are optional and do not define defaults.
 */
message FileStatusProto {
  enum FileType {
    FT_DIR     = 1;
    FT_FILE    = 2;
    FT_SYMLINK = 3;
  }
  enum Flags {
    HAS_ACL   = 0x01; // has ACLs
    HAS_CRYPT = 0x02; // encrypted
    HAS_EC    = 0x04; // erasure coded
  }
  required FileType fileType            = 1;
  required string path                  = 2;
  optional uint64 length                = 3;
  optional FsPermissionProto permission = 4;
  optional string owner                 = 5;
  optional string group                 = 6;
  optional uint64 modification_time     = 7;
  optional uint64 access_time           = 8;
  optional string symlink               = 9;
  optional uint32 block_replication     = 10;
  optional uint64 block_size            = 11;
  // locations                          = 12
  // alias                              = 13
  // childrenNum                        = 14
  optional bytes encryption_data        = 15;
  // storagePolicy                      = 16
  optional bytes ec_data                = 17;
  optional uint32 flags                 = 18 [default = 0];
}
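Because the Flags values are powers of two, the uint32 flags field is a bitmask of the generated *_VALUE constants. A short sketch of testing membership from Java, using the constants protoc generates for this enum:

    int flags = proto.getFlags();
    boolean hasAcl = (flags & FileStatusProto.Flags.HAS_ACL_VALUE) != 0;
    boolean crypt  = (flags & FileStatusProto.Flags.HAS_CRYPT_VALUE) != 0;
    boolean ec     = (flags & FileStatusProto.Flags.HAS_EC_VALUE) != 0;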

View File

@@ -36,6 +36,7 @@
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

org/apache/hadoop/fs/protocolPB/TestFSSerialization.java (new file)

@@ -0,0 +1,85 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs.protocolPB;

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;

import static org.apache.hadoop.fs.FSProtos.*;

import org.junit.Test;
import static org.junit.Assert.*;

/**
 * Verify PB serialization of FS data structures.
 */
public class TestFSSerialization {

  @Test
  @SuppressWarnings("deprecation")
  public void testWritableFlagSerialization() throws Exception {
    final Path p = new Path("hdfs://yaks:4344/dingos/f");
    for (int i = 0; i < 0x8; ++i) {
      final boolean acl   = 0 != (i & 0x1);
      final boolean crypt = 0 != (i & 0x2);
      final boolean ec    = 0 != (i & 0x4);
      FileStatus stat = new FileStatus(1024L, false, 3, 1L << 31,
          12345678L, 87654321L, FsPermission.getFileDefault(),
          "hadoop", "unqbbc", null, p, acl, crypt, ec);
      DataOutputBuffer dob = new DataOutputBuffer();
      stat.write(dob);
      DataInputBuffer dib = new DataInputBuffer();
      dib.reset(dob.getData(), 0, dob.getLength());
      FileStatus fstat = new FileStatus();
      fstat.readFields(dib);
      assertEquals(stat, fstat);
      checkFields(stat, fstat);
    }
  }

  @Test
  public void testUtilitySerialization() throws Exception {
    final Path p = new Path("hdfs://yaks:4344/dingos/f");
    FileStatus stat = new FileStatus(1024L, false, 3, 1L << 31,
        12345678L, 87654321L, FsPermission.createImmutable((short)0111),
        "hadoop", "unqbbc", null, p);
    FileStatusProto fsp = PBHelper.convert(stat);
    FileStatus stat2 = PBHelper.convert(fsp);
    assertEquals(stat, stat2);
    checkFields(stat, stat2);
  }

  private static void checkFields(FileStatus expected, FileStatus actual) {
    assertEquals(expected.getPath(), actual.getPath());
    assertEquals(expected.isDirectory(), actual.isDirectory());
    assertEquals(expected.getLen(), actual.getLen());
    assertEquals(expected.getPermission(), actual.getPermission());
    assertEquals(expected.getOwner(), actual.getOwner());
    assertEquals(expected.getGroup(), actual.getGroup());
    assertEquals(expected.getModificationTime(), actual.getModificationTime());
    assertEquals(expected.getAccessTime(), actual.getAccessTime());
    assertEquals(expected.getReplication(), actual.getReplication());
    assertEquals(expected.getBlockSize(), actual.getBlockSize());
    assertEquals(expected.hasAcl(), actual.hasAcl());
    assertEquals(expected.isEncrypted(), actual.isEncrypted());
    assertEquals(expected.isErasureCoded(), actual.isErasureCoded());
  }
}
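To run just this test from the hadoop-common module, the standard Surefire test filter applies:

    mvn test -Dtest=TestFSSerialization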

org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java

@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs.protocol;
 
+import java.io.Serializable;
+
 import com.google.common.base.Preconditions;
 import org.apache.commons.lang.builder.EqualsBuilder;
 import org.apache.commons.lang.builder.HashCodeBuilder;
@@ -29,11 +31,13 @@
  */
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
-public final class ErasureCodingPolicy {
+public final class ErasureCodingPolicy implements Serializable {
+
+  private static final long serialVersionUID = 0x0079fe4e;
 
+  private String name;
   private final ECSchema schema;
   private final int cellSize;
-  private String name;
   private byte id;
 
   public ErasureCodingPolicy(String name, ECSchema schema,

org/apache/hadoop/hdfs/protocol/FsPermissionExtension.java

@@ -27,6 +27,11 @@
  * done for backwards compatibility in case any existing clients assume the
  * value of FsPermission is in a particular range.
  */
+/**
+ * @deprecated ACLs, encryption, and erasure coding are managed on FileStatus.
+ */
+@Deprecated
 @InterfaceAudience.Private
 public class FsPermissionExtension extends FsPermission {
   private static final long serialVersionUID = 0x13c298a4;

org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java

@@ -17,7 +17,9 @@
  */
 package org.apache.hadoop.hdfs.protocol;
 
+import java.io.IOException;
 import java.net.URI;
+import java.util.EnumSet;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -31,24 +33,15 @@
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
-public class HdfsFileStatus {
+public class HdfsFileStatus extends FileStatus {
+
+  private static final long serialVersionUID = 0x126eb82a;
 
   // local name of the inode that's encoded in java UTF8
-  private final byte[] path;
-  private final byte[] symlink; // symlink target encoded in java UTF8 or null
-  private final long length;
-  private final boolean isdir;
-  private final short block_replication;
-  private final long blocksize;
-  private final long modification_time;
-  private final long access_time;
-  private final FsPermission permission;
-  private final String owner;
-  private final String group;
+  private byte[] uPath;
+  private byte[] uSymlink; // symlink target encoded in java UTF8/null
   private final long fileId;
   private final FileEncryptionInfo feInfo;
   private final ErasureCodingPolicy ecPolicy;
 
   // Used by dir, not including dot and dotdot. Always zero for a regular file.
@@ -57,12 +50,22 @@ public class HdfsFileStatus {
   public static final byte[] EMPTY_NAME = new byte[0];
 
+  /**
+   * Set of features potentially active on an instance.
+   */
+  public enum Flags {
+    HAS_ACL,
+    HAS_CRYPT,
+    HAS_EC;
+  }
+  private final EnumSet<Flags> flags;
+
   /**
    * Constructor.
    * @param length the number of bytes the file has
    * @param isdir if the path is a directory
    * @param block_replication the replication factor
    * @param blocksize the block size
    * @param modification_time modification time
    * @param access_time access time
   * @param permission permission
@@ -77,25 +80,18 @@ public class HdfsFileStatus {
    * @param ecPolicy the erasure coding policy
    */
   public HdfsFileStatus(long length, boolean isdir, int block_replication,
-      long blocksize, long modification_time, long access_time,
-      FsPermission permission, String owner, String group, byte[] symlink,
-      byte[] path, long fileId, int childrenNum, FileEncryptionInfo feInfo,
-      byte storagePolicy, ErasureCodingPolicy ecPolicy) {
-    this.length = length;
-    this.isdir = isdir;
-    this.block_replication = (short) block_replication;
-    this.blocksize = blocksize;
-    this.modification_time = modification_time;
-    this.access_time = access_time;
-    this.permission = (permission == null) ?
-        ((isdir || symlink!=null) ?
-            FsPermission.getDefault() :
-            FsPermission.getFileDefault()) :
-        permission;
-    this.owner = (owner == null) ? "" : owner;
-    this.group = (group == null) ? "" : group;
-    this.symlink = symlink;
-    this.path = path;
+      long blocksize, long modification_time,
+      long access_time, FsPermission permission,
+      EnumSet<Flags> flags, String owner, String group,
+      byte[] symlink, byte[] path, long fileId,
+      int childrenNum, FileEncryptionInfo feInfo,
+      byte storagePolicy, ErasureCodingPolicy ecPolicy) {
+    super(length, isdir, block_replication, blocksize, modification_time,
+        access_time, convert(isdir, symlink != null, permission, flags),
+        owner, group, null, null);
+    this.flags = flags;
+    this.uSymlink = symlink;
+    this.uPath = path;
     this.fileId = fileId;
     this.childrenNum = childrenNum;
     this.feInfo = feInfo;
@@ -104,83 +100,48 @@ public HdfsFileStatus(long length, boolean isdir, int block_replication,
   }
 
   /**
-   * Get the length of this file, in bytes.
-   * @return the length of this file, in bytes.
-   */
-  public final long getLen() {
-    return length;
-  }
-
-  /**
-   * Is this a directory?
-   * @return true if this is a directory
-   */
-  public final boolean isDir() {
-    return isdir;
-  }
-
-  /**
-   * Is this a symbolic link?
-   * @return true if this is a symbolic link
+   * Set redundant flags for compatibility with existing applications.
    */
+  protected static FsPermission convert(boolean isdir, boolean symlink,
+      FsPermission p, EnumSet<Flags> f) {
+    if (p instanceof FsPermissionExtension) {
+      // verify flags are set consistently
+      assert p.getAclBit() == f.contains(HdfsFileStatus.Flags.HAS_ACL);
+      assert p.getEncryptedBit() == f.contains(HdfsFileStatus.Flags.HAS_CRYPT);
+      assert p.getErasureCodedBit() == f.contains(HdfsFileStatus.Flags.HAS_EC);
+      return p;
+    }
+    if (null == p) {
+      if (isdir) {
+        p = FsPermission.getDirDefault();
+      } else if (symlink) {
+        p = FsPermission.getDefault();
+      } else {
+        p = FsPermission.getFileDefault();
+      }
+    }
+    return new FsPermissionExtension(p, f.contains(Flags.HAS_ACL),
+        f.contains(Flags.HAS_CRYPT), f.contains(Flags.HAS_EC));
+  }
+
+  @Override
   public boolean isSymlink() {
-    return symlink != null;
+    return uSymlink != null;
   }
 
-  /**
-   * Get the block size of the file.
-   * @return the number of bytes
-   */
-  public final long getBlockSize() {
-    return blocksize;
+  @Override
+  public boolean hasAcl() {
+    return flags.contains(Flags.HAS_ACL);
   }
 
-  /**
-   * Get the replication factor of a file.
-   * @return the replication factor of a file.
-   */
-  public final short getReplication() {
-    return block_replication;
+  @Override
+  public boolean isEncrypted() {
+    return flags.contains(Flags.HAS_CRYPT);
   }
 
-  /**
-   * Get the modification time of the file.
-   * @return the modification time of file in milliseconds since January 1, 1970 UTC.
-   */
-  public final long getModificationTime() {
-    return modification_time;
-  }
-
-  /**
-   * Get the access time of the file.
-   * @return the access time of file in milliseconds since January 1, 1970 UTC.
-   */
-  public final long getAccessTime() {
-    return access_time;
-  }
-
-  /**
-   * Get FsPermission associated with the file.
-   * @return permission
-   */
-  public final FsPermission getPermission() {
-    return permission;
-  }
-
-  /**
-   * Get the owner of the file.
-   * @return owner of the file
-   */
-  public final String getOwner() {
-    return owner;
-  }
-
-  /**
-   * Get the group associated with the file.
-   * @return group for the file.
-   */
-  public final String getGroup() {
-    return group;
+  @Override
+  public boolean isErasureCoded() {
+    return flags.contains(Flags.HAS_EC);
   }
 
   /**
@@ -188,7 +149,7 @@ public final String getGroup() {
    * @return true if the name is empty
    */
   public final boolean isEmptyLocalName() {
-    return path.length == 0;
+    return uPath.length == 0;
   }
 
   /**
@@ -196,7 +157,7 @@ public final boolean isEmptyLocalName() {
    * @return the local name in string
    */
   public final String getLocalName() {
-    return DFSUtilClient.bytes2String(path);
+    return DFSUtilClient.bytes2String(uPath);
   }
 
   /**
@@ -204,7 +165,7 @@ public final String getLocalName() {
    * @return the local name in java UTF8
    */
   public final byte[] getLocalNameInBytes() {
-    return path;
+    return uPath;
   }
 
   /**
@@ -238,16 +199,24 @@ public final Path getFullPath(final Path parent) {
     return new Path(parent, getLocalName());
   }
 
-  /**
-   * Get the string representation of the symlink.
-   * @return the symlink as a string.
-   */
-  public final String getSymlink() {
-    return DFSUtilClient.bytes2String(symlink);
+  @Override
+  public Path getSymlink() throws IOException {
+    if (isSymlink()) {
+      return new Path(DFSUtilClient.bytes2String(uSymlink));
+    }
+    throw new IOException("Path " + getPath() + " is not a symbolic link");
   }
 
+  @Override
+  public void setSymlink(Path sym) {
+    uSymlink = DFSUtilClient.string2Bytes(sym.toString());
+  }
+
+  /**
+   * Opaque referent for the symlink, to be resolved at the client.
+   */
   public final byte[] getSymlinkInBytes() {
-    return symlink;
+    return uSymlink;
   }
 
   public final long getFileId() {
@@ -275,13 +244,30 @@ public final byte getStoragePolicy() {
     return storagePolicy;
   }
 
-  public final FileStatus makeQualified(URI defaultUri, Path path) {
-    return new FileStatus(getLen(), isDir(), getReplication(),
-        getBlockSize(), getModificationTime(),
-        getAccessTime(),
-        getPermission(), getOwner(), getGroup(),
-        isSymlink() ? new Path(getSymlink()) : null,
-        (getFullPath(path)).makeQualified(
-            defaultUri, null)); // fully-qualify path
+  @Override
+  public boolean equals(Object o) {
+    // satisfy findbugs
+    return super.equals(o);
   }
+
+  @Override
+  public int hashCode() {
+    // satisfy findbugs
+    return super.hashCode();
+  }
+
+  /**
+   * Resolve the short name of the Path given the URI, parent provided. This
+   * FileStatus reference will not contain a valid Path until it is resolved
+   * by this method.
+   * @param defaultUri FileSystem to fully qualify HDFS path.
+   * @param parent Parent path of this element.
+   * @return Reference to this instance.
+   */
+  public final FileStatus makeQualified(URI defaultUri, Path parent) {
+    // fully-qualify path
+    setPath(getFullPath(parent).makeQualified(defaultUri, null));
+    return this; // API compatibility
+  }
 }

org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java

@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.protocol;
 
 import java.net.URI;
+import java.util.EnumSet;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -34,7 +35,14 @@
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class HdfsLocatedFileStatus extends HdfsFileStatus {
-  private final LocatedBlocks locations;
+
+  private static final long serialVersionUID = 0x23c73328;
+
+  /**
+   * Left transient, because {@link #makeQualifiedLocated(URI,Path)}
+   * is the user-facing type.
+   */
+  private transient LocatedBlocks locations;
 
   /**
    * Constructor
@@ -56,12 +64,12 @@ public class HdfsLocatedFileStatus extends HdfsFileStatus {
    */
   public HdfsLocatedFileStatus(long length, boolean isdir,
       int block_replication, long blocksize, long modification_time,
-      long access_time, FsPermission permission, String owner, String group,
-      byte[] symlink, byte[] path, long fileId, LocatedBlocks locations,
-      int childrenNum, FileEncryptionInfo feInfo, byte storagePolicy,
-      ErasureCodingPolicy ecPolicy) {
+      long access_time, FsPermission permission, EnumSet<Flags> flags,
+      String owner, String group, byte[] symlink, byte[] path, long fileId,
+      LocatedBlocks locations, int childrenNum, FileEncryptionInfo feInfo,
+      byte storagePolicy, ErasureCodingPolicy ecPolicy) {
     super(length, isdir, block_replication, blocksize, modification_time,
-        access_time, permission, owner, group, symlink, path, fileId,
+        access_time, permission, flags, owner, group, symlink, path, fileId,
         childrenNum, feInfo, storagePolicy, ecPolicy);
     this.locations = locations;
   }
@@ -72,13 +80,21 @@ public LocatedBlocks getBlockLocations() {
 
   public final LocatedFileStatus makeQualifiedLocated(URI defaultUri,
       Path path) {
-    return new LocatedFileStatus(getLen(), isDir(), getReplication(),
-        getBlockSize(), getModificationTime(),
-        getAccessTime(),
-        getPermission(), getOwner(), getGroup(),
-        isSymlink() ? new Path(getSymlink()) : null,
-        (getFullPath(path)).makeQualified(
-            defaultUri, null), // fully-qualify path
+    makeQualified(defaultUri, path);
+    return new LocatedFileStatus(this,
         DFSUtilClient.locatedBlocks2Locations(getBlockLocations()));
   }
+
+  @Override
+  public boolean equals(Object o) {
+    // satisfy findbugs
+    return super.equals(o);
+  }
+
+  @Override
+  public int hashCode() {
+    // satisfy findbugs
+    return super.hashCode();
+  }
 }

org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java

@@ -21,6 +21,7 @@
 import java.text.SimpleDateFormat;
 import java.util.Comparator;
 import java.util.Date;
+import java.util.EnumSet;
 
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -57,11 +58,12 @@ public int compare(SnapshottableDirectoryStatus left,
   private final byte[] parentFullPath;
 
   public SnapshottableDirectoryStatus(long modification_time, long access_time,
-      FsPermission permission, String owner, String group, byte[] localName,
-      long inodeId, int childrenNum,
-      int snapshotNumber, int snapshotQuota, byte[] parentFullPath) {
+      FsPermission permission, EnumSet<HdfsFileStatus.Flags> flags,
+      String owner, String group, byte[] localName, long inodeId,
+      int childrenNum, int snapshotNumber, int snapshotQuota,
+      byte[] parentFullPath) {
     this.dirStatus = new HdfsFileStatus(0, true, 0, 0, modification_time,
-        access_time, permission, owner, group, null, localName, inodeId,
+        access_time, permission, flags, owner, group, null, localName, inodeId,
         childrenNum, null, HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED,
         null);
     this.snapshotNumber = snapshotNumber;

org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java

@@ -104,6 +104,7 @@
 import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.AclEntryTypeProto;
 import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.FsActionProto;
 import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclStatusProto;
+import org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto;
 import org.apache.hadoop.hdfs.protocol.proto.AclProtos.GetAclStatusResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockFlagProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto;
@@ -149,7 +150,6 @@
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType;
@@ -1142,7 +1142,7 @@ public static FsAction convert(FsActionProto v) {
   }
 
   public static FsPermission convert(FsPermissionProto p) {
-    return new FsPermissionExtension((short)p.getPerm());
+    return new FsPermission((short)p.getPerm());
   }
 
   private static Event.CreateEvent.INodeType createTypeConvert(
@@ -1501,10 +1501,14 @@ public static SnapshottableDirectoryStatus convert(
       return null;
     }
     final HdfsFileStatusProto status = sdirStatusProto.getDirStatus();
+    EnumSet<HdfsFileStatus.Flags> flags = status.hasFlags()
+        ? convertFlags(status.getFlags())
+        : convertFlags(status.getPermission());
     return new SnapshottableDirectoryStatus(
         status.getModificationTime(),
         status.getAccessTime(),
         convert(status.getPermission()),
+        flags,
         status.getOwner(),
         status.getGroup(),
         status.getPath().toByteArray(),
@@ -1546,17 +1550,23 @@ public static DatanodeIDProto[] convert(DatanodeID[] did) {
   }
 
   public static FsPermissionProto convert(FsPermission p) {
-    return FsPermissionProto.newBuilder().setPerm(p.toExtendedShort()).build();
+    return FsPermissionProto.newBuilder().setPerm(p.toShort()).build();
   }
 
   public static HdfsFileStatus convert(HdfsFileStatusProto fs) {
-    if (fs == null)
+    if (fs == null) {
       return null;
+    }
+    EnumSet<HdfsFileStatus.Flags> flags = fs.hasFlags()
+        ? convertFlags(fs.getFlags())
+        : convertFlags(fs.getPermission());
     return new HdfsLocatedFileStatus(
         fs.getLength(), fs.getFileType().equals(FileType.IS_DIR),
         fs.getBlockReplication(), fs.getBlocksize(),
         fs.getModificationTime(), fs.getAccessTime(),
-        convert(fs.getPermission()), fs.getOwner(), fs.getGroup(),
+        convert(fs.getPermission()),
+        flags,
+        fs.getOwner(), fs.getGroup(),
         fs.getFileType().equals(FileType.IS_SYMLINK) ?
             fs.getSymlink().toByteArray() : null,
         fs.getPath().toByteArray(),
@@ -1569,6 +1579,47 @@ public static HdfsFileStatus convert(HdfsFileStatusProto fs) {
         fs.hasEcPolicy() ? convertErasureCodingPolicy(fs.getEcPolicy()) : null);
   }
 
+  private static EnumSet<HdfsFileStatus.Flags> convertFlags(int flags) {
+    EnumSet<HdfsFileStatus.Flags> f =
+        EnumSet.noneOf(HdfsFileStatus.Flags.class);
+    for (HdfsFileStatusProto.Flags pbf : HdfsFileStatusProto.Flags.values()) {
+      if ((pbf.getNumber() & flags) != 0) {
+        switch (pbf) {
+        case HAS_ACL:
+          f.add(HdfsFileStatus.Flags.HAS_ACL);
+          break;
+        case HAS_CRYPT:
+          f.add(HdfsFileStatus.Flags.HAS_CRYPT);
+          break;
+        case HAS_EC:
+          f.add(HdfsFileStatus.Flags.HAS_EC);
+          break;
+        default:
+          // ignore unknown
+          break;
+        }
+      }
+    }
+    return f;
+  }
+
+  private static EnumSet<HdfsFileStatus.Flags> convertFlags(
+      FsPermissionProto pbp) {
+    EnumSet<HdfsFileStatus.Flags> f =
+        EnumSet.noneOf(HdfsFileStatus.Flags.class);
+    FsPermission p = new FsPermissionExtension((short)pbp.getPerm());
+    if (p.getAclBit()) {
+      f.add(HdfsFileStatus.Flags.HAS_ACL);
+    }
+    if (p.getEncryptedBit()) {
+      f.add(HdfsFileStatus.Flags.HAS_CRYPT);
+    }
+    if (p.getErasureCodedBit()) {
+      f.add(HdfsFileStatus.Flags.HAS_EC);
+    }
+    return f;
+  }
+
   public static CorruptFileBlocks convert(CorruptFileBlocksProto c) {
     if (c == null)
       return null;
@@ -2082,6 +2133,10 @@ public static HdfsFileStatusProto convert(HdfsFileStatus fs) {
       builder.setEcPolicy(convertErasureCodingPolicy(
           fs.getErasureCodingPolicy()));
     }
+    int flags = fs.hasAcl() ? HdfsFileStatusProto.Flags.HAS_ACL_VALUE : 0;
+    flags |= fs.isEncrypted() ? HdfsFileStatusProto.Flags.HAS_CRYPT_VALUE : 0;
+    flags |= fs.isErasureCoded() ? HdfsFileStatusProto.Flags.HAS_EC_VALUE : 0;
+    builder.setFlags(flags);
     return builder.build();
   }
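One property worth noting in convertFlags(int): it only tests the bits named by the current HdfsFileStatusProto.Flags values, so flag bits set by a newer peer are silently dropped rather than failing the decode. A conceptual sketch of that behavior (convertFlags is private; the 0x08 bit is a hypothetical stand-in for a flag added by a future release):

    int wire = HdfsFileStatusProto.Flags.HAS_ACL_VALUE | 0x08;
    EnumSet<HdfsFileStatus.Flags> f = convertFlags(wire);
    // f contains only HAS_ACL; the unknown 0x08 bit is ignored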

org/apache/hadoop/hdfs/web/JsonUtilClient.java

@@ -41,7 +41,6 @@
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -61,6 +60,7 @@
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.EnumSet;
 import java.util.List;
 import java.util.Map;
 
@@ -97,17 +97,8 @@ static Token<BlockTokenIdentifier> toBlockToken(
   }
 
   /** Convert a string to a FsPermission object. */
-  static FsPermission toFsPermission(
-      final String s, Boolean aclBit, Boolean encBit, Boolean erasureBit) {
-    FsPermission perm = new FsPermission(Short.parseShort(s, 8));
-    final boolean aBit = (aclBit != null) ? aclBit : false;
-    final boolean eBit = (encBit != null) ? encBit : false;
-    final boolean ecBit = (erasureBit != null) ? erasureBit : false;
-    if (aBit || eBit || ecBit) {
-      return new FsPermissionExtension(perm, aBit, eBit, ecBit);
-    } else {
-      return perm;
-    }
+  static FsPermission toFsPermission(final String s) {
+    return null == s ? null : new FsPermission(Short.parseShort(s, 8));
   }
 
   /** Convert a Json map to a HdfsFileStatus object. */
@@ -128,10 +119,23 @@ static HdfsFileStatus toFileStatus(final Map<?, ?> json,
     final long len = ((Number) m.get("length")).longValue();
     final String owner = (String) m.get("owner");
     final String group = (String) m.get("group");
-    final FsPermission permission = toFsPermission((String) m.get("permission"),
-        (Boolean) m.get("aclBit"),
-        (Boolean) m.get("encBit"),
-        (Boolean) m.get("ecBit"));
+    final FsPermission permission = toFsPermission((String)m.get("permission"));
+
+    Boolean aclBit = (Boolean) m.get("aclBit");
+    Boolean encBit = (Boolean) m.get("encBit");
+    Boolean erasureBit = (Boolean) m.get("ecBit");
+    EnumSet<HdfsFileStatus.Flags> f =
+        EnumSet.noneOf(HdfsFileStatus.Flags.class);
+    if (aclBit != null && aclBit) {
+      f.add(HdfsFileStatus.Flags.HAS_ACL);
+    }
+    if (encBit != null && encBit) {
+      f.add(HdfsFileStatus.Flags.HAS_CRYPT);
+    }
+    if (erasureBit != null && erasureBit) {
+      f.add(HdfsFileStatus.Flags.HAS_EC);
+    }
+
     final long aTime = ((Number) m.get("accessTime")).longValue();
     final long mTime = ((Number) m.get("modificationTime")).longValue();
     final long blockSize = ((Number) m.get("blockSize")).longValue();
@@ -143,11 +147,11 @@ static HdfsFileStatus toFileStatus(final Map<?, ?> json,
     final byte storagePolicy = m.containsKey("storagePolicy") ?
         (byte) ((Number) m.get("storagePolicy")).longValue() :
         HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
-    return new HdfsFileStatus(len, type == WebHdfsConstants.PathType.DIRECTORY,
-        replication, blockSize, mTime, aTime, permission, owner, group,
-        symlink, DFSUtilClient.string2Bytes(localName),
-        fileId, childrenNum, null,
-        storagePolicy, null);
+    return new HdfsFileStatus(len,
+        type == WebHdfsConstants.PathType.DIRECTORY, replication, blockSize,
+        mTime, aTime, permission, f, owner, group, symlink,
+        DFSUtilClient.string2Bytes(localName), fileId, childrenNum,
+        null, storagePolicy, null);
   }
 
   static HdfsFileStatus[] toHdfsFileStatusArray(final Map<?, ?> json) {
@@ -465,9 +469,7 @@ static AclStatus toAclStatus(final Map<?, ?> json) {
     aclStatusBuilder.stickyBit((Boolean) m.get("stickyBit"));
     String permString = (String) m.get("permission");
     if (permString != null) {
-      final FsPermission permission = toFsPermission(permString,
-          (Boolean) m.get("aclBit"), (Boolean) m.get("encBit"),
-          (Boolean) m.get("ecBit"));
+      final FsPermission permission = toFsPermission(permString);
       aclStatusBuilder.setPermission(permission);
     }
     final List<?> entries = (List<?>) m.get("entries");

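The aclBit, encBit and ecBit attributes are optional in the WebHDFS JSON: a server that predates a given attribute simply omits the key, so each one is read as a nullable Boolean. A minimal, self-contained sketch of that null-safe mapping into a flag set (the class and method names here are invented for illustration, not part of the patch):

import java.util.EnumSet;
import java.util.HashMap;
import java.util.Map;

public class JsonFlagBits {
  enum Flags { HAS_ACL, HAS_CRYPT, HAS_EC }

  // An absent key yields null, and Boolean.TRUE.equals(null) is false,
  // so a missing attribute is treated the same as an explicit false.
  static EnumSet<Flags> fromJson(Map<?, ?> m) {
    EnumSet<Flags> f = EnumSet.noneOf(Flags.class);
    if (Boolean.TRUE.equals(m.get("aclBit"))) {
      f.add(Flags.HAS_ACL);
    }
    if (Boolean.TRUE.equals(m.get("encBit"))) {
      f.add(Flags.HAS_CRYPT);
    }
    if (Boolean.TRUE.equals(m.get("ecBit"))) {
      f.add(Flags.HAS_EC);
    }
    return f;
  }

  public static void main(String[] args) {
    Map<String, Object> m = new HashMap<>();
    m.put("aclBit", true);              // encBit and ecBit omitted
    System.out.println(fromJson(m));    // prints [HAS_ACL]
  }
}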
View File

@@ -32,7 +32,13 @@ enum PathType {
     FILE, DIRECTORY, SYMLINK;

     static PathType valueOf(HdfsFileStatus status) {
-      return status.isDir()? DIRECTORY: status.isSymlink()? SYMLINK: FILE;
+      if (status.isDirectory()) {
+        return DIRECTORY;
+      }
+      if (status.isSymlink()) {
+        return SYMLINK;
+      }
+      return FILE;
     }
   }
 }

View File

@@ -1016,15 +1016,7 @@ HdfsFileStatus decodeResponse(Map<?,?> json) {
   public FileStatus getFileStatus(Path f) throws IOException {
     statistics.incrementReadOps(1);
     storageStatistics.incrementOpCounter(OpType.GET_FILE_STATUS);
-    return makeQualified(getHdfsFileStatus(f), f);
-  }
-
-  private FileStatus makeQualified(HdfsFileStatus f, Path parent) {
-    return new FileStatus(f.getLen(), f.isDir(), f.getReplication(),
-        f.getBlockSize(), f.getModificationTime(), f.getAccessTime(),
-        f.getPermission(), f.getOwner(), f.getGroup(),
-        f.isSymlink() ? new Path(f.getSymlink()) : null,
-        f.getFullPath(parent).makeQualified(getUri(), getWorkingDirectory()));
+    return getHdfsFileStatus(f).makeQualified(getUri(), f);
   }

   @Override
@@ -1507,6 +1499,7 @@ public FileStatus[] listStatus(final Path f) throws IOException {
     statistics.incrementReadOps(1);
     storageStatistics.incrementOpCounter(OpType.LIST_STATUS);

+    final URI fsUri = getUri();
     final HttpOpParam.Op op = GetOpParam.Op.LISTSTATUS;
     return new FsPathResponseRunner<FileStatus[]>(op, f) {
       @Override
@@ -1515,7 +1508,7 @@ FileStatus[] decodeResponse(Map<?,?> json) {
             JsonUtilClient.toHdfsFileStatusArray(json);
         final FileStatus[] statuses = new FileStatus[hdfsStatuses.length];
         for (int i = 0; i < hdfsStatuses.length; i++) {
-          statuses[i] = makeQualified(hdfsStatuses[i], f);
+          statuses[i] = hdfsStatuses[i].makeQualified(fsUri, f);
         }
         return statuses;
@@ -1541,10 +1534,11 @@ DirectoryListing decodeResponse(Map<?, ?> json) throws IOException {
       }
     }.run();

     // Qualify the returned FileStatus array
+    final URI fsUri = getUri();
     final HdfsFileStatus[] statuses = listing.getPartialListing();
     FileStatus[] qualified = new FileStatus[statuses.length];
     for (int i = 0; i < statuses.length; i++) {
-      qualified[i] = makeQualified(statuses[i], f);
+      qualified[i] = statuses[i].makeQualified(fsUri, f);
     }
     return new DirectoryEntries(qualified, listing.getLastName(),
         listing.hasMore());

View File

@@ -21,7 +21,12 @@ option java_outer_classname = "AclProtos";
 option java_generate_equals_and_hash = true;
 package hadoop.hdfs;

-import "hdfs.proto";
+/**
+ * File or Directory permission - same spec as POSIX.
+ */
+message FsPermissionProto {
+  required uint32 perm = 1;  // Actually a short - only 16 bits used
+}

 message AclEntryProto {
   enum AclEntryScopeProto {

View File

@@ -32,6 +32,7 @@ option java_generate_equals_and_hash = true;
 package hadoop.hdfs;

 import "Security.proto";
+import "acl.proto";

 /**
  * Extended block identifies a block
@@ -196,13 +197,6 @@ message CorruptFileBlocksProto {
   required string cookie = 2;
 }

-/**
- * File or Directory permision - same spec as posix
- */
-message FsPermissionProto {
-  required uint32 perm = 1; // Actually a short - only 16bits used
-}
-
 /**
  * Types of recognized storage media.
  */
@@ -388,6 +382,11 @@ message HdfsFileStatusProto {
     IS_FILE = 2;
     IS_SYMLINK = 3;
   }
+  enum Flags {
+    HAS_ACL = 0x01;    // has ACLs
+    HAS_CRYPT = 0x02;  // encrypted
+    HAS_EC = 0x04;     // erasure coded
+  }
   required FileType fileType = 1;
   required bytes path = 2;  // local name of inode encoded java UTF8
   required uint64 length = 3;
@@ -415,6 +414,9 @@ message HdfsFileStatusProto {

   // Optional field for erasure coding
   optional ErasureCodingPolicyProto ecPolicy = 17;
+
+  // Set of flags
+  optional uint32 flags = 18 [default = 0];
 }

 /**

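The Flags values above are disjoint bit masks, which is what lets a set of status attributes travel as the single uint32 flags field. A sketch of the packing and unpacking under that assumption (helper names are illustrative, not the commit's API); note that a reader simply ignores bits it does not recognize, so new flags can be added later without breaking old clients:

import java.util.EnumSet;

public class FlagsPacking {
  enum Flags {
    HAS_ACL(0x01), HAS_CRYPT(0x02), HAS_EC(0x04);
    final int mask;
    Flags(int mask) { this.mask = mask; }
  }

  // OR the single-bit masks together into one integer.
  static int pack(EnumSet<Flags> set) {
    int bits = 0;
    for (Flags f : set) {
      bits |= f.mask;
    }
    return bits;
  }

  // Test each known mask; unknown bits are silently dropped.
  static EnumSet<Flags> unpack(int bits) {
    EnumSet<Flags> set = EnumSet.noneOf(Flags.class);
    for (Flags f : Flags.values()) {
      if ((bits & f.mask) != 0) {
        set.add(f);
      }
    }
    return set;
  }

  public static void main(String[] args) {
    int bits = pack(EnumSet.of(Flags.HAS_ACL, Flags.HAS_EC));
    System.out.println(bits);           // 5
    System.out.println(unpack(bits));   // [HAS_ACL, HAS_EC]
  }
}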
View File

@@ -1047,18 +1047,7 @@ public AclStatus getAclStatus(Path path) throws IOException {
   /** Convert a string to a FsPermission object. */
   static FsPermission toFsPermission(JSONObject json) {
     final String s = (String) json.get(PERMISSION_JSON);
-    final Boolean aclBit = (Boolean) json.get(ACL_BIT_JSON);
-    final Boolean encBit = (Boolean) json.get(ENC_BIT_JSON);
-    final Boolean erasureBit = (Boolean) json.get(EC_BIT_JSON);
-    FsPermission perm = new FsPermission(Short.parseShort(s, 8));
-    final boolean aBit = (aclBit != null) ? aclBit : false;
-    final boolean eBit = (encBit != null) ? encBit : false;
-    final boolean ecBit = (erasureBit != null) ? erasureBit : false;
-    if (aBit || eBit || ecBit) {
-      return new FsPermissionExtension(perm, aBit, eBit, ecBit);
-    } else {
-      return perm;
-    }
+    return new FsPermission(Short.parseShort(s, 8));
   }

   private FileStatus createFileStatus(Path parent, JSONObject json) {
@@ -1073,23 +1062,23 @@ private FileStatus createFileStatus(Path parent, JSONObject json) {
     long mTime = (Long) json.get(MODIFICATION_TIME_JSON);
     long blockSize = (Long) json.get(BLOCK_SIZE_JSON);
     short replication = ((Long) json.get(REPLICATION_JSON)).shortValue();
-    FileStatus fileStatus = null;
-    switch (type) {
-      case FILE:
-      case DIRECTORY:
-        fileStatus = new FileStatus(len, (type == FILE_TYPE.DIRECTORY),
-            replication, blockSize, mTime, aTime,
-            permission, owner, group, path);
-        break;
-      case SYMLINK:
-        Path symLink = null;
-        fileStatus = new FileStatus(len, false,
-            replication, blockSize, mTime, aTime,
-            permission, owner, group, symLink,
-            path);
-    }
-    return fileStatus;
+    final Boolean aclBit = (Boolean) json.get(ACL_BIT_JSON);
+    final Boolean encBit = (Boolean) json.get(ENC_BIT_JSON);
+    final Boolean erasureBit = (Boolean) json.get(EC_BIT_JSON);
+    final boolean aBit = (aclBit != null) ? aclBit : false;
+    final boolean eBit = (encBit != null) ? encBit : false;
+    final boolean ecBit = (erasureBit != null) ? erasureBit : false;
+    if (aBit || eBit || ecBit) {
+      // include this for compatibility with 2.x
+      FsPermissionExtension deprecatedPerm =
+          new FsPermissionExtension(permission, aBit, eBit, ecBit);
+      return new FileStatus(len, FILE_TYPE.DIRECTORY == type,
+          replication, blockSize, mTime, aTime, deprecatedPerm, owner, group,
+          null, path, aBit, eBit, ecBit);
+    }
+    return new FileStatus(len, FILE_TYPE.DIRECTORY == type,
+        replication, blockSize, mTime, aTime, permission, owner, group, path);
   }

   /**

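The deprecatedPerm branch above exists so that 2.x-era callers, which ask the permission object rather than the FileStatus for the attribute bits, keep seeing them. A stand-in sketch of the pattern (Perm and PermExtension are simplified placeholders for FsPermission and FsPermissionExtension):

public class CompatPermDemo {
  static class Perm {
    boolean getAclBit() {
      return false;                      // plain permissions carry no ACL bit
    }
  }

  static class PermExtension extends Perm {
    private final boolean acl;
    PermExtension(boolean acl) { this.acl = acl; }
    @Override
    boolean getAclBit() { return acl; }  // old accessor still answers
  }

  public static void main(String[] args) {
    Perm p = new PermExtension(true);    // what createFileStatus hands out
    System.out.println(p.getAclBit());   // true, via the extension subclass
  }
}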
View File

@@ -852,10 +852,11 @@ private void assertSameAcls(AclStatus a, AclStatus b) throws Exception {
     }
   }

-  private static void assertSameAclBit(FileSystem expected, FileSystem actual,
+  private static void assertSameAcls(FileSystem expected, FileSystem actual,
       Path path) throws IOException {
     FileStatus expectedFileStatus = expected.getFileStatus(path);
     FileStatus actualFileStatus = actual.getFileStatus(path);
+    assertEquals(actualFileStatus.hasAcl(), expectedFileStatus.hasAcl());
     assertEquals(actualFileStatus.getPermission().getAclBit(),
         expectedFileStatus.getPermission().getAclBit());
   }
@@ -888,31 +889,31 @@ private void testFileAcls() throws Exception {
     AclStatus proxyAclStat = proxyFs.getAclStatus(path);
     AclStatus httpfsAclStat = httpfs.getAclStatus(path);
     assertSameAcls(httpfsAclStat, proxyAclStat);
-    assertSameAclBit(httpfs, proxyFs, path);
+    assertSameAcls(httpfs, proxyFs, path);

     httpfs.setAcl(path, AclEntry.parseAclSpec(aclSet,true));
     proxyAclStat = proxyFs.getAclStatus(path);
     httpfsAclStat = httpfs.getAclStatus(path);
     assertSameAcls(httpfsAclStat, proxyAclStat);
-    assertSameAclBit(httpfs, proxyFs, path);
+    assertSameAcls(httpfs, proxyFs, path);

     httpfs.modifyAclEntries(path, AclEntry.parseAclSpec(aclUser2, true));
     proxyAclStat = proxyFs.getAclStatus(path);
     httpfsAclStat = httpfs.getAclStatus(path);
     assertSameAcls(httpfsAclStat, proxyAclStat);
-    assertSameAclBit(httpfs, proxyFs, path);
+    assertSameAcls(httpfs, proxyFs, path);

     httpfs.removeAclEntries(path, AclEntry.parseAclSpec(rmAclUser1, false));
     proxyAclStat = proxyFs.getAclStatus(path);
     httpfsAclStat = httpfs.getAclStatus(path);
     assertSameAcls(httpfsAclStat, proxyAclStat);
-    assertSameAclBit(httpfs, proxyFs, path);
+    assertSameAcls(httpfs, proxyFs, path);

     httpfs.removeAcl(path);
     proxyAclStat = proxyFs.getAclStatus(path);
     httpfsAclStat = httpfs.getAclStatus(path);
     assertSameAcls(httpfsAclStat, proxyAclStat);
-    assertSameAclBit(httpfs, proxyFs, path);
+    assertSameAcls(httpfs, proxyFs, path);
   }

   /**
@@ -935,21 +936,21 @@ private void testDirAcls() throws Exception {
     AclStatus proxyAclStat = proxyFs.getAclStatus(dir);
     AclStatus httpfsAclStat = httpfs.getAclStatus(dir);
     assertSameAcls(httpfsAclStat, proxyAclStat);
-    assertSameAclBit(httpfs, proxyFs, dir);
+    assertSameAcls(httpfs, proxyFs, dir);

     /* Set a default ACL on the directory */
     httpfs.setAcl(dir, (AclEntry.parseAclSpec(defUser1,true)));
     proxyAclStat = proxyFs.getAclStatus(dir);
     httpfsAclStat = httpfs.getAclStatus(dir);
     assertSameAcls(httpfsAclStat, proxyAclStat);
-    assertSameAclBit(httpfs, proxyFs, dir);
+    assertSameAcls(httpfs, proxyFs, dir);

     /* Remove the default ACL */
     httpfs.removeDefaultAcl(dir);
     proxyAclStat = proxyFs.getAclStatus(dir);
     httpfsAclStat = httpfs.getAclStatus(dir);
     assertSameAcls(httpfsAclStat, proxyAclStat);
-    assertSameAclBit(httpfs, proxyFs, dir);
+    assertSameAcls(httpfs, proxyFs, dir);
   }

   private void testEncryption() throws Exception {

View File

@@ -252,4 +252,16 @@
     <Class name="org.apache.hadoop.hdfs.server.datanode.checker.AbstractFuture" />
     <Bug pattern="NS_DANGEROUS_NON_SHORT_CIRCUIT" />
   </Match>
+  <Match>
+    <Class name="org.apache.hadoop.hdfs.server.namenode.NNUpgradeUtil$1" />
+    <Method name="visitFile" />
+    <Bug pattern="NP_NULL_ON_SOME_PATH_FROM_RETURN_VALUE" />
+  </Match>
+  <!-- HdfsFileStatus is user-facing, but HdfsLocatedFileStatus is not.
+       Defensible compatibility choices over time create odd corners. -->
+  <Match>
+    <Class name="org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus" />
+    <Field name="locations" />
+    <Bug pattern="SE_TRANSIENT_FIELD_NOT_RESTORED" />
+  </Match>
 </FindBugsFilter>

View File

@@ -19,7 +19,7 @@
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto;
+import org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;

 /**

View File

@@ -0,0 +1,18 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocolPB;

View File

@@ -31,7 +31,6 @@
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
-import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
@@ -47,6 +46,7 @@
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.Arrays;
+import java.util.EnumSet;

 import static org.apache.hadoop.util.Time.now;
@@ -384,7 +384,6 @@ static HdfsFileStatus createFileStatusForEditLog(
    * @param child for a directory listing of the iip, else null
    * @param storagePolicy for the path or closest ancestor
    * @param needLocation if block locations need to be included or not
-   * @param includeStoragePolicy if storage policy should be returned
    * @return a file status
    * @throws java.io.IOException if any error occurs
    */
@@ -439,7 +438,19 @@ private static HdfsFileStatus createFileStatus(
     int childrenNum = node.isDirectory() ?
         node.asDirectory().getChildrenNum(snapshot) : 0;

+    EnumSet<HdfsFileStatus.Flags> flags =
+        EnumSet.noneOf(HdfsFileStatus.Flags.class);
     INodeAttributes nodeAttrs = fsd.getAttributes(iip);
+    boolean hasAcl = nodeAttrs.getAclFeature() != null;
+    if (hasAcl) {
+      flags.add(HdfsFileStatus.Flags.HAS_ACL);
+    }
+    if (isEncrypted) {
+      flags.add(HdfsFileStatus.Flags.HAS_CRYPT);
+    }
+    if (isErasureCoded) {
+      flags.add(HdfsFileStatus.Flags.HAS_EC);
+    }
     return createFileStatus(
         size,
         node.isDirectory(),
@@ -447,7 +458,8 @@ private static HdfsFileStatus createFileStatus(
         blocksize,
         node.getModificationTime(snapshot),
         node.getAccessTime(snapshot),
-        getPermissionForFileStatus(nodeAttrs, isEncrypted, isErasureCoded),
+        nodeAttrs.getFsPermission(),
+        flags,
         nodeAttrs.getUserName(),
         nodeAttrs.getGroupName(),
         node.isSymlink() ? node.asSymlink().getSymlink() : null,
@@ -460,44 +472,24 @@ private static HdfsFileStatus createFileStatus(
         loc);
   }

-  private static HdfsFileStatus createFileStatus(long length, boolean isdir,
-      int replication, long blocksize, long mtime,
-      long atime, FsPermission permission, String owner, String group,
-      byte[] symlink, byte[] path, long fileId, int childrenNum,
-      FileEncryptionInfo feInfo, byte storagePolicy,
+  private static HdfsFileStatus createFileStatus(
+      long length, boolean isdir,
+      int replication, long blocksize, long mtime, long atime,
+      FsPermission permission, EnumSet<HdfsFileStatus.Flags> flags,
+      String owner, String group, byte[] symlink, byte[] path, long fileId,
+      int childrenNum, FileEncryptionInfo feInfo, byte storagePolicy,
       ErasureCodingPolicy ecPolicy, LocatedBlocks locations) {
     if (locations == null) {
       return new HdfsFileStatus(length, isdir, replication, blocksize,
-          mtime, atime, permission, owner, group, symlink, path, fileId,
-          childrenNum, feInfo, storagePolicy, ecPolicy);
+          mtime, atime, permission, flags, owner, group, symlink, path,
+          fileId, childrenNum, feInfo, storagePolicy, ecPolicy);
     } else {
       return new HdfsLocatedFileStatus(length, isdir, replication, blocksize,
-          mtime, atime, permission, owner, group, symlink, path, fileId,
-          locations, childrenNum, feInfo, storagePolicy, ecPolicy);
+          mtime, atime, permission, flags, owner, group, symlink, path,
+          fileId, locations, childrenNum, feInfo, storagePolicy, ecPolicy);
     }
   }

-  /**
-   * Returns an inode's FsPermission for use in an outbound FileStatus.  If the
-   * inode has an ACL or is for an encrypted file/dir, then this method will
-   * return an FsPermissionExtension.
-   *
-   * @param node INode to check
-   * @param isEncrypted boolean true if the file/dir is encrypted
-   * @return FsPermission from inode, with ACL bit on if the inode has an ACL
-   *         and encrypted bit on if it represents an encrypted file/dir.
-   */
-  private static FsPermission getPermissionForFileStatus(
-      INodeAttributes node, boolean isEncrypted, boolean isErasureCoded) {
-    FsPermission perm = node.getFsPermission();
-    boolean hasAcl = node.getAclFeature() != null;
-    if (hasAcl || isEncrypted || isErasureCoded) {
-      perm = new FsPermissionExtension(perm, hasAcl,
-          isEncrypted, isErasureCoded);
-    }
-    return perm;
-  }
-
   private static ContentSummary getContentSummaryInt(FSDirectory fsd,
       INodesInPath iip) throws IOException {
     fsd.readLock();

View File

@@ -72,12 +72,13 @@
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
-import java.util.concurrent.ForkJoinPool;
-import java.util.concurrent.RecursiveAction;
+import java.util.EnumSet;
 import java.util.List;
 import java.util.Map;
 import java.util.SortedSet;
 import java.util.TreeSet;
+import java.util.concurrent.ForkJoinPool;
+import java.util.concurrent.RecursiveAction;
 import java.util.concurrent.locks.ReentrantReadWriteLock;

 import static org.apache.hadoop.fs.CommonConfigurationKeys.FS_PROTECTED_DIRECTORIES;
@@ -135,11 +136,13 @@ private static INodeDirectory createRoot(FSNamesystem namesystem) {
   public final static HdfsFileStatus DOT_RESERVED_STATUS =
       new HdfsFileStatus(0, true, 0, 0, 0, 0, new FsPermission((short) 01770),
-          null, null, null, HdfsFileStatus.EMPTY_NAME, -1L, 0, null,
+          EnumSet.noneOf(HdfsFileStatus.Flags.class), null, null, null,
+          HdfsFileStatus.EMPTY_NAME, -1L, 0, null,
           HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, null);

   public final static HdfsFileStatus DOT_SNAPSHOT_DIR_STATUS =
-      new HdfsFileStatus(0, true, 0, 0, 0, 0, null, null, null, null,
+      new HdfsFileStatus(0, true, 0, 0, 0, 0, null,
+          EnumSet.noneOf(HdfsFileStatus.Flags.class), null, null, null,
           HdfsFileStatus.EMPTY_NAME, -1L, 0, null,
           HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, null);
@@ -383,12 +386,15 @@ HdfsFileStatus[] getReservedStatuses() {
    */
   void createReservedStatuses(long cTime) {
     HdfsFileStatus inodes = new HdfsFileStatus(0, true, 0, 0, cTime, cTime,
-        new FsPermission((short) 0770), null, supergroup, null,
+        new FsPermission((short) 0770),
+        EnumSet.noneOf(HdfsFileStatus.Flags.class), null, supergroup, null,
         DOT_INODES, -1L, 0, null,
         HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, null);
     HdfsFileStatus raw = new HdfsFileStatus(0, true, 0, 0, cTime, cTime,
-        new FsPermission((short) 0770), null, supergroup, null, RAW, -1L,
-        0, null, HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, null);
+        new FsPermission((short) 0770),
+        EnumSet.noneOf(HdfsFileStatus.Flags.class), null, supergroup, null,
+        RAW, -1L, 0, null,
+        HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, null);
     reservedStatuses = new HdfsFileStatus[] {inodes, raw};
   }

View File

@@ -177,6 +177,7 @@
 import org.apache.hadoop.ha.ServiceFailedException;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.UnknownCryptoProtocolVersionException;
@@ -371,9 +372,11 @@ private void logAuditEvent(boolean succeeded, String cmd, String src,
     }
     FileStatus status = null;
     if (stat != null) {
-      Path symlink = stat.isSymlink() ? new Path(stat.getSymlink()) : null;
+      Path symlink = stat.isSymlink()
+          ? new Path(DFSUtilClient.bytes2String(stat.getSymlinkInBytes()))
+          : null;
       Path path = new Path(src);
-      status = new FileStatus(stat.getLen(), stat.isDir(),
+      status = new FileStatus(stat.getLen(), stat.isDirectory(),
           stat.getReplication(), stat.getBlockSize(),
           stat.getModificationTime(),
           stat.getAccessTime(), stat.getPermission(), stat.getOwner(),

View File

@@ -80,6 +80,7 @@
 import org.apache.hadoop.ha.protocolPB.HAServiceProtocolServerSideTranslatorPB;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.HDFSPolicyProvider;
 import org.apache.hadoop.hdfs.inotify.EventBatch;
 import org.apache.hadoop.hdfs.inotify.EventBatchList;
@@ -1430,7 +1431,7 @@ public String getLinkTarget(String path) throws IOException {
     } else if (!stat.isSymlink()) {
       throw new IOException("Path " + path + " is not a symbolic link");
     }
-    return stat.getSymlink();
+    return DFSUtilClient.bytes2String(stat.getSymlinkInBytes());
   }

View File

@@ -25,6 +25,7 @@
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
+import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -35,6 +36,7 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DFSUtilClient;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.protocol.SnapshotInfo;
@@ -345,7 +347,8 @@ public SnapshottableDirectoryStatus[] getSnapshottableDirListing(
       if (userName == null || userName.equals(dir.getUserName())) {
         SnapshottableDirectoryStatus status = new SnapshottableDirectoryStatus(
             dir.getModificationTime(), dir.getAccessTime(),
-            dir.getFsPermission(), dir.getUserName(), dir.getGroupName(),
+            dir.getFsPermission(), EnumSet.noneOf(HdfsFileStatus.Flags.class),
+            dir.getUserName(), dir.getGroupName(),
             dir.getLocalNameBytes(), dir.getId(),
             dir.getChildrenNum(Snapshot.CURRENT_STATE_ID),
             dir.getDirectorySnapshottableFeature().getNumSnapshots(),

View File

@@ -17,10 +17,18 @@
  */
 package org.apache.hadoop.hdfs.web;

-import org.apache.hadoop.fs.*;
+import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.FileChecksum;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.fs.XAttr;
+import org.apache.hadoop.fs.XAttrCodec;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.*;
 import org.apache.hadoop.ipc.RemoteException;
@@ -110,21 +118,20 @@ private static Map<String, Object> toJsonMap(HdfsFileStatus status) {
     m.put("pathSuffix", status.getLocalName());
     m.put("type", WebHdfsConstants.PathType.valueOf(status));
     if (status.isSymlink()) {
-      m.put("symlink", status.getSymlink());
+      m.put("symlink", DFSUtilClient.bytes2String(status.getSymlinkInBytes()));
     }
     m.put("length", status.getLen());
     m.put("owner", status.getOwner());
     m.put("group", status.getGroup());
     FsPermission perm = status.getPermission();
     m.put("permission", toString(perm));
-    if (perm.getAclBit()) {
+    if (status.hasAcl()) {
       m.put("aclBit", true);
     }
-    if (perm.getEncryptedBit()) {
+    if (status.isEncrypted()) {
       m.put("encBit", true);
     }
-    if (perm.getErasureCodedBit()) {
+    if (status.isErasureCoded()) {
       m.put("ecBit", true);
     }
     m.put("accessTime", status.getAccessTime());
@@ -373,15 +380,6 @@ public static String toJsonString(final AclStatus status) {
     FsPermission perm = status.getPermission();
     if (perm != null) {
       m.put("permission", toString(perm));
-      if (perm.getAclBit()) {
-        m.put("aclBit", true);
-      }
-      if (perm.getEncryptedBit()) {
-        m.put("encBit", true);
-      }
-      if (perm.getErasureCodedBit()) {
-        m.put("ecBit", true);
-      }
     }
     final Map<String, Map<String, Object>> finalMap =
         new TreeMap<String, Map<String, Object>>();

View File

@@ -259,12 +259,14 @@ public Object answer(InvocationOnMock invocation)
     Mockito.doReturn(
         new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
-            (short) 777), "owner", "group", new byte[0], new byte[0],
+            (short) 777), EnumSet.noneOf(HdfsFileStatus.Flags.class),
+            "owner", "group", new byte[0], new byte[0],
             1010, 0, null, (byte) 0, null)).when(mockNN).getFileInfo(anyString());

     Mockito.doReturn(
         new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
-            (short) 777), "owner", "group", new byte[0], new byte[0],
+            (short) 777), EnumSet.noneOf(HdfsFileStatus.Flags.class),
+            "owner", "group", new byte[0], new byte[0],
             1010, 0, null, (byte) 0, null))
         .when(mockNN)
         .create(anyString(), (FsPermission) anyObject(), anyString(),

View File

@@ -891,7 +891,8 @@ private static void mockCreate(ClientProtocol mcp,
       CipherSuite suite, CryptoProtocolVersion version) throws Exception {
     Mockito.doReturn(
         new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
-            (short) 777), "owner", "group", new byte[0], new byte[0],
+            (short) 777), EnumSet.noneOf(HdfsFileStatus.Flags.class),
+            "owner", "group", new byte[0], new byte[0],
             1010, 0, new FileEncryptionInfo(suite,
                 version, new byte[suite.getAlgorithmBlockSize()],
                 new byte[suite.getAlgorithmBlockSize()],

View File

@@ -0,0 +1,153 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import java.net.URI;
import org.apache.hadoop.fs.FSProtos.FileStatusProto;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import com.google.protobuf.ByteString;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
/**
* Verify compatible FileStatus/HdfsFileStatus serialization.
*/
public class TestFileStatusSerialization {
private static void checkFields(FileStatus expected, FileStatus actual) {
assertEquals(expected.getPath(), actual.getPath());
assertEquals(expected.isDirectory(), actual.isDirectory());
assertEquals(expected.getLen(), actual.getLen());
assertEquals(expected.getPermission(), actual.getPermission());
assertEquals(expected.getOwner(), actual.getOwner());
assertEquals(expected.getGroup(), actual.getGroup());
assertEquals(expected.getModificationTime(), actual.getModificationTime());
assertEquals(expected.getAccessTime(), actual.getAccessTime());
assertEquals(expected.getReplication(), actual.getReplication());
assertEquals(expected.getBlockSize(), actual.getBlockSize());
}
/**
* Test API backwards-compatibility with 2.x applications w.r.t. FsPermission.
*/
@Test
@SuppressWarnings("deprecation")
public void testFsPermissionCompatibility() throws Exception {
final int flagmask = 0x8;
// flags compatible with 2.x; fixed as constant in this test to ensure
// compatibility is maintained. New flags are not part of the contract this
// test verifies.
for (int i = 0; i < flagmask; ++i) {
FsPermission perm = FsPermission.createImmutable((short) 0013);
HdfsFileStatusProto.Builder hspb = HdfsFileStatusProto.newBuilder()
.setFileType(FileType.IS_FILE)
.setPath(ByteString.copyFromUtf8("hdfs://foobar/dingos/zot"))
.setLength(4344)
.setPermission(PBHelperClient.convert(perm))
.setOwner("hadoop")
.setGroup("unqbbc")
.setModificationTime(12345678L)
.setAccessTime(87654321L)
.setBlockReplication(10)
.setBlocksize(1L << 33)
.setFlags(i);
HdfsFileStatus stat = PBHelperClient.convert(hspb.build());
stat.makeQualified(new URI("hdfs://foobar"), new Path("/dingos"));
assertEquals(new Path("hdfs://foobar/dingos/zot"), stat.getPath());
// verify deprecated FsPermissionExtension methods
FsPermission sp = stat.getPermission();
assertEquals(sp.getAclBit(), stat.hasAcl());
assertEquals(sp.getEncryptedBit(), stat.isEncrypted());
assertEquals(sp.getErasureCodedBit(), stat.isErasureCoded());
// verify Writable contract
DataOutputBuffer dob = new DataOutputBuffer();
stat.write(dob);
DataInputBuffer dib = new DataInputBuffer();
dib.reset(dob.getData(), 0, dob.getLength());
FileStatus fstat = new FileStatus();
fstat.readFields(dib);
checkFields(stat, fstat);
// FsPermissionExtension used for HdfsFileStatus, not FileStatus,
// attribute flags should still be preserved
assertEquals(sp.getAclBit(), fstat.hasAcl());
assertEquals(sp.getEncryptedBit(), fstat.isEncrypted());
assertEquals(sp.getErasureCodedBit(), fstat.isErasureCoded());
}
}
// param for LocatedFileStatus, HttpFileStatus
@Test
public void testCrossSerializationProto() throws Exception {
FsPermission perm = FsPermission.getFileDefault();
for (FileType t : FileType.values()) {
HdfsFileStatusProto.Builder hspb = HdfsFileStatusProto.newBuilder()
.setFileType(t)
.setPath(ByteString.copyFromUtf8("hdfs://foobar/dingos"))
.setLength(4344)
.setPermission(PBHelperClient.convert(perm))
.setOwner("hadoop")
.setGroup("unqbbc")
.setModificationTime(12345678L)
.setAccessTime(87654321L)
.setBlockReplication(10)
.setBlocksize(1L << 33);
if (FileType.IS_SYMLINK.equals(t)) {
hspb.setSymlink(ByteString.copyFromUtf8("hdfs://yaks/dingos"));
}
if (FileType.IS_FILE.equals(t)) {
hspb.setFileId(4544);
}
HdfsFileStatusProto hsp = hspb.build();
byte[] src = hsp.toByteArray();
FileStatusProto fsp = FileStatusProto.parseFrom(src);
assertEquals(hsp.getPath().toStringUtf8(), fsp.getPath());
assertEquals(hsp.getLength(), fsp.getLength());
assertEquals(hsp.getPermission().getPerm(),
fsp.getPermission().getPerm());
assertEquals(hsp.getOwner(), fsp.getOwner());
assertEquals(hsp.getGroup(), fsp.getGroup());
assertEquals(hsp.getModificationTime(), fsp.getModificationTime());
assertEquals(hsp.getAccessTime(), fsp.getAccessTime());
assertEquals(hsp.getSymlink().toStringUtf8(), fsp.getSymlink());
assertEquals(hsp.getBlockReplication(), fsp.getBlockReplication());
assertEquals(hsp.getBlocksize(), fsp.getBlockSize());
assertEquals(hsp.getFileType().ordinal(), fsp.getFileType().ordinal());
// verify unknown fields preserved
byte[] dst = fsp.toByteArray();
HdfsFileStatusProto hsp2 = HdfsFileStatusProto.parseFrom(dst);
assertEquals(hsp, hsp2);
checkFields(PBHelperClient.convert(hsp), PBHelperClient.convert(hsp2));
}
}
}

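The cross-serialization test above leans on a protobuf property: the wire format carries no message name, only a tag (field number and wire type) followed by a payload for each field. Two message types that assign the same numbers and types to their shared fields therefore parse each other's bytes, and numbers a parser does not declare are retained as unknown fields and re-emitted on the next write. A tiny sketch of the tag encoding; treating length as field number 3 in both messages is an assumption for illustration:

public class WireTagDemo {
  public static void main(String[] args) {
    int fieldNumber = 3;                 // e.g. the shared 'length' field
    int wireTypeVarint = 0;              // varint payloads use wire type 0
    int tag = (fieldNumber << 3) | wireTypeVarint;
    System.out.printf("tag byte: 0x%02x%n", tag);  // 0x18
  }
}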
View File

@@ -30,6 +30,7 @@
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
+import java.util.EnumSet;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -353,12 +354,14 @@ public void testFactory() throws Exception {
     Mockito.doReturn(
         new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
-            (short) 777), "owner", "group", new byte[0], new byte[0],
+            (short) 777), EnumSet.noneOf(HdfsFileStatus.Flags.class),
+            "owner", "group", new byte[0], new byte[0],
             1010, 0, null, (byte) 0, null)).when(mcp).getFileInfo(anyString());
     Mockito
         .doReturn(
             new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
-                (short) 777), "owner", "group", new byte[0], new byte[0],
+                (short) 777), EnumSet.noneOf(HdfsFileStatus.Flags.class),
+                "owner", "group", new byte[0], new byte[0],
                 1010, 0, null, (byte) 0, null))
         .when(mcp)
         .create(anyString(), (FsPermission) anyObject(), anyString(),

View File

@@ -21,6 +21,7 @@

 import java.io.IOException;

+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.AclEntry;
@@ -141,6 +142,11 @@ public static void assertFilePermissionGranted(FileSystem fs,
     }
   }

+  public static void assertPermission(FileSystem fs, Path pathToCheck,
+      short perm) throws IOException {
+    assertPermission(fs, pathToCheck, perm, (perm & (1 << 12)) != 0);
+  }
+
   /**
    * Asserts the value of the FsPermission bits on the inode of a specific path.
    *
@@ -150,10 +156,11 @@ public static void assertFilePermissionGranted(FileSystem fs,
    * @throws IOException thrown if there is an I/O error
    */
   public static void assertPermission(FileSystem fs, Path pathToCheck,
-      short perm) throws IOException {
+      short perm, boolean hasAcl) throws IOException {
     short filteredPerm = (short)(perm & 01777);
-    FsPermission fsPermission = fs.getFileStatus(pathToCheck).getPermission();
+    FileStatus stat = fs.getFileStatus(pathToCheck);
+    FsPermission fsPermission = stat.getPermission();
     assertEquals(filteredPerm, fsPermission.toShort());
-    assertEquals(((perm & (1 << 12)) != 0), fsPermission.getAclBit());
+    assertEquals(hasAcl, stat.hasAcl());
   }
 }

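The (perm & (1 << 12)) != 0 check in the new overload assumes the convention that the ACL marker sits just above the twelve POSIX mode bits (nine rwx bits plus sticky, setgid and setuid). A self-contained sketch of that test:

public class AclBitDemo {
  static boolean hasAclBit(short perm) {
    return (perm & (1 << 12)) != 0;      // bit 12: first bit above the mode
  }

  public static void main(String[] args) {
    System.out.println(hasAclBit((short) 0755));               // false
    System.out.println(hasAclBit((short) (0755 | (1 << 12)))); // true
  }
}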
View File

@@ -48,6 +48,7 @@
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -1355,7 +1356,8 @@ public void testFsckFileNotFound() throws Exception {
     byte storagePolicy = 0;

     HdfsFileStatus file = new HdfsFileStatus(length, isDir, blockReplication,
-        blockSize, modTime, accessTime, perms, owner, group, symlink,
+        blockSize, modTime, accessTime, perms,
+        EnumSet.noneOf(HdfsFileStatus.Flags.class), owner, group, symlink,
         path, fileId, numChildren, null, storagePolicy, null);
     Result replRes = new ReplicationResult(conf);
     Result ecRes = new ErasureCodingResult(conf);

View File

@@ -23,6 +23,7 @@
 import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*;

 import java.io.IOException;
+import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
@@ -37,6 +38,7 @@
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -51,10 +53,12 @@
 public class TestJsonUtil {

   static FileStatus toFileStatus(HdfsFileStatus f, String parent) {
-    return new FileStatus(f.getLen(), f.isDir(), f.getReplication(),
+    return new FileStatus(f.getLen(), f.isDirectory(), f.getReplication(),
         f.getBlockSize(), f.getModificationTime(), f.getAccessTime(),
         f.getPermission(), f.getOwner(), f.getGroup(),
-        f.isSymlink() ? new Path(f.getSymlink()) : null,
+        f.isSymlink()
+            ? new Path(DFSUtilClient.bytes2String(f.getSymlinkInBytes()))
+            : null,
         new Path(f.getFullName(parent)));
   }
@@ -63,7 +67,8 @@ public void testHdfsFileStatus() throws IOException {
     final long now = Time.now();
     final String parent = "/dir";
     final HdfsFileStatus status = new HdfsFileStatus(1001L, false, 3, 1L << 26,
-        now, now + 10, new FsPermission((short) 0644), "user", "group",
+        now, now + 10, new FsPermission((short) 0644),
+        EnumSet.noneOf(HdfsFileStatus.Flags.class), "user", "group",
         DFSUtil.string2Bytes("bar"), DFSUtil.string2Bytes("foo"),
         HdfsConstants.GRANDFATHER_INODE_ID, 0, null, (byte) 0, null);
     final FileStatus fstatus = toFileStatus(status, parent);