HADOOP-11296. Nfs3FileAttributes should not change the values of rdev, nlink and size in the constructor. Contributed by Brandon Li.
commit 2cc868dede
parent 68a0508728
@@ -419,6 +419,9 @@ Release 2.7.0 - UNRELEASED
 
     HADOOP-11289. Fix typo in RpcUtil log message. (Charles Lamb via wheat9)
 
+    HADOOP-11294. Nfs3FileAttributes should not change the values of rdev,
+    nlink and size in the constructor. (Brandon Li via wheat9)
+
 Release 2.6.0 - 2014-11-15
 
   INCOMPATIBLE CHANGES

@@ -49,8 +49,6 @@ public class Nfs3FileAttributes {
    * values should be agreed upon by the client and server. If the client and
    * server do not agree upon the values, the client should treat these fields
    * as if they are set to 0.
-   * <br>
-   * For Hadoop, currently this field is always zero.
    */
   public static class Specdata3 {
     final int specdata1;

@@ -82,20 +80,17 @@ public String toString() {
   }
 
   public Nfs3FileAttributes() {
-    this(NfsFileType.NFSREG, 0, (short)0, 0, 0, 0, 0, 0, 0, 0);
+    this(NfsFileType.NFSREG, 1, (short)0, 0, 0, 0, 0, 0, 0, 0, new Specdata3());
   }
 
   public Nfs3FileAttributes(NfsFileType nfsType, int nlink, short mode, int uid,
-      int gid, long size, long fsid, long fileId, long mtime, long atime) {
+      int gid, long size, long fsid, long fileId, long mtime, long atime, Specdata3 rdev) {
     this.type = nfsType.toValue();
     this.mode = mode;
-    this.nlink = (type == NfsFileType.NFSDIR.toValue()) ? (nlink + 2) : 1;
+    this.nlink = nlink;
     this.uid = uid;
     this.gid = gid;
     this.size = size;
-    if(type == NfsFileType.NFSDIR.toValue()) {
-      this.size = getDirSize(nlink);
-    }
     this.used = this.size;
-    this.rdev = new Specdata3();
     this.fsid = fsid;

@@ -103,6 +98,7 @@ public Nfs3FileAttributes(NfsFileType nfsType, int nlink, short mode, int uid,
     this.mtime = new NfsTime(mtime);
     this.atime = atime != 0 ? new NfsTime(atime) : this.mtime;
     this.ctime = this.mtime;
+    this.rdev = rdev;
   }
 
   public Nfs3FileAttributes(Nfs3FileAttributes other) {

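To make the new constructor contract concrete, here is a minimal, hypothetical sketch of a caller: nlink, size and rdev are now stored exactly as supplied, with no directory-specific adjustment inside Nfs3FileAttributes. The demo class and all field values below are illustrative and not part of the patch; the imports assume the usual org.apache.hadoop.nfs package layout.

import org.apache.hadoop.nfs.NfsFileType;
import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;

// Hypothetical demo class, not part of the commit.
public class Nfs3AttrConstructorDemo {
  public static void main(String[] args) {
    long now = System.currentTimeMillis();
    // A regular file: 1 hard link, 4 KB long, no device numbers.
    // The constructor keeps nlink, size and rdev exactly as passed in.
    Nfs3FileAttributes attrs = new Nfs3FileAttributes(
        NfsFileType.NFSREG, 1 /* nlink */, (short) 0644 /* mode */,
        1000 /* uid */, 1000 /* gid */, 4096L /* size */,
        0L /* fsid */, 16386L /* fileId */, now /* mtime */, now /* atime */,
        new Nfs3FileAttributes.Specdata3() /* rdev */);
    System.out.println(attrs);  // print the attributes (illustration only)
  }
}
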
@@ -147,10 +143,7 @@ public static Nfs3FileAttributes deserialize(XDR xdr) {
     attr.gid = xdr.readInt();
     attr.size = xdr.readHyper();
     attr.used = xdr.readHyper();
-    // Ignore rdev
-    xdr.readInt();
-    xdr.readInt();
-    attr.rdev = new Specdata3();
+    attr.rdev = new Specdata3(xdr.readInt(), xdr.readInt());
     attr.fsid = xdr.readHyper();
     attr.fileId = xdr.readHyper();
     attr.atime = NfsTime.deserialize(xdr);

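Note on the deserialize() change above: the two 32-bit words that were previously read and discarded are now kept in a Specdata3(specdata1, specdata2). In NFSv3, for block and character special files these are typically the major and minor device numbers, so parsed attributes now preserve that information instead of always reporting an empty rdev.
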
@@ -228,11 +221,11 @@ public int getGid() {
     return this.gid;
   }
 
-  /**
-   * HDFS directory size is always zero. Try to return something meaningful
-   * here. Assume each child take 32bytes.
-   */
-  public static long getDirSize(int childNum) {
-    return (childNum + 2) * 32;
+  public Specdata3 getRdev() {
+    return rdev;
+  }
+
+  public void setRdev(Specdata3 rdev) {
+    this.rdev = rdev;
   }
 }

@@ -67,11 +67,14 @@ public static Nfs3FileAttributes getNfs3FileAttrFromFileStatus(
      */
     NfsFileType fileType = fs.isDir() ? NfsFileType.NFSDIR : NfsFileType.NFSREG;
     fileType = fs.isSymlink() ? NfsFileType.NFSLNK : fileType;
 
-    return new Nfs3FileAttributes(fileType, fs.getChildrenNum(), fs
-        .getPermission().toShort(), iug.getUidAllowingUnknown(fs.getOwner()),
-        iug.getGidAllowingUnknown(fs.getGroup()), fs.getLen(), 0 /* fsid */,
-        fs.getFileId(), fs.getModificationTime(), fs.getAccessTime());
+    int nlink = (fileType == NfsFileType.NFSDIR) ? fs.getChildrenNum() + 2 : 1;
+    long size = (fileType == NfsFileType.NFSDIR) ? getDirSize(fs
+        .getChildrenNum()) : fs.getLen();
+    return new Nfs3FileAttributes(fileType, nlink,
+        fs.getPermission().toShort(), iug.getUidAllowingUnknown(fs.getOwner()),
+        iug.getGidAllowingUnknown(fs.getGroup()), size, 0 /* fsid */,
+        fs.getFileId(), fs.getModificationTime(), fs.getAccessTime(),
+        new Nfs3FileAttributes.Specdata3());
   }
 
   public static Nfs3FileAttributes getFileAttr(DFSClient client,

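With the hunk above, the directory bookkeeping moves to the caller: a directory with, say, 3 children is now reported with nlink = 3 + 2 = 5 (the extra two presumably accounting for "." and "..") and its size comes from getDirSize() rather than fs.getLen(), while a regular file keeps nlink = 1 and its actual length. The figures here are illustrative, not from the patch.
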
@@ -80,6 +83,14 @@ public static Nfs3FileAttributes getFileAttr(DFSClient client,
     return fs == null ? null : getNfs3FileAttrFromFileStatus(fs, iug);
   }
 
+  /**
+   * HDFS directory size is always zero. Try to return something meaningful
+   * here. Assume each child take 32bytes.
+   */
+  public static long getDirSize(int childNum) {
+    return (childNum + 2) * 32;
+  }
+
   public static WccAttr getWccAttr(DFSClient client, String fileIdPath)
       throws IOException {
     HdfsFileStatus fstat = getFileStatus(client, fileIdPath);

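Since getDirSize() now lives in Nfs3Utils, a tiny standalone sanity check of the heuristic may help. The class below is hypothetical and simply mirrors the helper's arithmetic rather than calling the Hadoop code.

// Hypothetical check of the (childNum + 2) * 32 heuristic; mirrors the helper above.
public class DirSizeCheck {
  static long getDirSize(int childNum) {
    return (childNum + 2) * 32;
  }

  public static void main(String[] args) {
    System.out.println(getDirSize(0));   // 64  -> even an empty directory reports 64 bytes
    System.out.println(getDirSize(5));   // 224 -> (5 + 2) * 32
  }
}
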
@@ -87,8 +98,8 @@ public static WccAttr getWccAttr(DFSClient client, String fileIdPath)
       return null;
     }
 
-    long size = fstat.isDir() ? Nfs3FileAttributes.getDirSize(fstat
-        .getChildrenNum()) : fstat.getLen();
+    long size = fstat.isDir() ? getDirSize(fstat.getChildrenNum()) : fstat
+        .getLen();
     return new WccAttr(size, new NfsTime(fstat.getModificationTime()),
         new NfsTime(fstat.getModificationTime()));
   }