HDFS-5107 Fix array copy error in Readdir and Readdirplus responses. Contributed by Brandon Li
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1515166 13f79535-47bb-0310-9956-ffa450edef68
parent 214d4377fc
commit b7fb6fd6c4
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/READDIR3Response.java
@@ -17,12 +17,14 @@
  */
 package org.apache.hadoop.nfs.nfs3.response;
 
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+
 import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
 import org.apache.hadoop.nfs.nfs3.Nfs3Status;
 import org.apache.hadoop.oncrpc.XDR;
 
-import com.google.common.collect.ObjectArrays;
-
 /**
  * READDIR3 Response
  */
@@ -56,12 +58,11 @@ long getCookie() {
   }
 
   public static class DirList3 {
-    final Entry3 entries[];
+    final List<Entry3> entries;
     final boolean eof;
 
     public DirList3(Entry3[] entries, boolean eof) {
-      this.entries = ObjectArrays.newArray(entries, entries.length);
-      System.arraycopy(this.entries, 0, entries, 0, entries.length);
+      this.entries = Collections.unmodifiableList(Arrays.asList(entries));
       this.eof = eof;
     }
   }
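The hunk above is the heart of the fix. System.arraycopy(src, srcPos, dest, destPos, length) copies from the first array into the third; the removed code passed the freshly allocated field (this.entries) as src and the caller's array as dest, so the field stayed all-null and the caller's array was wiped as well. A minimal standalone sketch of the mistake (class and variable names are hypothetical, not part of this patch):

import java.util.Arrays;

public class ArrayCopyDemo {
  public static void main(String[] args) {
    String[] input = {"a", "b", "c"};

    // The buggy direction: the fresh array is src, the caller's array is dest.
    String[] fresh = new String[input.length];           // all null
    System.arraycopy(fresh, 0, input, 0, input.length);  // wipes input
    System.out.println(Arrays.toString(input));          // [null, null, null]

    // The correct direction for a defensive copy.
    String[] input2 = {"a", "b", "c"};
    String[] copy = new String[input2.length];
    System.arraycopy(input2, 0, copy, 0, input2.length);
    System.out.println(Arrays.toString(copy));           // [a, b, c]
  }
}

Rather than fixing the argument order, the patch drops the copy entirely and wraps the array in an unmodifiable List, which also makes the container itself read-only.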
@@ -102,12 +103,11 @@ public XDR send(XDR xdr, int xid) {
 
     if (getStatus() == Nfs3Status.NFS3_OK) {
       xdr.writeLongAsHyper(cookieVerf);
-      Entry3[] f = dirList.entries;
-      for (int i = 0; i < f.length; i++) {
+      for (Entry3 e : dirList.entries) {
         xdr.writeBoolean(true); // Value follows
-        xdr.writeLongAsHyper(f[i].getFileId());
-        xdr.writeString(f[i].getName());
-        xdr.writeLongAsHyper(f[i].getCookie());
+        xdr.writeLongAsHyper(e.getFileId());
+        xdr.writeString(e.getName());
+        xdr.writeLongAsHyper(e.getCookie());
       }
 
       xdr.writeBoolean(false);
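A note on what this loop emits: XDR encodes the READDIR3 entry list as a linked list (RFC 1813), so a 4-byte boolean discriminant ("value follows") precedes each entry, and the final FALSE written after the loop terminates the list. A rough sketch of that wire pattern using plain java.io (names are hypothetical; this is not the Hadoop XDR class):

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.List;

class XdrListSketch {
  // Encodes a list of fileids the way the loop above does: TRUE before
  // each entry, FALSE to terminate. XDR booleans are 4-byte integers.
  static byte[] encode(List<Long> fileIds) throws IOException {
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(buf);
    for (long id : fileIds) {
      out.writeInt(1);    // boolean TRUE: another entry follows
      out.writeLong(id);  // entry body (a real entry adds name and cookie)
    }
    out.writeInt(0);      // boolean FALSE: end of list
    return buf.toByteArray();
  }
}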
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/READDIRPLUS3Response.java
@@ -17,13 +17,15 @@
  */
 package org.apache.hadoop.nfs.nfs3.response;
 
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+
 import org.apache.hadoop.nfs.nfs3.FileHandle;
 import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
 import org.apache.hadoop.nfs.nfs3.Nfs3Status;
 import org.apache.hadoop.oncrpc.XDR;
 
-import com.google.common.collect.ObjectArrays;
-
 /**
  * READDIRPLUS3 Response
  */
@@ -60,16 +62,15 @@ void seralize(XDR xdr) {
   }
 
   public static class DirListPlus3 {
-    EntryPlus3 entries[];
+    List<EntryPlus3> entries;
     boolean eof;
 
     public DirListPlus3(EntryPlus3[] entries, boolean eof) {
-      this.entries = ObjectArrays.newArray(entries, entries.length);
-      System.arraycopy(this.entries, 0, entries, 0, entries.length);
+      this.entries = Collections.unmodifiableList(Arrays.asList(entries));
       this.eof = eof;
     }
 
-    EntryPlus3[] getEntries() {
+    List<EntryPlus3> getEntries() {
       return entries;
     }
 
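One caveat with the new representation, worth knowing when reading this patch: Arrays.asList wraps the caller's array rather than copying it, so the unmodifiable wrapper blocks writes through the list, but later writes to the original array still show through the view. That is presumably acceptable here on the assumption that callers hand the array off when building the response; the assumption is mine, not stated in the patch. A small demonstration (class name hypothetical):

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

class AsListViewDemo {
  public static void main(String[] args) {
    String[] arr = {"a", "b"};
    List<String> view = Collections.unmodifiableList(Arrays.asList(arr));
    arr[0] = "x";                    // mutates the backing array
    System.out.println(view.get(0)); // prints "x": the list is only a view
    // view.set(0, "y");             // would throw UnsupportedOperationException
  }
}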
@@ -101,10 +102,9 @@ public XDR send(XDR out, int xid) {
 
     if (getStatus() == Nfs3Status.NFS3_OK) {
       out.writeLongAsHyper(cookieVerf);
-      EntryPlus3[] f = dirListPlus.getEntries();
-      for (int i = 0; i < f.length; i++) {
+      for (EntryPlus3 f : dirListPlus.getEntries()) {
         out.writeBoolean(true); // next
-        f[i].seralize(out);
+        f.seralize(out);
       }
 
       out.writeBoolean(false);
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -293,6 +293,9 @@ Release 2.1.1-beta - UNRELEASED
 
     HDFS-5104 Support dotdot name in NFS LOOKUP operation (brandonli)
 
+    HDFS-5107 Fix array copy error in Readdir and Readdirplus responses
+    (brandonli)
+
   IMPROVEMENTS
 
     HDFS-4513. Clarify in the WebHDFS REST API that all JSON respsonses may