HADOOP-12334. Change Mode Of Copy Operation of HBase WAL Archiving to bypass Azure Storage Throttling after retries. Contributed by Gaurav Kanade.

This commit is contained in:
cnauroth 2015-10-22 12:21:32 -07:00
parent aea26bf4dd
commit 47641fcbc9
2 changed files with 43 additions and 7 deletions

View File

@ -1335,6 +1335,9 @@ Release 2.8.0 - UNRELEASED
HADOOP-12418. TestRPC.testRPCInterruptedSimple fails intermittently.
(kihwal)
HADOOP-12334. Change Mode Of Copy Operation of HBase WAL Archiving to bypass
Azure Storage Throttling after retries. (Gaurav Kanade via cnauroth)
Release 2.7.2 - UNRELEASED
INCOMPATIBLE CHANGES

View File

@ -60,6 +60,7 @@
import org.apache.hadoop.fs.azure.metrics.ResponseReceivedMetricUpdater;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.io.IOUtils;
import org.mortbay.util.ajax.JSON;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -76,6 +77,7 @@
import com.microsoft.azure.storage.blob.BlobListingDetails;
import com.microsoft.azure.storage.blob.BlobProperties;
import com.microsoft.azure.storage.blob.BlobRequestOptions;
import com.microsoft.azure.storage.blob.BlobType;
import com.microsoft.azure.storage.blob.CloudBlob;
import com.microsoft.azure.storage.blob.CopyStatus;
import com.microsoft.azure.storage.blob.DeleteSnapshotsOption;
@ -2373,6 +2375,9 @@ public void rename(String srcKey, String dstKey, boolean acquireLease,
throw new IOException("Cannot acquire new lease if one already exists.");
}
CloudBlobWrapper srcBlob = null;
CloudBlobWrapper dstBlob = null;
SelfRenewingLease lease = null;
try {
// Attempts rename may occur before opening any streams so first,
// check if a session exists, if not create a session with the Azure
@ -2388,8 +2393,8 @@ public void rename(String srcKey, String dstKey, boolean acquireLease,
// Get the source blob and assert its existence. If the source key
// needs to be normalized then normalize it.
//
CloudBlobWrapper srcBlob = getBlobReference(srcKey);
srcBlob = getBlobReference(srcKey);
if (!srcBlob.exists(getInstrumentedContext())) {
throw new AzureException ("Source blob " + srcKey +
" does not exist.");
@ -2406,7 +2411,6 @@ public void rename(String srcKey, String dstKey, boolean acquireLease,
* when HBase runs on HDFS, where the region server recovers the lease
* on a log file, to gain exclusive access to it, before it splits it.
*/
SelfRenewingLease lease = null;
if (acquireLease) {
lease = srcBlob.acquireLease();
} else if (existingLease != null) {
@ -2416,7 +2420,7 @@ public void rename(String srcKey, String dstKey, boolean acquireLease,
// Get the destination blob. The destination key always needs to be
// normalized.
//
CloudBlobWrapper dstBlob = getBlobReference(dstKey); dstBlob = getBlobReference(dstKey);
// Rename the source blob to the destination blob by copying it to
// the destination blob then deleting it.
@ -2458,8 +2462,37 @@ public void rename(String srcKey, String dstKey, boolean acquireLease,
waitForCopyToComplete(dstBlob, getInstrumentedContext());
safeDelete(srcBlob, lease);
} catch (StorageException e) {
// Re-throw exception as an Azure storage exception.
if (e.getErrorCode().equals(
StorageErrorCode.SERVER_BUSY.toString())) {
LOG.warn("Rename: CopyBlob: StorageException: ServerBusy: Retry complete, will attempt client side copy for page blob");
InputStream ipStream = null;
OutputStream opStream = null;
try {
if(srcBlob.getProperties().getBlobType() == BlobType.PAGE_BLOB){
ipStream = openInputStream(srcBlob);
opStream = openOutputStream(dstBlob);
byte[] buffer = new byte[PageBlobFormatHelpers.PAGE_SIZE];
int len;
while ((len = ipStream.read(buffer)) != -1) {
opStream.write(buffer, 0, len);
}
opStream.flush();
opStream.close();
ipStream.close();
} else {
throw new AzureException(e);
}
safeDelete(srcBlob, lease);
} catch(StorageException se) {
LOG.warn("Rename: CopyBlob: StorageException: Failed");
throw new AzureException(se);
} finally {
IOUtils.closeStream(ipStream);
IOUtils.closeStream(opStream);
}
} else {
throw new AzureException(e);
}
} catch (URISyntaxException e) {
// Re-throw exception as an Azure storage exception.
throw new AzureException(e);