From 8d4926f38bf53b32453cd2bc7322c8818f752f85 Mon Sep 17 00:00:00 2001
From: Inigo Goiri
Date: Thu, 14 Jun 2018 09:58:50 -0700
Subject: [PATCH 01/70] HDFS-13563. TestDFSAdminWithHA times out on Windows.
 Contributed by Lukas Majercak.

---
 .../org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
index aa4d481915..c6139c13e4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
@@ -97,6 +97,13 @@ private void setUpHaCluster(boolean security) throws Exception {
     System.setOut(new PrintStream(out));
     System.setErr(new PrintStream(err));
+
+    // Reduce the number of retries to speed up the tests.
+    conf.setInt(
+        CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 3);
+    conf.setInt(
+        CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_RETRY_INTERVAL_KEY,
+        500);
   }
 
   @After

From 9591765040b85927ac69179ab46383eef9560a28 Mon Sep 17 00:00:00 2001
From: Eric Yang
Date: Thu, 14 Jun 2018 15:54:21 -0400
Subject: [PATCH 02/70] YARN-8410. Fixed a bug in A record lookup by CNAME
 record. Contributed by Shane Kumpf

---
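Note for reviewers (placed below the "---" cut line, so `git am` ignores it): the
RegistryDNS hunks below make a forwarded lookup (e.g. an A query) for an aliased
name return the CNAME record(s) together with the records of the canonical name,
deduplicating answers via Message#findRecord, instead of following the alias with
Type.CNAME and never adding the target's data. The standalone dnsjava sketch that
follows is illustrative only and is not part of this patch; the class name
CnameQuerySketch and the example name mail.yahoo.com are placeholders, and it
assumes dnsjava (org.xbill.DNS) is on the classpath.

    import org.xbill.DNS.DClass;
    import org.xbill.DNS.Message;
    import org.xbill.DNS.Name;
    import org.xbill.DNS.Record;
    import org.xbill.DNS.Type;

    public class CnameQuerySketch {
      public static void main(String[] args) throws Exception {
        // The kind of query the fix targets: an A lookup for a name that the
        // registry only knows as a CNAME. After this patch, RegistryDNS should
        // answer with the CNAME record first and then the A record(s) of the
        // canonical name, since remoteLookup now follows the alias using the
        // original query type rather than Type.CNAME.
        Name name = Name.fromString("mail.yahoo.com.");
        Record question = Record.newRecord(name, Type.A, DClass.IN);
        Message query = Message.newQuery(question);
        System.out.println(query);
      }
    }
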
 .../registry/server/dns/RegistryDNS.java     | 29 +++++++++++++----
 .../registry/server/dns/TestRegistryDNS.java | 23 +++++++++++++--
 2 files changed, 44 insertions(+), 8 deletions(-)

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNS.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNS.java
index 5e994fb776..002284384c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNS.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNS.java
@@ -1126,19 +1126,38 @@ byte[] generateReply(Message query, Socket s)
    */
   private byte remoteLookup(Message response, Name name, int type,
       int iterations) {
+    // If retrieving the root zone, query for NS record type
+    if (name.toString().equals(".")) {
+      type = Type.NS;
+    }
+
+    // Always add any CNAMEs to the response first
+    if (type != Type.CNAME) {
+      Record[] cnameAnswers = getRecords(name, Type.CNAME);
+      if (cnameAnswers != null) {
+        for (Record cnameR : cnameAnswers) {
+          if (!response.findRecord(cnameR)) {
+            response.addRecord(cnameR, Section.ANSWER);
+          }
+        }
+      }
+    }
+
     // Forward lookup to primary DNS servers
     Record[] answers = getRecords(name, type);
     try {
       for (Record r : answers) {
-        if (r.getType() == Type.SOA) {
-          response.addRecord(r, Section.AUTHORITY);
-        } else {
-          response.addRecord(r, Section.ANSWER);
+        if (!response.findRecord(r)) {
+          if (r.getType() == Type.SOA) {
+            response.addRecord(r, Section.AUTHORITY);
+          } else {
+            response.addRecord(r, Section.ANSWER);
+          }
         }
         if (r.getType() == Type.CNAME) {
           Name cname = ((CNAMERecord) r).getAlias();
           if (iterations < 6) {
-            remoteLookup(response, cname, Type.CNAME, iterations + 1);
+            remoteLookup(response, cname, type, iterations + 1);
           }
         }
       }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestRegistryDNS.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestRegistryDNS.java
index 6ba58dd99d..969faf968b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestRegistryDNS.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestRegistryDNS.java
@@ -410,7 +410,7 @@ Record[] assertDNSQuery(String lookup, int type, int numRecs)
     return recs;
   }
 
-  Record[] assertDNSQueryNotNull(String lookup, int type)
+  Record[] assertDNSQueryNotNull(String lookup, int type, int answerCount)
       throws IOException {
     Name name = Name.fromString(lookup);
     Record question = Record.newRecord(name, type, DClass.IN);
@@ -424,7 +424,7 @@ Record[] assertDNSQueryNotNull(String lookup, int type)
     assertEquals("Questions do not match", query.getQuestion(),
         response.getQuestion());
     Record[] recs = response.getSectionArray(Section.ANSWER);
-    assertEquals(1, recs.length);
+    assertEquals(answerCount, recs.length);
     assertEquals(recs[0].getType(), type);
     return recs;
   }
@@ -656,7 +656,24 @@ public void testExternalCNAMERecord() throws Exception {
 
     // start assessing whether correct records are available
     Record[] recs =
-        assertDNSQueryNotNull("mail.yahoo.com.", Type.CNAME);
+        assertDNSQueryNotNull("mail.yahoo.com.", Type.CNAME, 1);
+  }
+
+  @Test
+  public void testRootLookup() throws Exception {
+    setRegistryDNS(new RegistryDNS("TestRegistry"));
+    Configuration conf = new Configuration();
+    conf.set(RegistryConstants.KEY_DNS_DOMAIN, "dev.test");
+    conf.set(RegistryConstants.KEY_DNS_ZONE_SUBNET, "172.17.0");
+    conf.setTimeDuration(RegistryConstants.KEY_DNS_TTL, 30L, TimeUnit.SECONDS);
+    conf.set(RegistryConstants.KEY_DNS_ZONES_DIR,
+        getClass().getResource("/").getFile());
+    getRegistryDNS().setDomainName(conf);
+    getRegistryDNS().initializeZones(conf);
+
+    // start assessing whether correct records are available
+    Record[] recs =
+        assertDNSQueryNotNull(".", Type.NS, 13);
   }
 
   @Test

From 361ffb26bebf2491bbe3219ef4a83eb753660018 Mon Sep 17 00:00:00 2001
From: Bharat Viswanadham
Date: Thu, 14 Jun 2018 13:14:25 -0700
Subject: [PATCH 03/70] YARN-8426:Upgrade jquery-ui to 1.12.1 in YARN.
 Contributed by Sunil Govindan

---
 .../hadoop-yarn/hadoop-yarn-common/pom.xml        |  2 +-
 .../apache/hadoop/yarn/webapp/view/JQueryUI.java  |  2 +-
 .../static/jquery/jquery-ui-1.12.1.custom.min.js  | 13 +++++++++++++
 .../static/jquery/jquery-ui-1.9.1.custom.min.js   |  6 ------
 4 files changed, 15 insertions(+), 8 deletions(-)
 create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/jquery-ui-1.12.1.custom.min.js
 delete mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/jquery-ui-1.9.1.custom.min.js

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
index af1440a56e..eddcbaae67 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
@@ -242,7 +242,7 @@
             <exclude>src/main/resources/webapps/static/dt-1.9.4/images/Sorting icons.psd</exclude>
             <exclude>src/main/resources/webapps/static/dt-1.9.4/js/jquery.dataTables.min.js</exclude>
             <exclude>src/main/resources/webapps/static/jt/jquery.jstree.js</exclude>
-            <exclude>src/main/resources/webapps/static/jquery/jquery-ui-1.9.1.custom.min.js</exclude>
+            <exclude>src/main/resources/webapps/static/jquery/jquery-ui-1.12.1.custom.min.js</exclude>
            <exclude>src/main/resources/webapps/static/jquery/jquery-3.3.1.min.js</exclude>
             <exclude>src/main/resources/webapps/static/jquery/themes-1.9.1/base/jquery-ui.css</exclude>
             <exclude>src/test/resources/application_1440536969523_0001.har/_index</exclude>
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java
index d4fba1f241..91e5f89df7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java
@@ -68,7 +68,7 @@ protected void render(Block html) {
     html.link(root_url("static/jquery/themes-1.9.1/base/jquery-ui.css"))
         .link(root_url("static/dt-1.9.4/css/jui-dt.css"))
         .script(root_url("static/jquery/jquery-3.3.1.min.js"))
-        .script(root_url("static/jquery/jquery-ui-1.9.1.custom.min.js"))
+        .script(root_url("static/jquery/jquery-ui-1.12.1.custom.min.js"))
         .script(root_url("static/dt-1.9.4/js/jquery.dataTables.min.js"))
         .script(root_url("static/yarn.dt.plugins.js"))
         .script(root_url("static/dt-sorting/natural.js"))
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/jquery-ui-1.12.1.custom.min.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/jquery-ui-1.12.1.custom.min.js
new file mode 100644
index 0000000000..25398a1674
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/jquery-ui-1.12.1.custom.min.js
@@ -0,0 +1,13 @@
+/*! jQuery UI - v1.12.1 - 2016-09-14
+* http://jqueryui.com
+* Includes: widget.js, position.js, data.js, disable-selection.js, effect.js, effects/effect-blind.js, effects/effect-bounce.js, effects/effect-clip.js, effects/effect-drop.js, effects/effect-explode.js, effects/effect-fade.js, effects/effect-fold.js, effects/effect-highlight.js, effects/effect-puff.js, effects/effect-pulsate.js, effects/effect-scale.js, effects/effect-shake.js, effects/effect-size.js, effects/effect-slide.js, effects/effect-transfer.js, focusable.js, form-reset-mixin.js, jquery-1-7.js, keycode.js, labels.js, scroll-parent.js, tabbable.js, unique-id.js, widgets/accordion.js, widgets/autocomplete.js, widgets/button.js, widgets/checkboxradio.js, widgets/controlgroup.js, widgets/datepicker.js, widgets/dialog.js, widgets/draggable.js, widgets/droppable.js, widgets/menu.js, widgets/mouse.js, widgets/progressbar.js, widgets/resizable.js, widgets/selectable.js, widgets/selectmenu.js, widgets/slider.js, widgets/sortable.js, widgets/spinner.js, widgets/tabs.js, widgets/tooltip.js
+* Copyright jQuery Foundation and other contributors; Licensed MIT */
+
[The remaining 8 added lines are the vendored, minified jQuery UI 1.12.1 source; they are garbled and truncated in this copy and are omitted here.]
ui-widget-content ui-front"),e=h.add(this.element),i=e.find(this.options.items),i.not(".ui-menu-item").each(function(){var e=t(this);a._isDivider(e)&&a._addClass(e,"ui-menu-divider","ui-widget-content")}),n=i.not(".ui-menu-item, .ui-menu-divider"),o=n.children().not(".ui-menu").uniqueId().attr({tabIndex:-1,role:this._itemRole()}),this._addClass(n,"ui-menu-item")._addClass(o,"ui-menu-item-wrapper"),i.filter(".ui-state-disabled").attr("aria-disabled","true"),this.active&&!t.contains(this.element[0],this.active[0])&&this.blur()},_itemRole:function(){return{menu:"menuitem",listbox:"option"}[this.options.role]},_setOption:function(t,e){if("icons"===t){var i=this.element.find(".ui-menu-icon");this._removeClass(i,null,this.options.icons.submenu)._addClass(i,null,e.submenu)}this._super(t,e)},_setOptionDisabled:function(t){this._super(t),this.element.attr("aria-disabled",t+""),this._toggleClass(null,"ui-state-disabled",!!t)},focus:function(t,e){var i,s,n;this.blur(t,t&&"focus"===t.type),this._scrollIntoView(e),this.active=e.first(),s=this.active.children(".ui-menu-item-wrapper"),this._addClass(s,null,"ui-state-active"),this.options.role&&this.element.attr("aria-activedescendant",s.attr("id")),n=this.active.parent().closest(".ui-menu-item").children(".ui-menu-item-wrapper"),this._addClass(n,null,"ui-state-active"),t&&"keydown"===t.type?this._close():this.timer=this._delay(function(){this._close()},this.delay),i=e.children(".ui-menu"),i.length&&t&&/^mouse/.test(t.type)&&this._startOpening(i),this.activeMenu=e.parent(),this._trigger("focus",t,{item:e})},_scrollIntoView:function(e){var i,s,n,o,a,r;this._hasScroll()&&(i=parseFloat(t.css(this.activeMenu[0],"borderTopWidth"))||0,s=parseFloat(t.css(this.activeMenu[0],"paddingTop"))||0,n=e.offset().top-this.activeMenu.offset().top-i-s,o=this.activeMenu.scrollTop(),a=this.activeMenu.height(),r=e.outerHeight(),0>n?this.activeMenu.scrollTop(o+n):n+r>a&&this.activeMenu.scrollTop(o+n-a+r))},blur:function(t,e){e||clearTimeout(this.timer),this.active&&(this._removeClass(this.active.children(".ui-menu-item-wrapper"),null,"ui-state-active"),this._trigger("blur",t,{item:this.active}),this.active=null)},_startOpening:function(t){clearTimeout(this.timer),"true"===t.attr("aria-hidden")&&(this.timer=this._delay(function(){this._close(),this._open(t)},this.delay))},_open:function(e){var i=t.extend({of:this.active},this.options.position);clearTimeout(this.timer),this.element.find(".ui-menu").not(e.parents(".ui-menu")).hide().attr("aria-hidden","true"),e.show().removeAttr("aria-hidden").attr("aria-expanded","true").position(i)},collapseAll:function(e,i){clearTimeout(this.timer),this.timer=this._delay(function(){var s=i?this.element:t(e&&e.target).closest(this.element.find(".ui-menu"));s.length||(s=this.element),this._close(s),this.blur(e),this._removeClass(s.find(".ui-state-active"),null,"ui-state-active"),this.activeMenu=s},this.delay)},_close:function(t){t||(t=this.active?this.active.parent():this.element),t.find(".ui-menu").hide().attr("aria-hidden","true").attr("aria-expanded","false")},_closeOnDocumentClick:function(e){return!t(e.target).closest(".ui-menu").length},_isDivider:function(t){return!/[^\-\u2014\u2013\s]/.test(t.text())},collapse:function(t){var e=this.active&&this.active.parent().closest(".ui-menu-item",this.element);e&&e.length&&(this._close(),this.focus(t,e))},expand:function(t){var e=this.active&&this.active.children(".ui-menu 
").find(this.options.items).first();e&&e.length&&(this._open(e.parent()),this._delay(function(){this.focus(t,e)}))},next:function(t){this._move("next","first",t)},previous:function(t){this._move("prev","last",t)},isFirstItem:function(){return this.active&&!this.active.prevAll(".ui-menu-item").length},isLastItem:function(){return this.active&&!this.active.nextAll(".ui-menu-item").length},_move:function(t,e,i){var s;this.active&&(s="first"===t||"last"===t?this.active["first"===t?"prevAll":"nextAll"](".ui-menu-item").eq(-1):this.active[t+"All"](".ui-menu-item").eq(0)),s&&s.length&&this.active||(s=this.activeMenu.find(this.options.items)[e]()),this.focus(i,s)},nextPage:function(e){var i,s,n;return this.active?(this.isLastItem()||(this._hasScroll()?(s=this.active.offset().top,n=this.element.height(),this.active.nextAll(".ui-menu-item").each(function(){return i=t(this),0>i.offset().top-s-n}),this.focus(e,i)):this.focus(e,this.activeMenu.find(this.options.items)[this.active?"last":"first"]())),void 0):(this.next(e),void 0)},previousPage:function(e){var i,s,n;return this.active?(this.isFirstItem()||(this._hasScroll()?(s=this.active.offset().top,n=this.element.height(),this.active.prevAll(".ui-menu-item").each(function(){return i=t(this),i.offset().top-s+n>0}),this.focus(e,i)):this.focus(e,this.activeMenu.find(this.options.items).first())),void 0):(this.next(e),void 0)},_hasScroll:function(){return this.element.outerHeight()",options:{appendTo:null,autoFocus:!1,delay:300,minLength:1,position:{my:"left top",at:"left bottom",collision:"none"},source:null,change:null,close:null,focus:null,open:null,response:null,search:null,select:null},requestIndex:0,pending:0,_create:function(){var e,i,s,n=this.element[0].nodeName.toLowerCase(),o="textarea"===n,a="input"===n; +this.isMultiLine=o||!a&&this._isContentEditable(this.element),this.valueMethod=this.element[o||a?"val":"text"],this.isNewMenu=!0,this._addClass("ui-autocomplete-input"),this.element.attr("autocomplete","off"),this._on(this.element,{keydown:function(n){if(this.element.prop("readOnly"))return e=!0,s=!0,i=!0,void 0;e=!1,s=!1,i=!1;var o=t.ui.keyCode;switch(n.keyCode){case o.PAGE_UP:e=!0,this._move("previousPage",n);break;case o.PAGE_DOWN:e=!0,this._move("nextPage",n);break;case o.UP:e=!0,this._keyEvent("previous",n);break;case o.DOWN:e=!0,this._keyEvent("next",n);break;case o.ENTER:this.menu.active&&(e=!0,n.preventDefault(),this.menu.select(n));break;case o.TAB:this.menu.active&&this.menu.select(n);break;case o.ESCAPE:this.menu.element.is(":visible")&&(this.isMultiLine||this._value(this.term),this.close(n),n.preventDefault());break;default:i=!0,this._searchTimeout(n)}},keypress:function(s){if(e)return e=!1,(!this.isMultiLine||this.menu.element.is(":visible"))&&s.preventDefault(),void 0;if(!i){var n=t.ui.keyCode;switch(s.keyCode){case n.PAGE_UP:this._move("previousPage",s);break;case n.PAGE_DOWN:this._move("nextPage",s);break;case n.UP:this._keyEvent("previous",s);break;case n.DOWN:this._keyEvent("next",s)}}},input:function(t){return s?(s=!1,t.preventDefault(),void 0):(this._searchTimeout(t),void 0)},focus:function(){this.selectedItem=null,this.previous=this._value()},blur:function(t){return this.cancelBlur?(delete this.cancelBlur,void 0):(clearTimeout(this.searching),this.close(t),this._change(t),void 0)}}),this._initSource(),this.menu=t("
    ").appendTo(this._appendTo()).menu({role:null}).hide().menu("instance"),this._addClass(this.menu.element,"ui-autocomplete","ui-front"),this._on(this.menu.element,{mousedown:function(e){e.preventDefault(),this.cancelBlur=!0,this._delay(function(){delete this.cancelBlur,this.element[0]!==t.ui.safeActiveElement(this.document[0])&&this.element.trigger("focus")})},menufocus:function(e,i){var s,n;return this.isNewMenu&&(this.isNewMenu=!1,e.originalEvent&&/^mouse/.test(e.originalEvent.type))?(this.menu.blur(),this.document.one("mousemove",function(){t(e.target).trigger(e.originalEvent)}),void 0):(n=i.item.data("ui-autocomplete-item"),!1!==this._trigger("focus",e,{item:n})&&e.originalEvent&&/^key/.test(e.originalEvent.type)&&this._value(n.value),s=i.item.attr("aria-label")||n.value,s&&t.trim(s).length&&(this.liveRegion.children().hide(),t("
    ").text(s).appendTo(this.liveRegion)),void 0)},menuselect:function(e,i){var s=i.item.data("ui-autocomplete-item"),n=this.previous;this.element[0]!==t.ui.safeActiveElement(this.document[0])&&(this.element.trigger("focus"),this.previous=n,this._delay(function(){this.previous=n,this.selectedItem=s})),!1!==this._trigger("select",e,{item:s})&&this._value(s.value),this.term=this._value(),this.close(e),this.selectedItem=s}}),this.liveRegion=t("
    ",{role:"status","aria-live":"assertive","aria-relevant":"additions"}).appendTo(this.document[0].body),this._addClass(this.liveRegion,null,"ui-helper-hidden-accessible"),this._on(this.window,{beforeunload:function(){this.element.removeAttr("autocomplete")}})},_destroy:function(){clearTimeout(this.searching),this.element.removeAttr("autocomplete"),this.menu.element.remove(),this.liveRegion.remove()},_setOption:function(t,e){this._super(t,e),"source"===t&&this._initSource(),"appendTo"===t&&this.menu.element.appendTo(this._appendTo()),"disabled"===t&&e&&this.xhr&&this.xhr.abort()},_isEventTargetInWidget:function(e){var i=this.menu.element[0];return e.target===this.element[0]||e.target===i||t.contains(i,e.target)},_closeOnClickOutside:function(t){this._isEventTargetInWidget(t)||this.close()},_appendTo:function(){var e=this.options.appendTo;return e&&(e=e.jquery||e.nodeType?t(e):this.document.find(e).eq(0)),e&&e[0]||(e=this.element.closest(".ui-front, dialog")),e.length||(e=this.document[0].body),e},_initSource:function(){var e,i,s=this;t.isArray(this.options.source)?(e=this.options.source,this.source=function(i,s){s(t.ui.autocomplete.filter(e,i.term))}):"string"==typeof this.options.source?(i=this.options.source,this.source=function(e,n){s.xhr&&s.xhr.abort(),s.xhr=t.ajax({url:i,data:e,dataType:"json",success:function(t){n(t)},error:function(){n([])}})}):this.source=this.options.source},_searchTimeout:function(t){clearTimeout(this.searching),this.searching=this._delay(function(){var e=this.term===this._value(),i=this.menu.element.is(":visible"),s=t.altKey||t.ctrlKey||t.metaKey||t.shiftKey;(!e||e&&!i&&!s)&&(this.selectedItem=null,this.search(null,t))},this.options.delay)},search:function(t,e){return t=null!=t?t:this._value(),this.term=this._value(),t.length").append(t("
    ").text(i.label)).appendTo(e)},_move:function(t,e){return this.menu.element.is(":visible")?this.menu.isFirstItem()&&/^previous/.test(t)||this.menu.isLastItem()&&/^next/.test(t)?(this.isMultiLine||this._value(this.term),this.menu.blur(),void 0):(this.menu[t](e),void 0):(this.search(null,e),void 0)},widget:function(){return this.menu.element},_value:function(){return this.valueMethod.apply(this.element,arguments)},_keyEvent:function(t,e){(!this.isMultiLine||this.menu.element.is(":visible"))&&(this._move(t,e),e.preventDefault())},_isContentEditable:function(t){if(!t.length)return!1;var e=t.prop("contentEditable");return"inherit"===e?this._isContentEditable(t.parent()):"true"===e}}),t.extend(t.ui.autocomplete,{escapeRegex:function(t){return t.replace(/[\-\[\]{}()*+?.,\\\^$|#\s]/g,"\\$&")},filter:function(e,i){var s=RegExp(t.ui.autocomplete.escapeRegex(i),"i");return t.grep(e,function(t){return s.test(t.label||t.value||t)})}}),t.widget("ui.autocomplete",t.ui.autocomplete,{options:{messages:{noResults:"No search results.",results:function(t){return t+(t>1?" results are":" result is")+" available, use up and down arrow keys to navigate."}}},__response:function(e){var i;this._superApply(arguments),this.options.disabled||this.cancelSearch||(i=e&&e.length?this.options.messages.results(e.length):this.options.messages.noResults,this.liveRegion.children().hide(),t("
    ").text(i).appendTo(this.liveRegion))}}),t.ui.autocomplete;var g=/ui-corner-([a-z]){2,6}/g;t.widget("ui.controlgroup",{version:"1.12.1",defaultElement:"
    ",options:{direction:"horizontal",disabled:null,onlyVisible:!0,items:{button:"input[type=button], input[type=submit], input[type=reset], button, a",controlgroupLabel:".ui-controlgroup-label",checkboxradio:"input[type='checkbox'], input[type='radio']",selectmenu:"select",spinner:".ui-spinner-input"}},_create:function(){this._enhance()},_enhance:function(){this.element.attr("role","toolbar"),this.refresh()},_destroy:function(){this._callChildMethod("destroy"),this.childWidgets.removeData("ui-controlgroup-data"),this.element.removeAttr("role"),this.options.items.controlgroupLabel&&this.element.find(this.options.items.controlgroupLabel).find(".ui-controlgroup-label-contents").contents().unwrap()},_initWidgets:function(){var e=this,i=[];t.each(this.options.items,function(s,n){var o,a={};return n?"controlgroupLabel"===s?(o=e.element.find(n),o.each(function(){var e=t(this);e.children(".ui-controlgroup-label-contents").length||e.contents().wrapAll("")}),e._addClass(o,null,"ui-widget ui-widget-content ui-state-default"),i=i.concat(o.get()),void 0):(t.fn[s]&&(a=e["_"+s+"Options"]?e["_"+s+"Options"]("middle"):{classes:{}},e.element.find(n).each(function(){var n=t(this),o=n[s]("instance"),r=t.widget.extend({},a);if("button"!==s||!n.parent(".ui-spinner").length){o||(o=n[s]()[s]("instance")),o&&(r.classes=e._resolveClassesValues(r.classes,o)),n[s](r);var h=n[s]("widget");t.data(h[0],"ui-controlgroup-data",o?o:n[s]("instance")),i.push(h[0])}})),void 0):void 0}),this.childWidgets=t(t.unique(i)),this._addClass(this.childWidgets,"ui-controlgroup-item")},_callChildMethod:function(e){this.childWidgets.each(function(){var i=t(this),s=i.data("ui-controlgroup-data");s&&s[e]&&s[e]()})},_updateCornerClass:function(t,e){var i="ui-corner-top ui-corner-bottom ui-corner-left ui-corner-right ui-corner-all",s=this._buildSimpleOptions(e,"label").classes.label;this._removeClass(t,null,i),this._addClass(t,null,s)},_buildSimpleOptions:function(t,e){var i="vertical"===this.options.direction,s={classes:{}};return s.classes[e]={middle:"",first:"ui-corner-"+(i?"top":"left"),last:"ui-corner-"+(i?"bottom":"right"),only:"ui-corner-all"}[t],s},_spinnerOptions:function(t){var e=this._buildSimpleOptions(t,"ui-spinner");return e.classes["ui-spinner-up"]="",e.classes["ui-spinner-down"]="",e},_buttonOptions:function(t){return this._buildSimpleOptions(t,"ui-button")},_checkboxradioOptions:function(t){return this._buildSimpleOptions(t,"ui-checkboxradio-label")},_selectmenuOptions:function(t){var e="vertical"===this.options.direction;return{width:e?"auto":!1,classes:{middle:{"ui-selectmenu-button-open":"","ui-selectmenu-button-closed":""},first:{"ui-selectmenu-button-open":"ui-corner-"+(e?"top":"tl"),"ui-selectmenu-button-closed":"ui-corner-"+(e?"top":"left")},last:{"ui-selectmenu-button-open":e?"":"ui-corner-tr","ui-selectmenu-button-closed":"ui-corner-"+(e?"bottom":"right")},only:{"ui-selectmenu-button-open":"ui-corner-top","ui-selectmenu-button-closed":"ui-corner-all"}}[t]}},_resolveClassesValues:function(e,i){var s={};return t.each(e,function(n){var o=i.options.classes[n]||"";o=t.trim(o.replace(g,"")),s[n]=(o+" "+e[n]).replace(/\s+/g," ")}),s},_setOption:function(t,e){return"direction"===t&&this._removeClass("ui-controlgroup-"+this.options.direction),this._super(t,e),"disabled"===t?(this._callChildMethod(e?"disable":"enable"),void 0):(this.refresh(),void 0)},refresh:function(){var e,i=this;this._addClass("ui-controlgroup 
ui-controlgroup-"+this.options.direction),"horizontal"===this.options.direction&&this._addClass(null,"ui-helper-clearfix"),this._initWidgets(),e=this.childWidgets,this.options.onlyVisible&&(e=e.filter(":visible")),e.length&&(t.each(["first","last"],function(t,s){var n=e[s]().data("ui-controlgroup-data");if(n&&i["_"+n.widgetName+"Options"]){var o=i["_"+n.widgetName+"Options"](1===e.length?"only":s);o.classes=i._resolveClassesValues(o.classes,n),n.element[n.widgetName](o)}else i._updateCornerClass(e[s](),s)}),this._callChildMethod("refresh"))}}),t.widget("ui.checkboxradio",[t.ui.formResetMixin,{version:"1.12.1",options:{disabled:null,label:null,icon:!0,classes:{"ui-checkboxradio-label":"ui-corner-all","ui-checkboxradio-icon":"ui-corner-all"}},_getCreateOptions:function(){var e,i,s=this,n=this._super()||{};return this._readType(),i=this.element.labels(),this.label=t(i[i.length-1]),this.label.length||t.error("No label found for checkboxradio widget"),this.originalLabel="",this.label.contents().not(this.element[0]).each(function(){s.originalLabel+=3===this.nodeType?t(this).text():this.outerHTML}),this.originalLabel&&(n.label=this.originalLabel),e=this.element[0].disabled,null!=e&&(n.disabled=e),n},_create:function(){var t=this.element[0].checked;this._bindFormResetHandler(),null==this.options.disabled&&(this.options.disabled=this.element[0].disabled),this._setOption("disabled",this.options.disabled),this._addClass("ui-checkboxradio","ui-helper-hidden-accessible"),this._addClass(this.label,"ui-checkboxradio-label","ui-button ui-widget"),"radio"===this.type&&this._addClass(this.label,"ui-checkboxradio-radio-label"),this.options.label&&this.options.label!==this.originalLabel?this._updateLabel():this.originalLabel&&(this.options.label=this.originalLabel),this._enhance(),t&&(this._addClass(this.label,"ui-checkboxradio-checked","ui-state-active"),this.icon&&this._addClass(this.icon,null,"ui-state-hover")),this._on({change:"_toggleClasses",focus:function(){this._addClass(this.label,null,"ui-state-focus ui-visual-focus")},blur:function(){this._removeClass(this.label,null,"ui-state-focus ui-visual-focus")}})},_readType:function(){var e=this.element[0].nodeName.toLowerCase();this.type=this.element[0].type,"input"===e&&/radio|checkbox/.test(this.type)||t.error("Can't create checkboxradio on element.nodeName="+e+" and element.type="+this.type)},_enhance:function(){this._updateIcon(this.element[0].checked)},widget:function(){return this.label},_getRadioGroup:function(){var e,i=this.element[0].name,s="input[name='"+t.ui.escapeSelector(i)+"']";return i?(e=this.form.length?t(this.form[0].elements).filter(s):t(s).filter(function(){return 0===t(this).form().length}),e.not(this.element)):t([])},_toggleClasses:function(){var e=this.element[0].checked;this._toggleClass(this.label,"ui-checkboxradio-checked","ui-state-active",e),this.options.icon&&"checkbox"===this.type&&this._toggleClass(this.icon,null,"ui-icon-check ui-state-checked",e)._toggleClass(this.icon,null,"ui-icon-blank",!e),"radio"===this.type&&this._getRadioGroup().each(function(){var e=t(this).checkboxradio("instance");e&&e._removeClass(e.label,"ui-checkboxradio-checked","ui-state-active")})},_destroy:function(){this._unbindFormResetHandler(),this.icon&&(this.icon.remove(),this.iconSpace.remove())},_setOption:function(t,e){return"label"!==t||e?(this._super(t,e),"disabled"===t?(this._toggleClass(this.label,null,"ui-state-disabled",e),this.element[0].disabled=e,void 0):(this.refresh(),void 0)):void 0},_updateIcon:function(e){var i="ui-icon 
ui-icon-background ";this.options.icon?(this.icon||(this.icon=t(""),this.iconSpace=t(" "),this._addClass(this.iconSpace,"ui-checkboxradio-icon-space")),"checkbox"===this.type?(i+=e?"ui-icon-check ui-state-checked":"ui-icon-blank",this._removeClass(this.icon,null,e?"ui-icon-blank":"ui-icon-check")):i+="ui-icon-blank",this._addClass(this.icon,"ui-checkboxradio-icon",i),e||this._removeClass(this.icon,null,"ui-icon-check ui-state-checked"),this.icon.prependTo(this.label).after(this.iconSpace)):void 0!==this.icon&&(this.icon.remove(),this.iconSpace.remove(),delete this.icon)},_updateLabel:function(){var t=this.label.contents().not(this.element[0]);this.icon&&(t=t.not(this.icon[0])),this.iconSpace&&(t=t.not(this.iconSpace[0])),t.remove(),this.label.append(this.options.label)},refresh:function(){var t=this.element[0].checked,e=this.element[0].disabled;this._updateIcon(t),this._toggleClass(this.label,"ui-checkboxradio-checked","ui-state-active",t),null!==this.options.label&&this._updateLabel(),e!==this.options.disabled&&this._setOptions({disabled:e})}}]),t.ui.checkboxradio,t.widget("ui.button",{version:"1.12.1",defaultElement:"").addClass(this._triggerClass).html(o?t("").attr({src:o,alt:n,title:n}):n)),e[r?"before":"after"](i.trigger),i.trigger.on("click",function(){return t.datepicker._datepickerShowing&&t.datepicker._lastInput===e[0]?t.datepicker._hideDatepicker():t.datepicker._datepickerShowing&&t.datepicker._lastInput!==e[0]?(t.datepicker._hideDatepicker(),t.datepicker._showDatepicker(e[0])):t.datepicker._showDatepicker(e[0]),!1}))},_autoSize:function(t){if(this._get(t,"autoSize")&&!t.inline){var e,i,s,n,o=new Date(2009,11,20),a=this._get(t,"dateFormat");a.match(/[DM]/)&&(e=function(t){for(i=0,s=0,n=0;t.length>n;n++)t[n].length>i&&(i=t[n].length,s=n);return s},o.setMonth(e(this._get(t,a.match(/MM/)?"monthNames":"monthNamesShort"))),o.setDate(e(this._get(t,a.match(/DD/)?"dayNames":"dayNamesShort"))+20-o.getDay())),t.input.attr("size",this._formatDate(t,o).length)}},_inlineDatepicker:function(e,i){var s=t(e);s.hasClass(this.markerClassName)||(s.addClass(this.markerClassName).append(i.dpDiv),t.data(e,"datepicker",i),this._setDate(i,this._getDefaultDate(i),!0),this._updateDatepicker(i),this._updateAlternate(i),i.settings.disabled&&this._disableDatepicker(e),i.dpDiv.css("display","block"))},_dialogDatepicker:function(e,i,s,n,o){var r,h,l,c,u,d=this._dialogInst;return d||(this.uuid+=1,r="dp"+this.uuid,this._dialogInput=t(""),this._dialogInput.on("keydown",this._doKeyDown),t("body").append(this._dialogInput),d=this._dialogInst=this._newInst(this._dialogInput,!1),d.settings={},t.data(this._dialogInput[0],"datepicker",d)),a(d.settings,n||{}),i=i&&i.constructor===Date?this._formatDate(d,i):i,this._dialogInput.val(i),this._pos=o?o.length?o:[o.pageX,o.pageY]:null,this._pos||(h=document.documentElement.clientWidth,l=document.documentElement.clientHeight,c=document.documentElement.scrollLeft||document.body.scrollLeft,u=document.documentElement.scrollTop||document.body.scrollTop,this._pos=[h/2-100+c,l/2-150+u]),this._dialogInput.css("left",this._pos[0]+20+"px").css("top",this._pos[1]+"px"),d.settings.onSelect=s,this._inDialog=!0,this.dpDiv.addClass(this._dialogClass),this._showDatepicker(this._dialogInput[0]),t.blockUI&&t.blockUI(this.dpDiv),t.data(this._dialogInput[0],"datepicker",d),this},_destroyDatepicker:function(e){var 
i,s=t(e),n=t.data(e,"datepicker");s.hasClass(this.markerClassName)&&(i=e.nodeName.toLowerCase(),t.removeData(e,"datepicker"),"input"===i?(n.append.remove(),n.trigger.remove(),s.removeClass(this.markerClassName).off("focus",this._showDatepicker).off("keydown",this._doKeyDown).off("keypress",this._doKeyPress).off("keyup",this._doKeyUp)):("div"===i||"span"===i)&&s.removeClass(this.markerClassName).empty(),m===n&&(m=null))},_enableDatepicker:function(e){var i,s,n=t(e),o=t.data(e,"datepicker");n.hasClass(this.markerClassName)&&(i=e.nodeName.toLowerCase(),"input"===i?(e.disabled=!1,o.trigger.filter("button").each(function(){this.disabled=!1}).end().filter("img").css({opacity:"1.0",cursor:""})):("div"===i||"span"===i)&&(s=n.children("."+this._inlineClass),s.children().removeClass("ui-state-disabled"),s.find("select.ui-datepicker-month, select.ui-datepicker-year").prop("disabled",!1)),this._disabledInputs=t.map(this._disabledInputs,function(t){return t===e?null:t}))},_disableDatepicker:function(e){var i,s,n=t(e),o=t.data(e,"datepicker");n.hasClass(this.markerClassName)&&(i=e.nodeName.toLowerCase(),"input"===i?(e.disabled=!0,o.trigger.filter("button").each(function(){this.disabled=!0}).end().filter("img").css({opacity:"0.5",cursor:"default"})):("div"===i||"span"===i)&&(s=n.children("."+this._inlineClass),s.children().addClass("ui-state-disabled"),s.find("select.ui-datepicker-month, select.ui-datepicker-year").prop("disabled",!0)),this._disabledInputs=t.map(this._disabledInputs,function(t){return t===e?null:t}),this._disabledInputs[this._disabledInputs.length]=e)},_isDisabledDatepicker:function(t){if(!t)return!1;for(var e=0;this._disabledInputs.length>e;e++)if(this._disabledInputs[e]===t)return!0;return!1},_getInst:function(e){try{return t.data(e,"datepicker")}catch(i){throw"Missing instance data for this datepicker"}},_optionDatepicker:function(e,i,s){var n,o,r,h,l=this._getInst(e);return 2===arguments.length&&"string"==typeof i?"defaults"===i?t.extend({},t.datepicker._defaults):l?"all"===i?t.extend({},l.settings):this._get(l,i):null:(n=i||{},"string"==typeof i&&(n={},n[i]=s),l&&(this._curInst===l&&this._hideDatepicker(),o=this._getDateDatepicker(e,!0),r=this._getMinMaxDate(l,"min"),h=this._getMinMaxDate(l,"max"),a(l.settings,n),null!==r&&void 0!==n.dateFormat&&void 0===n.minDate&&(l.settings.minDate=this._formatDate(l,r)),null!==h&&void 0!==n.dateFormat&&void 0===n.maxDate&&(l.settings.maxDate=this._formatDate(l,h)),"disabled"in n&&(n.disabled?this._disableDatepicker(e):this._enableDatepicker(e)),this._attachments(t(e),l),this._autoSize(l),this._setDate(l,o),this._updateAlternate(l),this._updateDatepicker(l)),void 0)},_changeDatepicker:function(t,e,i){this._optionDatepicker(t,e,i)},_refreshDatepicker:function(t){var e=this._getInst(t);e&&this._updateDatepicker(e)},_setDateDatepicker:function(t,e){var i=this._getInst(t);i&&(this._setDate(i,e),this._updateDatepicker(i),this._updateAlternate(i))},_getDateDatepicker:function(t,e){var i=this._getInst(t);return i&&!i.inline&&this._setDateFromField(i,e),i?this._getDate(i):null},_doKeyDown:function(e){var i,s,n,o=t.datepicker._getInst(e.target),a=!0,r=o.dpDiv.is(".ui-datepicker-rtl");if(o._keyEvent=!0,t.datepicker._datepickerShowing)switch(e.keyCode){case 9:t.datepicker._hideDatepicker(),a=!1;break;case 13:return 
n=t("td."+t.datepicker._dayOverClass+":not(."+t.datepicker._currentClass+")",o.dpDiv),n[0]&&t.datepicker._selectDay(e.target,o.selectedMonth,o.selectedYear,n[0]),i=t.datepicker._get(o,"onSelect"),i?(s=t.datepicker._formatDate(o),i.apply(o.input?o.input[0]:null,[s,o])):t.datepicker._hideDatepicker(),!1;case 27:t.datepicker._hideDatepicker();break;case 33:t.datepicker._adjustDate(e.target,e.ctrlKey?-t.datepicker._get(o,"stepBigMonths"):-t.datepicker._get(o,"stepMonths"),"M");break;case 34:t.datepicker._adjustDate(e.target,e.ctrlKey?+t.datepicker._get(o,"stepBigMonths"):+t.datepicker._get(o,"stepMonths"),"M");break;case 35:(e.ctrlKey||e.metaKey)&&t.datepicker._clearDate(e.target),a=e.ctrlKey||e.metaKey;break;case 36:(e.ctrlKey||e.metaKey)&&t.datepicker._gotoToday(e.target),a=e.ctrlKey||e.metaKey;break;case 37:(e.ctrlKey||e.metaKey)&&t.datepicker._adjustDate(e.target,r?1:-1,"D"),a=e.ctrlKey||e.metaKey,e.originalEvent.altKey&&t.datepicker._adjustDate(e.target,e.ctrlKey?-t.datepicker._get(o,"stepBigMonths"):-t.datepicker._get(o,"stepMonths"),"M");break;case 38:(e.ctrlKey||e.metaKey)&&t.datepicker._adjustDate(e.target,-7,"D"),a=e.ctrlKey||e.metaKey;break;case 39:(e.ctrlKey||e.metaKey)&&t.datepicker._adjustDate(e.target,r?-1:1,"D"),a=e.ctrlKey||e.metaKey,e.originalEvent.altKey&&t.datepicker._adjustDate(e.target,e.ctrlKey?+t.datepicker._get(o,"stepBigMonths"):+t.datepicker._get(o,"stepMonths"),"M");break;case 40:(e.ctrlKey||e.metaKey)&&t.datepicker._adjustDate(e.target,7,"D"),a=e.ctrlKey||e.metaKey;break;default:a=!1}else 36===e.keyCode&&e.ctrlKey?t.datepicker._showDatepicker(this):a=!1;a&&(e.preventDefault(),e.stopPropagation())},_doKeyPress:function(e){var i,s,n=t.datepicker._getInst(e.target);return t.datepicker._get(n,"constrainInput")?(i=t.datepicker._possibleChars(t.datepicker._get(n,"dateFormat")),s=String.fromCharCode(null==e.charCode?e.keyCode:e.charCode),e.ctrlKey||e.metaKey||" ">s||!i||i.indexOf(s)>-1):void 0},_doKeyUp:function(e){var i,s=t.datepicker._getInst(e.target);if(s.input.val()!==s.lastVal)try{i=t.datepicker.parseDate(t.datepicker._get(s,"dateFormat"),s.input?s.input.val():null,t.datepicker._getFormatConfig(s)),i&&(t.datepicker._setDateFromField(s),t.datepicker._updateAlternate(s),t.datepicker._updateDatepicker(s))}catch(n){}return!0},_showDatepicker:function(e){if(e=e.target||e,"input"!==e.nodeName.toLowerCase()&&(e=t("input",e.parentNode)[0]),!t.datepicker._isDisabledDatepicker(e)&&t.datepicker._lastInput!==e){var s,n,o,r,h,l,c;s=t.datepicker._getInst(e),t.datepicker._curInst&&t.datepicker._curInst!==s&&(t.datepicker._curInst.dpDiv.stop(!0,!0),s&&t.datepicker._datepickerShowing&&t.datepicker._hideDatepicker(t.datepicker._curInst.input[0])),n=t.datepicker._get(s,"beforeShow"),o=n?n.apply(e,[e,s]):{},o!==!1&&(a(s.settings,o),s.lastVal=null,t.datepicker._lastInput=e,t.datepicker._setDateFromField(s),t.datepicker._inDialog&&(e.value=""),t.datepicker._pos||(t.datepicker._pos=t.datepicker._findPos(e),t.datepicker._pos[1]+=e.offsetHeight),r=!1,t(e).parents().each(function(){return 
r|="fixed"===t(this).css("position"),!r}),h={left:t.datepicker._pos[0],top:t.datepicker._pos[1]},t.datepicker._pos=null,s.dpDiv.empty(),s.dpDiv.css({position:"absolute",display:"block",top:"-1000px"}),t.datepicker._updateDatepicker(s),h=t.datepicker._checkOffset(s,h,r),s.dpDiv.css({position:t.datepicker._inDialog&&t.blockUI?"static":r?"fixed":"absolute",display:"none",left:h.left+"px",top:h.top+"px"}),s.inline||(l=t.datepicker._get(s,"showAnim"),c=t.datepicker._get(s,"duration"),s.dpDiv.css("z-index",i(t(e))+1),t.datepicker._datepickerShowing=!0,t.effects&&t.effects.effect[l]?s.dpDiv.show(l,t.datepicker._get(s,"showOptions"),c):s.dpDiv[l||"show"](l?c:null),t.datepicker._shouldFocusInput(s)&&s.input.trigger("focus"),t.datepicker._curInst=s)) +}},_updateDatepicker:function(e){this.maxRows=4,m=e,e.dpDiv.empty().append(this._generateHTML(e)),this._attachHandlers(e);var i,s=this._getNumberOfMonths(e),n=s[1],a=17,r=e.dpDiv.find("."+this._dayOverClass+" a");r.length>0&&o.apply(r.get(0)),e.dpDiv.removeClass("ui-datepicker-multi-2 ui-datepicker-multi-3 ui-datepicker-multi-4").width(""),n>1&&e.dpDiv.addClass("ui-datepicker-multi-"+n).css("width",a*n+"em"),e.dpDiv[(1!==s[0]||1!==s[1]?"add":"remove")+"Class"]("ui-datepicker-multi"),e.dpDiv[(this._get(e,"isRTL")?"add":"remove")+"Class"]("ui-datepicker-rtl"),e===t.datepicker._curInst&&t.datepicker._datepickerShowing&&t.datepicker._shouldFocusInput(e)&&e.input.trigger("focus"),e.yearshtml&&(i=e.yearshtml,setTimeout(function(){i===e.yearshtml&&e.yearshtml&&e.dpDiv.find("select.ui-datepicker-year:first").replaceWith(e.yearshtml),i=e.yearshtml=null},0))},_shouldFocusInput:function(t){return t.input&&t.input.is(":visible")&&!t.input.is(":disabled")&&!t.input.is(":focus")},_checkOffset:function(e,i,s){var n=e.dpDiv.outerWidth(),o=e.dpDiv.outerHeight(),a=e.input?e.input.outerWidth():0,r=e.input?e.input.outerHeight():0,h=document.documentElement.clientWidth+(s?0:t(document).scrollLeft()),l=document.documentElement.clientHeight+(s?0:t(document).scrollTop());return i.left-=this._get(e,"isRTL")?n-a:0,i.left-=s&&i.left===e.input.offset().left?t(document).scrollLeft():0,i.top-=s&&i.top===e.input.offset().top+r?t(document).scrollTop():0,i.left-=Math.min(i.left,i.left+n>h&&h>n?Math.abs(i.left+n-h):0),i.top-=Math.min(i.top,i.top+o>l&&l>o?Math.abs(o+r):0),i},_findPos:function(e){for(var i,s=this._getInst(e),n=this._get(s,"isRTL");e&&("hidden"===e.type||1!==e.nodeType||t.expr.filters.hidden(e));)e=e[n?"previousSibling":"nextSibling"];return i=t(e).offset(),[i.left,i.top]},_hideDatepicker:function(e){var i,s,n,o,a=this._curInst;!a||e&&a!==t.data(e,"datepicker")||this._datepickerShowing&&(i=this._get(a,"showAnim"),s=this._get(a,"duration"),n=function(){t.datepicker._tidyDialog(a)},t.effects&&(t.effects.effect[i]||t.effects[i])?a.dpDiv.hide(i,t.datepicker._get(a,"showOptions"),s,n):a.dpDiv["slideDown"===i?"slideUp":"fadeIn"===i?"fadeOut":"hide"](i?s:null,n),i||n(),this._datepickerShowing=!1,o=this._get(a,"onClose"),o&&o.apply(a.input?a.input[0]:null,[a.input?a.input.val():"",a]),this._lastInput=null,this._inDialog&&(this._dialogInput.css({position:"absolute",left:"0",top:"-100px"}),t.blockUI&&(t.unblockUI(),t("body").append(this.dpDiv))),this._inDialog=!1)},_tidyDialog:function(t){t.dpDiv.removeClass(this._dialogClass).off(".ui-datepicker-calendar")},_checkExternalClick:function(e){if(t.datepicker._curInst){var 
i=t(e.target),s=t.datepicker._getInst(i[0]);(i[0].id!==t.datepicker._mainDivId&&0===i.parents("#"+t.datepicker._mainDivId).length&&!i.hasClass(t.datepicker.markerClassName)&&!i.closest("."+t.datepicker._triggerClass).length&&t.datepicker._datepickerShowing&&(!t.datepicker._inDialog||!t.blockUI)||i.hasClass(t.datepicker.markerClassName)&&t.datepicker._curInst!==s)&&t.datepicker._hideDatepicker()}},_adjustDate:function(e,i,s){var n=t(e),o=this._getInst(n[0]);this._isDisabledDatepicker(n[0])||(this._adjustInstDate(o,i+("M"===s?this._get(o,"showCurrentAtPos"):0),s),this._updateDatepicker(o))},_gotoToday:function(e){var i,s=t(e),n=this._getInst(s[0]);this._get(n,"gotoCurrent")&&n.currentDay?(n.selectedDay=n.currentDay,n.drawMonth=n.selectedMonth=n.currentMonth,n.drawYear=n.selectedYear=n.currentYear):(i=new Date,n.selectedDay=i.getDate(),n.drawMonth=n.selectedMonth=i.getMonth(),n.drawYear=n.selectedYear=i.getFullYear()),this._notifyChange(n),this._adjustDate(s)},_selectMonthYear:function(e,i,s){var n=t(e),o=this._getInst(n[0]);o["selected"+("M"===s?"Month":"Year")]=o["draw"+("M"===s?"Month":"Year")]=parseInt(i.options[i.selectedIndex].value,10),this._notifyChange(o),this._adjustDate(n)},_selectDay:function(e,i,s,n){var o,a=t(e);t(n).hasClass(this._unselectableClass)||this._isDisabledDatepicker(a[0])||(o=this._getInst(a[0]),o.selectedDay=o.currentDay=t("a",n).html(),o.selectedMonth=o.currentMonth=i,o.selectedYear=o.currentYear=s,this._selectDate(e,this._formatDate(o,o.currentDay,o.currentMonth,o.currentYear)))},_clearDate:function(e){var i=t(e);this._selectDate(i,"")},_selectDate:function(e,i){var s,n=t(e),o=this._getInst(n[0]);i=null!=i?i:this._formatDate(o),o.input&&o.input.val(i),this._updateAlternate(o),s=this._get(o,"onSelect"),s?s.apply(o.input?o.input[0]:null,[i,o]):o.input&&o.input.trigger("change"),o.inline?this._updateDatepicker(o):(this._hideDatepicker(),this._lastInput=o.input[0],"object"!=typeof o.input[0]&&o.input.trigger("focus"),this._lastInput=null)},_updateAlternate:function(e){var i,s,n,o=this._get(e,"altField");o&&(i=this._get(e,"altFormat")||this._get(e,"dateFormat"),s=this._getDate(e),n=this.formatDate(i,s,this._getFormatConfig(e)),t(o).val(n))},noWeekends:function(t){var e=t.getDay();return[e>0&&6>e,""]},iso8601Week:function(t){var e,i=new Date(t.getTime());return i.setDate(i.getDate()+4-(i.getDay()||7)),e=i.getTime(),i.setMonth(0),i.setDate(1),Math.floor(Math.round((e-i)/864e5)/7)+1},parseDate:function(e,i,s){if(null==e||null==i)throw"Invalid arguments";if(i="object"==typeof i?""+i:i+"",""===i)return null;var n,o,a,r,h=0,l=(s?s.shortYearCutoff:null)||this._defaults.shortYearCutoff,c="string"!=typeof l?l:(new Date).getFullYear()%100+parseInt(l,10),u=(s?s.dayNamesShort:null)||this._defaults.dayNamesShort,d=(s?s.dayNames:null)||this._defaults.dayNames,p=(s?s.monthNamesShort:null)||this._defaults.monthNamesShort,f=(s?s.monthNames:null)||this._defaults.monthNames,g=-1,m=-1,_=-1,v=-1,b=!1,y=function(t){var i=e.length>n+1&&e.charAt(n+1)===t;return i&&n++,i},w=function(t){var e=y(t),s="@"===t?14:"!"===t?20:"y"===t&&e?4:"o"===t?3:2,n="y"===t?s:1,o=RegExp("^\\d{"+n+","+s+"}"),a=i.substring(h).match(o);if(!a)throw"Missing number at position "+h;return h+=a[0].length,parseInt(a[0],10)},k=function(e,s,n){var o=-1,a=t.map(y(e)?n:s,function(t,e){return[[e,t]]}).sort(function(t,e){return-(t[1].length-e[1].length)});if(t.each(a,function(t,e){var s=e[1];return i.substr(h,s.length).toLowerCase()===s.toLowerCase()?(o=e[0],h+=s.length,!1):void 0}),-1!==o)return o+1;throw"Unknown name at 
position "+h},x=function(){if(i.charAt(h)!==e.charAt(n))throw"Unexpected literal at position "+h;h++};for(n=0;e.length>n;n++)if(b)"'"!==e.charAt(n)||y("'")?x():b=!1;else switch(e.charAt(n)){case"d":_=w("d");break;case"D":k("D",u,d);break;case"o":v=w("o");break;case"m":m=w("m");break;case"M":m=k("M",p,f);break;case"y":g=w("y");break;case"@":r=new Date(w("@")),g=r.getFullYear(),m=r.getMonth()+1,_=r.getDate();break;case"!":r=new Date((w("!")-this._ticksTo1970)/1e4),g=r.getFullYear(),m=r.getMonth()+1,_=r.getDate();break;case"'":y("'")?x():b=!0;break;default:x()}if(i.length>h&&(a=i.substr(h),!/^\s+/.test(a)))throw"Extra/unparsed characters found in date: "+a;if(-1===g?g=(new Date).getFullYear():100>g&&(g+=(new Date).getFullYear()-(new Date).getFullYear()%100+(c>=g?0:-100)),v>-1)for(m=1,_=v;;){if(o=this._getDaysInMonth(g,m-1),o>=_)break;m++,_-=o}if(r=this._daylightSavingAdjust(new Date(g,m-1,_)),r.getFullYear()!==g||r.getMonth()+1!==m||r.getDate()!==_)throw"Invalid date";return r},ATOM:"yy-mm-dd",COOKIE:"D, dd M yy",ISO_8601:"yy-mm-dd",RFC_822:"D, d M y",RFC_850:"DD, dd-M-y",RFC_1036:"D, d M y",RFC_1123:"D, d M yy",RFC_2822:"D, d M yy",RSS:"D, d M y",TICKS:"!",TIMESTAMP:"@",W3C:"yy-mm-dd",_ticksTo1970:1e7*60*60*24*(718685+Math.floor(492.5)-Math.floor(19.7)+Math.floor(4.925)),formatDate:function(t,e,i){if(!e)return"";var s,n=(i?i.dayNamesShort:null)||this._defaults.dayNamesShort,o=(i?i.dayNames:null)||this._defaults.dayNames,a=(i?i.monthNamesShort:null)||this._defaults.monthNamesShort,r=(i?i.monthNames:null)||this._defaults.monthNames,h=function(e){var i=t.length>s+1&&t.charAt(s+1)===e;return i&&s++,i},l=function(t,e,i){var s=""+e;if(h(t))for(;i>s.length;)s="0"+s;return s},c=function(t,e,i,s){return h(t)?s[e]:i[e]},u="",d=!1;if(e)for(s=0;t.length>s;s++)if(d)"'"!==t.charAt(s)||h("'")?u+=t.charAt(s):d=!1;else switch(t.charAt(s)){case"d":u+=l("d",e.getDate(),2);break;case"D":u+=c("D",e.getDay(),n,o);break;case"o":u+=l("o",Math.round((new Date(e.getFullYear(),e.getMonth(),e.getDate()).getTime()-new Date(e.getFullYear(),0,0).getTime())/864e5),3);break;case"m":u+=l("m",e.getMonth()+1,2);break;case"M":u+=c("M",e.getMonth(),a,r);break;case"y":u+=h("y")?e.getFullYear():(10>e.getFullYear()%100?"0":"")+e.getFullYear()%100;break;case"@":u+=e.getTime();break;case"!":u+=1e4*e.getTime()+this._ticksTo1970;break;case"'":h("'")?u+="'":d=!0;break;default:u+=t.charAt(s)}return u},_possibleChars:function(t){var e,i="",s=!1,n=function(i){var s=t.length>e+1&&t.charAt(e+1)===i;return s&&e++,s};for(e=0;t.length>e;e++)if(s)"'"!==t.charAt(e)||n("'")?i+=t.charAt(e):s=!1;else switch(t.charAt(e)){case"d":case"m":case"y":case"@":i+="0123456789";break;case"D":case"M":return null;case"'":n("'")?i+="'":s=!0;break;default:i+=t.charAt(e)}return i},_get:function(t,e){return void 0!==t.settings[e]?t.settings[e]:this._defaults[e]},_setDateFromField:function(t,e){if(t.input.val()!==t.lastVal){var i=this._get(t,"dateFormat"),s=t.lastVal=t.input?t.input.val():null,n=this._getDefaultDate(t),o=n,a=this._getFormatConfig(t);try{o=this.parseDate(i,s,a)||n}catch(r){s=e?"":s}t.selectedDay=o.getDate(),t.drawMonth=t.selectedMonth=o.getMonth(),t.drawYear=t.selectedYear=o.getFullYear(),t.currentDay=s?o.getDate():0,t.currentMonth=s?o.getMonth():0,t.currentYear=s?o.getFullYear():0,this._adjustInstDate(t)}},_getDefaultDate:function(t){return this._restrictMinMax(t,this._determineDate(t,this._get(t,"defaultDate"),new Date))},_determineDate:function(e,i,s){var n=function(t){var e=new Date;return e.setDate(e.getDate()+t),e},o=function(i){try{return 
t.datepicker.parseDate(t.datepicker._get(e,"dateFormat"),i,t.datepicker._getFormatConfig(e))}catch(s){}for(var n=(i.toLowerCase().match(/^c/)?t.datepicker._getDate(e):null)||new Date,o=n.getFullYear(),a=n.getMonth(),r=n.getDate(),h=/([+\-]?[0-9]+)\s*(d|D|w|W|m|M|y|Y)?/g,l=h.exec(i);l;){switch(l[2]||"d"){case"d":case"D":r+=parseInt(l[1],10);break;case"w":case"W":r+=7*parseInt(l[1],10);break;case"m":case"M":a+=parseInt(l[1],10),r=Math.min(r,t.datepicker._getDaysInMonth(o,a));break;case"y":case"Y":o+=parseInt(l[1],10),r=Math.min(r,t.datepicker._getDaysInMonth(o,a))}l=h.exec(i)}return new Date(o,a,r)},a=null==i||""===i?s:"string"==typeof i?o(i):"number"==typeof i?isNaN(i)?s:n(i):new Date(i.getTime());return a=a&&"Invalid Date"==""+a?s:a,a&&(a.setHours(0),a.setMinutes(0),a.setSeconds(0),a.setMilliseconds(0)),this._daylightSavingAdjust(a)},_daylightSavingAdjust:function(t){return t?(t.setHours(t.getHours()>12?t.getHours()+2:0),t):null},_setDate:function(t,e,i){var s=!e,n=t.selectedMonth,o=t.selectedYear,a=this._restrictMinMax(t,this._determineDate(t,e,new Date));t.selectedDay=t.currentDay=a.getDate(),t.drawMonth=t.selectedMonth=t.currentMonth=a.getMonth(),t.drawYear=t.selectedYear=t.currentYear=a.getFullYear(),n===t.selectedMonth&&o===t.selectedYear||i||this._notifyChange(t),this._adjustInstDate(t),t.input&&t.input.val(s?"":this._formatDate(t))},_getDate:function(t){var e=!t.currentYear||t.input&&""===t.input.val()?null:this._daylightSavingAdjust(new Date(t.currentYear,t.currentMonth,t.currentDay));return e},_attachHandlers:function(e){var i=this._get(e,"stepMonths"),s="#"+e.id.replace(/\\\\/g,"\\");e.dpDiv.find("[data-handler]").map(function(){var e={prev:function(){t.datepicker._adjustDate(s,-i,"M")},next:function(){t.datepicker._adjustDate(s,+i,"M")},hide:function(){t.datepicker._hideDatepicker()},today:function(){t.datepicker._gotoToday(s)},selectDay:function(){return t.datepicker._selectDay(s,+this.getAttribute("data-month"),+this.getAttribute("data-year"),this),!1},selectMonth:function(){return t.datepicker._selectMonthYear(s,this,"M"),!1},selectYear:function(){return t.datepicker._selectMonthYear(s,this,"Y"),!1}};t(this).on(this.getAttribute("data-event"),e[this.getAttribute("data-handler")])})},_generateHTML:function(t){var e,i,s,n,o,a,r,h,l,c,u,d,p,f,g,m,_,v,b,y,w,k,x,C,D,I,T,P,M,S,H,z,O,A,N,W,E,F,L,R=new Date,B=this._daylightSavingAdjust(new Date(R.getFullYear(),R.getMonth(),R.getDate())),Y=this._get(t,"isRTL"),j=this._get(t,"showButtonPanel"),q=this._get(t,"hideIfNoPrevNext"),K=this._get(t,"navigationAsDateFormat"),U=this._getNumberOfMonths(t),V=this._get(t,"showCurrentAtPos"),$=this._get(t,"stepMonths"),X=1!==U[0]||1!==U[1],G=this._daylightSavingAdjust(t.currentDay?new Date(t.currentYear,t.currentMonth,t.currentDay):new Date(9999,9,9)),Q=this._getMinMaxDate(t,"min"),J=this._getMinMaxDate(t,"max"),Z=t.drawMonth-V,te=t.drawYear;if(0>Z&&(Z+=12,te--),J)for(e=this._daylightSavingAdjust(new Date(J.getFullYear(),J.getMonth()-U[0]*U[1]+1,J.getDate())),e=Q&&Q>e?Q:e;this._daylightSavingAdjust(new Date(te,Z,1))>e;)Z--,0>Z&&(Z=11,te--);for(t.drawMonth=Z,t.drawYear=te,i=this._get(t,"prevText"),i=K?this.formatDate(i,this._daylightSavingAdjust(new Date(te,Z-$,1)),this._getFormatConfig(t)):i,s=this._canAdjustMonth(t,-1,te,Z)?""+i+"":q?"":""+i+"",n=this._get(t,"nextText"),n=K?this.formatDate(n,this._daylightSavingAdjust(new 
Date(te,Z+$,1)),this._getFormatConfig(t)):n,o=this._canAdjustMonth(t,1,te,Z)?""+n+"":q?"":""+n+"",a=this._get(t,"currentText"),r=this._get(t,"gotoCurrent")&&t.currentDay?G:B,a=K?this.formatDate(a,r,this._getFormatConfig(t)):a,h=t.inline?"":"",l=j?"
    "+(Y?h:"")+(this._isInRange(t,r)?"":"")+(Y?"":h)+"
    ":"",c=parseInt(this._get(t,"firstDay"),10),c=isNaN(c)?0:c,u=this._get(t,"showWeek"),d=this._get(t,"dayNames"),p=this._get(t,"dayNamesMin"),f=this._get(t,"monthNames"),g=this._get(t,"monthNamesShort"),m=this._get(t,"beforeShowDay"),_=this._get(t,"showOtherMonths"),v=this._get(t,"selectOtherMonths"),b=this._getDefaultDate(t),y="",k=0;U[0]>k;k++){for(x="",this.maxRows=4,C=0;U[1]>C;C++){if(D=this._daylightSavingAdjust(new Date(te,Z,t.selectedDay)),I=" ui-corner-all",T="",X){if(T+="
    "}for(T+="
    "+(/all|left/.test(I)&&0===k?Y?o:s:"")+(/all|right/.test(I)&&0===k?Y?s:o:"")+this._generateMonthYearHeader(t,Z,te,Q,J,k>0||C>0,f,g)+"
    "+"",P=u?"":"",w=0;7>w;w++)M=(w+c)%7,P+="";for(T+=P+"",S=this._getDaysInMonth(te,Z),te===t.selectedYear&&Z===t.selectedMonth&&(t.selectedDay=Math.min(t.selectedDay,S)),H=(this._getFirstDayOfMonth(te,Z)-c+7)%7,z=Math.ceil((H+S)/7),O=X?this.maxRows>z?this.maxRows:z:z,this.maxRows=O,A=this._daylightSavingAdjust(new Date(te,Z,1-H)),N=0;O>N;N++){for(T+="",W=u?"":"",w=0;7>w;w++)E=m?m.apply(t.input?t.input[0]:null,[A]):[!0,""],F=A.getMonth()!==Z,L=F&&!v||!E[0]||Q&&Q>A||J&&A>J,W+="",A.setDate(A.getDate()+1),A=this._daylightSavingAdjust(A);T+=W+""}Z++,Z>11&&(Z=0,te++),T+="
    "+this._get(t,"weekHeader")+"=5?" class='ui-datepicker-week-end'":"")+">"+""+p[M]+"
    "+this._get(t,"calculateWeek")(A)+""+(F&&!_?" ":L?""+A.getDate()+"":""+A.getDate()+"")+"
    "+(X?"
    "+(U[0]>0&&C===U[1]-1?"
    ":""):""),x+=T}y+=x}return y+=l,t._keyEvent=!1,y},_generateMonthYearHeader:function(t,e,i,s,n,o,a,r){var h,l,c,u,d,p,f,g,m=this._get(t,"changeMonth"),_=this._get(t,"changeYear"),v=this._get(t,"showMonthAfterYear"),b="
    ",y="";if(o||!m)y+=""+a[e]+"";else{for(h=s&&s.getFullYear()===i,l=n&&n.getFullYear()===i,y+=""}if(v||(b+=y+(!o&&m&&_?"":" ")),!t.yearshtml)if(t.yearshtml="",o||!_)b+=""+i+"";else{for(u=this._get(t,"yearRange").split(":"),d=(new Date).getFullYear(),p=function(t){var e=t.match(/c[+\-].*/)?i+parseInt(t.substring(1),10):t.match(/[+\-].*/)?d+parseInt(t,10):parseInt(t,10);return isNaN(e)?d:e},f=p(u[0]),g=Math.max(f,p(u[1]||"")),f=s?Math.max(f,s.getFullYear()):f,g=n?Math.min(g,n.getFullYear()):g,t.yearshtml+="",b+=t.yearshtml,t.yearshtml=null}return b+=this._get(t,"yearSuffix"),v&&(b+=(!o&&m&&_?"":" ")+y),b+="
    "},_adjustInstDate:function(t,e,i){var s=t.selectedYear+("Y"===i?e:0),n=t.selectedMonth+("M"===i?e:0),o=Math.min(t.selectedDay,this._getDaysInMonth(s,n))+("D"===i?e:0),a=this._restrictMinMax(t,this._daylightSavingAdjust(new Date(s,n,o)));t.selectedDay=a.getDate(),t.drawMonth=t.selectedMonth=a.getMonth(),t.drawYear=t.selectedYear=a.getFullYear(),("M"===i||"Y"===i)&&this._notifyChange(t)},_restrictMinMax:function(t,e){var i=this._getMinMaxDate(t,"min"),s=this._getMinMaxDate(t,"max"),n=i&&i>e?i:e;return s&&n>s?s:n},_notifyChange:function(t){var e=this._get(t,"onChangeMonthYear");e&&e.apply(t.input?t.input[0]:null,[t.selectedYear,t.selectedMonth+1,t])},_getNumberOfMonths:function(t){var e=this._get(t,"numberOfMonths");return null==e?[1,1]:"number"==typeof e?[1,e]:e},_getMinMaxDate:function(t,e){return this._determineDate(t,this._get(t,e+"Date"),null)},_getDaysInMonth:function(t,e){return 32-this._daylightSavingAdjust(new Date(t,e,32)).getDate()},_getFirstDayOfMonth:function(t,e){return new Date(t,e,1).getDay()},_canAdjustMonth:function(t,e,i,s){var n=this._getNumberOfMonths(t),o=this._daylightSavingAdjust(new Date(i,s+(0>e?e:n[0]*n[1]),1));return 0>e&&o.setDate(this._getDaysInMonth(o.getFullYear(),o.getMonth())),this._isInRange(t,o)},_isInRange:function(t,e){var i,s,n=this._getMinMaxDate(t,"min"),o=this._getMinMaxDate(t,"max"),a=null,r=null,h=this._get(t,"yearRange");return h&&(i=h.split(":"),s=(new Date).getFullYear(),a=parseInt(i[0],10),r=parseInt(i[1],10),i[0].match(/[+\-].*/)&&(a+=s),i[1].match(/[+\-].*/)&&(r+=s)),(!n||e.getTime()>=n.getTime())&&(!o||e.getTime()<=o.getTime())&&(!a||e.getFullYear()>=a)&&(!r||r>=e.getFullYear())},_getFormatConfig:function(t){var e=this._get(t,"shortYearCutoff");return e="string"!=typeof e?e:(new Date).getFullYear()%100+parseInt(e,10),{shortYearCutoff:e,dayNamesShort:this._get(t,"dayNamesShort"),dayNames:this._get(t,"dayNames"),monthNamesShort:this._get(t,"monthNamesShort"),monthNames:this._get(t,"monthNames")}},_formatDate:function(t,e,i,s){e||(t.currentDay=t.selectedDay,t.currentMonth=t.selectedMonth,t.currentYear=t.selectedYear);var n=e?"object"==typeof e?e:this._daylightSavingAdjust(new Date(s,i,e)):this._daylightSavingAdjust(new Date(t.currentYear,t.currentMonth,t.currentDay));return this.formatDate(this._get(t,"dateFormat"),n,this._getFormatConfig(t))}}),t.fn.datepicker=function(e){if(!this.length)return this;t.datepicker.initialized||(t(document).on("mousedown",t.datepicker._checkExternalClick),t.datepicker.initialized=!0),0===t("#"+t.datepicker._mainDivId).length&&t("body").append(t.datepicker.dpDiv);var i=Array.prototype.slice.call(arguments,1);return"string"!=typeof e||"isDisabled"!==e&&"getDate"!==e&&"widget"!==e?"option"===e&&2===arguments.length&&"string"==typeof arguments[1]?t.datepicker["_"+e+"Datepicker"].apply(t.datepicker,[this[0]].concat(i)):this.each(function(){"string"==typeof e?t.datepicker["_"+e+"Datepicker"].apply(t.datepicker,[this].concat(i)):t.datepicker._attachDatepicker(this,e)}):t.datepicker["_"+e+"Datepicker"].apply(t.datepicker,[this[0]].concat(i))},t.datepicker=new s,t.datepicker.initialized=!1,t.datepicker.uuid=(new Date).getTime(),t.datepicker.version="1.12.1",t.datepicker,t.ui.ie=!!/msie [\w.]+/.exec(navigator.userAgent.toLowerCase());var _=!1;t(document).on("mouseup",function(){_=!1}),t.widget("ui.mouse",{version:"1.12.1",options:{cancel:"input, textarea, button, select, option",distance:1,delay:0},_mouseInit:function(){var e=this;this.element.on("mousedown."+this.widgetName,function(t){return 
e._mouseDown(t)}).on("click."+this.widgetName,function(i){return!0===t.data(i.target,e.widgetName+".preventClickEvent")?(t.removeData(i.target,e.widgetName+".preventClickEvent"),i.stopImmediatePropagation(),!1):void 0}),this.started=!1},_mouseDestroy:function(){this.element.off("."+this.widgetName),this._mouseMoveDelegate&&this.document.off("mousemove."+this.widgetName,this._mouseMoveDelegate).off("mouseup."+this.widgetName,this._mouseUpDelegate)},_mouseDown:function(e){if(!_){this._mouseMoved=!1,this._mouseStarted&&this._mouseUp(e),this._mouseDownEvent=e;var i=this,s=1===e.which,n="string"==typeof this.options.cancel&&e.target.nodeName?t(e.target).closest(this.options.cancel).length:!1;return s&&!n&&this._mouseCapture(e)?(this.mouseDelayMet=!this.options.delay,this.mouseDelayMet||(this._mouseDelayTimer=setTimeout(function(){i.mouseDelayMet=!0},this.options.delay)),this._mouseDistanceMet(e)&&this._mouseDelayMet(e)&&(this._mouseStarted=this._mouseStart(e)!==!1,!this._mouseStarted)?(e.preventDefault(),!0):(!0===t.data(e.target,this.widgetName+".preventClickEvent")&&t.removeData(e.target,this.widgetName+".preventClickEvent"),this._mouseMoveDelegate=function(t){return i._mouseMove(t)},this._mouseUpDelegate=function(t){return i._mouseUp(t)},this.document.on("mousemove."+this.widgetName,this._mouseMoveDelegate).on("mouseup."+this.widgetName,this._mouseUpDelegate),e.preventDefault(),_=!0,!0)):!0}},_mouseMove:function(e){if(this._mouseMoved){if(t.ui.ie&&(!document.documentMode||9>document.documentMode)&&!e.button)return this._mouseUp(e);if(!e.which)if(e.originalEvent.altKey||e.originalEvent.ctrlKey||e.originalEvent.metaKey||e.originalEvent.shiftKey)this.ignoreMissingWhich=!0;else if(!this.ignoreMissingWhich)return this._mouseUp(e)}return(e.which||e.button)&&(this._mouseMoved=!0),this._mouseStarted?(this._mouseDrag(e),e.preventDefault()):(this._mouseDistanceMet(e)&&this._mouseDelayMet(e)&&(this._mouseStarted=this._mouseStart(this._mouseDownEvent,e)!==!1,this._mouseStarted?this._mouseDrag(e):this._mouseUp(e)),!this._mouseStarted)},_mouseUp:function(e){this.document.off("mousemove."+this.widgetName,this._mouseMoveDelegate).off("mouseup."+this.widgetName,this._mouseUpDelegate),this._mouseStarted&&(this._mouseStarted=!1,e.target===this._mouseDownEvent.target&&t.data(e.target,this.widgetName+".preventClickEvent",!0),this._mouseStop(e)),this._mouseDelayTimer&&(clearTimeout(this._mouseDelayTimer),delete this._mouseDelayTimer),this.ignoreMissingWhich=!1,_=!1,e.preventDefault()},_mouseDistanceMet:function(t){return Math.max(Math.abs(this._mouseDownEvent.pageX-t.pageX),Math.abs(this._mouseDownEvent.pageY-t.pageY))>=this.options.distance},_mouseDelayMet:function(){return this.mouseDelayMet},_mouseStart:function(){},_mouseDrag:function(){},_mouseStop:function(){},_mouseCapture:function(){return!0}}),t.ui.plugin={add:function(e,i,s){var n,o=t.ui[e].prototype;for(n in s)o.plugins[n]=o.plugins[n]||[],o.plugins[n].push([i,s[n]])},call:function(t,e,i,s){var 
n,o=t.plugins[e];if(o&&(s||t.element[0].parentNode&&11!==t.element[0].parentNode.nodeType))for(n=0;o.length>n;n++)t.options[o[n][0]]&&o[n][1].apply(t.element,i)}},t.ui.safeBlur=function(e){e&&"body"!==e.nodeName.toLowerCase()&&t(e).trigger("blur")},t.widget("ui.draggable",t.ui.mouse,{version:"1.12.1",widgetEventPrefix:"drag",options:{addClasses:!0,appendTo:"parent",axis:!1,connectToSortable:!1,containment:!1,cursor:"auto",cursorAt:!1,grid:!1,handle:!1,helper:"original",iframeFix:!1,opacity:!1,refreshPositions:!1,revert:!1,revertDuration:500,scope:"default",scroll:!0,scrollSensitivity:20,scrollSpeed:20,snap:!1,snapMode:"both",snapTolerance:20,stack:!1,zIndex:!1,drag:null,start:null,stop:null},_create:function(){"original"===this.options.helper&&this._setPositionRelative(),this.options.addClasses&&this._addClass("ui-draggable"),this._setHandleClassName(),this._mouseInit()},_setOption:function(t,e){this._super(t,e),"handle"===t&&(this._removeHandleClassName(),this._setHandleClassName())},_destroy:function(){return(this.helper||this.element).is(".ui-draggable-dragging")?(this.destroyOnClear=!0,void 0):(this._removeHandleClassName(),this._mouseDestroy(),void 0)},_mouseCapture:function(e){var i=this.options;return this.helper||i.disabled||t(e.target).closest(".ui-resizable-handle").length>0?!1:(this.handle=this._getHandle(e),this.handle?(this._blurActiveElement(e),this._blockFrames(i.iframeFix===!0?"iframe":i.iframeFix),!0):!1)},_blockFrames:function(e){this.iframeBlocks=this.document.find(e).map(function(){var e=t(this);return t("
    ").css("position","absolute").appendTo(e.parent()).outerWidth(e.outerWidth()).outerHeight(e.outerHeight()).offset(e.offset())[0]})},_unblockFrames:function(){this.iframeBlocks&&(this.iframeBlocks.remove(),delete this.iframeBlocks)},_blurActiveElement:function(e){var i=t.ui.safeActiveElement(this.document[0]),s=t(e.target);s.closest(i).length||t.ui.safeBlur(i)},_mouseStart:function(e){var i=this.options;return this.helper=this._createHelper(e),this._addClass(this.helper,"ui-draggable-dragging"),this._cacheHelperProportions(),t.ui.ddmanager&&(t.ui.ddmanager.current=this),this._cacheMargins(),this.cssPosition=this.helper.css("position"),this.scrollParent=this.helper.scrollParent(!0),this.offsetParent=this.helper.offsetParent(),this.hasFixedAncestor=this.helper.parents().filter(function(){return"fixed"===t(this).css("position")}).length>0,this.positionAbs=this.element.offset(),this._refreshOffsets(e),this.originalPosition=this.position=this._generatePosition(e,!1),this.originalPageX=e.pageX,this.originalPageY=e.pageY,i.cursorAt&&this._adjustOffsetFromHelper(i.cursorAt),this._setContainment(),this._trigger("start",e)===!1?(this._clear(),!1):(this._cacheHelperProportions(),t.ui.ddmanager&&!i.dropBehaviour&&t.ui.ddmanager.prepareOffsets(this,e),this._mouseDrag(e,!0),t.ui.ddmanager&&t.ui.ddmanager.dragStart(this,e),!0)},_refreshOffsets:function(t){this.offset={top:this.positionAbs.top-this.margins.top,left:this.positionAbs.left-this.margins.left,scroll:!1,parent:this._getParentOffset(),relative:this._getRelativeOffset()},this.offset.click={left:t.pageX-this.offset.left,top:t.pageY-this.offset.top}},_mouseDrag:function(e,i){if(this.hasFixedAncestor&&(this.offset.parent=this._getParentOffset()),this.position=this._generatePosition(e,!0),this.positionAbs=this._convertPositionTo("absolute"),!i){var s=this._uiHash();if(this._trigger("drag",e,s)===!1)return this._mouseUp(new t.Event("mouseup",e)),!1;this.position=s.position}return this.helper[0].style.left=this.position.left+"px",this.helper[0].style.top=this.position.top+"px",t.ui.ddmanager&&t.ui.ddmanager.drag(this,e),!1},_mouseStop:function(e){var i=this,s=!1;return t.ui.ddmanager&&!this.options.dropBehaviour&&(s=t.ui.ddmanager.drop(this,e)),this.dropped&&(s=this.dropped,this.dropped=!1),"invalid"===this.options.revert&&!s||"valid"===this.options.revert&&s||this.options.revert===!0||t.isFunction(this.options.revert)&&this.options.revert.call(this.element,s)?t(this.helper).animate(this.originalPosition,parseInt(this.options.revertDuration,10),function(){i._trigger("stop",e)!==!1&&i._clear()}):this._trigger("stop",e)!==!1&&this._clear(),!1},_mouseUp:function(e){return this._unblockFrames(),t.ui.ddmanager&&t.ui.ddmanager.dragStop(this,e),this.handleElement.is(e.target)&&this.element.trigger("focus"),t.ui.mouse.prototype._mouseUp.call(this,e)},cancel:function(){return this.helper.is(".ui-draggable-dragging")?this._mouseUp(new t.Event("mouseup",{target:this.element[0]})):this._clear(),this},_getHandle:function(e){return this.options.handle?!!t(e.target).closest(this.element.find(this.options.handle)).length:!0},_setHandleClassName:function(){this.handleElement=this.options.handle?this.element.find(this.options.handle):this.element,this._addClass(this.handleElement,"ui-draggable-handle")},_removeHandleClassName:function(){this._removeClass(this.handleElement,"ui-draggable-handle")},_createHelper:function(e){var 
i=this.options,s=t.isFunction(i.helper),n=s?t(i.helper.apply(this.element[0],[e])):"clone"===i.helper?this.element.clone().removeAttr("id"):this.element;return n.parents("body").length||n.appendTo("parent"===i.appendTo?this.element[0].parentNode:i.appendTo),s&&n[0]===this.element[0]&&this._setPositionRelative(),n[0]===this.element[0]||/(fixed|absolute)/.test(n.css("position"))||n.css("position","absolute"),n},_setPositionRelative:function(){/^(?:r|a|f)/.test(this.element.css("position"))||(this.element[0].style.position="relative")},_adjustOffsetFromHelper:function(e){"string"==typeof e&&(e=e.split(" ")),t.isArray(e)&&(e={left:+e[0],top:+e[1]||0}),"left"in e&&(this.offset.click.left=e.left+this.margins.left),"right"in e&&(this.offset.click.left=this.helperProportions.width-e.right+this.margins.left),"top"in e&&(this.offset.click.top=e.top+this.margins.top),"bottom"in e&&(this.offset.click.top=this.helperProportions.height-e.bottom+this.margins.top)},_isRootNode:function(t){return/(html|body)/i.test(t.tagName)||t===this.document[0]},_getParentOffset:function(){var e=this.offsetParent.offset(),i=this.document[0];return"absolute"===this.cssPosition&&this.scrollParent[0]!==i&&t.contains(this.scrollParent[0],this.offsetParent[0])&&(e.left+=this.scrollParent.scrollLeft(),e.top+=this.scrollParent.scrollTop()),this._isRootNode(this.offsetParent[0])&&(e={top:0,left:0}),{top:e.top+(parseInt(this.offsetParent.css("borderTopWidth"),10)||0),left:e.left+(parseInt(this.offsetParent.css("borderLeftWidth"),10)||0)}},_getRelativeOffset:function(){if("relative"!==this.cssPosition)return{top:0,left:0};var t=this.element.position(),e=this._isRootNode(this.scrollParent[0]);return{top:t.top-(parseInt(this.helper.css("top"),10)||0)+(e?0:this.scrollParent.scrollTop()),left:t.left-(parseInt(this.helper.css("left"),10)||0)+(e?0:this.scrollParent.scrollLeft())} +},_cacheMargins:function(){this.margins={left:parseInt(this.element.css("marginLeft"),10)||0,top:parseInt(this.element.css("marginTop"),10)||0,right:parseInt(this.element.css("marginRight"),10)||0,bottom:parseInt(this.element.css("marginBottom"),10)||0}},_cacheHelperProportions:function(){this.helperProportions={width:this.helper.outerWidth(),height:this.helper.outerHeight()}},_setContainment:function(){var e,i,s,n=this.options,o=this.document[0];return this.relativeContainer=null,n.containment?"window"===n.containment?(this.containment=[t(window).scrollLeft()-this.offset.relative.left-this.offset.parent.left,t(window).scrollTop()-this.offset.relative.top-this.offset.parent.top,t(window).scrollLeft()+t(window).width()-this.helperProportions.width-this.margins.left,t(window).scrollTop()+(t(window).height()||o.body.parentNode.scrollHeight)-this.helperProportions.height-this.margins.top],void 0):"document"===n.containment?(this.containment=[0,0,t(o).width()-this.helperProportions.width-this.margins.left,(t(o).height()||o.body.parentNode.scrollHeight)-this.helperProportions.height-this.margins.top],void 0):n.containment.constructor===Array?(this.containment=n.containment,void 
0):("parent"===n.containment&&(n.containment=this.helper[0].parentNode),i=t(n.containment),s=i[0],s&&(e=/(scroll|auto)/.test(i.css("overflow")),this.containment=[(parseInt(i.css("borderLeftWidth"),10)||0)+(parseInt(i.css("paddingLeft"),10)||0),(parseInt(i.css("borderTopWidth"),10)||0)+(parseInt(i.css("paddingTop"),10)||0),(e?Math.max(s.scrollWidth,s.offsetWidth):s.offsetWidth)-(parseInt(i.css("borderRightWidth"),10)||0)-(parseInt(i.css("paddingRight"),10)||0)-this.helperProportions.width-this.margins.left-this.margins.right,(e?Math.max(s.scrollHeight,s.offsetHeight):s.offsetHeight)-(parseInt(i.css("borderBottomWidth"),10)||0)-(parseInt(i.css("paddingBottom"),10)||0)-this.helperProportions.height-this.margins.top-this.margins.bottom],this.relativeContainer=i),void 0):(this.containment=null,void 0)},_convertPositionTo:function(t,e){e||(e=this.position);var i="absolute"===t?1:-1,s=this._isRootNode(this.scrollParent[0]);return{top:e.top+this.offset.relative.top*i+this.offset.parent.top*i-("fixed"===this.cssPosition?-this.offset.scroll.top:s?0:this.offset.scroll.top)*i,left:e.left+this.offset.relative.left*i+this.offset.parent.left*i-("fixed"===this.cssPosition?-this.offset.scroll.left:s?0:this.offset.scroll.left)*i}},_generatePosition:function(t,e){var i,s,n,o,a=this.options,r=this._isRootNode(this.scrollParent[0]),h=t.pageX,l=t.pageY;return r&&this.offset.scroll||(this.offset.scroll={top:this.scrollParent.scrollTop(),left:this.scrollParent.scrollLeft()}),e&&(this.containment&&(this.relativeContainer?(s=this.relativeContainer.offset(),i=[this.containment[0]+s.left,this.containment[1]+s.top,this.containment[2]+s.left,this.containment[3]+s.top]):i=this.containment,t.pageX-this.offset.click.lefti[2]&&(h=i[2]+this.offset.click.left),t.pageY-this.offset.click.top>i[3]&&(l=i[3]+this.offset.click.top)),a.grid&&(n=a.grid[1]?this.originalPageY+Math.round((l-this.originalPageY)/a.grid[1])*a.grid[1]:this.originalPageY,l=i?n-this.offset.click.top>=i[1]||n-this.offset.click.top>i[3]?n:n-this.offset.click.top>=i[1]?n-a.grid[1]:n+a.grid[1]:n,o=a.grid[0]?this.originalPageX+Math.round((h-this.originalPageX)/a.grid[0])*a.grid[0]:this.originalPageX,h=i?o-this.offset.click.left>=i[0]||o-this.offset.click.left>i[2]?o:o-this.offset.click.left>=i[0]?o-a.grid[0]:o+a.grid[0]:o),"y"===a.axis&&(h=this.originalPageX),"x"===a.axis&&(l=this.originalPageY)),{top:l-this.offset.click.top-this.offset.relative.top-this.offset.parent.top+("fixed"===this.cssPosition?-this.offset.scroll.top:r?0:this.offset.scroll.top),left:h-this.offset.click.left-this.offset.relative.left-this.offset.parent.left+("fixed"===this.cssPosition?-this.offset.scroll.left:r?0:this.offset.scroll.left)}},_clear:function(){this._removeClass(this.helper,"ui-draggable-dragging"),this.helper[0]===this.element[0]||this.cancelHelperRemoval||this.helper.remove(),this.helper=null,this.cancelHelperRemoval=!1,this.destroyOnClear&&this.destroy()},_trigger:function(e,i,s){return s=s||this._uiHash(),t.ui.plugin.call(this,e,[i,s,this],!0),/^(drag|start|stop)/.test(e)&&(this.positionAbs=this._convertPositionTo("absolute"),s.offset=this.positionAbs),t.Widget.prototype._trigger.call(this,e,i,s)},plugins:{},_uiHash:function(){return{helper:this.helper,position:this.position,originalPosition:this.originalPosition,offset:this.positionAbs}}}),t.ui.plugin.add("draggable","connectToSortable",{start:function(e,i,s){var n=t.extend({},i,{item:s.element});s.sortables=[],t(s.options.connectToSortable).each(function(){var 
i=t(this).sortable("instance");i&&!i.options.disabled&&(s.sortables.push(i),i.refreshPositions(),i._trigger("activate",e,n))})},stop:function(e,i,s){var n=t.extend({},i,{item:s.element});s.cancelHelperRemoval=!1,t.each(s.sortables,function(){var t=this;t.isOver?(t.isOver=0,s.cancelHelperRemoval=!0,t.cancelHelperRemoval=!1,t._storedCSS={position:t.placeholder.css("position"),top:t.placeholder.css("top"),left:t.placeholder.css("left")},t._mouseStop(e),t.options.helper=t.options._helper):(t.cancelHelperRemoval=!0,t._trigger("deactivate",e,n))})},drag:function(e,i,s){t.each(s.sortables,function(){var n=!1,o=this;o.positionAbs=s.positionAbs,o.helperProportions=s.helperProportions,o.offset.click=s.offset.click,o._intersectsWith(o.containerCache)&&(n=!0,t.each(s.sortables,function(){return this.positionAbs=s.positionAbs,this.helperProportions=s.helperProportions,this.offset.click=s.offset.click,this!==o&&this._intersectsWith(this.containerCache)&&t.contains(o.element[0],this.element[0])&&(n=!1),n})),n?(o.isOver||(o.isOver=1,s._parent=i.helper.parent(),o.currentItem=i.helper.appendTo(o.element).data("ui-sortable-item",!0),o.options._helper=o.options.helper,o.options.helper=function(){return i.helper[0]},e.target=o.currentItem[0],o._mouseCapture(e,!0),o._mouseStart(e,!0,!0),o.offset.click.top=s.offset.click.top,o.offset.click.left=s.offset.click.left,o.offset.parent.left-=s.offset.parent.left-o.offset.parent.left,o.offset.parent.top-=s.offset.parent.top-o.offset.parent.top,s._trigger("toSortable",e),s.dropped=o.element,t.each(s.sortables,function(){this.refreshPositions()}),s.currentItem=s.element,o.fromOutside=s),o.currentItem&&(o._mouseDrag(e),i.position=o.position)):o.isOver&&(o.isOver=0,o.cancelHelperRemoval=!0,o.options._revert=o.options.revert,o.options.revert=!1,o._trigger("out",e,o._uiHash(o)),o._mouseStop(e,!0),o.options.revert=o.options._revert,o.options.helper=o.options._helper,o.placeholder&&o.placeholder.remove(),i.helper.appendTo(s._parent),s._refreshOffsets(e),i.position=s._generatePosition(e,!0),s._trigger("fromSortable",e),s.dropped=!1,t.each(s.sortables,function(){this.refreshPositions()}))})}}),t.ui.plugin.add("draggable","cursor",{start:function(e,i,s){var n=t("body"),o=s.options;n.css("cursor")&&(o._cursor=n.css("cursor")),n.css("cursor",o.cursor)},stop:function(e,i,s){var n=s.options;n._cursor&&t("body").css("cursor",n._cursor)}}),t.ui.plugin.add("draggable","opacity",{start:function(e,i,s){var n=t(i.helper),o=s.options;n.css("opacity")&&(o._opacity=n.css("opacity")),n.css("opacity",o.opacity)},stop:function(e,i,s){var n=s.options;n._opacity&&t(i.helper).css("opacity",n._opacity)}}),t.ui.plugin.add("draggable","scroll",{start:function(t,e,i){i.scrollParentNotHidden||(i.scrollParentNotHidden=i.helper.scrollParent(!1)),i.scrollParentNotHidden[0]!==i.document[0]&&"HTML"!==i.scrollParentNotHidden[0].tagName&&(i.overflowOffset=i.scrollParentNotHidden.offset())},drag:function(e,i,s){var 
n=s.options,o=!1,a=s.scrollParentNotHidden[0],r=s.document[0];a!==r&&"HTML"!==a.tagName?(n.axis&&"x"===n.axis||(s.overflowOffset.top+a.offsetHeight-e.pageY=0;d--)h=s.snapElements[d].left-s.margins.left,l=h+s.snapElements[d].width,c=s.snapElements[d].top-s.margins.top,u=c+s.snapElements[d].height,h-g>_||m>l+g||c-g>b||v>u+g||!t.contains(s.snapElements[d].item.ownerDocument,s.snapElements[d].item)?(s.snapElements[d].snapping&&s.options.snap.release&&s.options.snap.release.call(s.element,e,t.extend(s._uiHash(),{snapItem:s.snapElements[d].item})),s.snapElements[d].snapping=!1):("inner"!==f.snapMode&&(n=g>=Math.abs(c-b),o=g>=Math.abs(u-v),a=g>=Math.abs(h-_),r=g>=Math.abs(l-m),n&&(i.position.top=s._convertPositionTo("relative",{top:c-s.helperProportions.height,left:0}).top),o&&(i.position.top=s._convertPositionTo("relative",{top:u,left:0}).top),a&&(i.position.left=s._convertPositionTo("relative",{top:0,left:h-s.helperProportions.width}).left),r&&(i.position.left=s._convertPositionTo("relative",{top:0,left:l}).left)),p=n||o||a||r,"outer"!==f.snapMode&&(n=g>=Math.abs(c-v),o=g>=Math.abs(u-b),a=g>=Math.abs(h-m),r=g>=Math.abs(l-_),n&&(i.position.top=s._convertPositionTo("relative",{top:c,left:0}).top),o&&(i.position.top=s._convertPositionTo("relative",{top:u-s.helperProportions.height,left:0}).top),a&&(i.position.left=s._convertPositionTo("relative",{top:0,left:h}).left),r&&(i.position.left=s._convertPositionTo("relative",{top:0,left:l-s.helperProportions.width}).left)),!s.snapElements[d].snapping&&(n||o||a||r||p)&&s.options.snap.snap&&s.options.snap.snap.call(s.element,e,t.extend(s._uiHash(),{snapItem:s.snapElements[d].item})),s.snapElements[d].snapping=n||o||a||r||p)}}),t.ui.plugin.add("draggable","stack",{start:function(e,i,s){var n,o=s.options,a=t.makeArray(t(o.stack)).sort(function(e,i){return(parseInt(t(e).css("zIndex"),10)||0)-(parseInt(t(i).css("zIndex"),10)||0)});a.length&&(n=parseInt(t(a[0]).css("zIndex"),10)||0,t(a).each(function(e){t(this).css("zIndex",n+e)}),this.css("zIndex",n+a.length))}}),t.ui.plugin.add("draggable","zIndex",{start:function(e,i,s){var n=t(i.helper),o=s.options;n.css("zIndex")&&(o._zIndex=n.css("zIndex")),n.css("zIndex",o.zIndex)},stop:function(e,i,s){var n=s.options;n._zIndex&&t(i.helper).css("zIndex",n._zIndex)}}),t.ui.draggable,t.widget("ui.resizable",t.ui.mouse,{version:"1.12.1",widgetEventPrefix:"resize",options:{alsoResize:!1,animate:!1,animateDuration:"slow",animateEasing:"swing",aspectRatio:!1,autoHide:!1,classes:{"ui-resizable-se":"ui-icon ui-icon-gripsmall-diagonal-se"},containment:!1,ghost:!1,grid:!1,handles:"e,s,se",helper:!1,maxHeight:null,maxWidth:null,minHeight:10,minWidth:10,zIndex:90,resize:null,start:null,stop:null},_num:function(t){return parseFloat(t)||0},_isNumber:function(t){return!isNaN(parseFloat(t))},_hasScroll:function(e,i){if("hidden"===t(e).css("overflow"))return!1;var s=i&&"left"===i?"scrollLeft":"scrollTop",n=!1;return e[s]>0?!0:(e[s]=1,n=e[s]>0,e[s]=0,n)},_create:function(){var e,i=this.options,s=this;this._addClass("ui-resizable"),t.extend(this,{_aspectRatio:!!i.aspectRatio,aspectRatio:i.aspectRatio,originalElement:this.element,_proportionallyResizeElements:[],_helper:i.helper||i.ghost||i.animate?i.helper||"ui-resizable-helper":null}),this.element[0].nodeName.match(/^(canvas|textarea|input|select|button|img)$/i)&&(this.element.wrap(t("
    ").css({position:this.element.css("position"),width:this.element.outerWidth(),height:this.element.outerHeight(),top:this.element.css("top"),left:this.element.css("left")})),this.element=this.element.parent().data("ui-resizable",this.element.resizable("instance")),this.elementIsWrapper=!0,e={marginTop:this.originalElement.css("marginTop"),marginRight:this.originalElement.css("marginRight"),marginBottom:this.originalElement.css("marginBottom"),marginLeft:this.originalElement.css("marginLeft")},this.element.css(e),this.originalElement.css("margin",0),this.originalResizeStyle=this.originalElement.css("resize"),this.originalElement.css("resize","none"),this._proportionallyResizeElements.push(this.originalElement.css({position:"static",zoom:1,display:"block"})),this.originalElement.css(e),this._proportionallyResize()),this._setupHandles(),i.autoHide&&t(this.element).on("mouseenter",function(){i.disabled||(s._removeClass("ui-resizable-autohide"),s._handles.show())}).on("mouseleave",function(){i.disabled||s.resizing||(s._addClass("ui-resizable-autohide"),s._handles.hide())}),this._mouseInit()},_destroy:function(){this._mouseDestroy();var e,i=function(e){t(e).removeData("resizable").removeData("ui-resizable").off(".resizable").find(".ui-resizable-handle").remove()};return this.elementIsWrapper&&(i(this.element),e=this.element,this.originalElement.css({position:e.css("position"),width:e.outerWidth(),height:e.outerHeight(),top:e.css("top"),left:e.css("left")}).insertAfter(e),e.remove()),this.originalElement.css("resize",this.originalResizeStyle),i(this.originalElement),this},_setOption:function(t,e){switch(this._super(t,e),t){case"handles":this._removeHandles(),this._setupHandles();break;default:}},_setupHandles:function(){var e,i,s,n,o,a=this.options,r=this;if(this.handles=a.handles||(t(".ui-resizable-handle",this.element).length?{n:".ui-resizable-n",e:".ui-resizable-e",s:".ui-resizable-s",w:".ui-resizable-w",se:".ui-resizable-se",sw:".ui-resizable-sw",ne:".ui-resizable-ne",nw:".ui-resizable-nw"}:"e,s,se"),this._handles=t(),this.handles.constructor===String)for("all"===this.handles&&(this.handles="n,e,s,w,se,sw,ne,nw"),s=this.handles.split(","),this.handles={},i=0;s.length>i;i++)e=t.trim(s[i]),n="ui-resizable-"+e,o=t("
    "),this._addClass(o,"ui-resizable-handle "+n),o.css({zIndex:a.zIndex}),this.handles[e]=".ui-resizable-"+e,this.element.append(o);this._renderAxis=function(e){var i,s,n,o;e=e||this.element;for(i in this.handles)this.handles[i].constructor===String?this.handles[i]=this.element.children(this.handles[i]).first().show():(this.handles[i].jquery||this.handles[i].nodeType)&&(this.handles[i]=t(this.handles[i]),this._on(this.handles[i],{mousedown:r._mouseDown})),this.elementIsWrapper&&this.originalElement[0].nodeName.match(/^(textarea|input|select|button)$/i)&&(s=t(this.handles[i],this.element),o=/sw|ne|nw|se|n|s/.test(i)?s.outerHeight():s.outerWidth(),n=["padding",/ne|nw|n/.test(i)?"Top":/se|sw|s/.test(i)?"Bottom":/^e$/.test(i)?"Right":"Left"].join(""),e.css(n,o),this._proportionallyResize()),this._handles=this._handles.add(this.handles[i])},this._renderAxis(this.element),this._handles=this._handles.add(this.element.find(".ui-resizable-handle")),this._handles.disableSelection(),this._handles.on("mouseover",function(){r.resizing||(this.className&&(o=this.className.match(/ui-resizable-(se|sw|ne|nw|n|e|s|w)/i)),r.axis=o&&o[1]?o[1]:"se")}),a.autoHide&&(this._handles.hide(),this._addClass("ui-resizable-autohide"))},_removeHandles:function(){this._handles.remove()},_mouseCapture:function(e){var i,s,n=!1;for(i in this.handles)s=t(this.handles[i])[0],(s===e.target||t.contains(s,e.target))&&(n=!0);return!this.options.disabled&&n},_mouseStart:function(e){var i,s,n,o=this.options,a=this.element;return this.resizing=!0,this._renderProxy(),i=this._num(this.helper.css("left")),s=this._num(this.helper.css("top")),o.containment&&(i+=t(o.containment).scrollLeft()||0,s+=t(o.containment).scrollTop()||0),this.offset=this.helper.offset(),this.position={left:i,top:s},this.size=this._helper?{width:this.helper.width(),height:this.helper.height()}:{width:a.width(),height:a.height()},this.originalSize=this._helper?{width:a.outerWidth(),height:a.outerHeight()}:{width:a.width(),height:a.height()},this.sizeDiff={width:a.outerWidth()-a.width(),height:a.outerHeight()-a.height()},this.originalPosition={left:i,top:s},this.originalMousePosition={left:e.pageX,top:e.pageY},this.aspectRatio="number"==typeof o.aspectRatio?o.aspectRatio:this.originalSize.width/this.originalSize.height||1,n=t(".ui-resizable-"+this.axis).css("cursor"),t("body").css("cursor","auto"===n?this.axis+"-resize":n),this._addClass("ui-resizable-resizing"),this._propagate("start",e),!0},_mouseDrag:function(e){var i,s,n=this.originalMousePosition,o=this.axis,a=e.pageX-n.left||0,r=e.pageY-n.top||0,h=this._change[o];return this._updatePrevProperties(),h?(i=h.apply(this,[e,a,r]),this._updateVirtualBoundaries(e.shiftKey),(this._aspectRatio||e.shiftKey)&&(i=this._updateRatio(i,e)),i=this._respectSize(i,e),this._updateCache(i),this._propagate("resize",e),s=this._applyChanges(),!this._helper&&this._proportionallyResizeElements.length&&this._proportionallyResize(),t.isEmptyObject(s)||(this._updatePrevProperties(),this._trigger("resize",e,this.ui()),this._applyChanges()),!1):!1},_mouseStop:function(e){this.resizing=!1;var i,s,n,o,a,r,h,l=this.options,c=this;return 
this._helper&&(i=this._proportionallyResizeElements,s=i.length&&/textarea/i.test(i[0].nodeName),n=s&&this._hasScroll(i[0],"left")?0:c.sizeDiff.height,o=s?0:c.sizeDiff.width,a={width:c.helper.width()-o,height:c.helper.height()-n},r=parseFloat(c.element.css("left"))+(c.position.left-c.originalPosition.left)||null,h=parseFloat(c.element.css("top"))+(c.position.top-c.originalPosition.top)||null,l.animate||this.element.css(t.extend(a,{top:h,left:r})),c.helper.height(c.size.height),c.helper.width(c.size.width),this._helper&&!l.animate&&this._proportionallyResize()),t("body").css("cursor","auto"),this._removeClass("ui-resizable-resizing"),this._propagate("stop",e),this._helper&&this.helper.remove(),!1},_updatePrevProperties:function(){this.prevPosition={top:this.position.top,left:this.position.left},this.prevSize={width:this.size.width,height:this.size.height}},_applyChanges:function(){var t={};return this.position.top!==this.prevPosition.top&&(t.top=this.position.top+"px"),this.position.left!==this.prevPosition.left&&(t.left=this.position.left+"px"),this.size.width!==this.prevSize.width&&(t.width=this.size.width+"px"),this.size.height!==this.prevSize.height&&(t.height=this.size.height+"px"),this.helper.css(t),t},_updateVirtualBoundaries:function(t){var e,i,s,n,o,a=this.options;o={minWidth:this._isNumber(a.minWidth)?a.minWidth:0,maxWidth:this._isNumber(a.maxWidth)?a.maxWidth:1/0,minHeight:this._isNumber(a.minHeight)?a.minHeight:0,maxHeight:this._isNumber(a.maxHeight)?a.maxHeight:1/0},(this._aspectRatio||t)&&(e=o.minHeight*this.aspectRatio,s=o.minWidth/this.aspectRatio,i=o.maxHeight*this.aspectRatio,n=o.maxWidth/this.aspectRatio,e>o.minWidth&&(o.minWidth=e),s>o.minHeight&&(o.minHeight=s),o.maxWidth>i&&(o.maxWidth=i),o.maxHeight>n&&(o.maxHeight=n)),this._vBoundaries=o},_updateCache:function(t){this.offset=this.helper.offset(),this._isNumber(t.left)&&(this.position.left=t.left),this._isNumber(t.top)&&(this.position.top=t.top),this._isNumber(t.height)&&(this.size.height=t.height),this._isNumber(t.width)&&(this.size.width=t.width)},_updateRatio:function(t){var e=this.position,i=this.size,s=this.axis;return this._isNumber(t.height)?t.width=t.height*this.aspectRatio:this._isNumber(t.width)&&(t.height=t.width/this.aspectRatio),"sw"===s&&(t.left=e.left+(i.width-t.width),t.top=null),"nw"===s&&(t.top=e.top+(i.height-t.height),t.left=e.left+(i.width-t.width)),t},_respectSize:function(t){var e=this._vBoundaries,i=this.axis,s=this._isNumber(t.width)&&e.maxWidth&&e.maxWidtht.width,a=this._isNumber(t.height)&&e.minHeight&&e.minHeight>t.height,r=this.originalPosition.left+this.originalSize.width,h=this.originalPosition.top+this.originalSize.height,l=/sw|nw|w/.test(i),c=/nw|ne|n/.test(i);return o&&(t.width=e.minWidth),a&&(t.height=e.minHeight),s&&(t.width=e.maxWidth),n&&(t.height=e.maxHeight),o&&l&&(t.left=r-e.minWidth),s&&l&&(t.left=r-e.maxWidth),a&&c&&(t.top=h-e.minHeight),n&&c&&(t.top=h-e.maxHeight),t.width||t.height||t.left||!t.top?t.width||t.height||t.top||!t.left||(t.left=null):t.top=null,t},_getPaddingPlusBorderDimensions:function(t){for(var e=0,i=[],s=[t.css("borderTopWidth"),t.css("borderRightWidth"),t.css("borderBottomWidth"),t.css("borderLeftWidth")],n=[t.css("paddingTop"),t.css("paddingRight"),t.css("paddingBottom"),t.css("paddingLeft")];4>e;e++)i[e]=parseFloat(s[e])||0,i[e]+=parseFloat(n[e])||0;return{height:i[0]+i[2],width:i[1]+i[3]}},_proportionallyResize:function(){if(this._proportionallyResizeElements.length)for(var 
t,e=0,i=this.helper||this.element;this._proportionallyResizeElements.length>e;e++)t=this._proportionallyResizeElements[e],this.outerDimensions||(this.outerDimensions=this._getPaddingPlusBorderDimensions(t)),t.css({height:i.height()-this.outerDimensions.height||0,width:i.width()-this.outerDimensions.width||0})},_renderProxy:function(){var e=this.element,i=this.options;this.elementOffset=e.offset(),this._helper?(this.helper=this.helper||t("
    "),this._addClass(this.helper,this._helper),this.helper.css({width:this.element.outerWidth(),height:this.element.outerHeight(),position:"absolute",left:this.elementOffset.left+"px",top:this.elementOffset.top+"px",zIndex:++i.zIndex}),this.helper.appendTo("body").disableSelection()):this.helper=this.element},_change:{e:function(t,e){return{width:this.originalSize.width+e}},w:function(t,e){var i=this.originalSize,s=this.originalPosition;return{left:s.left+e,width:i.width-e}},n:function(t,e,i){var s=this.originalSize,n=this.originalPosition;return{top:n.top+i,height:s.height-i}},s:function(t,e,i){return{height:this.originalSize.height+i}},se:function(e,i,s){return t.extend(this._change.s.apply(this,arguments),this._change.e.apply(this,[e,i,s]))},sw:function(e,i,s){return t.extend(this._change.s.apply(this,arguments),this._change.w.apply(this,[e,i,s]))},ne:function(e,i,s){return t.extend(this._change.n.apply(this,arguments),this._change.e.apply(this,[e,i,s]))},nw:function(e,i,s){return t.extend(this._change.n.apply(this,arguments),this._change.w.apply(this,[e,i,s]))}},_propagate:function(e,i){t.ui.plugin.call(this,e,[i,this.ui()]),"resize"!==e&&this._trigger(e,i,this.ui())},plugins:{},ui:function(){return{originalElement:this.originalElement,element:this.element,helper:this.helper,position:this.position,size:this.size,originalSize:this.originalSize,originalPosition:this.originalPosition}}}),t.ui.plugin.add("resizable","animate",{stop:function(e){var i=t(this).resizable("instance"),s=i.options,n=i._proportionallyResizeElements,o=n.length&&/textarea/i.test(n[0].nodeName),a=o&&i._hasScroll(n[0],"left")?0:i.sizeDiff.height,r=o?0:i.sizeDiff.width,h={width:i.size.width-r,height:i.size.height-a},l=parseFloat(i.element.css("left"))+(i.position.left-i.originalPosition.left)||null,c=parseFloat(i.element.css("top"))+(i.position.top-i.originalPosition.top)||null;i.element.animate(t.extend(h,c&&l?{top:c,left:l}:{}),{duration:s.animateDuration,easing:s.animateEasing,step:function(){var s={width:parseFloat(i.element.css("width")),height:parseFloat(i.element.css("height")),top:parseFloat(i.element.css("top")),left:parseFloat(i.element.css("left"))};n&&n.length&&t(n[0]).css({width:s.width,height:s.height}),i._updateCache(s),i._propagate("resize",e)}})}}),t.ui.plugin.add("resizable","containment",{start:function(){var e,i,s,n,o,a,r,h=t(this).resizable("instance"),l=h.options,c=h.element,u=l.containment,d=u instanceof t?u.get(0):/parent/.test(u)?c.parent().get(0):u;d&&(h.containerElement=t(d),/document/.test(u)||u===document?(h.containerOffset={left:0,top:0},h.containerPosition={left:0,top:0},h.parentData={element:t(document),left:0,top:0,width:t(document).width(),height:t(document).height()||document.body.parentNode.scrollHeight}):(e=t(d),i=[],t(["Top","Right","Left","Bottom"]).each(function(t,s){i[t]=h._num(e.css("padding"+s))}),h.containerOffset=e.offset(),h.containerPosition=e.position(),h.containerSize={height:e.innerHeight()-i[3],width:e.innerWidth()-i[1]},s=h.containerOffset,n=h.containerSize.height,o=h.containerSize.width,a=h._hasScroll(d,"left")?d.scrollWidth:o,r=h._hasScroll(d)?d.scrollHeight:n,h.parentData={element:d,left:s.left,top:s.top,width:a,height:r}))},resize:function(e){var 
i,s,n,o,a=t(this).resizable("instance"),r=a.options,h=a.containerOffset,l=a.position,c=a._aspectRatio||e.shiftKey,u={top:0,left:0},d=a.containerElement,p=!0;d[0]!==document&&/static/.test(d.css("position"))&&(u=h),l.left<(a._helper?h.left:0)&&(a.size.width=a.size.width+(a._helper?a.position.left-h.left:a.position.left-u.left),c&&(a.size.height=a.size.width/a.aspectRatio,p=!1),a.position.left=r.helper?h.left:0),l.top<(a._helper?h.top:0)&&(a.size.height=a.size.height+(a._helper?a.position.top-h.top:a.position.top),c&&(a.size.width=a.size.height*a.aspectRatio,p=!1),a.position.top=a._helper?h.top:0),n=a.containerElement.get(0)===a.element.parent().get(0),o=/relative|absolute/.test(a.containerElement.css("position")),n&&o?(a.offset.left=a.parentData.left+a.position.left,a.offset.top=a.parentData.top+a.position.top):(a.offset.left=a.element.offset().left,a.offset.top=a.element.offset().top),i=Math.abs(a.sizeDiff.width+(a._helper?a.offset.left-u.left:a.offset.left-h.left)),s=Math.abs(a.sizeDiff.height+(a._helper?a.offset.top-u.top:a.offset.top-h.top)),i+a.size.width>=a.parentData.width&&(a.size.width=a.parentData.width-i,c&&(a.size.height=a.size.width/a.aspectRatio,p=!1)),s+a.size.height>=a.parentData.height&&(a.size.height=a.parentData.height-s,c&&(a.size.width=a.size.height*a.aspectRatio,p=!1)),p||(a.position.left=a.prevPosition.left,a.position.top=a.prevPosition.top,a.size.width=a.prevSize.width,a.size.height=a.prevSize.height)},stop:function(){var e=t(this).resizable("instance"),i=e.options,s=e.containerOffset,n=e.containerPosition,o=e.containerElement,a=t(e.helper),r=a.offset(),h=a.outerWidth()-e.sizeDiff.width,l=a.outerHeight()-e.sizeDiff.height;e._helper&&!i.animate&&/relative/.test(o.css("position"))&&t(this).css({left:r.left-n.left-s.left,width:h,height:l}),e._helper&&!i.animate&&/static/.test(o.css("position"))&&t(this).css({left:r.left-n.left-s.left,width:h,height:l})}}),t.ui.plugin.add("resizable","alsoResize",{start:function(){var e=t(this).resizable("instance"),i=e.options;t(i.alsoResize).each(function(){var e=t(this);e.data("ui-resizable-alsoresize",{width:parseFloat(e.width()),height:parseFloat(e.height()),left:parseFloat(e.css("left")),top:parseFloat(e.css("top"))})})},resize:function(e,i){var s=t(this).resizable("instance"),n=s.options,o=s.originalSize,a=s.originalPosition,r={height:s.size.height-o.height||0,width:s.size.width-o.width||0,top:s.position.top-a.top||0,left:s.position.left-a.left||0};t(n.alsoResize).each(function(){var e=t(this),s=t(this).data("ui-resizable-alsoresize"),n={},o=e.parents(i.originalElement[0]).length?["width","height"]:["width","height","top","left"];t.each(o,function(t,e){var i=(s[e]||0)+(r[e]||0);i&&i>=0&&(n[e]=i||null)}),e.css(n)})},stop:function(){t(this).removeData("ui-resizable-alsoresize")}}),t.ui.plugin.add("resizable","ghost",{start:function(){var e=t(this).resizable("instance"),i=e.size;e.ghost=e.originalElement.clone(),e.ghost.css({opacity:.25,display:"block",position:"relative",height:i.height,width:i.width,margin:0,left:0,top:0}),e._addClass(e.ghost,"ui-resizable-ghost"),t.uiBackCompat!==!1&&"string"==typeof e.options.ghost&&e.ghost.addClass(this.options.ghost),e.ghost.appendTo(e.helper)},resize:function(){var e=t(this).resizable("instance");e.ghost&&e.ghost.css({position:"relative",height:e.size.height,width:e.size.width})},stop:function(){var e=t(this).resizable("instance");e.ghost&&e.helper&&e.helper.get(0).removeChild(e.ghost.get(0))}}),t.ui.plugin.add("resizable","grid",{resize:function(){var 
e,i=t(this).resizable("instance"),s=i.options,n=i.size,o=i.originalSize,a=i.originalPosition,r=i.axis,h="number"==typeof s.grid?[s.grid,s.grid]:s.grid,l=h[0]||1,c=h[1]||1,u=Math.round((n.width-o.width)/l)*l,d=Math.round((n.height-o.height)/c)*c,p=o.width+u,f=o.height+d,g=s.maxWidth&&p>s.maxWidth,m=s.maxHeight&&f>s.maxHeight,_=s.minWidth&&s.minWidth>p,v=s.minHeight&&s.minHeight>f;s.grid=h,_&&(p+=l),v&&(f+=c),g&&(p-=l),m&&(f-=c),/^(se|s|e)$/.test(r)?(i.size.width=p,i.size.height=f):/^(ne)$/.test(r)?(i.size.width=p,i.size.height=f,i.position.top=a.top-d):/^(sw)$/.test(r)?(i.size.width=p,i.size.height=f,i.position.left=a.left-u):((0>=f-c||0>=p-l)&&(e=i._getPaddingPlusBorderDimensions(this)),f-c>0?(i.size.height=f,i.position.top=a.top-d):(f=c-e.height,i.size.height=f,i.position.top=a.top+o.height-f),p-l>0?(i.size.width=p,i.position.left=a.left-u):(p=l-e.width,i.size.width=p,i.position.left=a.left+o.width-p))}}),t.ui.resizable,t.widget("ui.dialog",{version:"1.12.1",options:{appendTo:"body",autoOpen:!0,buttons:[],classes:{"ui-dialog":"ui-corner-all","ui-dialog-titlebar":"ui-corner-all"},closeOnEscape:!0,closeText:"Close",draggable:!0,hide:null,height:"auto",maxHeight:null,maxWidth:null,minHeight:150,minWidth:150,modal:!1,position:{my:"center",at:"center",of:window,collision:"fit",using:function(e){var i=t(this).css(e).offset().top;0>i&&t(this).css("top",e.top-i)}},resizable:!0,show:null,title:null,width:300,beforeClose:null,close:null,drag:null,dragStart:null,dragStop:null,focus:null,open:null,resize:null,resizeStart:null,resizeStop:null},sizeRelatedOptions:{buttons:!0,height:!0,maxHeight:!0,maxWidth:!0,minHeight:!0,minWidth:!0,width:!0},resizableRelatedOptions:{maxHeight:!0,maxWidth:!0,minHeight:!0,minWidth:!0},_create:function(){this.originalCss={display:this.element[0].style.display,width:this.element[0].style.width,minHeight:this.element[0].style.minHeight,maxHeight:this.element[0].style.maxHeight,height:this.element[0].style.height},this.originalPosition={parent:this.element.parent(),index:this.element.parent().children().index(this.element)},this.originalTitle=this.element.attr("title"),null==this.options.title&&null!=this.originalTitle&&(this.options.title=this.originalTitle),this.options.disabled&&(this.options.disabled=!1),this._createWrapper(),this.element.show().removeAttr("title").appendTo(this.uiDialog),this._addClass("ui-dialog-content","ui-widget-content"),this._createTitlebar(),this._createButtonPane(),this.options.draggable&&t.fn.draggable&&this._makeDraggable(),this.options.resizable&&t.fn.resizable&&this._makeResizable(),this._isOpen=!1,this._trackFocus()},_init:function(){this.options.autoOpen&&this.open()},_appendTo:function(){var e=this.options.appendTo;return e&&(e.jquery||e.nodeType)?t(e):this.document.find(e||"body").eq(0)},_destroy:function(){var t,e=this.originalPosition;this._untrackInstance(),this._destroyOverlay(),this.element.removeUniqueId().css(this.originalCss).detach(),this.uiDialog.remove(),this.originalTitle&&this.element.attr("title",this.originalTitle),t=e.parent.children().eq(e.index),t.length&&t[0]!==this.element[0]?t.before(this.element):e.parent.append(this.element)},widget:function(){return this.uiDialog +},disable:t.noop,enable:t.noop,close:function(e){var 
i=this;this._isOpen&&this._trigger("beforeClose",e)!==!1&&(this._isOpen=!1,this._focusedElement=null,this._destroyOverlay(),this._untrackInstance(),this.opener.filter(":focusable").trigger("focus").length||t.ui.safeBlur(t.ui.safeActiveElement(this.document[0])),this._hide(this.uiDialog,this.options.hide,function(){i._trigger("close",e)}))},isOpen:function(){return this._isOpen},moveToTop:function(){this._moveToTop()},_moveToTop:function(e,i){var s=!1,n=this.uiDialog.siblings(".ui-front:visible").map(function(){return+t(this).css("z-index")}).get(),o=Math.max.apply(null,n);return o>=+this.uiDialog.css("z-index")&&(this.uiDialog.css("z-index",o+1),s=!0),s&&!i&&this._trigger("focus",e),s},open:function(){var e=this;return this._isOpen?(this._moveToTop()&&this._focusTabbable(),void 0):(this._isOpen=!0,this.opener=t(t.ui.safeActiveElement(this.document[0])),this._size(),this._position(),this._createOverlay(),this._moveToTop(null,!0),this.overlay&&this.overlay.css("z-index",this.uiDialog.css("z-index")-1),this._show(this.uiDialog,this.options.show,function(){e._focusTabbable(),e._trigger("focus")}),this._makeFocusTarget(),this._trigger("open"),void 0)},_focusTabbable:function(){var t=this._focusedElement;t||(t=this.element.find("[autofocus]")),t.length||(t=this.element.find(":tabbable")),t.length||(t=this.uiDialogButtonPane.find(":tabbable")),t.length||(t=this.uiDialogTitlebarClose.filter(":tabbable")),t.length||(t=this.uiDialog),t.eq(0).trigger("focus")},_keepFocus:function(e){function i(){var e=t.ui.safeActiveElement(this.document[0]),i=this.uiDialog[0]===e||t.contains(this.uiDialog[0],e);i||this._focusTabbable()}e.preventDefault(),i.call(this),this._delay(i)},_createWrapper:function(){this.uiDialog=t("
    ").hide().attr({tabIndex:-1,role:"dialog"}).appendTo(this._appendTo()),this._addClass(this.uiDialog,"ui-dialog","ui-widget ui-widget-content ui-front"),this._on(this.uiDialog,{keydown:function(e){if(this.options.closeOnEscape&&!e.isDefaultPrevented()&&e.keyCode&&e.keyCode===t.ui.keyCode.ESCAPE)return e.preventDefault(),this.close(e),void 0;if(e.keyCode===t.ui.keyCode.TAB&&!e.isDefaultPrevented()){var i=this.uiDialog.find(":tabbable"),s=i.filter(":first"),n=i.filter(":last");e.target!==n[0]&&e.target!==this.uiDialog[0]||e.shiftKey?e.target!==s[0]&&e.target!==this.uiDialog[0]||!e.shiftKey||(this._delay(function(){n.trigger("focus")}),e.preventDefault()):(this._delay(function(){s.trigger("focus")}),e.preventDefault())}},mousedown:function(t){this._moveToTop(t)&&this._focusTabbable()}}),this.element.find("[aria-describedby]").length||this.uiDialog.attr({"aria-describedby":this.element.uniqueId().attr("id")})},_createTitlebar:function(){var e;this.uiDialogTitlebar=t("
    "),this._addClass(this.uiDialogTitlebar,"ui-dialog-titlebar","ui-widget-header ui-helper-clearfix"),this._on(this.uiDialogTitlebar,{mousedown:function(e){t(e.target).closest(".ui-dialog-titlebar-close")||this.uiDialog.trigger("focus")}}),this.uiDialogTitlebarClose=t("").button({label:t("").text(this.options.closeText).html(),icon:"ui-icon-closethick",showLabel:!1}).appendTo(this.uiDialogTitlebar),this._addClass(this.uiDialogTitlebarClose,"ui-dialog-titlebar-close"),this._on(this.uiDialogTitlebarClose,{click:function(t){t.preventDefault(),this.close(t)}}),e=t("").uniqueId().prependTo(this.uiDialogTitlebar),this._addClass(e,"ui-dialog-title"),this._title(e),this.uiDialogTitlebar.prependTo(this.uiDialog),this.uiDialog.attr({"aria-labelledby":e.attr("id")})},_title:function(t){this.options.title?t.text(this.options.title):t.html(" ")},_createButtonPane:function(){this.uiDialogButtonPane=t("
    "),this._addClass(this.uiDialogButtonPane,"ui-dialog-buttonpane","ui-widget-content ui-helper-clearfix"),this.uiButtonSet=t("
    ").appendTo(this.uiDialogButtonPane),this._addClass(this.uiButtonSet,"ui-dialog-buttonset"),this._createButtons()},_createButtons:function(){var e=this,i=this.options.buttons;return this.uiDialogButtonPane.remove(),this.uiButtonSet.empty(),t.isEmptyObject(i)||t.isArray(i)&&!i.length?(this._removeClass(this.uiDialog,"ui-dialog-buttons"),void 0):(t.each(i,function(i,s){var n,o;s=t.isFunction(s)?{click:s,text:i}:s,s=t.extend({type:"button"},s),n=s.click,o={icon:s.icon,iconPosition:s.iconPosition,showLabel:s.showLabel,icons:s.icons,text:s.text},delete s.click,delete s.icon,delete s.iconPosition,delete s.showLabel,delete s.icons,"boolean"==typeof s.text&&delete s.text,t("",s).button(o).appendTo(e.uiButtonSet).on("click",function(){n.apply(e.element[0],arguments)})}),this._addClass(this.uiDialog,"ui-dialog-buttons"),this.uiDialogButtonPane.appendTo(this.uiDialog),void 0)},_makeDraggable:function(){function e(t){return{position:t.position,offset:t.offset}}var i=this,s=this.options;this.uiDialog.draggable({cancel:".ui-dialog-content, .ui-dialog-titlebar-close",handle:".ui-dialog-titlebar",containment:"document",start:function(s,n){i._addClass(t(this),"ui-dialog-dragging"),i._blockFrames(),i._trigger("dragStart",s,e(n))},drag:function(t,s){i._trigger("drag",t,e(s))},stop:function(n,o){var a=o.offset.left-i.document.scrollLeft(),r=o.offset.top-i.document.scrollTop();s.position={my:"left top",at:"left"+(a>=0?"+":"")+a+" "+"top"+(r>=0?"+":"")+r,of:i.window},i._removeClass(t(this),"ui-dialog-dragging"),i._unblockFrames(),i._trigger("dragStop",n,e(o))}})},_makeResizable:function(){function e(t){return{originalPosition:t.originalPosition,originalSize:t.originalSize,position:t.position,size:t.size}}var i=this,s=this.options,n=s.resizable,o=this.uiDialog.css("position"),a="string"==typeof n?n:"n,e,s,w,se,sw,ne,nw";this.uiDialog.resizable({cancel:".ui-dialog-content",containment:"document",alsoResize:this.element,maxWidth:s.maxWidth,maxHeight:s.maxHeight,minWidth:s.minWidth,minHeight:this._minHeight(),handles:a,start:function(s,n){i._addClass(t(this),"ui-dialog-resizing"),i._blockFrames(),i._trigger("resizeStart",s,e(n))},resize:function(t,s){i._trigger("resize",t,e(s))},stop:function(n,o){var a=i.uiDialog.offset(),r=a.left-i.document.scrollLeft(),h=a.top-i.document.scrollTop();s.height=i.uiDialog.height(),s.width=i.uiDialog.width(),s.position={my:"left top",at:"left"+(r>=0?"+":"")+r+" "+"top"+(h>=0?"+":"")+h,of:i.window},i._removeClass(t(this),"ui-dialog-resizing"),i._unblockFrames(),i._trigger("resizeStop",n,e(o))}}).css("position",o)},_trackFocus:function(){this._on(this.widget(),{focusin:function(e){this._makeFocusTarget(),this._focusedElement=t(e.target)}})},_makeFocusTarget:function(){this._untrackInstance(),this._trackingInstances().unshift(this)},_untrackInstance:function(){var e=this._trackingInstances(),i=t.inArray(this,e);-1!==i&&e.splice(i,1)},_trackingInstances:function(){var t=this.document.data("ui-dialog-instances");return t||(t=[],this.document.data("ui-dialog-instances",t)),t},_minHeight:function(){var t=this.options;return"auto"===t.height?t.minHeight:Math.min(t.minHeight,t.height)},_position:function(){var t=this.uiDialog.is(":visible");t||this.uiDialog.show(),this.uiDialog.position(this.options.position),t||this.uiDialog.hide()},_setOptions:function(e){var i=this,s=!1,n={};t.each(e,function(t,e){i._setOption(t,e),t in i.sizeRelatedOptions&&(s=!0),t in 
i.resizableRelatedOptions&&(n[t]=e)}),s&&(this._size(),this._position()),this.uiDialog.is(":data(ui-resizable)")&&this.uiDialog.resizable("option",n)},_setOption:function(e,i){var s,n,o=this.uiDialog;"disabled"!==e&&(this._super(e,i),"appendTo"===e&&this.uiDialog.appendTo(this._appendTo()),"buttons"===e&&this._createButtons(),"closeText"===e&&this.uiDialogTitlebarClose.button({label:t("").text(""+this.options.closeText).html()}),"draggable"===e&&(s=o.is(":data(ui-draggable)"),s&&!i&&o.draggable("destroy"),!s&&i&&this._makeDraggable()),"position"===e&&this._position(),"resizable"===e&&(n=o.is(":data(ui-resizable)"),n&&!i&&o.resizable("destroy"),n&&"string"==typeof i&&o.resizable("option","handles",i),n||i===!1||this._makeResizable()),"title"===e&&this._title(this.uiDialogTitlebar.find(".ui-dialog-title")))},_size:function(){var t,e,i,s=this.options;this.element.show().css({width:"auto",minHeight:0,maxHeight:"none",height:0}),s.minWidth>s.width&&(s.width=s.minWidth),t=this.uiDialog.css({height:"auto",width:s.width}).outerHeight(),e=Math.max(0,s.minHeight-t),i="number"==typeof s.maxHeight?Math.max(0,s.maxHeight-t):"none","auto"===s.height?this.element.css({minHeight:e,maxHeight:i,height:"auto"}):this.element.height(Math.max(0,s.height-t)),this.uiDialog.is(":data(ui-resizable)")&&this.uiDialog.resizable("option","minHeight",this._minHeight())},_blockFrames:function(){this.iframeBlocks=this.document.find("iframe").map(function(){var e=t(this);return t("
    ").css({position:"absolute",width:e.outerWidth(),height:e.outerHeight()}).appendTo(e.parent()).offset(e.offset())[0]})},_unblockFrames:function(){this.iframeBlocks&&(this.iframeBlocks.remove(),delete this.iframeBlocks)},_allowInteraction:function(e){return t(e.target).closest(".ui-dialog").length?!0:!!t(e.target).closest(".ui-datepicker").length},_createOverlay:function(){if(this.options.modal){var e=!0;this._delay(function(){e=!1}),this.document.data("ui-dialog-overlays")||this._on(this.document,{focusin:function(t){e||this._allowInteraction(t)||(t.preventDefault(),this._trackingInstances()[0]._focusTabbable())}}),this.overlay=t("
    ").appendTo(this._appendTo()),this._addClass(this.overlay,null,"ui-widget-overlay ui-front"),this._on(this.overlay,{mousedown:"_keepFocus"}),this.document.data("ui-dialog-overlays",(this.document.data("ui-dialog-overlays")||0)+1)}},_destroyOverlay:function(){if(this.options.modal&&this.overlay){var t=this.document.data("ui-dialog-overlays")-1;t?this.document.data("ui-dialog-overlays",t):(this._off(this.document,"focusin"),this.document.removeData("ui-dialog-overlays")),this.overlay.remove(),this.overlay=null}}}),t.uiBackCompat!==!1&&t.widget("ui.dialog",t.ui.dialog,{options:{dialogClass:""},_createWrapper:function(){this._super(),this.uiDialog.addClass(this.options.dialogClass)},_setOption:function(t,e){"dialogClass"===t&&this.uiDialog.removeClass(this.options.dialogClass).addClass(e),this._superApply(arguments)}}),t.ui.dialog,t.widget("ui.droppable",{version:"1.12.1",widgetEventPrefix:"drop",options:{accept:"*",addClasses:!0,greedy:!1,scope:"default",tolerance:"intersect",activate:null,deactivate:null,drop:null,out:null,over:null},_create:function(){var e,i=this.options,s=i.accept;this.isover=!1,this.isout=!0,this.accept=t.isFunction(s)?s:function(t){return t.is(s)},this.proportions=function(){return arguments.length?(e=arguments[0],void 0):e?e:e={width:this.element[0].offsetWidth,height:this.element[0].offsetHeight}},this._addToManager(i.scope),i.addClasses&&this._addClass("ui-droppable")},_addToManager:function(e){t.ui.ddmanager.droppables[e]=t.ui.ddmanager.droppables[e]||[],t.ui.ddmanager.droppables[e].push(this)},_splice:function(t){for(var e=0;t.length>e;e++)t[e]===this&&t.splice(e,1)},_destroy:function(){var e=t.ui.ddmanager.droppables[this.options.scope];this._splice(e)},_setOption:function(e,i){if("accept"===e)this.accept=t.isFunction(i)?i:function(t){return t.is(i)};else if("scope"===e){var s=t.ui.ddmanager.droppables[this.options.scope];this._splice(s),this._addToManager(i)}this._super(e,i)},_activate:function(e){var i=t.ui.ddmanager.current;this._addActiveClass(),i&&this._trigger("activate",e,this.ui(i))},_deactivate:function(e){var i=t.ui.ddmanager.current;this._removeActiveClass(),i&&this._trigger("deactivate",e,this.ui(i))},_over:function(e){var i=t.ui.ddmanager.current;i&&(i.currentItem||i.element)[0]!==this.element[0]&&this.accept.call(this.element[0],i.currentItem||i.element)&&(this._addHoverClass(),this._trigger("over",e,this.ui(i)))},_out:function(e){var i=t.ui.ddmanager.current;i&&(i.currentItem||i.element)[0]!==this.element[0]&&this.accept.call(this.element[0],i.currentItem||i.element)&&(this._removeHoverClass(),this._trigger("out",e,this.ui(i)))},_drop:function(e,i){var s=i||t.ui.ddmanager.current,n=!1;return s&&(s.currentItem||s.element)[0]!==this.element[0]?(this.element.find(":data(ui-droppable)").not(".ui-draggable-dragging").each(function(){var i=t(this).droppable("instance");return i.options.greedy&&!i.options.disabled&&i.options.scope===s.options.scope&&i.accept.call(i.element[0],s.currentItem||s.element)&&v(s,t.extend(i,{offset:i.element.offset()}),i.options.tolerance,e)?(n=!0,!1):void 
0}),n?!1:this.accept.call(this.element[0],s.currentItem||s.element)?(this._removeActiveClass(),this._removeHoverClass(),this._trigger("drop",e,this.ui(s)),this.element):!1):!1},ui:function(t){return{draggable:t.currentItem||t.element,helper:t.helper,position:t.position,offset:t.positionAbs}},_addHoverClass:function(){this._addClass("ui-droppable-hover")},_removeHoverClass:function(){this._removeClass("ui-droppable-hover")},_addActiveClass:function(){this._addClass("ui-droppable-active")},_removeActiveClass:function(){this._removeClass("ui-droppable-active")}});var v=t.ui.intersect=function(){function t(t,e,i){return t>=e&&e+i>t}return function(e,i,s,n){if(!i.offset)return!1;var o=(e.positionAbs||e.position.absolute).left+e.margins.left,a=(e.positionAbs||e.position.absolute).top+e.margins.top,r=o+e.helperProportions.width,h=a+e.helperProportions.height,l=i.offset.left,c=i.offset.top,u=l+i.proportions().width,d=c+i.proportions().height;switch(s){case"fit":return o>=l&&u>=r&&a>=c&&d>=h;case"intersect":return o+e.helperProportions.width/2>l&&u>r-e.helperProportions.width/2&&a+e.helperProportions.height/2>c&&d>h-e.helperProportions.height/2;case"pointer":return t(n.pageY,c,i.proportions().height)&&t(n.pageX,l,i.proportions().width);case"touch":return(a>=c&&d>=a||h>=c&&d>=h||c>a&&h>d)&&(o>=l&&u>=o||r>=l&&u>=r||l>o&&r>u);default:return!1}}}();t.ui.ddmanager={current:null,droppables:{"default":[]},prepareOffsets:function(e,i){var s,n,o=t.ui.ddmanager.droppables[e.options.scope]||[],a=i?i.type:null,r=(e.currentItem||e.element).find(":data(ui-droppable)").addBack();t:for(s=0;o.length>s;s++)if(!(o[s].options.disabled||e&&!o[s].accept.call(o[s].element[0],e.currentItem||e.element))){for(n=0;r.length>n;n++)if(r[n]===o[s].element[0]){o[s].proportions().height=0;continue t}o[s].visible="none"!==o[s].element.css("display"),o[s].visible&&("mousedown"===a&&o[s]._activate.call(o[s],i),o[s].offset=o[s].element.offset(),o[s].proportions({width:o[s].element[0].offsetWidth,height:o[s].element[0].offsetHeight}))}},drop:function(e,i){var s=!1;return t.each((t.ui.ddmanager.droppables[e.options.scope]||[]).slice(),function(){this.options&&(!this.options.disabled&&this.visible&&v(e,this,this.options.tolerance,i)&&(s=this._drop.call(this,i)||s),!this.options.disabled&&this.visible&&this.accept.call(this.element[0],e.currentItem||e.element)&&(this.isout=!0,this.isover=!1,this._deactivate.call(this,i)))}),s},dragStart:function(e,i){e.element.parentsUntil("body").on("scroll.droppable",function(){e.options.refreshPositions||t.ui.ddmanager.prepareOffsets(e,i)})},drag:function(e,i){e.options.refreshPositions&&t.ui.ddmanager.prepareOffsets(e,i),t.each(t.ui.ddmanager.droppables[e.options.scope]||[],function(){if(!this.options.disabled&&!this.greedyChild&&this.visible){var s,n,o,a=v(e,this,this.options.tolerance,i),r=!a&&this.isover?"isout":a&&!this.isover?"isover":null;r&&(this.options.greedy&&(n=this.options.scope,o=this.element.parents(":data(ui-droppable)").filter(function(){return 
t(this).droppable("instance").options.scope===n}),o.length&&(s=t(o[0]).droppable("instance"),s.greedyChild="isover"===r)),s&&"isover"===r&&(s.isover=!1,s.isout=!0,s._out.call(s,i)),this[r]=!0,this["isout"===r?"isover":"isout"]=!1,this["isover"===r?"_over":"_out"].call(this,i),s&&"isout"===r&&(s.isout=!1,s.isover=!0,s._over.call(s,i)))}})},dragStop:function(e,i){e.element.parentsUntil("body").off("scroll.droppable"),e.options.refreshPositions||t.ui.ddmanager.prepareOffsets(e,i)}},t.uiBackCompat!==!1&&t.widget("ui.droppable",t.ui.droppable,{options:{hoverClass:!1,activeClass:!1},_addActiveClass:function(){this._super(),this.options.activeClass&&this.element.addClass(this.options.activeClass)},_removeActiveClass:function(){this._super(),this.options.activeClass&&this.element.removeClass(this.options.activeClass)},_addHoverClass:function(){this._super(),this.options.hoverClass&&this.element.addClass(this.options.hoverClass)},_removeHoverClass:function(){this._super(),this.options.hoverClass&&this.element.removeClass(this.options.hoverClass)}}),t.ui.droppable,t.widget("ui.progressbar",{version:"1.12.1",options:{classes:{"ui-progressbar":"ui-corner-all","ui-progressbar-value":"ui-corner-left","ui-progressbar-complete":"ui-corner-right"},max:100,value:0,change:null,complete:null},min:0,_create:function(){this.oldValue=this.options.value=this._constrainedValue(),this.element.attr({role:"progressbar","aria-valuemin":this.min}),this._addClass("ui-progressbar","ui-widget ui-widget-content"),this.valueDiv=t("
    ").appendTo(this.element),this._addClass(this.valueDiv,"ui-progressbar-value","ui-widget-header"),this._refreshValue()},_destroy:function(){this.element.removeAttr("role aria-valuemin aria-valuemax aria-valuenow"),this.valueDiv.remove()},value:function(t){return void 0===t?this.options.value:(this.options.value=this._constrainedValue(t),this._refreshValue(),void 0)},_constrainedValue:function(t){return void 0===t&&(t=this.options.value),this.indeterminate=t===!1,"number"!=typeof t&&(t=0),this.indeterminate?!1:Math.min(this.options.max,Math.max(this.min,t))},_setOptions:function(t){var e=t.value;delete t.value,this._super(t),this.options.value=this._constrainedValue(e),this._refreshValue()},_setOption:function(t,e){"max"===t&&(e=Math.max(this.min,e)),this._super(t,e)},_setOptionDisabled:function(t){this._super(t),this.element.attr("aria-disabled",t),this._toggleClass(null,"ui-state-disabled",!!t)},_percentage:function(){return this.indeterminate?100:100*(this.options.value-this.min)/(this.options.max-this.min)},_refreshValue:function(){var e=this.options.value,i=this._percentage();this.valueDiv.toggle(this.indeterminate||e>this.min).width(i.toFixed(0)+"%"),this._toggleClass(this.valueDiv,"ui-progressbar-complete",null,e===this.options.max)._toggleClass("ui-progressbar-indeterminate",null,this.indeterminate),this.indeterminate?(this.element.removeAttr("aria-valuenow"),this.overlayDiv||(this.overlayDiv=t("
    ").appendTo(this.valueDiv),this._addClass(this.overlayDiv,"ui-progressbar-overlay"))):(this.element.attr({"aria-valuemax":this.options.max,"aria-valuenow":e}),this.overlayDiv&&(this.overlayDiv.remove(),this.overlayDiv=null)),this.oldValue!==e&&(this.oldValue=e,this._trigger("change")),e===this.options.max&&this._trigger("complete")}}),t.widget("ui.selectable",t.ui.mouse,{version:"1.12.1",options:{appendTo:"body",autoRefresh:!0,distance:0,filter:"*",tolerance:"touch",selected:null,selecting:null,start:null,stop:null,unselected:null,unselecting:null},_create:function(){var e=this;this._addClass("ui-selectable"),this.dragged=!1,this.refresh=function(){e.elementPos=t(e.element[0]).offset(),e.selectees=t(e.options.filter,e.element[0]),e._addClass(e.selectees,"ui-selectee"),e.selectees.each(function(){var i=t(this),s=i.offset(),n={left:s.left-e.elementPos.left,top:s.top-e.elementPos.top};t.data(this,"selectable-item",{element:this,$element:i,left:n.left,top:n.top,right:n.left+i.outerWidth(),bottom:n.top+i.outerHeight(),startselected:!1,selected:i.hasClass("ui-selected"),selecting:i.hasClass("ui-selecting"),unselecting:i.hasClass("ui-unselecting")})})},this.refresh(),this._mouseInit(),this.helper=t("
    "),this._addClass(this.helper,"ui-selectable-helper")},_destroy:function(){this.selectees.removeData("selectable-item"),this._mouseDestroy()},_mouseStart:function(e){var i=this,s=this.options;this.opos=[e.pageX,e.pageY],this.elementPos=t(this.element[0]).offset(),this.options.disabled||(this.selectees=t(s.filter,this.element[0]),this._trigger("start",e),t(s.appendTo).append(this.helper),this.helper.css({left:e.pageX,top:e.pageY,width:0,height:0}),s.autoRefresh&&this.refresh(),this.selectees.filter(".ui-selected").each(function(){var s=t.data(this,"selectable-item");s.startselected=!0,e.metaKey||e.ctrlKey||(i._removeClass(s.$element,"ui-selected"),s.selected=!1,i._addClass(s.$element,"ui-unselecting"),s.unselecting=!0,i._trigger("unselecting",e,{unselecting:s.element}))}),t(e.target).parents().addBack().each(function(){var s,n=t.data(this,"selectable-item");return n?(s=!e.metaKey&&!e.ctrlKey||!n.$element.hasClass("ui-selected"),i._removeClass(n.$element,s?"ui-unselecting":"ui-selected")._addClass(n.$element,s?"ui-selecting":"ui-unselecting"),n.unselecting=!s,n.selecting=s,n.selected=s,s?i._trigger("selecting",e,{selecting:n.element}):i._trigger("unselecting",e,{unselecting:n.element}),!1):void 0}))},_mouseDrag:function(e){if(this.dragged=!0,!this.options.disabled){var i,s=this,n=this.options,o=this.opos[0],a=this.opos[1],r=e.pageX,h=e.pageY;return o>r&&(i=r,r=o,o=i),a>h&&(i=h,h=a,a=i),this.helper.css({left:o,top:a,width:r-o,height:h-a}),this.selectees.each(function(){var i=t.data(this,"selectable-item"),l=!1,c={};i&&i.element!==s.element[0]&&(c.left=i.left+s.elementPos.left,c.right=i.right+s.elementPos.left,c.top=i.top+s.elementPos.top,c.bottom=i.bottom+s.elementPos.top,"touch"===n.tolerance?l=!(c.left>r||o>c.right||c.top>h||a>c.bottom):"fit"===n.tolerance&&(l=c.left>o&&r>c.right&&c.top>a&&h>c.bottom),l?(i.selected&&(s._removeClass(i.$element,"ui-selected"),i.selected=!1),i.unselecting&&(s._removeClass(i.$element,"ui-unselecting"),i.unselecting=!1),i.selecting||(s._addClass(i.$element,"ui-selecting"),i.selecting=!0,s._trigger("selecting",e,{selecting:i.element}))):(i.selecting&&((e.metaKey||e.ctrlKey)&&i.startselected?(s._removeClass(i.$element,"ui-selecting"),i.selecting=!1,s._addClass(i.$element,"ui-selected"),i.selected=!0):(s._removeClass(i.$element,"ui-selecting"),i.selecting=!1,i.startselected&&(s._addClass(i.$element,"ui-unselecting"),i.unselecting=!0),s._trigger("unselecting",e,{unselecting:i.element}))),i.selected&&(e.metaKey||e.ctrlKey||i.startselected||(s._removeClass(i.$element,"ui-selected"),i.selected=!1,s._addClass(i.$element,"ui-unselecting"),i.unselecting=!0,s._trigger("unselecting",e,{unselecting:i.element})))))}),!1}},_mouseStop:function(e){var i=this;return this.dragged=!1,t(".ui-unselecting",this.element[0]).each(function(){var s=t.data(this,"selectable-item");i._removeClass(s.$element,"ui-unselecting"),s.unselecting=!1,s.startselected=!1,i._trigger("unselected",e,{unselected:s.element})}),t(".ui-selecting",this.element[0]).each(function(){var 
s=t.data(this,"selectable-item");i._removeClass(s.$element,"ui-selecting")._addClass(s.$element,"ui-selected"),s.selecting=!1,s.selected=!0,s.startselected=!0,i._trigger("selected",e,{selected:s.element})}),this._trigger("stop",e),this.helper.remove(),!1}}),t.widget("ui.selectmenu",[t.ui.formResetMixin,{version:"1.12.1",defaultElement:"",widgetEventPrefix:"spin",options:{classes:{"ui-spinner":"ui-corner-all","ui-spinner-down":"ui-corner-br","ui-spinner-up":"ui-corner-tr"},culture:null,icons:{down:"ui-icon-triangle-1-s",up:"ui-icon-triangle-1-n"},incremental:!0,max:null,min:null,numberFormat:null,page:10,step:1,change:null,spin:null,start:null,stop:null},_create:function(){this._setOption("max",this.options.max),this._setOption("min",this.options.min),this._setOption("step",this.options.step),""!==this.value()&&this._value(this.element.val(),!0),this._draw(),this._on(this._events),this._refresh(),this._on(this.window,{beforeunload:function(){this.element.removeAttr("autocomplete")}})},_getCreateOptions:function(){var e=this._super(),i=this.element;return t.each(["min","max","step"],function(t,s){var n=i.attr(s);null!=n&&n.length&&(e[s]=n)}),e},_events:{keydown:function(t){this._start(t)&&this._keydown(t)&&t.preventDefault()},keyup:"_stop",focus:function(){this.previous=this.element.val()},blur:function(t){return this.cancelBlur?(delete this.cancelBlur,void 0):(this._stop(),this._refresh(),this.previous!==this.element.val()&&this._trigger("change",t),void 0)},mousewheel:function(t,e){if(e){if(!this.spinning&&!this._start(t))return!1;this._spin((e>0?1:-1)*this.options.step,t),clearTimeout(this.mousewheelTimer),this.mousewheelTimer=this._delay(function(){this.spinning&&this._stop(t)},100),t.preventDefault()}},"mousedown .ui-spinner-button":function(e){function i(){var e=this.element[0]===t.ui.safeActiveElement(this.document[0]);e||(this.element.trigger("focus"),this.previous=s,this._delay(function(){this.previous=s}))}var s;s=this.element[0]===t.ui.safeActiveElement(this.document[0])?this.previous:this.element.val(),e.preventDefault(),i.call(this),this.cancelBlur=!0,this._delay(function(){delete this.cancelBlur,i.call(this)}),this._start(e)!==!1&&this._repeat(null,t(e.currentTarget).hasClass("ui-spinner-up")?1:-1,e)},"mouseup .ui-spinner-button":"_stop","mouseenter .ui-spinner-button":function(e){return t(e.currentTarget).hasClass("ui-state-active")?this._start(e)===!1?!1:(this._repeat(null,t(e.currentTarget).hasClass("ui-spinner-up")?1:-1,e),void 0):void 0},"mouseleave .ui-spinner-button":"_stop"},_enhance:function(){this.uiSpinner=this.element.attr("autocomplete","off").wrap("").parent().append("")},_draw:function(){this._enhance(),this._addClass(this.uiSpinner,"ui-spinner","ui-widget ui-widget-content"),this._addClass("ui-spinner-input"),this.element.attr("role","spinbutton"),this.buttons=this.uiSpinner.children("a").attr("tabIndex",-1).attr("aria-hidden",!0).button({classes:{"ui-button":""}}),this._removeClass(this.buttons,"ui-corner-all"),this._addClass(this.buttons.first(),"ui-spinner-button ui-spinner-up"),this._addClass(this.buttons.last(),"ui-spinner-button ui-spinner-down"),this.buttons.first().button({icon:this.options.icons.up,showLabel:!1}),this.buttons.last().button({icon:this.options.icons.down,showLabel:!1}),this.buttons.height()>Math.ceil(.5*this.uiSpinner.height())&&this.uiSpinner.height()>0&&this.uiSpinner.height(this.uiSpinner.height())},_keydown:function(e){var i=this.options,s=t.ui.keyCode;switch(e.keyCode){case s.UP:return this._repeat(null,1,e),!0;case s.DOWN:return 
this._repeat(null,-1,e),!0;case s.PAGE_UP:return this._repeat(null,i.page,e),!0;case s.PAGE_DOWN:return this._repeat(null,-i.page,e),!0}return!1},_start:function(t){return this.spinning||this._trigger("start",t)!==!1?(this.counter||(this.counter=1),this.spinning=!0,!0):!1},_repeat:function(t,e,i){t=t||500,clearTimeout(this.timer),this.timer=this._delay(function(){this._repeat(40,e,i)},t),this._spin(e*this.options.step,i)},_spin:function(t,e){var i=this.value()||0;this.counter||(this.counter=1),i=this._adjustValue(i+t*this._increment(this.counter)),this.spinning&&this._trigger("spin",e,{value:i})===!1||(this._value(i),this.counter++)},_increment:function(e){var i=this.options.incremental;return i?t.isFunction(i)?i(e):Math.floor(e*e*e/5e4-e*e/500+17*e/200+1):1},_precision:function(){var t=this._precisionOf(this.options.step);return null!==this.options.min&&(t=Math.max(t,this._precisionOf(this.options.min))),t},_precisionOf:function(t){var e=""+t,i=e.indexOf(".");return-1===i?0:e.length-i-1},_adjustValue:function(t){var e,i,s=this.options;return e=null!==s.min?s.min:0,i=t-e,i=Math.round(i/s.step)*s.step,t=e+i,t=parseFloat(t.toFixed(this._precision())),null!==s.max&&t>s.max?s.max:null!==s.min&&s.min>t?s.min:t},_stop:function(t){this.spinning&&(clearTimeout(this.timer),clearTimeout(this.mousewheelTimer),this.counter=0,this.spinning=!1,this._trigger("stop",t))},_setOption:function(t,e){var i,s,n;return"culture"===t||"numberFormat"===t?(i=this._parse(this.element.val()),this.options[t]=e,this.element.val(this._format(i)),void 0):(("max"===t||"min"===t||"step"===t)&&"string"==typeof e&&(e=this._parse(e)),"icons"===t&&(s=this.buttons.first().find(".ui-icon"),this._removeClass(s,null,this.options.icons.up),this._addClass(s,null,e.up),n=this.buttons.last().find(".ui-icon"),this._removeClass(n,null,this.options.icons.down),this._addClass(n,null,e.down)),this._super(t,e),void 0)},_setOptionDisabled:function(t){this._super(t),this._toggleClass(this.uiSpinner,null,"ui-state-disabled",!!t),this.element.prop("disabled",!!t),this.buttons.button(t?"disable":"enable")},_setOptions:r(function(t){this._super(t)}),_parse:function(t){return"string"==typeof t&&""!==t&&(t=window.Globalize&&this.options.numberFormat?Globalize.parseFloat(t,10,this.options.culture):+t),""===t||isNaN(t)?null:t},_format:function(t){return""===t?"":window.Globalize&&this.options.numberFormat?Globalize.format(t,this.options.numberFormat,this.options.culture):t},_refresh:function(){this.element.attr({"aria-valuemin":this.options.min,"aria-valuemax":this.options.max,"aria-valuenow":this._parse(this.element.val())})},isValid:function(){var t=this.value();return null===t?!1:t===this._adjustValue(t)},_value:function(t,e){var i;""!==t&&(i=this._parse(t),null!==i&&(e||(i=this._adjustValue(i)),t=this._format(i))),this.element.val(t),this._refresh()},_destroy:function(){this.element.prop("disabled",!1).removeAttr("autocomplete role aria-valuemin aria-valuemax aria-valuenow"),this.uiSpinner.replaceWith(this.element)},stepUp:r(function(t){this._stepUp(t)}),_stepUp:function(t){this._start()&&(this._spin((t||1)*this.options.step),this._stop())},stepDown:r(function(t){this._stepDown(t)}),_stepDown:function(t){this._start()&&(this._spin((t||1)*-this.options.step),this._stop())},pageUp:r(function(t){this._stepUp((t||1)*this.options.page)}),pageDown:r(function(t){this._stepDown((t||1)*this.options.page)}),value:function(t){return arguments.length?(r(this._value).call(this,t),void 0):this._parse(this.element.val())},widget:function(){return 
this.uiSpinner}}),t.uiBackCompat!==!1&&t.widget("ui.spinner",t.ui.spinner,{_enhance:function(){this.uiSpinner=this.element.attr("autocomplete","off").wrap(this._uiSpinnerHtml()).parent().append(this._buttonHtml())},_uiSpinnerHtml:function(){return""},_buttonHtml:function(){return""}}),t.ui.spinner,t.widget("ui.tabs",{version:"1.12.1",delay:300,options:{active:null,classes:{"ui-tabs":"ui-corner-all","ui-tabs-nav":"ui-corner-all","ui-tabs-panel":"ui-corner-bottom","ui-tabs-tab":"ui-corner-top"},collapsible:!1,event:"click",heightStyle:"content",hide:null,show:null,activate:null,beforeActivate:null,beforeLoad:null,load:null},_isLocal:function(){var t=/#.*$/;return function(e){var i,s;i=e.href.replace(t,""),s=location.href.replace(t,"");try{i=decodeURIComponent(i)}catch(n){}try{s=decodeURIComponent(s)}catch(n){}return e.hash.length>1&&i===s}}(),_create:function(){var e=this,i=this.options;this.running=!1,this._addClass("ui-tabs","ui-widget ui-widget-content"),this._toggleClass("ui-tabs-collapsible",null,i.collapsible),this._processTabs(),i.active=this._initialActive(),t.isArray(i.disabled)&&(i.disabled=t.unique(i.disabled.concat(t.map(this.tabs.filter(".ui-state-disabled"),function(t){return e.tabs.index(t)}))).sort()),this.active=this.options.active!==!1&&this.anchors.length?this._findActive(i.active):t(),this._refresh(),this.active.length&&this.load(i.active)},_initialActive:function(){var e=this.options.active,i=this.options.collapsible,s=location.hash.substring(1);return null===e&&(s&&this.tabs.each(function(i,n){return t(n).attr("aria-controls")===s?(e=i,!1):void 0}),null===e&&(e=this.tabs.index(this.tabs.filter(".ui-tabs-active"))),(null===e||-1===e)&&(e=this.tabs.length?0:!1)),e!==!1&&(e=this.tabs.index(this.tabs.eq(e)),-1===e&&(e=i?!1:0)),!i&&e===!1&&this.anchors.length&&(e=0),e},_getCreateEventData:function(){return{tab:this.active,panel:this.active.length?this._getPanelForTab(this.active):t()}},_tabKeydown:function(e){var i=t(t.ui.safeActiveElement(this.document[0])).closest("li"),s=this.tabs.index(i),n=!0;if(!this._handlePageNav(e)){switch(e.keyCode){case t.ui.keyCode.RIGHT:case t.ui.keyCode.DOWN:s++;break;case t.ui.keyCode.UP:case t.ui.keyCode.LEFT:n=!1,s--;break;case t.ui.keyCode.END:s=this.anchors.length-1;break;case t.ui.keyCode.HOME:s=0;break;case t.ui.keyCode.SPACE:return e.preventDefault(),clearTimeout(this.activating),this._activate(s),void 0;case t.ui.keyCode.ENTER:return e.preventDefault(),clearTimeout(this.activating),this._activate(s===this.options.active?!1:s),void 0;default:return}e.preventDefault(),clearTimeout(this.activating),s=this._focusNextTab(s,n),e.ctrlKey||e.metaKey||(i.attr("aria-selected","false"),this.tabs.eq(s).attr("aria-selected","true"),this.activating=this._delay(function(){this.option("active",s)},this.delay))}},_panelKeydown:function(e){this._handlePageNav(e)||e.ctrlKey&&e.keyCode===t.ui.keyCode.UP&&(e.preventDefault(),this.active.trigger("focus"))},_handlePageNav:function(e){return e.altKey&&e.keyCode===t.ui.keyCode.PAGE_UP?(this._activate(this._focusNextTab(this.options.active-1,!1)),!0):e.altKey&&e.keyCode===t.ui.keyCode.PAGE_DOWN?(this._activate(this._focusNextTab(this.options.active+1,!0)),!0):void 0},_findNextTab:function(e,i){function s(){return e>n&&(e=0),0>e&&(e=n),e}for(var n=this.tabs.length-1;-1!==t.inArray(s(),this.options.disabled);)e=i?e+1:e-1;return e},_focusNextTab:function(t,e){return t=this._findNextTab(t,e),this.tabs.eq(t).trigger("focus"),t},_setOption:function(t,e){return"active"===t?(this._activate(e),void 
0):(this._super(t,e),"collapsible"===t&&(this._toggleClass("ui-tabs-collapsible",null,e),e||this.options.active!==!1||this._activate(0)),"event"===t&&this._setupEvents(e),"heightStyle"===t&&this._setupHeightStyle(e),void 0)},_sanitizeSelector:function(t){return t?t.replace(/[!"$%&'()*+,.\/:;<=>?@\[\]\^`{|}~]/g,"\\$&"):""},refresh:function(){var e=this.options,i=this.tablist.children(":has(a[href])");e.disabled=t.map(i.filter(".ui-state-disabled"),function(t){return i.index(t)}),this._processTabs(),e.active!==!1&&this.anchors.length?this.active.length&&!t.contains(this.tablist[0],this.active[0])?this.tabs.length===e.disabled.length?(e.active=!1,this.active=t()):this._activate(this._findNextTab(Math.max(0,e.active-1),!1)):e.active=this.tabs.index(this.active):(e.active=!1,this.active=t()),this._refresh()},_refresh:function(){this._setOptionDisabled(this.options.disabled),this._setupEvents(this.options.event),this._setupHeightStyle(this.options.heightStyle),this.tabs.not(this.active).attr({"aria-selected":"false","aria-expanded":"false",tabIndex:-1}),this.panels.not(this._getPanelForTab(this.active)).hide().attr({"aria-hidden":"true"}),this.active.length?(this.active.attr({"aria-selected":"true","aria-expanded":"true",tabIndex:0}),this._addClass(this.active,"ui-tabs-active","ui-state-active"),this._getPanelForTab(this.active).show().attr({"aria-hidden":"false"})):this.tabs.eq(0).attr("tabIndex",0)},_processTabs:function(){var e=this,i=this.tabs,s=this.anchors,n=this.panels;this.tablist=this._getList().attr("role","tablist"),this._addClass(this.tablist,"ui-tabs-nav","ui-helper-reset ui-helper-clearfix ui-widget-header"),this.tablist.on("mousedown"+this.eventNamespace,"> li",function(e){t(this).is(".ui-state-disabled")&&e.preventDefault()}).on("focus"+this.eventNamespace,".ui-tabs-anchor",function(){t(this).closest("li").is(".ui-state-disabled")&&this.blur()}),this.tabs=this.tablist.find("> li:has(a[href])").attr({role:"tab",tabIndex:-1}),this._addClass(this.tabs,"ui-tabs-tab","ui-state-default"),this.anchors=this.tabs.map(function(){return t("a",this)[0]}).attr({role:"presentation",tabIndex:-1}),this._addClass(this.anchors,"ui-tabs-anchor"),this.panels=t(),this.anchors.each(function(i,s){var n,o,a,r=t(s).uniqueId().attr("id"),h=t(s).closest("li"),l=h.attr("aria-controls");e._isLocal(s)?(n=s.hash,a=n.substring(1),o=e.element.find(e._sanitizeSelector(n))):(a=h.attr("aria-controls")||t({}).uniqueId()[0].id,n="#"+a,o=e.element.find(n),o.length||(o=e._createPanel(a),o.insertAfter(e.panels[i-1]||e.tablist)),o.attr("aria-live","polite")),o.length&&(e.panels=e.panels.add(o)),l&&h.data("ui-tabs-aria-controls",l),h.attr({"aria-controls":a,"aria-labelledby":r}),o.attr("aria-labelledby",r)}),this.panels.attr("role","tabpanel"),this._addClass(this.panels,"ui-tabs-panel","ui-widget-content"),i&&(this._off(i.not(this.tabs)),this._off(s.not(this.anchors)),this._off(n.not(this.panels)))},_getList:function(){return this.tablist||this.element.find("ol, ul").eq(0)},_createPanel:function(e){return t("
    ").attr("id",e).data("ui-tabs-destroy",!0)},_setOptionDisabled:function(e){var i,s,n;for(t.isArray(e)&&(e.length?e.length===this.anchors.length&&(e=!0):e=!1),n=0;s=this.tabs[n];n++)i=t(s),e===!0||-1!==t.inArray(n,e)?(i.attr("aria-disabled","true"),this._addClass(i,null,"ui-state-disabled")):(i.removeAttr("aria-disabled"),this._removeClass(i,null,"ui-state-disabled"));this.options.disabled=e,this._toggleClass(this.widget(),this.widgetFullName+"-disabled",null,e===!0)},_setupEvents:function(e){var i={};e&&t.each(e.split(" "),function(t,e){i[e]="_eventHandler"}),this._off(this.anchors.add(this.tabs).add(this.panels)),this._on(!0,this.anchors,{click:function(t){t.preventDefault()}}),this._on(this.anchors,i),this._on(this.tabs,{keydown:"_tabKeydown"}),this._on(this.panels,{keydown:"_panelKeydown"}),this._focusable(this.tabs),this._hoverable(this.tabs)},_setupHeightStyle:function(e){var i,s=this.element.parent();"fill"===e?(i=s.height(),i-=this.element.outerHeight()-this.element.height(),this.element.siblings(":visible").each(function(){var e=t(this),s=e.css("position");"absolute"!==s&&"fixed"!==s&&(i-=e.outerHeight(!0))}),this.element.children().not(this.panels).each(function(){i-=t(this).outerHeight(!0)}),this.panels.each(function(){t(this).height(Math.max(0,i-t(this).innerHeight()+t(this).height()))}).css("overflow","auto")):"auto"===e&&(i=0,this.panels.each(function(){i=Math.max(i,t(this).height("").height())}).height(i))},_eventHandler:function(e){var i=this.options,s=this.active,n=t(e.currentTarget),o=n.closest("li"),a=o[0]===s[0],r=a&&i.collapsible,h=r?t():this._getPanelForTab(o),l=s.length?this._getPanelForTab(s):t(),c={oldTab:s,oldPanel:l,newTab:r?t():o,newPanel:h};e.preventDefault(),o.hasClass("ui-state-disabled")||o.hasClass("ui-tabs-loading")||this.running||a&&!i.collapsible||this._trigger("beforeActivate",e,c)===!1||(i.active=r?!1:this.tabs.index(o),this.active=a?t():o,this.xhr&&this.xhr.abort(),l.length||h.length||t.error("jQuery UI Tabs: Mismatching fragment identifier."),h.length&&this.load(this.tabs.index(o),e),this._toggle(e,c))},_toggle:function(e,i){function s(){o.running=!1,o._trigger("activate",e,i)}function n(){o._addClass(i.newTab.closest("li"),"ui-tabs-active","ui-state-active"),a.length&&o.options.show?o._show(a,o.options.show,s):(a.show(),s())}var o=this,a=i.newPanel,r=i.oldPanel;this.running=!0,r.length&&this.options.hide?this._hide(r,this.options.hide,function(){o._removeClass(i.oldTab.closest("li"),"ui-tabs-active","ui-state-active"),n()}):(this._removeClass(i.oldTab.closest("li"),"ui-tabs-active","ui-state-active"),r.hide(),n()),r.attr("aria-hidden","true"),i.oldTab.attr({"aria-selected":"false","aria-expanded":"false"}),a.length&&r.length?i.oldTab.attr("tabIndex",-1):a.length&&this.tabs.filter(function(){return 0===t(this).attr("tabIndex")}).attr("tabIndex",-1),a.attr("aria-hidden","false"),i.newTab.attr({"aria-selected":"true","aria-expanded":"true",tabIndex:0})},_activate:function(e){var i,s=this._findActive(e);s[0]!==this.active[0]&&(s.length||(s=this.active),i=s.find(".ui-tabs-anchor")[0],this._eventHandler({target:i,currentTarget:i,preventDefault:t.noop}))},_findActive:function(e){return e===!1?t():this.tabs.eq(e)},_getIndex:function(e){return"string"==typeof e&&(e=this.anchors.index(this.anchors.filter("[href$='"+t.ui.escapeSelector(e)+"']"))),e},_destroy:function(){this.xhr&&this.xhr.abort(),this.tablist.removeAttr("role").off(this.eventNamespace),this.anchors.removeAttr("role 
tabIndex").removeUniqueId(),this.tabs.add(this.panels).each(function(){t.data(this,"ui-tabs-destroy")?t(this).remove():t(this).removeAttr("role tabIndex aria-live aria-busy aria-selected aria-labelledby aria-hidden aria-expanded")}),this.tabs.each(function(){var e=t(this),i=e.data("ui-tabs-aria-controls");i?e.attr("aria-controls",i).removeData("ui-tabs-aria-controls"):e.removeAttr("aria-controls")}),this.panels.show(),"content"!==this.options.heightStyle&&this.panels.css("height","")},enable:function(e){var i=this.options.disabled;i!==!1&&(void 0===e?i=!1:(e=this._getIndex(e),i=t.isArray(i)?t.map(i,function(t){return t!==e?t:null}):t.map(this.tabs,function(t,i){return i!==e?i:null})),this._setOptionDisabled(i))},disable:function(e){var i=this.options.disabled;if(i!==!0){if(void 0===e)i=!0;else{if(e=this._getIndex(e),-1!==t.inArray(e,i))return;i=t.isArray(i)?t.merge([e],i).sort():[e]}this._setOptionDisabled(i)}},load:function(e,i){e=this._getIndex(e);var s=this,n=this.tabs.eq(e),o=n.find(".ui-tabs-anchor"),a=this._getPanelForTab(n),r={tab:n,panel:a},h=function(t,e){"abort"===e&&s.panels.stop(!1,!0),s._removeClass(n,"ui-tabs-loading"),a.removeAttr("aria-busy"),t===s.xhr&&delete s.xhr};this._isLocal(o[0])||(this.xhr=t.ajax(this._ajaxSettings(o,i,r)),this.xhr&&"canceled"!==this.xhr.statusText&&(this._addClass(n,"ui-tabs-loading"),a.attr("aria-busy","true"),this.xhr.done(function(t,e,n){setTimeout(function(){a.html(t),s._trigger("load",i,r),h(n,e)},1)}).fail(function(t,e){setTimeout(function(){h(t,e)},1)})))},_ajaxSettings:function(e,i,s){var n=this;return{url:e.attr("href").replace(/#.*$/,""),beforeSend:function(e,o){return n._trigger("beforeLoad",i,t.extend({jqXHR:e,ajaxSettings:o},s))}}},_getPanelForTab:function(e){var i=t(e).attr("aria-controls");return this.element.find(this._sanitizeSelector("#"+i))}}),t.uiBackCompat!==!1&&t.widget("ui.tabs",t.ui.tabs,{_processTabs:function(){this._superApply(arguments),this._addClass(this.tabs,"ui-tab")}}),t.ui.tabs,t.widget("ui.tooltip",{version:"1.12.1",options:{classes:{"ui-tooltip":"ui-corner-all ui-widget-shadow"},content:function(){var e=t(this).attr("title")||"";return t("").text(e).html()},hide:!0,items:"[title]:not([disabled])",position:{my:"left top+15",at:"left bottom",collision:"flipfit flip"},show:!0,track:!1,close:null,open:null},_addDescribedBy:function(e,i){var s=(e.attr("aria-describedby")||"").split(/\s+/);s.push(i),e.data("ui-tooltip-id",i).attr("aria-describedby",t.trim(s.join(" ")))},_removeDescribedBy:function(e){var i=e.data("ui-tooltip-id"),s=(e.attr("aria-describedby")||"").split(/\s+/),n=t.inArray(i,s);-1!==n&&s.splice(n,1),e.removeData("ui-tooltip-id"),s=t.trim(s.join(" ")),s?e.attr("aria-describedby",s):e.removeAttr("aria-describedby")},_create:function(){this._on({mouseover:"open",focusin:"open"}),this.tooltips={},this.parents={},this.liveRegion=t("
    ").attr({role:"log","aria-live":"assertive","aria-relevant":"additions"}).appendTo(this.document[0].body),this._addClass(this.liveRegion,null,"ui-helper-hidden-accessible"),this.disabledTitles=t([])},_setOption:function(e,i){var s=this;this._super(e,i),"content"===e&&t.each(this.tooltips,function(t,e){s._updateContent(e.element)})},_setOptionDisabled:function(t){this[t?"_disable":"_enable"]()},_disable:function(){var e=this;t.each(this.tooltips,function(i,s){var n=t.Event("blur");n.target=n.currentTarget=s.element[0],e.close(n,!0)}),this.disabledTitles=this.disabledTitles.add(this.element.find(this.options.items).addBack().filter(function(){var e=t(this);return e.is("[title]")?e.data("ui-tooltip-title",e.attr("title")).removeAttr("title"):void 0}))},_enable:function(){this.disabledTitles.each(function(){var e=t(this);e.data("ui-tooltip-title")&&e.attr("title",e.data("ui-tooltip-title"))}),this.disabledTitles=t([])},open:function(e){var i=this,s=t(e?e.target:this.element).closest(this.options.items);s.length&&!s.data("ui-tooltip-id")&&(s.attr("title")&&s.data("ui-tooltip-title",s.attr("title")),s.data("ui-tooltip-open",!0),e&&"mouseover"===e.type&&s.parents().each(function(){var e,s=t(this);s.data("ui-tooltip-open")&&(e=t.Event("blur"),e.target=e.currentTarget=this,i.close(e,!0)),s.attr("title")&&(s.uniqueId(),i.parents[this.id]={element:this,title:s.attr("title")},s.attr("title",""))}),this._registerCloseHandlers(e,s),this._updateContent(s,e))},_updateContent:function(t,e){var i,s=this.options.content,n=this,o=e?e.type:null;return"string"==typeof s||s.nodeType||s.jquery?this._open(e,t,s):(i=s.call(t[0],function(i){n._delay(function(){t.data("ui-tooltip-open")&&(e&&(e.type=o),this._open(e,t,i))})}),i&&this._open(e,t,i),void 0)},_open:function(e,i,s){function n(t){l.of=t,a.is(":hidden")||a.position(l)}var o,a,r,h,l=t.extend({},this.options.position);if(s){if(o=this._find(i))return o.tooltip.find(".ui-tooltip-content").html(s),void 0;i.is("[title]")&&(e&&"mouseover"===e.type?i.attr("title",""):i.removeAttr("title")),o=this._tooltip(i),a=o.tooltip,this._addDescribedBy(i,a.attr("id")),a.find(".ui-tooltip-content").html(s),this.liveRegion.children().hide(),h=t("
    ").html(a.find(".ui-tooltip-content").html()),h.removeAttr("name").find("[name]").removeAttr("name"),h.removeAttr("id").find("[id]").removeAttr("id"),h.appendTo(this.liveRegion),this.options.track&&e&&/^mouse/.test(e.type)?(this._on(this.document,{mousemove:n}),n(e)):a.position(t.extend({of:i},this.options.position)),a.hide(),this._show(a,this.options.show),this.options.track&&this.options.show&&this.options.show.delay&&(r=this.delayedShow=setInterval(function(){a.is(":visible")&&(n(l.of),clearInterval(r))},t.fx.interval)),this._trigger("open",e,{tooltip:a})}},_registerCloseHandlers:function(e,i){var s={keyup:function(e){if(e.keyCode===t.ui.keyCode.ESCAPE){var s=t.Event(e);s.currentTarget=i[0],this.close(s,!0)}}};i[0]!==this.element[0]&&(s.remove=function(){this._removeTooltip(this._find(i).tooltip)}),e&&"mouseover"!==e.type||(s.mouseleave="close"),e&&"focusin"!==e.type||(s.focusout="close"),this._on(!0,i,s)},close:function(e){var i,s=this,n=t(e?e.currentTarget:this.element),o=this._find(n);return o?(i=o.tooltip,o.closing||(clearInterval(this.delayedShow),n.data("ui-tooltip-title")&&!n.attr("title")&&n.attr("title",n.data("ui-tooltip-title")),this._removeDescribedBy(n),o.hiding=!0,i.stop(!0),this._hide(i,this.options.hide,function(){s._removeTooltip(t(this))}),n.removeData("ui-tooltip-open"),this._off(n,"mouseleave focusout keyup"),n[0]!==this.element[0]&&this._off(n,"remove"),this._off(this.document,"mousemove"),e&&"mouseleave"===e.type&&t.each(this.parents,function(e,i){t(i.element).attr("title",i.title),delete s.parents[e]}),o.closing=!0,this._trigger("close",e,{tooltip:i}),o.hiding||(o.closing=!1)),void 0):(n.removeData("ui-tooltip-open"),void 0)},_tooltip:function(e){var i=t("
    ").attr("role","tooltip"),s=t("
    ").appendTo(i),n=i.uniqueId().attr("id");return this._addClass(s,"ui-tooltip-content"),this._addClass(i,"ui-tooltip","ui-widget ui-widget-content"),i.appendTo(this._appendTo(e)),this.tooltips[n]={element:e,tooltip:i}},_find:function(t){var e=t.data("ui-tooltip-id");return e?this.tooltips[e]:null},_removeTooltip:function(t){t.remove(),delete this.tooltips[t.attr("id")]},_appendTo:function(t){var e=t.closest(".ui-front, dialog");return e.length||(e=this.document[0].body),e},_destroy:function(){var e=this;t.each(this.tooltips,function(i,s){var n=t.Event("blur"),o=s.element;n.target=n.currentTarget=o[0],e.close(n,!0),t("#"+i).remove(),o.data("ui-tooltip-title")&&(o.attr("title")||o.attr("title",o.data("ui-tooltip-title")),o.removeData("ui-tooltip-title"))}),this.liveRegion.remove()}}),t.uiBackCompat!==!1&&t.widget("ui.tooltip",t.ui.tooltip,{options:{tooltipClass:null},_tooltip:function(){var t=this._superApply(arguments);return this.options.tooltipClass&&t.tooltip.addClass(this.options.tooltipClass),t}}),t.ui.tooltip}); \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/jquery-ui-1.9.1.custom.min.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/jquery-ui-1.9.1.custom.min.js deleted file mode 100644 index aa7a923e73..0000000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/jquery-ui-1.9.1.custom.min.js +++ /dev/null @@ -1,6 +0,0 @@ -/*! jQuery UI - v1.9.1 - 2012-10-25 -* http://jqueryui.com -* Includes: jquery.ui.core.js, jquery.ui.widget.js, jquery.ui.mouse.js, jquery.ui.position.js, jquery.ui.accordion.js, jquery.ui.autocomplete.js, jquery.ui.button.js, jquery.ui.datepicker.js, jquery.ui.dialog.js, jquery.ui.draggable.js, jquery.ui.droppable.js, jquery.ui.effect.js, jquery.ui.effect-blind.js, jquery.ui.effect-bounce.js, jquery.ui.effect-clip.js, jquery.ui.effect-drop.js, jquery.ui.effect-explode.js, jquery.ui.effect-fade.js, jquery.ui.effect-fold.js, jquery.ui.effect-highlight.js, jquery.ui.effect-pulsate.js, jquery.ui.effect-scale.js, jquery.ui.effect-shake.js, jquery.ui.effect-slide.js, jquery.ui.effect-transfer.js, jquery.ui.menu.js, jquery.ui.progressbar.js, jquery.ui.resizable.js, jquery.ui.selectable.js, jquery.ui.slider.js, jquery.ui.sortable.js, jquery.ui.spinner.js, jquery.ui.tabs.js, jquery.ui.tooltip.js -* Copyright (c) 2012 jQuery Foundation and other contributors Licensed MIT */ - -(function(e,t){function i(t,n){var r,i,o,u=t.nodeName.toLowerCase();return"area"===u?(r=t.parentNode,i=r.name,!t.href||!i||r.nodeName.toLowerCase()!=="map"?!1:(o=e("img[usemap=#"+i+"]")[0],!!o&&s(o))):(/input|select|textarea|button|object/.test(u)?!t.disabled:"a"===u?t.href||n:n)&&s(t)}function s(t){return e.expr.filters.visible(t)&&!e(t).parents().andSelf().filter(function(){return e.css(this,"visibility")==="hidden"}).length}var n=0,r=/^ui-id-\d+$/;e.ui=e.ui||{};if(e.ui.version)return;e.extend(e.ui,{version:"1.9.1",keyCode:{BACKSPACE:8,COMMA:188,DELETE:46,DOWN:40,END:35,ENTER:13,ESCAPE:27,HOME:36,LEFT:37,NUMPAD_ADD:107,NUMPAD_DECIMAL:110,NUMPAD_DIVIDE:111,NUMPAD_ENTER:108,NUMPAD_MULTIPLY:106,NUMPAD_SUBTRACT:109,PAGE_DOWN:34,PAGE_UP:33,PERIOD:190,RIGHT:39,SPACE:32,TAB:9,UP:38}}),e.fn.extend({_focus:e.fn.focus,focus:function(t,n){return typeof t=="number"?this.each(function(){var r=this;setTimeout(function(){e(r).focus(),n&&n.call(r)},t)}):this._focus.apply(this,arguments)},scrollParent:function(){var t;return 
e.ui.ie&&/(static|relative)/.test(this.css("position"))||/absolute/.test(this.css("position"))?t=this.parents().filter(function(){return/(relative|absolute|fixed)/.test(e.css(this,"position"))&&/(auto|scroll)/.test(e.css(this,"overflow")+e.css(this,"overflow-y")+e.css(this,"overflow-x"))}).eq(0):t=this.parents().filter(function(){return/(auto|scroll)/.test(e.css(this,"overflow")+e.css(this,"overflow-y")+e.css(this,"overflow-x"))}).eq(0),/fixed/.test(this.css("position"))||!t.length?e(document):t},zIndex:function(n){if(n!==t)return this.css("zIndex",n);if(this.length){var r=e(this[0]),i,s;while(r.length&&r[0]!==document){i=r.css("position");if(i==="absolute"||i==="relative"||i==="fixed"){s=parseInt(r.css("zIndex"),10);if(!isNaN(s)&&s!==0)return s}r=r.parent()}}return 0},uniqueId:function(){return this.each(function(){this.id||(this.id="ui-id-"+ ++n)})},removeUniqueId:function(){return this.each(function(){r.test(this.id)&&e(this).removeAttr("id")})}}),e("").outerWidth(1).jquery||e.each(["Width","Height"],function(n,r){function u(t,n,r,s){return e.each(i,function(){n-=parseFloat(e.css(t,"padding"+this))||0,r&&(n-=parseFloat(e.css(t,"border"+this+"Width"))||0),s&&(n-=parseFloat(e.css(t,"margin"+this))||0)}),n}var i=r==="Width"?["Left","Right"]:["Top","Bottom"],s=r.toLowerCase(),o={innerWidth:e.fn.innerWidth,innerHeight:e.fn.innerHeight,outerWidth:e.fn.outerWidth,outerHeight:e.fn.outerHeight};e.fn["inner"+r]=function(n){return n===t?o["inner"+r].call(this):this.each(function(){e(this).css(s,u(this,n)+"px")})},e.fn["outer"+r]=function(t,n){return typeof t!="number"?o["outer"+r].call(this,t):this.each(function(){e(this).css(s,u(this,t,!0,n)+"px")})}}),e.extend(e.expr[":"],{data:e.expr.createPseudo?e.expr.createPseudo(function(t){return function(n){return!!e.data(n,t)}}):function(t,n,r){return!!e.data(t,r[3])},focusable:function(t){return i(t,!isNaN(e.attr(t,"tabindex")))},tabbable:function(t){var n=e.attr(t,"tabindex"),r=isNaN(n);return(r||n>=0)&&i(t,!r)}}),e(function(){var t=document.body,n=t.appendChild(n=document.createElement("div"));n.offsetHeight,e.extend(n.style,{minHeight:"100px",height:"auto",padding:0,borderWidth:0}),e.support.minHeight=n.offsetHeight===100,e.support.selectstart="onselectstart"in n,t.removeChild(n).style.display="none"}),function(){var t=/msie ([\w.]+)/.exec(navigator.userAgent.toLowerCase())||[];e.ui.ie=t.length?!0:!1,e.ui.ie6=parseFloat(t[1],10)===6}(),e.fn.extend({disableSelection:function(){return this.bind((e.support.selectstart?"selectstart":"mousedown")+".ui-disableSelection",function(e){e.preventDefault()})},enableSelection:function(){return this.unbind(".ui-disableSelection")}}),e.extend(e.ui,{plugin:{add:function(t,n,r){var i,s=e.ui[t].prototype;for(i in r)s.plugins[i]=s.plugins[i]||[],s.plugins[i].push([n,r[i]])},call:function(e,t,n){var r,i=e.plugins[t];if(!i||!e.element[0].parentNode||e.element[0].parentNode.nodeType===11)return;for(r=0;r0?!0:(t[r]=1,i=t[r]>0,t[r]=0,i)},isOverAxis:function(e,t,n){return 
e>t&&e",options:{disabled:!1,create:null},_createWidget:function(t,r){r=e(r||this.defaultElement||this)[0],this.element=e(r),this.uuid=n++,this.eventNamespace="."+this.widgetName+this.uuid,this.options=e.widget.extend({},this.options,this._getCreateOptions(),t),this.bindings=e(),this.hoverable=e(),this.focusable=e(),r!==this&&(e.data(r,this.widgetName,this),e.data(r,this.widgetFullName,this),this._on(this.element,{remove:function(e){e.target===r&&this.destroy()}}),this.document=e(r.style?r.ownerDocument:r.document||r),this.window=e(this.document[0].defaultView||this.document[0].parentWindow)),this._create(),this._trigger("create",null,this._getCreateEventData()),this._init()},_getCreateOptions:e.noop,_getCreateEventData:e.noop,_create:e.noop,_init:e.noop,destroy:function(){this._destroy(),this.element.unbind(this.eventNamespace).removeData(this.widgetName).removeData(this.widgetFullName).removeData(e.camelCase(this.widgetFullName)),this.widget().unbind(this.eventNamespace).removeAttr("aria-disabled").removeClass(this.widgetFullName+"-disabled "+"ui-state-disabled"),this.bindings.unbind(this.eventNamespace),this.hoverable.removeClass("ui-state-hover"),this.focusable.removeClass("ui-state-focus")},_destroy:e.noop,widget:function(){return this.element},option:function(n,r){var i=n,s,o,u;if(arguments.length===0)return e.widget.extend({},this.options);if(typeof n=="string"){i={},s=n.split("."),n=s.shift();if(s.length){o=i[n]=e.widget.extend({},this.options[n]);for(u=0;u=9||!!t.button?this._mouseStarted?(this._mouseDrag(t),t.preventDefault()):(this._mouseDistanceMet(t)&&this._mouseDelayMet(t)&&(this._mouseStarted=this._mouseStart(this._mouseDownEvent,t)!==!1,this._mouseStarted?this._mouseDrag(t):this._mouseUp(t)),!this._mouseStarted):this._mouseUp(t)},_mouseUp:function(t){return e(document).unbind("mousemove."+this.widgetName,this._mouseMoveDelegate).unbind("mouseup."+this.widgetName,this._mouseUpDelegate),this._mouseStarted&&(this._mouseStarted=!1,t.target===this._mouseDownEvent.target&&e.data(t.target,this.widgetName+".preventClickEvent",!0),this._mouseStop(t)),!1},_mouseDistanceMet:function(e){return Math.max(Math.abs(this._mouseDownEvent.pageX-e.pageX),Math.abs(this._mouseDownEvent.pageY-e.pageY))>=this.options.distance},_mouseDelayMet:function(e){return this.mouseDelayMet},_mouseStart:function(e){},_mouseDrag:function(e){},_mouseStop:function(e){},_mouseCapture:function(e){return!0}})})(jQuery);(function(e,t){function h(e,t,n){return[parseInt(e[0],10)*(l.test(e[0])?t/100:1),parseInt(e[1],10)*(l.test(e[1])?n/100:1)]}function p(t,n){return parseInt(e.css(t,n),10)||0}e.ui=e.ui||{};var n,r=Math.max,i=Math.abs,s=Math.round,o=/left|center|right/,u=/top|center|bottom/,a=/[\+\-]\d+%?/,f=/^\w+/,l=/%$/,c=e.fn.position;e.position={scrollbarWidth:function(){if(n!==t)return n;var r,i,s=e("
    "),o=s.children()[0];return e("body").append(s),r=o.offsetWidth,s.css("overflow","scroll"),i=o.offsetWidth,r===i&&(i=s[0].clientWidth),s.remove(),n=r-i},getScrollInfo:function(t){var n=t.isWindow?"":t.element.css("overflow-x"),r=t.isWindow?"":t.element.css("overflow-y"),i=n==="scroll"||n==="auto"&&t.width0?"right":"center",vertical:u<0?"top":o>0?"bottom":"middle"};lr(i(o),i(u))?h.important="horizontal":h.important="vertical",t.using.call(this,e,h)}),a.offset(e.extend(C,{using:u}))})},e.ui.position={fit:{left:function(e,t){var n=t.within,i=n.isWindow?n.scrollLeft:n.offset.left,s=n.width,o=e.left-t.collisionPosition.marginLeft,u=i-o,a=o+t.collisionWidth-s-i,f;t.collisionWidth>s?u>0&&a<=0?(f=e.left+u+t.collisionWidth-s-i,e.left+=u-f):a>0&&u<=0?e.left=i:u>a?e.left=i+s-t.collisionWidth:e.left=i:u>0?e.left+=u:a>0?e.left-=a:e.left=r(e.left-o,e.left)},top:function(e,t){var n=t.within,i=n.isWindow?n.scrollTop:n.offset.top,s=t.within.height,o=e.top-t.collisionPosition.marginTop,u=i-o,a=o+t.collisionHeight-s-i,f;t.collisionHeight>s?u>0&&a<=0?(f=e.top+u+t.collisionHeight-s-i,e.top+=u-f):a>0&&u<=0?e.top=i:u>a?e.top=i+s-t.collisionHeight:e.top=i:u>0?e.top+=u:a>0?e.top-=a:e.top=r(e.top-o,e.top)}},flip:{left:function(e,t){var n=t.within,r=n.offset.left+n.scrollLeft,s=n.width,o=n.isWindow?n.scrollLeft:n.offset.left,u=e.left-t.collisionPosition.marginLeft,a=u-o,f=u+t.collisionWidth-s-o,l=t.my[0]==="left"?-t.elemWidth:t.my[0]==="right"?t.elemWidth:0,c=t.at[0]==="left"?t.targetWidth:t.at[0]==="right"?-t.targetWidth:0,h=-2*t.offset[0],p,d;if(a<0){p=e.left+l+c+h+t.collisionWidth-s-r;if(p<0||p0){d=e.left-t.collisionPosition.marginLeft+l+c+h-o;if(d>0||i(d)a&&(v<0||v0&&(d=e.top-t.collisionPosition.marginTop+c+h+p-o,e.top+c+h+p>f&&(d>0||i(d)10&&i<11,t.innerHTML="",n.removeChild(t)}(),e.uiBackCompat!==!1&&function(e){var n=e.fn.position;e.fn.position=function(r){if(!r||!r.offset)return n.call(this,r);var i=r.offset.split(" "),s=r.at.split(" ");return i.length===1&&(i[1]=i[0]),/^\d/.test(i[0])&&(i[0]="+"+i[0]),/^\d/.test(i[1])&&(i[1]="+"+i[1]),s.length===1&&(/left|center|right/.test(s[0])?s[1]="center":(s[1]=s[0],s[0]="center")),n.call(this,e.extend(r,{at:s[0]+i[0]+" "+s[1]+i[1],offset:t}))}}(jQuery)})(jQuery);(function(e,t){var n=0,r={},i={};r.height=r.paddingTop=r.paddingBottom=r.borderTopWidth=r.borderBottomWidth="hide",i.height=i.paddingTop=i.paddingBottom=i.borderTopWidth=i.borderBottomWidth="show",e.widget("ui.accordion",{version:"1.9.1",options:{active:0,animate:{},collapsible:!1,event:"click",header:"> li > :first-child,> :not(li):even",heightStyle:"auto",icons:{activeHeader:"ui-icon-triangle-1-s",header:"ui-icon-triangle-1-e"},activate:null,beforeActivate:null},_create:function(){var t=this.accordionId="ui-accordion-"+(this.element.attr("id")||++n),r=this.options;this.prevShow=this.prevHide=e(),this.element.addClass("ui-accordion ui-widget ui-helper-reset"),this.headers=this.element.find(r.header).addClass("ui-accordion-header ui-helper-reset ui-state-default ui-corner-all"),this._hoverable(this.headers),this._focusable(this.headers),this.headers.next().addClass("ui-accordion-content ui-helper-reset ui-widget-content ui-corner-bottom").hide(),!r.collapsible&&(r.active===!1||r.active==null)&&(r.active=0),r.active<0&&(r.active+=this.headers.length),this.active=this._findActive(r.active).addClass("ui-accordion-header-active ui-state-active").toggleClass("ui-corner-all 
ui-corner-top"),this.active.next().addClass("ui-accordion-content-active").show(),this._createIcons(),this.refresh(),this.element.attr("role","tablist"),this.headers.attr("role","tab").each(function(n){var r=e(this),i=r.attr("id"),s=r.next(),o=s.attr("id");i||(i=t+"-header-"+n,r.attr("id",i)),o||(o=t+"-panel-"+n,s.attr("id",o)),r.attr("aria-controls",o),s.attr("aria-labelledby",i)}).next().attr("role","tabpanel"),this.headers.not(this.active).attr({"aria-selected":"false",tabIndex:-1}).next().attr({"aria-expanded":"false","aria-hidden":"true"}).hide(),this.active.length?this.active.attr({"aria-selected":"true",tabIndex:0}).next().attr({"aria-expanded":"true","aria-hidden":"false"}):this.headers.eq(0).attr("tabIndex",0),this._on(this.headers,{keydown:"_keydown"}),this._on(this.headers.next(),{keydown:"_panelKeyDown"}),this._setupEvents(r.event)},_getCreateEventData:function(){return{header:this.active,content:this.active.length?this.active.next():e()}},_createIcons:function(){var t=this.options.icons;t&&(e("").addClass("ui-accordion-header-icon ui-icon "+t.header).prependTo(this.headers),this.active.children(".ui-accordion-header-icon").removeClass(t.header).addClass(t.activeHeader),this.headers.addClass("ui-accordion-icons"))},_destroyIcons:function(){this.headers.removeClass("ui-accordion-icons").children(".ui-accordion-header-icon").remove()},_destroy:function(){var e;this.element.removeClass("ui-accordion ui-widget ui-helper-reset").removeAttr("role"),this.headers.removeClass("ui-accordion-header ui-accordion-header-active ui-helper-reset ui-state-default ui-corner-all ui-state-active ui-state-disabled ui-corner-top").removeAttr("role").removeAttr("aria-selected").removeAttr("aria-controls").removeAttr("tabIndex").each(function(){/^ui-accordion/.test(this.id)&&this.removeAttribute("id")}),this._destroyIcons(),e=this.headers.next().css("display","").removeAttr("role").removeAttr("aria-expanded").removeAttr("aria-hidden").removeAttr("aria-labelledby").removeClass("ui-helper-reset ui-widget-content ui-corner-bottom ui-accordion-content ui-accordion-content-active ui-state-disabled").each(function(){/^ui-accordion/.test(this.id)&&this.removeAttribute("id")}),this.options.heightStyle!=="content"&&e.css("height","")},_setOption:function(e,t){if(e==="active"){this._activate(t);return}e==="event"&&(this.options.event&&this._off(this.headers,this.options.event),this._setupEvents(t)),this._super(e,t),e==="collapsible"&&!t&&this.options.active===!1&&this._activate(0),e==="icons"&&(this._destroyIcons(),t&&this._createIcons()),e==="disabled"&&this.headers.add(this.headers.next()).toggleClass("ui-state-disabled",!!t)},_keydown:function(t){if(t.altKey||t.ctrlKey)return;var n=e.ui.keyCode,r=this.headers.length,i=this.headers.index(t.target),s=!1;switch(t.keyCode){case n.RIGHT:case n.DOWN:s=this.headers[(i+1)%r];break;case n.LEFT:case n.UP:s=this.headers[(i-1+r)%r];break;case n.SPACE:case n.ENTER:this._eventHandler(t);break;case n.HOME:s=this.headers[0];break;case n.END:s=this.headers[r-1]}s&&(e(t.target).attr("tabIndex",-1),e(s).attr("tabIndex",0),s.focus(),t.preventDefault())},_panelKeyDown:function(t){t.keyCode===e.ui.keyCode.UP&&t.ctrlKey&&e(t.currentTarget).prev().focus()},refresh:function(){var t,n,r=this.options.heightStyle,i=this.element.parent();r==="fill"?(e.support.minHeight||(n=i.css("overflow"),i.css("overflow","hidden")),t=i.height(),this.element.siblings(":visible").each(function(){var 
n=e(this),r=n.css("position");if(r==="absolute"||r==="fixed")return;t-=n.outerHeight(!0)}),n&&i.css("overflow",n),this.headers.each(function(){t-=e(this).outerHeight(!0)}),this.headers.next().each(function(){e(this).height(Math.max(0,t-e(this).innerHeight()+e(this).height()))}).css("overflow","auto")):r==="auto"&&(t=0,this.headers.next().each(function(){t=Math.max(t,e(this).height("").height())}).height(t))},_activate:function(t){var n=this._findActive(t)[0];if(n===this.active[0])return;n=n||this.active[0],this._eventHandler({target:n,currentTarget:n,preventDefault:e.noop})},_findActive:function(t){return typeof t=="number"?this.headers.eq(t):e()},_setupEvents:function(t){var n={};if(!t)return;e.each(t.split(" "),function(e,t){n[t]="_eventHandler"}),this._on(this.headers,n)},_eventHandler:function(t){var n=this.options,r=this.active,i=e(t.currentTarget),s=i[0]===r[0],o=s&&n.collapsible,u=o?e():i.next(),a=r.next(),f={oldHeader:r,oldPanel:a,newHeader:o?e():i,newPanel:u};t.preventDefault();if(s&&!n.collapsible||this._trigger("beforeActivate",t,f)===!1)return;n.active=o?!1:this.headers.index(i),this.active=s?e():i,this._toggle(f),r.removeClass("ui-accordion-header-active ui-state-active"),n.icons&&r.children(".ui-accordion-header-icon").removeClass(n.icons.activeHeader).addClass(n.icons.header),s||(i.removeClass("ui-corner-all").addClass("ui-accordion-header-active ui-state-active ui-corner-top"),n.icons&&i.children(".ui-accordion-header-icon").removeClass(n.icons.header).addClass(n.icons.activeHeader),i.next().addClass("ui-accordion-content-active"))},_toggle:function(t){var n=t.newPanel,r=this.prevShow.length?this.prevShow:t.oldPanel;this.prevShow.add(this.prevHide).stop(!0,!0),this.prevShow=n,this.prevHide=r,this.options.animate?this._animate(n,r,t):(r.hide(),n.show(),this._toggleComplete(t)),r.attr({"aria-expanded":"false","aria-hidden":"true"}),r.prev().attr("aria-selected","false"),n.length&&r.length?r.prev().attr("tabIndex",-1):n.length&&this.headers.filter(function(){return e(this).attr("tabIndex")===0}).attr("tabIndex",-1),n.attr({"aria-expanded":"true","aria-hidden":"false"}).prev().attr({"aria-selected":"true",tabIndex:0})},_animate:function(e,t,n){var s,o,u,a=this,f=0,l=e.length&&(!t.length||e.index()",options:{appendTo:"body",autoFocus:!1,delay:300,minLength:1,position:{my:"left top",at:"left bottom",collision:"none"},source:null,change:null,close:null,focus:null,open:null,response:null,search:null,select:null},pending:0,_create:function(){var t,n,r;this.isMultiLine=this._isMultiLine(),this.valueMethod=this.element[this.element.is("input,textarea")?"val":"text"],this.isNewMenu=!0,this.element.addClass("ui-autocomplete-input").attr("autocomplete","off"),this._on(this.element,{keydown:function(i){if(this.element.prop("readOnly")){t=!0,r=!0,n=!0;return}t=!1,r=!1,n=!1;var s=e.ui.keyCode;switch(i.keyCode){case s.PAGE_UP:t=!0,this._move("previousPage",i);break;case s.PAGE_DOWN:t=!0,this._move("nextPage",i);break;case s.UP:t=!0,this._keyEvent("previous",i);break;case s.DOWN:t=!0,this._keyEvent("next",i);break;case s.ENTER:case s.NUMPAD_ENTER:this.menu.active&&(t=!0,i.preventDefault(),this.menu.select(i));break;case s.TAB:this.menu.active&&this.menu.select(i);break;case s.ESCAPE:this.menu.element.is(":visible")&&(this._value(this.term),this.close(i),i.preventDefault());break;default:n=!0,this._searchTimeout(i)}},keypress:function(r){if(t){t=!1,r.preventDefault();return}if(n)return;var i=e.ui.keyCode;switch(r.keyCode){case i.PAGE_UP:this._move("previousPage",r);break;case 
i.PAGE_DOWN:this._move("nextPage",r);break;case i.UP:this._keyEvent("previous",r);break;case i.DOWN:this._keyEvent("next",r)}},input:function(e){if(r){r=!1,e.preventDefault();return}this._searchTimeout(e)},focus:function(){this.selectedItem=null,this.previous=this._value()},blur:function(e){if(this.cancelBlur){delete this.cancelBlur;return}clearTimeout(this.searching),this.close(e),this._change(e)}}),this._initSource(),this.menu=e("
    "+(o[0]>0&&I==o[1]-1?'
    ':""):""),F+=U}B+=F}return B+=x+($.ui.ie6&&!e.inline?'':""),e._keyEvent=!1,B},_generateMonthYearHeader:function(e,t,n,r,i,s,o,u){var a=this._get(e,"changeMonth"),f=this._get(e,"changeYear"),l=this._get(e,"showMonthAfterYear"),c='
    ',h="";if(s||!a)h+=''+o[t]+"";else{var p=r&&r.getFullYear()==n,d=i&&i.getFullYear()==n;h+='"}l||(c+=h+(s||!a||!f?" ":""));if(!e.yearshtml){e.yearshtml="";if(s||!f)c+=''+n+"";else{var m=this._get(e,"yearRange").split(":"),g=(new Date).getFullYear(),y=function(e){var t=e.match(/c[+-].*/)?n+parseInt(e.substring(1),10):e.match(/[+-].*/)?g+parseInt(e,10):parseInt(e,10);return isNaN(t)?g:t},b=y(m[0]),w=Math.max(b,y(m[1]||""));b=r?Math.max(b,r.getFullYear()):b,w=i?Math.min(w,i.getFullYear()):w,e.yearshtml+='",c+=e.yearshtml,e.yearshtml=null}}return c+=this._get(e,"yearSuffix"),l&&(c+=(s||!a||!f?" ":"")+h),c+="
    ",c},_adjustInstDate:function(e,t,n){var r=e.drawYear+(n=="Y"?t:0),i=e.drawMonth+(n=="M"?t:0),s=Math.min(e.selectedDay,this._getDaysInMonth(r,i))+(n=="D"?t:0),o=this._restrictMinMax(e,this._daylightSavingAdjust(new Date(r,i,s)));e.selectedDay=o.getDate(),e.drawMonth=e.selectedMonth=o.getMonth(),e.drawYear=e.selectedYear=o.getFullYear(),(n=="M"||n=="Y")&&this._notifyChange(e)},_restrictMinMax:function(e,t){var n=this._getMinMaxDate(e,"min"),r=this._getMinMaxDate(e,"max"),i=n&&tr?r:i,i},_notifyChange:function(e){var t=this._get(e,"onChangeMonthYear");t&&t.apply(e.input?e.input[0]:null,[e.selectedYear,e.selectedMonth+1,e])},_getNumberOfMonths:function(e){var t=this._get(e,"numberOfMonths");return t==null?[1,1]:typeof t=="number"?[1,t]:t},_getMinMaxDate:function(e,t){return this._determineDate(e,this._get(e,t+"Date"),null)},_getDaysInMonth:function(e,t){return 32-this._daylightSavingAdjust(new Date(e,t,32)).getDate()},_getFirstDayOfMonth:function(e,t){return(new Date(e,t,1)).getDay()},_canAdjustMonth:function(e,t,n,r){var i=this._getNumberOfMonths(e),s=this._daylightSavingAdjust(new Date(n,r+(t<0?t:i[0]*i[1]),1));return t<0&&s.setDate(this._getDaysInMonth(s.getFullYear(),s.getMonth())),this._isInRange(e,s)},_isInRange:function(e,t){var n=this._getMinMaxDate(e,"min"),r=this._getMinMaxDate(e,"max");return(!n||t.getTime()>=n.getTime())&&(!r||t.getTime()<=r.getTime())},_getFormatConfig:function(e){var t=this._get(e,"shortYearCutoff");return t=typeof t!="string"?t:(new Date).getFullYear()%100+parseInt(t,10),{shortYearCutoff:t,dayNamesShort:this._get(e,"dayNamesShort"),dayNames:this._get(e,"dayNames"),monthNamesShort:this._get(e,"monthNamesShort"),monthNames:this._get(e,"monthNames")}},_formatDate:function(e,t,n,r){t||(e.currentDay=e.selectedDay,e.currentMonth=e.selectedMonth,e.currentYear=e.selectedYear);var i=t?typeof t=="object"?t:this._daylightSavingAdjust(new Date(r,n,t)):this._daylightSavingAdjust(new Date(e.currentYear,e.currentMonth,e.currentDay));return this.formatDate(this._get(e,"dateFormat"),i,this._getFormatConfig(e))}}),$.fn.datepicker=function(e){if(!this.length)return this;$.datepicker.initialized||($(document).mousedown($.datepicker._checkExternalClick).find(document.body).append($.datepicker.dpDiv),$.datepicker.initialized=!0);var t=Array.prototype.slice.call(arguments,1);return typeof e!="string"||e!="isDisabled"&&e!="getDate"&&e!="widget"?e=="option"&&arguments.length==2&&typeof arguments[1]=="string"?$.datepicker["_"+e+"Datepicker"].apply($.datepicker,[this[0]].concat(t)):this.each(function(){typeof e=="string"?$.datepicker["_"+e+"Datepicker"].apply($.datepicker,[this].concat(t)):$.datepicker._attachDatepicker(this,e)}):$.datepicker["_"+e+"Datepicker"].apply($.datepicker,[this[0]].concat(t))},$.datepicker=new Datepicker,$.datepicker.initialized=!1,$.datepicker.uuid=(new Date).getTime(),$.datepicker.version="1.9.1",window["DP_jQuery_"+dpuuid]=$})(jQuery);(function(e,t){var n="ui-dialog ui-widget ui-widget-content ui-corner-all ",r={buttons:!0,height:!0,maxHeight:!0,maxWidth:!0,minHeight:!0,minWidth:!0,width:!0},i={maxHeight:!0,maxWidth:!0,minHeight:!0,minWidth:!0};e.widget("ui.dialog",{version:"1.9.1",options:{autoOpen:!0,buttons:{},closeOnEscape:!0,closeText:"close",dialogClass:"",draggable:!0,hide:null,height:"auto",maxHeight:!1,maxWidth:!1,minHeight:150,minWidth:150,modal:!1,position:{my:"center",at:"center",of:window,collision:"fit",using:function(t){var 
n=e(this).css(t).offset().top;n<0&&e(this).css("top",t.top-n)}},resizable:!0,show:null,stack:!0,title:"",width:300,zIndex:1e3},_create:function(){this.originalTitle=this.element.attr("title"),typeof this.originalTitle!="string"&&(this.originalTitle=""),this.oldPosition={parent:this.element.parent(),index:this.element.parent().children().index(this.element)},this.options.title=this.options.title||this.originalTitle;var t=this,r=this.options,i=r.title||" ",s,o,u,a,f;s=(this.uiDialog=e("
    ")).addClass(n+r.dialogClass).css({display:"none",outline:0,zIndex:r.zIndex}).attr("tabIndex",-1).keydown(function(n){r.closeOnEscape&&!n.isDefaultPrevented()&&n.keyCode&&n.keyCode===e.ui.keyCode.ESCAPE&&(t.close(n),n.preventDefault())}).mousedown(function(e){t.moveToTop(!1,e)}).appendTo("body"),this.element.show().removeAttr("title").addClass("ui-dialog-content ui-widget-content").appendTo(s),o=(this.uiDialogTitlebar=e("
    ")).addClass("ui-dialog-titlebar ui-widget-header ui-corner-all ui-helper-clearfix").bind("mousedown",function(){s.focus()}).prependTo(s),u=e("").addClass("ui-dialog-titlebar-close ui-corner-all").attr("role","button").click(function(e){e.preventDefault(),t.close(e)}).appendTo(o),(this.uiDialogTitlebarCloseText=e("")).addClass("ui-icon ui-icon-closethick").text(r.closeText).appendTo(u),a=e("").uniqueId().addClass("ui-dialog-title").html(i).prependTo(o),f=(this.uiDialogButtonPane=e("
    ")).addClass("ui-dialog-buttonpane ui-widget-content ui-helper-clearfix"),(this.uiButtonSet=e("
    ")).addClass("ui-dialog-buttonset").appendTo(f),s.attr({role:"dialog","aria-labelledby":a.attr("id")}),o.find("*").add(o).disableSelection(),this._hoverable(u),this._focusable(u),r.draggable&&e.fn.draggable&&this._makeDraggable(),r.resizable&&e.fn.resizable&&this._makeResizable(),this._createButtons(r.buttons),this._isOpen=!1,e.fn.bgiframe&&s.bgiframe(),this._on(s,{keydown:function(t){if(!r.modal||t.keyCode!==e.ui.keyCode.TAB)return;var n=e(":tabbable",s),i=n.filter(":first"),o=n.filter(":last");if(t.target===o[0]&&!t.shiftKey)return i.focus(1),!1;if(t.target===i[0]&&t.shiftKey)return o.focus(1),!1}})},_init:function(){this.options.autoOpen&&this.open()},_destroy:function(){var e,t=this.oldPosition;this.overlay&&this.overlay.destroy(),this.uiDialog.hide(),this.element.removeClass("ui-dialog-content ui-widget-content").hide().appendTo("body"),this.uiDialog.remove(),this.originalTitle&&this.element.attr("title",this.originalTitle),e=t.parent.children().eq(t.index),e.length&&e[0]!==this.element[0]?e.before(this.element):t.parent.append(this.element)},widget:function(){return this.uiDialog},close:function(t){var n=this,r,i;if(!this._isOpen)return;if(!1===this._trigger("beforeClose",t))return;return this._isOpen=!1,this.overlay&&this.overlay.destroy(),this.options.hide?this._hide(this.uiDialog,this.options.hide,function(){n._trigger("close",t)}):(this.uiDialog.hide(),this._trigger("close",t)),e.ui.dialog.overlay.resize(),this.options.modal&&(r=0,e(".ui-dialog").each(function(){this!==n.uiDialog[0]&&(i=e(this).css("z-index"),isNaN(i)||(r=Math.max(r,i)))}),e.ui.dialog.maxZ=r),this},isOpen:function(){return this._isOpen},moveToTop:function(t,n){var r=this.options,i;return r.modal&&!t||!r.stack&&!r.modal?this._trigger("focus",n):(r.zIndex>e.ui.dialog.maxZ&&(e.ui.dialog.maxZ=r.zIndex),this.overlay&&(e.ui.dialog.maxZ+=1,e.ui.dialog.overlay.maxZ=e.ui.dialog.maxZ,this.overlay.$el.css("z-index",e.ui.dialog.overlay.maxZ)),i={scrollTop:this.element.scrollTop(),scrollLeft:this.element.scrollLeft()},e.ui.dialog.maxZ+=1,this.uiDialog.css("z-index",e.ui.dialog.maxZ),this.element.attr(i),this._trigger("focus",n),this)},open:function(){if(this._isOpen)return;var t,n=this.options,r=this.uiDialog;return this._size(),this._position(n.position),r.show(n.show),this.overlay=n.modal?new e.ui.dialog.overlay(this):null,this.moveToTop(!0),t=this.element.find(":tabbable"),t.length||(t=this.uiDialogButtonPane.find(":tabbable"),t.length||(t=r)),t.eq(0).focus(),this._isOpen=!0,this._trigger("open"),this},_createButtons:function(t){var n=this,r=!1;this.uiDialogButtonPane.remove(),this.uiButtonSet.empty(),typeof t=="object"&&t!==null&&e.each(t,function(){return!(r=!0)}),r?(e.each(t,function(t,r){r=e.isFunction(r)?{click:r,text:t}:r;var i=e("").attr(r,!0).unbind("click").click(function(){r.click.apply(n.element[0],arguments)}).appendTo(n.uiButtonSet);e.fn.button&&i.button()}),this.uiDialog.addClass("ui-dialog-buttons"),this.uiDialogButtonPane.appendTo(this.uiDialog)):this.uiDialog.removeClass("ui-dialog-buttons")},_makeDraggable:function(){function r(e){return{position:e.position,offset:e.offset}}var t=this,n=this.options;this.uiDialog.draggable({cancel:".ui-dialog-content, 
.ui-dialog-titlebar-close",handle:".ui-dialog-titlebar",containment:"document",start:function(n,i){e(this).addClass("ui-dialog-dragging"),t._trigger("dragStart",n,r(i))},drag:function(e,n){t._trigger("drag",e,r(n))},stop:function(i,s){n.position=[s.position.left-t.document.scrollLeft(),s.position.top-t.document.scrollTop()],e(this).removeClass("ui-dialog-dragging"),t._trigger("dragStop",i,r(s)),e.ui.dialog.overlay.resize()}})},_makeResizable:function(n){function u(e){return{originalPosition:e.originalPosition,originalSize:e.originalSize,position:e.position,size:e.size}}n=n===t?this.options.resizable:n;var r=this,i=this.options,s=this.uiDialog.css("position"),o=typeof n=="string"?n:"n,e,s,w,se,sw,ne,nw";this.uiDialog.resizable({cancel:".ui-dialog-content",containment:"document",alsoResize:this.element,maxWidth:i.maxWidth,maxHeight:i.maxHeight,minWidth:i.minWidth,minHeight:this._minHeight(),handles:o,start:function(t,n){e(this).addClass("ui-dialog-resizing"),r._trigger("resizeStart",t,u(n))},resize:function(e,t){r._trigger("resize",e,u(t))},stop:function(t,n){e(this).removeClass("ui-dialog-resizing"),i.height=e(this).height(),i.width=e(this).width(),r._trigger("resizeStop",t,u(n)),e.ui.dialog.overlay.resize()}}).css("position",s).find(".ui-resizable-se").addClass("ui-icon ui-icon-grip-diagonal-se")},_minHeight:function(){var e=this.options;return e.height==="auto"?e.minHeight:Math.min(e.minHeight,e.height)},_position:function(t){var n=[],r=[0,0],i;if(t){if(typeof t=="string"||typeof t=="object"&&"0"in t)n=t.split?t.split(" "):[t[0],t[1]],n.length===1&&(n[1]=n[0]),e.each(["left","top"],function(e,t){+n[e]===n[e]&&(r[e]=n[e],n[e]=t)}),t={my:n[0]+(r[0]<0?r[0]:"+"+r[0])+" "+n[1]+(r[1]<0?r[1]:"+"+r[1]),at:n.join(" ")};t=e.extend({},e.ui.dialog.prototype.options.position,t)}else t=e.ui.dialog.prototype.options.position;i=this.uiDialog.is(":visible"),i||this.uiDialog.show(),this.uiDialog.position(t),i||this.uiDialog.hide()},_setOptions:function(t){var n=this,s={},o=!1;e.each(t,function(e,t){n._setOption(e,t),e in r&&(o=!0),e in i&&(s[e]=t)}),o&&this._size(),this.uiDialog.is(":data(resizable)")&&this.uiDialog.resizable("option",s)},_setOption:function(t,r){var i,s,o=this.uiDialog;switch(t){case"buttons":this._createButtons(r);break;case"closeText":this.uiDialogTitlebarCloseText.text(""+r);break;case"dialogClass":o.removeClass(this.options.dialogClass).addClass(n+r);break;case"disabled":r?o.addClass("ui-dialog-disabled"):o.removeClass("ui-dialog-disabled");break;case"draggable":i=o.is(":data(draggable)"),i&&!r&&o.draggable("destroy"),!i&&r&&this._makeDraggable();break;case"position":this._position(r);break;case"resizable":s=o.is(":data(resizable)"),s&&!r&&o.resizable("destroy"),s&&typeof r=="string"&&o.resizable("option","handles",r),!s&&r!==!1&&this._makeResizable(r);break;case"title":e(".ui-dialog-title",this.uiDialogTitlebar).html(""+(r||" "))}this._super(t,r)},_size:function(){var 
t,n,r,i=this.options,s=this.uiDialog.is(":visible");this.element.show().css({width:"auto",minHeight:0,height:0}),i.minWidth>i.width&&(i.width=i.minWidth),t=this.uiDialog.css({height:"auto",width:i.width}).outerHeight(),n=Math.max(0,i.minHeight-t),i.height==="auto"?e.support.minHeight?this.element.css({minHeight:n,height:"auto"}):(this.uiDialog.show(),r=this.element.css("height","auto").height(),s||this.uiDialog.hide(),this.element.height(Math.max(r,n))):this.element.height(Math.max(i.height-t,0)),this.uiDialog.is(":data(resizable)")&&this.uiDialog.resizable("option","minHeight",this._minHeight())}}),e.extend(e.ui.dialog,{uuid:0,maxZ:0,getTitleId:function(e){var t=e.attr("id");return t||(this.uuid+=1,t=this.uuid),"ui-dialog-title-"+t},overlay:function(t){this.$el=e.ui.dialog.overlay.create(t)}}),e.extend(e.ui.dialog.overlay,{instances:[],oldInstances:[],maxZ:0,events:e.map("focus,mousedown,mouseup,keydown,keypress,click".split(","),function(e){return e+".dialog-overlay"}).join(" "),create:function(t){this.instances.length===0&&(setTimeout(function(){e.ui.dialog.overlay.instances.length&&e(document).bind(e.ui.dialog.overlay.events,function(t){if(e(t.target).zIndex()").addClass("ui-widget-overlay");return e(document).bind("keydown.dialog-overlay",function(r){var i=e.ui.dialog.overlay.instances;i.length!==0&&i[i.length-1]===n&&t.options.closeOnEscape&&!r.isDefaultPrevented()&&r.keyCode&&r.keyCode===e.ui.keyCode.ESCAPE&&(t.close(r),r.preventDefault())}),n.appendTo(document.body).css({width:this.width(),height:this.height()}),e.fn.bgiframe&&n.bgiframe(),this.instances.push(n),n},destroy:function(t){var n=e.inArray(t,this.instances),r=0;n!==-1&&this.oldInstances.push(this.instances.splice(n,1)[0]),this.instances.length===0&&e([document,window]).unbind(".dialog-overlay"),t.height(0).width(0).remove(),e.each(this.instances,function(){r=Math.max(r,this.css("z-index"))}),this.maxZ=r},height:function(){var t,n;return e.ui.ie?(t=Math.max(document.documentElement.scrollHeight,document.body.scrollHeight),n=Math.max(document.documentElement.offsetHeight,document.body.offsetHeight),t
    ').css({width:this.offsetWidth+"px",height:this.offsetHeight+"px",position:"absolute",opacity:"0.001",zIndex:1e3}).css(e(this).offset()).appendTo("body")}),!0):!1)},_mouseStart:function(t){var n=this.options;return this.helper=this._createHelper(t),this.helper.addClass("ui-draggable-dragging"),this._cacheHelperProportions(),e.ui.ddmanager&&(e.ui.ddmanager.current=this),this._cacheMargins(),this.cssPosition=this.helper.css("position"),this.scrollParent=this.helper.scrollParent(),this.offset=this.positionAbs=this.element.offset(),this.offset={top:this.offset.top-this.margins.top,left:this.offset.left-this.margins.left},e.extend(this.offset,{click:{left:t.pageX-this.offset.left,top:t.pageY-this.offset.top},parent:this._getParentOffset(),relative:this._getRelativeOffset()}),this.originalPosition=this.position=this._generatePosition(t),this.originalPageX=t.pageX,this.originalPageY=t.pageY,n.cursorAt&&this._adjustOffsetFromHelper(n.cursorAt),n.containment&&this._setContainment(),this._trigger("start",t)===!1?(this._clear(),!1):(this._cacheHelperProportions(),e.ui.ddmanager&&!n.dropBehaviour&&e.ui.ddmanager.prepareOffsets(this,t),this._mouseDrag(t,!0),e.ui.ddmanager&&e.ui.ddmanager.dragStart(this,t),!0)},_mouseDrag:function(t,n){this.position=this._generatePosition(t),this.positionAbs=this._convertPositionTo("absolute");if(!n){var r=this._uiHash();if(this._trigger("drag",t,r)===!1)return this._mouseUp({}),!1;this.position=r.position}if(!this.options.axis||this.options.axis!="y")this.helper[0].style.left=this.position.left+"px";if(!this.options.axis||this.options.axis!="x")this.helper[0].style.top=this.position.top+"px";return e.ui.ddmanager&&e.ui.ddmanager.drag(this,t),!1},_mouseStop:function(t){var n=!1;e.ui.ddmanager&&!this.options.dropBehaviour&&(n=e.ui.ddmanager.drop(this,t)),this.dropped&&(n=this.dropped,this.dropped=!1);var r=this.element[0],i=!1;while(r&&(r=r.parentNode))r==document&&(i=!0);if(!i&&this.options.helper==="original")return!1;if(this.options.revert=="invalid"&&!n||this.options.revert=="valid"&&n||this.options.revert===!0||e.isFunction(this.options.revert)&&this.options.revert.call(this.element,n)){var s=this;e(this.helper).animate(this.originalPosition,parseInt(this.options.revertDuration,10),function(){s._trigger("stop",t)!==!1&&s._clear()})}else this._trigger("stop",t)!==!1&&this._clear();return!1},_mouseUp:function(t){return e("div.ui-draggable-iframeFix").each(function(){this.parentNode.removeChild(this)}),e.ui.ddmanager&&e.ui.ddmanager.dragStop(this,t),e.ui.mouse.prototype._mouseUp.call(this,t)},cancel:function(){return this.helper.is(".ui-draggable-dragging")?this._mouseUp({}):this._clear(),this},_getHandle:function(t){var n=!this.options.handle||!e(this.options.handle,this.element).length?!0:!1;return e(this.options.handle,this.element).find("*").andSelf().each(function(){this==t.target&&(n=!0)}),n},_createHelper:function(t){var n=this.options,r=e.isFunction(n.helper)?e(n.helper.apply(this.element[0],[t])):n.helper=="clone"?this.element.clone().removeAttr("id"):this.element;return r.parents("body").length||r.appendTo(n.appendTo=="parent"?this.element[0].parentNode:n.appendTo),r[0]!=this.element[0]&&!/(fixed|absolute)/.test(r.css("position"))&&r.css("position","absolute"),r},_adjustOffsetFromHelper:function(t){typeof t=="string"&&(t=t.split(" ")),e.isArray(t)&&(t={left:+t[0],top:+t[1]||0}),"left"in t&&(this.offset.click.left=t.left+this.margins.left),"right"in t&&(this.offset.click.left=this.helperProportions.width-t.right+this.margins.left),"top"in 
t&&(this.offset.click.top=t.top+this.margins.top),"bottom"in t&&(this.offset.click.top=this.helperProportions.height-t.bottom+this.margins.top)},_getParentOffset:function(){this.offsetParent=this.helper.offsetParent();var t=this.offsetParent.offset();this.cssPosition=="absolute"&&this.scrollParent[0]!=document&&e.contains(this.scrollParent[0],this.offsetParent[0])&&(t.left+=this.scrollParent.scrollLeft(),t.top+=this.scrollParent.scrollTop());if(this.offsetParent[0]==document.body||this.offsetParent[0].tagName&&this.offsetParent[0].tagName.toLowerCase()=="html"&&e.ui.ie)t={top:0,left:0};return{top:t.top+(parseInt(this.offsetParent.css("borderTopWidth"),10)||0),left:t.left+(parseInt(this.offsetParent.css("borderLeftWidth"),10)||0)}},_getRelativeOffset:function(){if(this.cssPosition=="relative"){var e=this.element.position();return{top:e.top-(parseInt(this.helper.css("top"),10)||0)+this.scrollParent.scrollTop(),left:e.left-(parseInt(this.helper.css("left"),10)||0)+this.scrollParent.scrollLeft()}}return{top:0,left:0}},_cacheMargins:function(){this.margins={left:parseInt(this.element.css("marginLeft"),10)||0,top:parseInt(this.element.css("marginTop"),10)||0,right:parseInt(this.element.css("marginRight"),10)||0,bottom:parseInt(this.element.css("marginBottom"),10)||0}},_cacheHelperProportions:function(){this.helperProportions={width:this.helper.outerWidth(),height:this.helper.outerHeight()}},_setContainment:function(){var t=this.options;t.containment=="parent"&&(t.containment=this.helper[0].parentNode);if(t.containment=="document"||t.containment=="window")this.containment=[t.containment=="document"?0:e(window).scrollLeft()-this.offset.relative.left-this.offset.parent.left,t.containment=="document"?0:e(window).scrollTop()-this.offset.relative.top-this.offset.parent.top,(t.containment=="document"?0:e(window).scrollLeft())+e(t.containment=="document"?document:window).width()-this.helperProportions.width-this.margins.left,(t.containment=="document"?0:e(window).scrollTop())+(e(t.containment=="document"?document:window).height()||document.body.parentNode.scrollHeight)-this.helperProportions.height-this.margins.top];if(!/^(document|window|parent)$/.test(t.containment)&&t.containment.constructor!=Array){var n=e(t.containment),r=n[0];if(!r)return;var i=n.offset(),s=e(r).css("overflow")!="hidden";this.containment=[(parseInt(e(r).css("borderLeftWidth"),10)||0)+(parseInt(e(r).css("paddingLeft"),10)||0),(parseInt(e(r).css("borderTopWidth"),10)||0)+(parseInt(e(r).css("paddingTop"),10)||0),(s?Math.max(r.scrollWidth,r.offsetWidth):r.offsetWidth)-(parseInt(e(r).css("borderLeftWidth"),10)||0)-(parseInt(e(r).css("paddingRight"),10)||0)-this.helperProportions.width-this.margins.left-this.margins.right,(s?Math.max(r.scrollHeight,r.offsetHeight):r.offsetHeight)-(parseInt(e(r).css("borderTopWidth"),10)||0)-(parseInt(e(r).css("paddingBottom"),10)||0)-this.helperProportions.height-this.margins.top-this.margins.bottom],this.relative_container=n}else t.containment.constructor==Array&&(this.containment=t.containment)},_convertPositionTo:function(t,n){n||(n=this.position);var 
r=t=="absolute"?1:-1,i=this.options,s=this.cssPosition!="absolute"||this.scrollParent[0]!=document&&!!e.contains(this.scrollParent[0],this.offsetParent[0])?this.scrollParent:this.offsetParent,o=/(html|body)/i.test(s[0].tagName);return{top:n.top+this.offset.relative.top*r+this.offset.parent.top*r-(this.cssPosition=="fixed"?-this.scrollParent.scrollTop():o?0:s.scrollTop())*r,left:n.left+this.offset.relative.left*r+this.offset.parent.left*r-(this.cssPosition=="fixed"?-this.scrollParent.scrollLeft():o?0:s.scrollLeft())*r}},_generatePosition:function(t){var n=this.options,r=this.cssPosition!="absolute"||this.scrollParent[0]!=document&&!!e.contains(this.scrollParent[0],this.offsetParent[0])?this.scrollParent:this.offsetParent,i=/(html|body)/i.test(r[0].tagName),s=t.pageX,o=t.pageY;if(this.originalPosition){var u;if(this.containment){if(this.relative_container){var a=this.relative_container.offset();u=[this.containment[0]+a.left,this.containment[1]+a.top,this.containment[2]+a.left,this.containment[3]+a.top]}else u=this.containment;t.pageX-this.offset.click.leftu[2]&&(s=u[2]+this.offset.click.left),t.pageY-this.offset.click.top>u[3]&&(o=u[3]+this.offset.click.top)}if(n.grid){var f=n.grid[1]?this.originalPageY+Math.round((o-this.originalPageY)/n.grid[1])*n.grid[1]:this.originalPageY;o=u?f-this.offset.click.topu[3]?f-this.offset.click.topu[2]?l-this.offset.click.left=0;l--){var c=r.snapElements[l].left,h=c+r.snapElements[l].width,p=r.snapElements[l].top,d=p+r.snapElements[l].height;if(!(c-s=l&&o<=c||u>=l&&u<=c||oc)&&(i>=a&&i<=f||s>=a&&s<=f||if);default:return!1}},e.ui.ddmanager={current:null,droppables:{"default":[]},prepareOffsets:function(t,n){var r=e.ui.ddmanager.droppables[t.options.scope]||[],i=n?n.type:null,s=(t.currentItem||t.element).find(":data(droppable)").andSelf();e:for(var o=0;oe?0:r.max")[0],c,h=t.each;l.style.cssText="background-color:rgba(1,1,1,.5)",f.rgba=l.style.backgroundColor.indexOf("rgba")>-1,h(u,function(e,t){t.cache="_"+e,t.props.alpha={idx:3,type:"percent",def:1}}),o.fn=t.extend(o.prototype,{parse:function(r,i,s,a){if(r===n)return this._rgba=[null,null,null,null],this;if(r.jquery||r.nodeType)r=t(r).css(i),i=n;var f=this,l=t.type(r),v=this._rgba=[];i!==n&&(r=[r,i,s,a],l="array");if(l==="string")return this.parse(d(r)||c._default);if(l==="array")return h(u.rgba.props,function(e,t){v[t.idx]=p(r[t.idx],t)}),this;if(l==="object")return r instanceof o?h(u,function(e,t){r[t.cache]&&(f[t.cache]=r[t.cache].slice())}):h(u,function(t,n){var i=n.cache;h(n.props,function(e,t){if(!f[i]&&n.to){if(e==="alpha"||r[e]==null)return;f[i]=n.to(f._rgba)}f[i][t.idx]=p(r[e],t,!0)}),f[i]&&e.inArray(null,f[i].slice(0,3))<0&&(f[i][3]=1,n.from&&(f._rgba=n.from(f[i])))}),this},is:function(e){var t=o(e),n=!0,r=this;return h(u,function(e,i){var s,o=t[i.cache];return o&&(s=r[i.cache]||i.to&&i.to(r._rgba)||[],h(i.props,function(e,t){if(o[t.idx]!=null)return n=o[t.idx]===s[t.idx],n})),n}),n},_space:function(){var e=[],t=this;return h(u,function(n,r){t[r.cache]&&e.push(n)}),e.pop()},transition:function(e,t){var n=o(e),r=n._space(),i=u[r],s=this.alpha()===0?o("transparent"):this,f=s[i.cache]||i.to(s._rgba),l=f.slice();return n=n[i.cache],h(i.props,function(e,r){var i=r.idx,s=f[i],o=n[i],u=a[r.type]||{};if(o===null)return;s===null?l[i]=o:(u.mod&&(o-s>u.mod/2?s+=u.mod:s-o>u.mod/2&&(s-=u.mod)),l[i]=p((o-s)*t+s,r))}),this[r](l)},blend:function(e){if(this._rgba[3]===1)return this;var n=this._rgba.slice(),r=n.pop(),i=o(e)._rgba;return o(t.map(n,function(e,t){return(1-r)*i[t]+r*e}))},toRgbaString:function(){var 
e="rgba(",n=t.map(this._rgba,function(e,t){return e==null?t>2?1:0:e});return n[3]===1&&(n.pop(),e="rgb("),e+n.join()+")"},toHslaString:function(){var e="hsla(",n=t.map(this.hsla(),function(e,t){return e==null&&(e=t>2?1:0),t&&t<3&&(e=Math.round(e*100)+"%"),e});return n[3]===1&&(n.pop(),e="hsl("),e+n.join()+")"},toHexString:function(e){var n=this._rgba.slice(),r=n.pop();return e&&n.push(~~(r*255)),"#"+t.map(n,function(e){return e=(e||0).toString(16),e.length===1?"0"+e:e}).join("")},toString:function(){return this._rgba[3]===0?"transparent":this.toRgbaString()}}),o.fn.parse.prototype=o.fn,u.hsla.to=function(e){if(e[0]==null||e[1]==null||e[2]==null)return[null,null,null,e[3]];var t=e[0]/255,n=e[1]/255,r=e[2]/255,i=e[3],s=Math.max(t,n,r),o=Math.min(t,n,r),u=s-o,a=s+o,f=a*.5,l,c;return o===s?l=0:t===s?l=60*(n-r)/u+360:n===s?l=60*(r-t)/u+120:l=60*(t-n)/u+240,f===0||f===1?c=f:f<=.5?c=u/a:c=u/(2-a),[Math.round(l)%360,c,f,i==null?1:i]},u.hsla.from=function(e){if(e[0]==null||e[1]==null||e[2]==null)return[null,null,null,e[3]];var t=e[0]/360,n=e[1],r=e[2],i=e[3],s=r<=.5?r*(1+n):r+n-r*n,o=2*r-s;return[Math.round(v(o,s,t+1/3)*255),Math.round(v(o,s,t)*255),Math.round(v(o,s,t-1/3)*255),i]},h(u,function(e,r){var s=r.props,u=r.cache,a=r.to,f=r.from;o.fn[e]=function(e){a&&!this[u]&&(this[u]=a(this._rgba));if(e===n)return this[u].slice();var r,i=t.type(e),l=i==="array"||i==="object"?e:arguments,c=this[u].slice();return h(s,function(e,t){var n=l[i==="object"?e:t.idx];n==null&&(n=c[t.idx]),c[t.idx]=p(n,t)}),f?(r=o(f(c)),r[u]=c,r):o(c)},h(s,function(n,r){if(o.fn[n])return;o.fn[n]=function(s){var o=t.type(s),u=n==="alpha"?this._hsla?"hsla":"rgba":e,a=this[u](),f=a[r.idx],l;return o==="undefined"?f:(o==="function"&&(s=s.call(this,f),o=t.type(s)),s==null&&r.empty?this:(o==="string"&&(l=i.exec(s),l&&(s=f+parseFloat(l[2])*(l[1]==="+"?1:-1))),a[r.idx]=s,this[u](a)))}})}),h(r,function(e,n){t.cssHooks[n]={set:function(e,r){var i,s,u="";if(t.type(r)!=="string"||(i=d(r))){r=o(i||r);if(!f.rgba&&r._rgba[3]!==1){s=n==="backgroundColor"?e.parentNode:e;while((u===""||u==="transparent")&&s&&s.style)try{u=t.css(s,"backgroundColor"),s=s.parentNode}catch(a){}r=r.blend(u&&u!=="transparent"?u:"_default")}r=r.toRgbaString()}try{e.style[n]=r}catch(l){}}},t.fx.step[n]=function(e){e.colorInit||(e.start=o(e.elem,n),e.end=o(e.end),e.colorInit=!0),t.cssHooks[n].set(e.elem,e.start.transition(e.end,e.pos))}}),t.cssHooks.borderColor={expand:function(e){var t={};return h(["Top","Right","Bottom","Left"],function(n,r){t["border"+r+"Color"]=e}),t}},c=t.Color.names={aqua:"#00ffff",black:"#000000",blue:"#0000ff",fuchsia:"#ff00ff",gray:"#808080",green:"#008000",lime:"#00ff00",maroon:"#800000",navy:"#000080",olive:"#808000",purple:"#800080",red:"#ff0000",silver:"#c0c0c0",teal:"#008080",white:"#ffffff",yellow:"#ffff00",transparent:[null,null,null,0],_default:"#ffffff"}}(jQuery),function(){function i(){var t=this.ownerDocument.defaultView?this.ownerDocument.defaultView.getComputedStyle(this,null):this.currentStyle,n={},r,i;if(t&&t.length&&t[0]&&t[t[0]]){i=t.length;while(i--)r=t[i],typeof t[r]=="string"&&(n[e.camelCase(r)]=t[r])}else for(r in t)typeof t[r]=="string"&&(n[r]=t[r]);return n}function s(t,n){var i={},s,o;for(s in n)o=n[s],t[s]!==o&&!r[s]&&(e.fx.step[s]||!isNaN(parseFloat(o)))&&(i[s]=o);return i}var 
n=["add","remove","toggle"],r={border:1,borderBottom:1,borderColor:1,borderLeft:1,borderRight:1,borderTop:1,borderWidth:1,margin:1,padding:1};e.each(["borderLeftStyle","borderRightStyle","borderBottomStyle","borderTopStyle"],function(t,n){e.fx.step[n]=function(e){if(e.end!=="none"&&!e.setAttr||e.pos===1&&!e.setAttr)jQuery.style(e.elem,n,e.end),e.setAttr=!0}}),e.effects.animateClass=function(t,r,o,u){var a=e.speed(r,o,u);return this.queue(function(){var r=e(this),o=r.attr("class")||"",u,f=a.children?r.find("*").andSelf():r;f=f.map(function(){var t=e(this);return{el:t,start:i.call(this)}}),u=function(){e.each(n,function(e,n){t[n]&&r[n+"Class"](t[n])})},u(),f=f.map(function(){return this.end=i.call(this.el[0]),this.diff=s(this.start,this.end),this}),r.attr("class",o),f=f.map(function(){var t=this,n=e.Deferred(),r=jQuery.extend({},a,{queue:!1,complete:function(){n.resolve(t)}});return this.el.animate(this.diff,r),n.promise()}),e.when.apply(e,f.get()).done(function(){u(),e.each(arguments,function(){var t=this.el;e.each(this.diff,function(e){t.css(e,"")})}),a.complete.call(r[0])})})},e.fn.extend({_addClass:e.fn.addClass,addClass:function(t,n,r,i){return n?e.effects.animateClass.call(this,{add:t},n,r,i):this._addClass(t)},_removeClass:e.fn.removeClass,removeClass:function(t,n,r,i){return n?e.effects.animateClass.call(this,{remove:t},n,r,i):this._removeClass(t)},_toggleClass:e.fn.toggleClass,toggleClass:function(n,r,i,s,o){return typeof r=="boolean"||r===t?i?e.effects.animateClass.call(this,r?{add:n}:{remove:n},i,s,o):this._toggleClass(n,r):e.effects.animateClass.call(this,{toggle:n},r,i,s)},switchClass:function(t,n,r,i,s){return e.effects.animateClass.call(this,{add:n,remove:t},r,i,s)}})}(),function(){function i(t,n,r,i){e.isPlainObject(t)&&(n=t,t=t.effect),t={effect:t},n==null&&(n={}),e.isFunction(n)&&(i=n,r=null,n={});if(typeof n=="number"||e.fx.speeds[n])i=r,r=n,n={};return e.isFunction(r)&&(i=r,r=null),n&&e.extend(t,n),r=r||n.duration,t.duration=e.fx.off?0:typeof r=="number"?r:r in e.fx.speeds?e.fx.speeds[r]:e.fx.speeds._default,t.complete=i||n.complete,t}function s(t){return!t||typeof t=="number"||e.fx.speeds[t]?!0:typeof t=="string"&&!e.effects.effect[t]?n&&e.effects[t]?!1:!0:!1}e.extend(e.effects,{version:"1.9.1",save:function(e,t){for(var n=0;n
    ").addClass("ui-effects-wrapper").css({fontSize:"100%",background:"transparent",border:"none",margin:0,padding:0}),i={width:t.width(),height:t.height()},s=document.activeElement;try{s.id}catch(o){s=document.body}return t.wrap(r),(t[0]===s||e.contains(t[0],s))&&e(s).focus(),r=t.parent(),t.css("position")==="static"?(r.css({position:"relative"}),t.css({position:"relative"})):(e.extend(n,{position:t.css("position"),zIndex:t.css("z-index")}),e.each(["top","left","bottom","right"],function(e,r){n[r]=t.css(r),isNaN(parseInt(n[r],10))&&(n[r]="auto")}),t.css({position:"relative",top:0,left:0,right:"auto",bottom:"auto"})),t.css(i),r.css(n).show()},removeWrapper:function(t){var n=document.activeElement;return t.parent().is(".ui-effects-wrapper")&&(t.parent().replaceWith(t),(t[0]===n||e.contains(t[0],n))&&e(n).focus()),t},setTransition:function(t,n,r,i){return i=i||{},e.each(n,function(e,n){var s=t.cssUnit(n);s[0]>0&&(i[n]=s[0]*r+s[1])}),i}}),e.fn.extend({effect:function(){function a(n){function u(){e.isFunction(i)&&i.call(r[0]),e.isFunction(n)&&n()}var r=e(this),i=t.complete,s=t.mode;(r.is(":hidden")?s==="hide":s==="show")?u():o.call(r[0],t,u)}var t=i.apply(this,arguments),r=t.mode,s=t.queue,o=e.effects.effect[t.effect],u=!o&&n&&e.effects[t.effect];return e.fx.off||!o&&!u?r?this[r](t.duration,t.complete):this.each(function(){t.complete&&t.complete.call(this)}):o?s===!1?this.each(a):this.queue(s||"fx",a):u.call(this,{options:t,duration:t.duration,callback:t.complete,mode:t.mode})},_show:e.fn.show,show:function(e){if(s(e))return this._show.apply(this,arguments);var t=i.apply(this,arguments);return t.mode="show",this.effect.call(this,t)},_hide:e.fn.hide,hide:function(e){if(s(e))return this._hide.apply(this,arguments);var t=i.apply(this,arguments);return t.mode="hide",this.effect.call(this,t)},__toggle:e.fn.toggle,toggle:function(t){if(s(t)||typeof t=="boolean"||e.isFunction(t))return this.__toggle.apply(this,arguments);var n=i.apply(this,arguments);return n.mode="toggle",this.effect.call(this,n)},cssUnit:function(t){var n=this.css(t),r=[];return e.each(["em","px","%","pt"],function(e,t){n.indexOf(t)>0&&(r=[parseFloat(n),t])}),r}})}(),function(){var t={};e.each(["Quad","Cubic","Quart","Quint","Expo"],function(e,n){t[n]=function(t){return Math.pow(t,e+2)}}),e.extend(t,{Sine:function(e){return 1-Math.cos(e*Math.PI/2)},Circ:function(e){return 1-Math.sqrt(1-e*e)},Elastic:function(e){return e===0||e===1?e:-Math.pow(2,8*(e-1))*Math.sin(((e-1)*80-7.5)*Math.PI/15)},Back:function(e){return e*e*(3*e-2)},Bounce:function(e){var t,n=4;while(e<((t=Math.pow(2,--n))-1)/11);return 1/Math.pow(4,3-n)-7.5625*Math.pow((t*3-2)/22-e,2)}}),e.each(t,function(t,n){e.easing["easeIn"+t]=n,e.easing["easeOut"+t]=function(e){return 1-n(1-e)},e.easing["easeInOut"+t]=function(e){return e<.5?n(e*2)/2:1-n(e*-2+2)/2}})}()}(jQuery);(function(e,t){var n=/up|down|vertical/,r=/up|left|vertical|horizontal/;e.effects.effect.blind=function(t,i){var 
s=e(this),o=["position","top","bottom","left","right","height","width"],u=e.effects.setMode(s,t.mode||"hide"),a=t.direction||"up",f=n.test(a),l=f?"height":"width",c=f?"top":"left",h=r.test(a),p={},d=u==="show",v,m,g;s.parent().is(".ui-effects-wrapper")?e.effects.save(s.parent(),o):e.effects.save(s,o),s.show(),v=e.effects.createWrapper(s).css({overflow:"hidden"}),m=v[l](),g=parseFloat(v.css(c))||0,p[l]=d?m:0,h||(s.css(f?"bottom":"right",0).css(f?"top":"left","auto").css({position:"absolute"}),p[c]=d?g:m+g),d&&(v.css(l,0),h||v.css(c,g+m)),v.animate(p,{duration:t.duration,easing:t.easing,queue:!1,complete:function(){u==="hide"&&s.hide(),e.effects.restore(s,o),e.effects.removeWrapper(s),i()}})}})(jQuery);(function(e,t){e.effects.effect.bounce=function(t,n){var r=e(this),i=["position","top","bottom","left","right","height","width"],s=e.effects.setMode(r,t.mode||"effect"),o=s==="hide",u=s==="show",a=t.direction||"up",f=t.distance,l=t.times||5,c=l*2+(u||o?1:0),h=t.duration/c,p=t.easing,d=a==="up"||a==="down"?"top":"left",v=a==="up"||a==="left",m,g,y,b=r.queue(),w=b.length;(u||o)&&i.push("opacity"),e.effects.save(r,i),r.show(),e.effects.createWrapper(r),f||(f=r[d==="top"?"outerHeight":"outerWidth"]()/3),u&&(y={opacity:1},y[d]=0,r.css("opacity",0).css(d,v?-f*2:f*2).animate(y,h,p)),o&&(f/=Math.pow(2,l-1)),y={},y[d]=0;for(m=0;m1&&b.splice.apply(b,[1,0].concat(b.splice(w,c+1))),r.dequeue()}})(jQuery);(function(e,t){e.effects.effect.clip=function(t,n){var r=e(this),i=["position","top","bottom","left","right","height","width"],s=e.effects.setMode(r,t.mode||"hide"),o=s==="show",u=t.direction||"vertical",a=u==="vertical",f=a?"height":"width",l=a?"top":"left",c={},h,p,d;e.effects.save(r,i),r.show(),h=e.effects.createWrapper(r).css({overflow:"hidden"}),p=r[0].tagName==="IMG"?h:r,d=p[f](),o&&(p.css(f,0),p.css(l,d/2)),c[f]=o?d:0,c[l]=o?0:d/2,p.animate(c,{queue:!1,duration:t.duration,easing:t.easing,complete:function(){o||r.hide(),e.effects.restore(r,i),e.effects.removeWrapper(r),n()}})}})(jQuery);(function(e,t){e.effects.effect.drop=function(t,n){var r=e(this),i=["position","top","bottom","left","right","opacity","height","width"],s=e.effects.setMode(r,t.mode||"hide"),o=s==="show",u=t.direction||"left",a=u==="up"||u==="down"?"top":"left",f=u==="up"||u==="left"?"pos":"neg",l={opacity:o?1:0},c;e.effects.save(r,i),r.show(),e.effects.createWrapper(r),c=t.distance||r[a==="top"?"outerHeight":"outerWidth"](!0)/2,o&&r.css("opacity",0).css(a,f==="pos"?-c:c),l[a]=(o?f==="pos"?"+=":"-=":f==="pos"?"-=":"+=")+c,r.animate(l,{queue:!1,duration:t.duration,easing:t.easing,complete:function(){s==="hide"&&r.hide(),e.effects.restore(r,i),e.effects.removeWrapper(r),n()}})}})(jQuery);(function(e,t){e.effects.effect.explode=function(t,n){function y(){c.push(this),c.length===r*i&&b()}function b(){s.css({visibility:"visible"}),e(c).remove(),u||s.hide(),n()}var r=t.pieces?Math.round(Math.sqrt(t.pieces)):3,i=r,s=e(this),o=e.effects.setMode(s,t.mode||"hide"),u=o==="show",a=s.show().css("visibility","hidden").offset(),f=Math.ceil(s.outerWidth()/i),l=Math.ceil(s.outerHeight()/r),c=[],h,p,d,v,m,g;for(h=0;h
    ").css({position:"absolute",visibility:"visible",left:-p*f,top:-h*l}).parent().addClass("ui-effects-explode").css({position:"absolute",overflow:"hidden",width:f,height:l,left:d+(u?m*f:0),top:v+(u?g*l:0),opacity:u?0:1}).animate({left:d+(u?0:m*f),top:v+(u?0:g*l),opacity:u?1:0},t.duration||500,t.easing,y)}}})(jQuery);(function(e,t){e.effects.effect.fade=function(t,n){var r=e(this),i=e.effects.setMode(r,t.mode||"toggle");r.animate({opacity:i},{queue:!1,duration:t.duration,easing:t.easing,complete:n})}})(jQuery);(function(e,t){e.effects.effect.fold=function(t,n){var r=e(this),i=["position","top","bottom","left","right","height","width"],s=e.effects.setMode(r,t.mode||"hide"),o=s==="show",u=s==="hide",a=t.size||15,f=/([0-9]+)%/.exec(a),l=!!t.horizFirst,c=o!==l,h=c?["width","height"]:["height","width"],p=t.duration/2,d,v,m={},g={};e.effects.save(r,i),r.show(),d=e.effects.createWrapper(r).css({overflow:"hidden"}),v=c?[d.width(),d.height()]:[d.height(),d.width()],f&&(a=parseInt(f[1],10)/100*v[u?0:1]),o&&d.css(l?{height:0,width:a}:{height:a,width:0}),m[h[0]]=o?v[0]:a,g[h[1]]=o?v[1]:0,d.animate(m,p,t.easing).animate(g,p,t.easing,function(){u&&r.hide(),e.effects.restore(r,i),e.effects.removeWrapper(r),n()})}})(jQuery);(function(e,t){e.effects.effect.highlight=function(t,n){var r=e(this),i=["backgroundImage","backgroundColor","opacity"],s=e.effects.setMode(r,t.mode||"show"),o={backgroundColor:r.css("backgroundColor")};s==="hide"&&(o.opacity=0),e.effects.save(r,i),r.show().css({backgroundImage:"none",backgroundColor:t.color||"#ffff99"}).animate(o,{queue:!1,duration:t.duration,easing:t.easing,complete:function(){s==="hide"&&r.hide(),e.effects.restore(r,i),n()}})}})(jQuery);(function(e,t){e.effects.effect.pulsate=function(t,n){var r=e(this),i=e.effects.setMode(r,t.mode||"show"),s=i==="show",o=i==="hide",u=s||i==="hide",a=(t.times||5)*2+(u?1:0),f=t.duration/a,l=0,c=r.queue(),h=c.length,p;if(s||!r.is(":visible"))r.css("opacity",0).show(),l=1;for(p=1;p1&&c.splice.apply(c,[1,0].concat(c.splice(h,a+1))),r.dequeue()}})(jQuery);(function(e,t){e.effects.effect.puff=function(t,n){var r=e(this),i=e.effects.setMode(r,t.mode||"hide"),s=i==="hide",o=parseInt(t.percent,10)||150,u=o/100,a={height:r.height(),width:r.width()};e.extend(t,{effect:"scale",queue:!1,fade:!0,mode:i,complete:n,percent:s?o:100,from:s?a:{height:a.height*u,width:a.width*u}}),r.effect(t)},e.effects.effect.scale=function(t,n){var r=e(this),i=e.extend(!0,{},t),s=e.effects.setMode(r,t.mode||"effect"),o=parseInt(t.percent,10)||(parseInt(t.percent,10)===0?0:s==="hide"?0:100),u=t.direction||"both",a=t.origin,f={height:r.height(),width:r.width(),outerHeight:r.outerHeight(),outerWidth:r.outerWidth()},l={y:u!=="horizontal"?o/100:1,x:u!=="vertical"?o/100:1};i.effect="size",i.queue=!1,i.complete=n,s!=="effect"&&(i.origin=a||["middle","center"],i.restore=!0),i.from=t.from||(s==="show"?{height:0,width:0}:f),i.to={height:f.height*l.y,width:f.width*l.x,outerHeight:f.outerHeight*l.y,outerWidth:f.outerWidth*l.x},i.fade&&(s==="show"&&(i.from.opacity=0,i.to.opacity=1),s==="hide"&&(i.from.opacity=1,i.to.opacity=0)),r.effect(i)},e.effects.effect.size=function(t,n){var 
r,i,s,o=e(this),u=["position","top","bottom","left","right","width","height","overflow","opacity"],a=["position","top","bottom","left","right","overflow","opacity"],f=["width","height","overflow"],l=["fontSize"],c=["borderTopWidth","borderBottomWidth","paddingTop","paddingBottom"],h=["borderLeftWidth","borderRightWidth","paddingLeft","paddingRight"],p=e.effects.setMode(o,t.mode||"effect"),d=t.restore||p!=="effect",v=t.scale||"both",m=t.origin||["middle","center"],g=o.css("position"),y=d?u:a,b={height:0,width:0};p==="show"&&o.show(),r={height:o.height(),width:o.width(),outerHeight:o.outerHeight(),outerWidth:o.outerWidth()},t.mode==="toggle"&&p==="show"?(o.from=t.to||b,o.to=t.from||r):(o.from=t.from||(p==="show"?b:r),o.to=t.to||(p==="hide"?b:r)),s={from:{y:o.from.height/r.height,x:o.from.width/r.width},to:{y:o.to.height/r.height,x:o.to.width/r.width}};if(v==="box"||v==="both")s.from.y!==s.to.y&&(y=y.concat(c),o.from=e.effects.setTransition(o,c,s.from.y,o.from),o.to=e.effects.setTransition(o,c,s.to.y,o.to)),s.from.x!==s.to.x&&(y=y.concat(h),o.from=e.effects.setTransition(o,h,s.from.x,o.from),o.to=e.effects.setTransition(o,h,s.to.x,o.to));(v==="content"||v==="both")&&s.from.y!==s.to.y&&(y=y.concat(l).concat(f),o.from=e.effects.setTransition(o,l,s.from.y,o.from),o.to=e.effects.setTransition(o,l,s.to.y,o.to)),e.effects.save(o,y),o.show(),e.effects.createWrapper(o),o.css("overflow","hidden").css(o.from),m&&(i=e.effects.getBaseline(m,r),o.from.top=(r.outerHeight-o.outerHeight())*i.y,o.from.left=(r.outerWidth-o.outerWidth())*i.x,o.to.top=(r.outerHeight-o.to.outerHeight)*i.y,o.to.left=(r.outerWidth-o.to.outerWidth)*i.x),o.css(o.from);if(v==="content"||v==="both")c=c.concat(["marginTop","marginBottom"]).concat(l),h=h.concat(["marginLeft","marginRight"]),f=u.concat(c).concat(h),o.find("*[width]").each(function(){var n=e(this),r={height:n.height(),width:n.width()};d&&e.effects.save(n,f),n.from={height:r.height*s.from.y,width:r.width*s.from.x},n.to={height:r.height*s.to.y,width:r.width*s.to.x},s.from.y!==s.to.y&&(n.from=e.effects.setTransition(n,c,s.from.y,n.from),n.to=e.effects.setTransition(n,c,s.to.y,n.to)),s.from.x!==s.to.x&&(n.from=e.effects.setTransition(n,h,s.from.x,n.from),n.to=e.effects.setTransition(n,h,s.to.x,n.to)),n.css(n.from),n.animate(n.to,t.duration,t.easing,function(){d&&e.effects.restore(n,f)})});o.animate(o.to,{queue:!1,duration:t.duration,easing:t.easing,complete:function(){o.to.opacity===0&&o.css("opacity",o.from.opacity),p==="hide"&&o.hide(),e.effects.restore(o,y),d||(g==="static"?o.css({position:"relative",top:o.to.top,left:o.to.left}):e.each(["top","left"],function(e,t){o.css(t,function(t,n){var r=parseInt(n,10),i=e?o.to.left:o.to.top;return n==="auto"?i+"px":r+i+"px"})})),e.effects.removeWrapper(o),n()}})}})(jQuery);(function(e,t){e.effects.effect.shake=function(t,n){var r=e(this),i=["position","top","bottom","left","right","height","width"],s=e.effects.setMode(r,t.mode||"effect"),o=t.direction||"left",u=t.distance||20,a=t.times||3,f=a*2+1,l=Math.round(t.duration/f),c=o==="up"||o==="down"?"top":"left",h=o==="up"||o==="left",p={},d={},v={},m,g=r.queue(),y=g.length;e.effects.save(r,i),r.show(),e.effects.createWrapper(r),p[c]=(h?"-=":"+=")+u,d[c]=(h?"+=":"-=")+u*2,v[c]=(h?"-=":"+=")+u*2,r.animate(p,l,t.easing);for(m=1;m1&&g.splice.apply(g,[1,0].concat(g.splice(y,f+1))),r.dequeue()}})(jQuery);(function(e,t){e.effects.effect.slide=function(t,n){var 
r=e(this),i=["position","top","bottom","left","right","width","height"],s=e.effects.setMode(r,t.mode||"show"),o=s==="show",u=t.direction||"left",a=u==="up"||u==="down"?"top":"left",f=u==="up"||u==="left",l,c={};e.effects.save(r,i),r.show(),l=t.distance||r[a==="top"?"outerHeight":"outerWidth"](!0),e.effects.createWrapper(r).css({overflow:"hidden"}),o&&r.css(a,f?isNaN(l)?"-"+l:-l:l),c[a]=(o?f?"+=":"-=":f?"-=":"+=")+l,r.animate(c,{queue:!1,duration:t.duration,easing:t.easing,complete:function(){s==="hide"&&r.hide(),e.effects.restore(r,i),e.effects.removeWrapper(r),n()}})}})(jQuery);(function(e,t){e.effects.effect.transfer=function(t,n){var r=e(this),i=e(t.to),s=i.css("position")==="fixed",o=e("body"),u=s?o.scrollTop():0,a=s?o.scrollLeft():0,f=i.offset(),l={top:f.top-u,left:f.left-a,height:i.innerHeight(),width:i.innerWidth()},c=r.offset(),h=e('
    ').appendTo(document.body).addClass(t.className).css({top:c.top-u,left:c.left-a,height:r.innerHeight(),width:r.innerWidth(),position:s?"fixed":"absolute"}).animate(l,t.duration,t.easing,function(){h.remove(),n()})}})(jQuery);(function(e,t){var n=!1;e.widget("ui.menu",{version:"1.9.1",defaultElement:"
      ",delay:300,options:{icons:{submenu:"ui-icon-carat-1-e"},menus:"ul",position:{my:"left top",at:"right top"},role:"menu",blur:null,focus:null,select:null},_create:function(){this.activeMenu=this.element,this.element.uniqueId().addClass("ui-menu ui-widget ui-widget-content ui-corner-all").toggleClass("ui-menu-icons",!!this.element.find(".ui-icon").length).attr({role:this.options.role,tabIndex:0}).bind("click"+this.eventNamespace,e.proxy(function(e){this.options.disabled&&e.preventDefault()},this)),this.options.disabled&&this.element.addClass("ui-state-disabled").attr("aria-disabled","true"),this._on({"mousedown .ui-menu-item > a":function(e){e.preventDefault()},"click .ui-state-disabled > a":function(e){e.preventDefault()},"click .ui-menu-item:has(a)":function(t){var r=e(t.target).closest(".ui-menu-item");!n&&r.not(".ui-state-disabled").length&&(n=!0,this.select(t),r.has(".ui-menu").length?this.expand(t):this.element.is(":focus")||(this.element.trigger("focus",[!0]),this.active&&this.active.parents(".ui-menu").length===1&&clearTimeout(this.timer)))},"mouseenter .ui-menu-item":function(t){var n=e(t.currentTarget);n.siblings().children(".ui-state-active").removeClass("ui-state-active"),this.focus(t,n)},mouseleave:"collapseAll","mouseleave .ui-menu":"collapseAll",focus:function(e,t){var n=this.active||this.element.children(".ui-menu-item").eq(0);t||this.focus(e,n)},blur:function(t){this._delay(function(){e.contains(this.element[0],this.document[0].activeElement)||this.collapseAll(t)})},keydown:"_keydown"}),this.refresh(),this._on(this.document,{click:function(t){e(t.target).closest(".ui-menu").length||this.collapseAll(t),n=!1}})},_destroy:function(){this.element.removeAttr("aria-activedescendant").find(".ui-menu").andSelf().removeClass("ui-menu ui-widget ui-widget-content ui-corner-all ui-menu-icons").removeAttr("role").removeAttr("tabIndex").removeAttr("aria-labelledby").removeAttr("aria-expanded").removeAttr("aria-hidden").removeAttr("aria-disabled").removeUniqueId().show(),this.element.find(".ui-menu-item").removeClass("ui-menu-item").removeAttr("role").removeAttr("aria-disabled").children("a").removeUniqueId().removeClass("ui-corner-all ui-state-hover").removeAttr("tabIndex").removeAttr("role").removeAttr("aria-haspopup").children().each(function(){var t=e(this);t.data("ui-menu-submenu-carat")&&t.remove()}),this.element.find(".ui-menu-divider").removeClass("ui-menu-divider ui-widget-content")},_keydown:function(t){function a(e){return e.replace(/[\-\[\]{}()*+?.,\\\^$|#\s]/g,"\\$&")}var n,r,i,s,o,u=!0;switch(t.keyCode){case e.ui.keyCode.PAGE_UP:this.previousPage(t);break;case e.ui.keyCode.PAGE_DOWN:this.nextPage(t);break;case e.ui.keyCode.HOME:this._move("first","first",t);break;case e.ui.keyCode.END:this._move("last","last",t);break;case e.ui.keyCode.UP:this.previous(t);break;case e.ui.keyCode.DOWN:this.next(t);break;case e.ui.keyCode.LEFT:this.collapse(t);break;case e.ui.keyCode.RIGHT:this.active&&!this.active.is(".ui-state-disabled")&&this.expand(t);break;case e.ui.keyCode.ENTER:case e.ui.keyCode.SPACE:this._activate(t);break;case e.ui.keyCode.ESCAPE:this.collapse(t);break;default:u=!1,r=this.previousFilter||"",i=String.fromCharCode(t.keyCode),s=!1,clearTimeout(this.filterTimer),i===r?s=!0:i=r+i,o=new RegExp("^"+a(i),"i"),n=this.activeMenu.children(".ui-menu-item").filter(function(){return o.test(e(this).children("a").text())}),n=s&&n.index(this.active.next())!==-1?this.active.nextAll(".ui-menu-item"):n,n.length||(i=String.fromCharCode(t.keyCode),o=new 
RegExp("^"+a(i),"i"),n=this.activeMenu.children(".ui-menu-item").filter(function(){return o.test(e(this).children("a").text())})),n.length?(this.focus(t,n),n.length>1?(this.previousFilter=i,this.filterTimer=this._delay(function(){delete this.previousFilter},1e3)):delete this.previousFilter):delete this.previousFilter}u&&t.preventDefault()},_activate:function(e){this.active.is(".ui-state-disabled")||(this.active.children("a[aria-haspopup='true']").length?this.expand(e):this.select(e))},refresh:function(){var t,n=this.options.icons.submenu,r=this.element.find(this.options.menus+":not(.ui-menu)").addClass("ui-menu ui-widget ui-widget-content ui-corner-all").hide().attr({role:this.options.role,"aria-hidden":"true","aria-expanded":"false"});t=r.add(this.element),t.children(":not(.ui-menu-item):has(a)").addClass("ui-menu-item").attr("role","presentation").children("a").uniqueId().addClass("ui-corner-all").attr({tabIndex:-1,role:this._itemRole()}),t.children(":not(.ui-menu-item)").each(function(){var t=e(this);/[^\-—–\s]/.test(t.text())||t.addClass("ui-widget-content ui-menu-divider")}),t.children(".ui-state-disabled").attr("aria-disabled","true"),r.each(function(){var t=e(this),r=t.prev("a"),i=e("").addClass("ui-menu-icon ui-icon "+n).data("ui-menu-submenu-carat",!0);r.attr("aria-haspopup","true").prepend(i),t.attr("aria-labelledby",r.attr("id"))}),this.active&&!e.contains(this.element[0],this.active[0])&&this.blur()},_itemRole:function(){return{menu:"menuitem",listbox:"option"}[this.options.role]},focus:function(e,t){var n,r;this.blur(e,e&&e.type==="focus"),this._scrollIntoView(t),this.active=t.first(),r=this.active.children("a").addClass("ui-state-focus"),this.options.role&&this.element.attr("aria-activedescendant",r.attr("id")),this.active.parent().closest(".ui-menu-item").children("a:first").addClass("ui-state-active"),e&&e.type==="keydown"?this._close():this.timer=this._delay(function(){this._close()},this.delay),n=t.children(".ui-menu"),n.length&&/^mouse/.test(e.type)&&this._startOpening(n),this.activeMenu=t.parent(),this._trigger("focus",e,{item:t})},_scrollIntoView:function(t){var n,r,i,s,o,u;this._hasScroll()&&(n=parseFloat(e.css(this.activeMenu[0],"borderTopWidth"))||0,r=parseFloat(e.css(this.activeMenu[0],"paddingTop"))||0,i=t.offset().top-this.activeMenu.offset().top-n-r,s=this.activeMenu.scrollTop(),o=this.activeMenu.height(),u=t.height(),i<0?this.activeMenu.scrollTop(s+i):i+u>o&&this.activeMenu.scrollTop(s+i-o+u))},blur:function(e,t){t||clearTimeout(this.timer);if(!this.active)return;this.active.children("a").removeClass("ui-state-focus"),this.active=null,this._trigger("blur",e,{item:this.active})},_startOpening:function(e){clearTimeout(this.timer);if(e.attr("aria-hidden")!=="true")return;this.timer=this._delay(function(){this._close(),this._open(e)},this.delay)},_open:function(t){var n=e.extend({of:this.active},this.options.position);clearTimeout(this.timer),this.element.find(".ui-menu").not(t.parents(".ui-menu")).hide().attr("aria-hidden","true"),t.show().removeAttr("aria-hidden").attr("aria-expanded","true").position(n)},collapseAll:function(t,n){clearTimeout(this.timer),this.timer=this._delay(function(){var 
r=n?this.element:e(t&&t.target).closest(this.element.find(".ui-menu"));r.length||(r=this.element),this._close(r),this.blur(t),this.activeMenu=r},this.delay)},_close:function(e){e||(e=this.active?this.active.parent():this.element),e.find(".ui-menu").hide().attr("aria-hidden","true").attr("aria-expanded","false").end().find("a.ui-state-active").removeClass("ui-state-active")},collapse:function(e){var t=this.active&&this.active.parent().closest(".ui-menu-item",this.element);t&&t.length&&(this._close(),this.focus(e,t))},expand:function(e){var t=this.active&&this.active.children(".ui-menu ").children(".ui-menu-item").first();t&&t.length&&(this._open(t.parent()),this._delay(function(){this.focus(e,t)}))},next:function(e){this._move("next","first",e)},previous:function(e){this._move("prev","last",e)},isFirstItem:function(){return this.active&&!this.active.prevAll(".ui-menu-item").length},isLastItem:function(){return this.active&&!this.active.nextAll(".ui-menu-item").length},_move:function(e,t,n){var r;this.active&&(e==="first"||e==="last"?r=this.active[e==="first"?"prevAll":"nextAll"](".ui-menu-item").eq(-1):r=this.active[e+"All"](".ui-menu-item").eq(0));if(!r||!r.length||!this.active)r=this.activeMenu.children(".ui-menu-item")[t]();this.focus(n,r)},nextPage:function(t){var n,r,i;if(!this.active){this.next(t);return}if(this.isLastItem())return;this._hasScroll()?(r=this.active.offset().top,i=this.element.height(),this.active.nextAll(".ui-menu-item").each(function(){return n=e(this),n.offset().top-r-i<0}),this.focus(t,n)):this.focus(t,this.activeMenu.children(".ui-menu-item")[this.active?"last":"first"]())},previousPage:function(t){var n,r,i;if(!this.active){this.next(t);return}if(this.isFirstItem())return;this._hasScroll()?(r=this.active.offset().top,i=this.element.height(),this.active.prevAll(".ui-menu-item").each(function(){return n=e(this),n.offset().top-r+i>0}),this.focus(t,n)):this.focus(t,this.activeMenu.children(".ui-menu-item").first())},_hasScroll:function(){return this.element.outerHeight()
    ").appendTo(this.element),this.oldValue=this._value(),this._refreshValue()},_destroy:function(){this.element.removeClass("ui-progressbar ui-widget ui-widget-content ui-corner-all").removeAttr("role").removeAttr("aria-valuemin").removeAttr("aria-valuemax").removeAttr("aria-valuenow"),this.valueDiv.remove()},value:function(e){return e===t?this._value():(this._setOption("value",e),this)},_setOption:function(e,t){e==="value"&&(this.options.value=t,this._refreshValue(),this._value()===this.options.max&&this._trigger("complete")),this._super(e,t)},_value:function(){var e=this.options.value;return typeof e!="number"&&(e=0),Math.min(this.options.max,Math.max(this.min,e))},_percentage:function(){return 100*this._value()/this.options.max},_refreshValue:function(){var e=this.value(),t=this._percentage();this.oldValue!==e&&(this.oldValue=e,this._trigger("change")),this.valueDiv.toggle(e>this.min).toggleClass("ui-corner-right",e===this.options.max).width(t.toFixed(0)+"%"),this.element.attr("aria-valuenow",e)}})})(jQuery);(function(e,t){e.widget("ui.resizable",e.ui.mouse,{version:"1.9.1",widgetEventPrefix:"resize",options:{alsoResize:!1,animate:!1,animateDuration:"slow",animateEasing:"swing",aspectRatio:!1,autoHide:!1,containment:!1,ghost:!1,grid:!1,handles:"e,s,se",helper:!1,maxHeight:null,maxWidth:null,minHeight:10,minWidth:10,zIndex:1e3},_create:function(){var t=this,n=this.options;this.element.addClass("ui-resizable"),e.extend(this,{_aspectRatio:!!n.aspectRatio,aspectRatio:n.aspectRatio,originalElement:this.element,_proportionallyResizeElements:[],_helper:n.helper||n.ghost||n.animate?n.helper||"ui-resizable-helper":null}),this.element[0].nodeName.match(/canvas|textarea|input|select|button|img/i)&&(this.element.wrap(e('
    ').css({position:this.element.css("position"),width:this.element.outerWidth(),height:this.element.outerHeight(),top:this.element.css("top"),left:this.element.css("left")})),this.element=this.element.parent().data("resizable",this.element.data("resizable")),this.elementIsWrapper=!0,this.element.css({marginLeft:this.originalElement.css("marginLeft"),marginTop:this.originalElement.css("marginTop"),marginRight:this.originalElement.css("marginRight"),marginBottom:this.originalElement.css("marginBottom")}),this.originalElement.css({marginLeft:0,marginTop:0,marginRight:0,marginBottom:0}),this.originalResizeStyle=this.originalElement.css("resize"),this.originalElement.css("resize","none"),this._proportionallyResizeElements.push(this.originalElement.css({position:"static",zoom:1,display:"block"})),this.originalElement.css({margin:this.originalElement.css("margin")}),this._proportionallyResize()),this.handles=n.handles||(e(".ui-resizable-handle",this.element).length?{n:".ui-resizable-n",e:".ui-resizable-e",s:".ui-resizable-s",w:".ui-resizable-w",se:".ui-resizable-se",sw:".ui-resizable-sw",ne:".ui-resizable-ne",nw:".ui-resizable-nw"}:"e,s,se");if(this.handles.constructor==String){this.handles=="all"&&(this.handles="n,e,s,w,se,sw,ne,nw");var r=this.handles.split(",");this.handles={};for(var i=0;i
    ');u.css({zIndex:n.zIndex}),"se"==s&&u.addClass("ui-icon ui-icon-gripsmall-diagonal-se"),this.handles[s]=".ui-resizable-"+s,this.element.append(u)}}this._renderAxis=function(t){t=t||this.element;for(var n in this.handles){this.handles[n].constructor==String&&(this.handles[n]=e(this.handles[n],this.element).show());if(this.elementIsWrapper&&this.originalElement[0].nodeName.match(/textarea|input|select|button/i)){var r=e(this.handles[n],this.element),i=0;i=/sw|ne|nw|se|n|s/.test(n)?r.outerHeight():r.outerWidth();var s=["padding",/ne|nw|n/.test(n)?"Top":/se|sw|s/.test(n)?"Bottom":/^e$/.test(n)?"Right":"Left"].join("");t.css(s,i),this._proportionallyResize()}if(!e(this.handles[n]).length)continue}},this._renderAxis(this.element),this._handles=e(".ui-resizable-handle",this.element).disableSelection(),this._handles.mouseover(function(){if(!t.resizing){if(this.className)var e=this.className.match(/ui-resizable-(se|sw|ne|nw|n|e|s|w)/i);t.axis=e&&e[1]?e[1]:"se"}}),n.autoHide&&(this._handles.hide(),e(this.element).addClass("ui-resizable-autohide").mouseenter(function(){if(n.disabled)return;e(this).removeClass("ui-resizable-autohide"),t._handles.show()}).mouseleave(function(){if(n.disabled)return;t.resizing||(e(this).addClass("ui-resizable-autohide"),t._handles.hide())})),this._mouseInit()},_destroy:function(){this._mouseDestroy();var t=function(t){e(t).removeClass("ui-resizable ui-resizable-disabled ui-resizable-resizing").removeData("resizable").removeData("ui-resizable").unbind(".resizable").find(".ui-resizable-handle").remove()};if(this.elementIsWrapper){t(this.element);var n=this.element;this.originalElement.css({position:n.css("position"),width:n.outerWidth(),height:n.outerHeight(),top:n.css("top"),left:n.css("left")}).insertAfter(n),n.remove()}return this.originalElement.css("resize",this.originalResizeStyle),t(this.originalElement),this},_mouseCapture:function(t){var n=!1;for(var r in this.handles)e(this.handles[r])[0]==t.target&&(n=!0);return!this.options.disabled&&n},_mouseStart:function(t){var r=this.options,i=this.element.position(),s=this.element;this.resizing=!0,this.documentScroll={top:e(document).scrollTop(),left:e(document).scrollLeft()},(s.is(".ui-draggable")||/absolute/.test(s.css("position")))&&s.css({position:"absolute",top:i.top,left:i.left}),this._renderProxy();var o=n(this.helper.css("left")),u=n(this.helper.css("top"));r.containment&&(o+=e(r.containment).scrollLeft()||0,u+=e(r.containment).scrollTop()||0),this.offset=this.helper.offset(),this.position={left:o,top:u},this.size=this._helper?{width:s.outerWidth(),height:s.outerHeight()}:{width:s.width(),height:s.height()},this.originalSize=this._helper?{width:s.outerWidth(),height:s.outerHeight()}:{width:s.width(),height:s.height()},this.originalPosition={left:o,top:u},this.sizeDiff={width:s.outerWidth()-s.width(),height:s.outerHeight()-s.height()},this.originalMousePosition={left:t.pageX,top:t.pageY},this.aspectRatio=typeof r.aspectRatio=="number"?r.aspectRatio:this.originalSize.width/this.originalSize.height||1;var a=e(".ui-resizable-"+this.axis).css("cursor");return e("body").css("cursor",a=="auto"?this.axis+"-resize":a),s.addClass("ui-resizable-resizing"),this._propagate("start",t),!0},_mouseDrag:function(e){var t=this.helper,n=this.options,r={},i=this,s=this.originalMousePosition,o=this.axis,u=e.pageX-s.left||0,a=e.pageY-s.top||0,f=this._change[o];if(!f)return!1;var l=f.apply(this,[e,u,a]);this._updateVirtualBoundaries(e.shiftKey);if(this._aspectRatio||e.shiftKey)l=this._updateRatio(l,e);return 
l=this._respectSize(l,e),this._propagate("resize",e),t.css({top:this.position.top+"px",left:this.position.left+"px",width:this.size.width+"px",height:this.size.height+"px"}),!this._helper&&this._proportionallyResizeElements.length&&this._proportionallyResize(),this._updateCache(l),this._trigger("resize",e,this.ui()),!1},_mouseStop:function(t){this.resizing=!1;var n=this.options,r=this;if(this._helper){var i=this._proportionallyResizeElements,s=i.length&&/textarea/i.test(i[0].nodeName),o=s&&e.ui.hasScroll(i[0],"left")?0:r.sizeDiff.height,u=s?0:r.sizeDiff.width,a={width:r.helper.width()-u,height:r.helper.height()-o},f=parseInt(r.element.css("left"),10)+(r.position.left-r.originalPosition.left)||null,l=parseInt(r.element.css("top"),10)+(r.position.top-r.originalPosition.top)||null;n.animate||this.element.css(e.extend(a,{top:l,left:f})),r.helper.height(r.size.height),r.helper.width(r.size.width),this._helper&&!n.animate&&this._proportionallyResize()}return e("body").css("cursor","auto"),this.element.removeClass("ui-resizable-resizing"),this._propagate("stop",t),this._helper&&this.helper.remove(),!1},_updateVirtualBoundaries:function(e){var t=this.options,n,i,s,o,u;u={minWidth:r(t.minWidth)?t.minWidth:0,maxWidth:r(t.maxWidth)?t.maxWidth:Infinity,minHeight:r(t.minHeight)?t.minHeight:0,maxHeight:r(t.maxHeight)?t.maxHeight:Infinity};if(this._aspectRatio||e)n=u.minHeight*this.aspectRatio,s=u.minWidth/this.aspectRatio,i=u.maxHeight*this.aspectRatio,o=u.maxWidth/this.aspectRatio,n>u.minWidth&&(u.minWidth=n),s>u.minHeight&&(u.minHeight=s),ie.width,l=r(e.height)&&i.minHeight&&i.minHeight>e.height;f&&(e.width=i.minWidth),l&&(e.height=i.minHeight),u&&(e.width=i.maxWidth),a&&(e.height=i.maxHeight);var c=this.originalPosition.left+this.originalSize.width,h=this.position.top+this.size.height,p=/sw|nw|w/.test(o),d=/nw|ne|n/.test(o);f&&p&&(e.left=c-i.minWidth),u&&p&&(e.left=c-i.maxWidth),l&&d&&(e.top=h-i.minHeight),a&&d&&(e.top=h-i.maxHeight);var v=!e.width&&!e.height;return v&&!e.left&&e.top?e.top=null:v&&!e.top&&e.left&&(e.left=null),e},_proportionallyResize:function(){var t=this.options;if(!this._proportionallyResizeElements.length)return;var n=this.helper||this.element;for(var r=0;r
    ');var r=e.ui.ie6?1:0,i=e.ui.ie6?2:-1;this.helper.addClass(this._helper).css({width:this.element.outerWidth()+i,height:this.element.outerHeight()+i,position:"absolute",left:this.elementOffset.left-r+"px",top:this.elementOffset.top-r+"px",zIndex:++n.zIndex}),this.helper.appendTo("body").disableSelection()}else this.helper=this.element},_change:{e:function(e,t,n){return{width:this.originalSize.width+t}},w:function(e,t,n){var r=this.options,i=this.originalSize,s=this.originalPosition;return{left:s.left+t,width:i.width-t}},n:function(e,t,n){var r=this.options,i=this.originalSize,s=this.originalPosition;return{top:s.top+n,height:i.height-n}},s:function(e,t,n){return{height:this.originalSize.height+n}},se:function(t,n,r){return e.extend(this._change.s.apply(this,arguments),this._change.e.apply(this,[t,n,r]))},sw:function(t,n,r){return e.extend(this._change.s.apply(this,arguments),this._change.w.apply(this,[t,n,r]))},ne:function(t,n,r){return e.extend(this._change.n.apply(this,arguments),this._change.e.apply(this,[t,n,r]))},nw:function(t,n,r){return e.extend(this._change.n.apply(this,arguments),this._change.w.apply(this,[t,n,r]))}},_propagate:function(t,n){e.ui.plugin.call(this,t,[n,this.ui()]),t!="resize"&&this._trigger(t,n,this.ui())},plugins:{},ui:function(){return{originalElement:this.originalElement,element:this.element,helper:this.helper,position:this.position,size:this.size,originalSize:this.originalSize,originalPosition:this.originalPosition}}}),e.ui.plugin.add("resizable","alsoResize",{start:function(t,n){var r=e(this).data("resizable"),i=r.options,s=function(t){e(t).each(function(){var t=e(this);t.data("resizable-alsoresize",{width:parseInt(t.width(),10),height:parseInt(t.height(),10),left:parseInt(t.css("left"),10),top:parseInt(t.css("top"),10)})})};typeof i.alsoResize=="object"&&!i.alsoResize.parentNode?i.alsoResize.length?(i.alsoResize=i.alsoResize[0],s(i.alsoResize)):e.each(i.alsoResize,function(e){s(e)}):s(i.alsoResize)},resize:function(t,n){var r=e(this).data("resizable"),i=r.options,s=r.originalSize,o=r.originalPosition,u={height:r.size.height-s.height||0,width:r.size.width-s.width||0,top:r.position.top-o.top||0,left:r.position.left-o.left||0},a=function(t,r){e(t).each(function(){var t=e(this),i=e(this).data("resizable-alsoresize"),s={},o=r&&r.length?r:t.parents(n.originalElement[0]).length?["width","height"]:["width","height","top","left"];e.each(o,function(e,t){var n=(i[t]||0)+(u[t]||0);n&&n>=0&&(s[t]=n||null)}),t.css(s)})};typeof i.alsoResize=="object"&&!i.alsoResize.nodeType?e.each(i.alsoResize,function(e,t){a(e,t)}):a(i.alsoResize)},stop:function(t,n){e(this).removeData("resizable-alsoresize")}}),e.ui.plugin.add("resizable","animate",{stop:function(t,n){var r=e(this).data("resizable"),i=r.options,s=r._proportionallyResizeElements,o=s.length&&/textarea/i.test(s[0].nodeName),u=o&&e.ui.hasScroll(s[0],"left")?0:r.sizeDiff.height,a=o?0:r.sizeDiff.width,f={width:r.size.width-a,height:r.size.height-u},l=parseInt(r.element.css("left"),10)+(r.position.left-r.originalPosition.left)||null,c=parseInt(r.element.css("top"),10)+(r.position.top-r.originalPosition.top)||null;r.element.animate(e.extend(f,c&&l?{top:c,left:l}:{}),{duration:i.animateDuration,easing:i.animateEasing,step:function(){var 
n={width:parseInt(r.element.css("width"),10),height:parseInt(r.element.css("height"),10),top:parseInt(r.element.css("top"),10),left:parseInt(r.element.css("left"),10)};s&&s.length&&e(s[0]).css({width:n.width,height:n.height}),r._updateCache(n),r._propagate("resize",t)}})}}),e.ui.plugin.add("resizable","containment",{start:function(t,r){var i=e(this).data("resizable"),s=i.options,o=i.element,u=s.containment,a=u instanceof e?u.get(0):/parent/.test(u)?o.parent().get(0):u;if(!a)return;i.containerElement=e(a);if(/document/.test(u)||u==document)i.containerOffset={left:0,top:0},i.containerPosition={left:0,top:0},i.parentData={element:e(document),left:0,top:0,width:e(document).width(),height:e(document).height()||document.body.parentNode.scrollHeight};else{var f=e(a),l=[];e(["Top","Right","Left","Bottom"]).each(function(e,t){l[e]=n(f.css("padding"+t))}),i.containerOffset=f.offset(),i.containerPosition=f.position(),i.containerSize={height:f.innerHeight()-l[3],width:f.innerWidth()-l[1]};var c=i.containerOffset,h=i.containerSize.height,p=i.containerSize.width,d=e.ui.hasScroll(a,"left")?a.scrollWidth:p,v=e.ui.hasScroll(a)?a.scrollHeight:h;i.parentData={element:a,left:c.left,top:c.top,width:d,height:v}}},resize:function(t,n){var r=e(this).data("resizable"),i=r.options,s=r.containerSize,o=r.containerOffset,u=r.size,a=r.position,f=r._aspectRatio||t.shiftKey,l={top:0,left:0},c=r.containerElement;c[0]!=document&&/static/.test(c.css("position"))&&(l=o),a.left<(r._helper?o.left:0)&&(r.size.width=r.size.width+(r._helper?r.position.left-o.left:r.position.left-l.left),f&&(r.size.height=r.size.width/r.aspectRatio),r.position.left=i.helper?o.left:0),a.top<(r._helper?o.top:0)&&(r.size.height=r.size.height+(r._helper?r.position.top-o.top:r.position.top),f&&(r.size.width=r.size.height*r.aspectRatio),r.position.top=r._helper?o.top:0),r.offset.left=r.parentData.left+r.position.left,r.offset.top=r.parentData.top+r.position.top;var h=Math.abs((r._helper?r.offset.left-l.left:r.offset.left-l.left)+r.sizeDiff.width),p=Math.abs((r._helper?r.offset.top-l.top:r.offset.top-o.top)+r.sizeDiff.height),d=r.containerElement.get(0)==r.element.parent().get(0),v=/relative|absolute/.test(r.containerElement.css("position"));d&&v&&(h-=r.parentData.left),h+r.size.width>=r.parentData.width&&(r.size.width=r.parentData.width-h,f&&(r.size.height=r.size.width/r.aspectRatio)),p+r.size.height>=r.parentData.height&&(r.size.height=r.parentData.height-p,f&&(r.size.width=r.size.height*r.aspectRatio))},stop:function(t,n){var r=e(this).data("resizable"),i=r.options,s=r.position,o=r.containerOffset,u=r.containerPosition,a=r.containerElement,f=e(r.helper),l=f.offset(),c=f.outerWidth()-r.sizeDiff.width,h=f.outerHeight()-r.sizeDiff.height;r._helper&&!i.animate&&/relative/.test(a.css("position"))&&e(this).css({left:l.left-u.left-o.left,width:c,height:h}),r._helper&&!i.animate&&/static/.test(a.css("position"))&&e(this).css({left:l.left-u.left-o.left,width:c,height:h})}}),e.ui.plugin.add("resizable","ghost",{start:function(t,n){var r=e(this).data("resizable"),i=r.options,s=r.size;r.ghost=r.originalElement.clone(),r.ghost.css({opacity:.25,display:"block",position:"relative",height:s.height,width:s.width,margin:0,left:0,top:0}).addClass("ui-resizable-ghost").addClass(typeof i.ghost=="string"?i.ghost:""),r.ghost.appendTo(r.helper)},resize:function(t,n){var r=e(this).data("resizable"),i=r.options;r.ghost&&r.ghost.css({position:"relative",height:r.size.height,width:r.size.width})},stop:function(t,n){var 
r=e(this).data("resizable"),i=r.options;r.ghost&&r.helper&&r.helper.get(0).removeChild(r.ghost.get(0))}}),e.ui.plugin.add("resizable","grid",{resize:function(t,n){var r=e(this).data("resizable"),i=r.options,s=r.size,o=r.originalSize,u=r.originalPosition,a=r.axis,f=i._aspectRatio||t.shiftKey;i.grid=typeof i.grid=="number"?[i.grid,i.grid]:i.grid;var l=Math.round((s.width-o.width)/(i.grid[0]||1))*(i.grid[0]||1),c=Math.round((s.height-o.height)/(i.grid[1]||1))*(i.grid[1]||1);/^(se|s|e)$/.test(a)?(r.size.width=o.width+l,r.size.height=o.height+c):/^(ne)$/.test(a)?(r.size.width=o.width+l,r.size.height=o.height+c,r.position.top=u.top-c):/^(sw)$/.test(a)?(r.size.width=o.width+l,r.size.height=o.height+c,r.position.left=u.left-l):(r.size.width=o.width+l,r.size.height=o.height+c,r.position.top=u.top-c,r.position.left=u.left-l)}});var n=function(e){return parseInt(e,10)||0},r=function(e){return!isNaN(parseInt(e,10))}})(jQuery);(function(e,t){e.widget("ui.selectable",e.ui.mouse,{version:"1.9.1",options:{appendTo:"body",autoRefresh:!0,distance:0,filter:"*",tolerance:"touch"},_create:function(){var t=this;this.element.addClass("ui-selectable"),this.dragged=!1;var n;this.refresh=function(){n=e(t.options.filter,t.element[0]),n.addClass("ui-selectee"),n.each(function(){var t=e(this),n=t.offset();e.data(this,"selectable-item",{element:this,$element:t,left:n.left,top:n.top,right:n.left+t.outerWidth(),bottom:n.top+t.outerHeight(),startselected:!1,selected:t.hasClass("ui-selected"),selecting:t.hasClass("ui-selecting"),unselecting:t.hasClass("ui-unselecting")})})},this.refresh(),this.selectees=n.addClass("ui-selectee"),this._mouseInit(),this.helper=e("
    ")},_destroy:function(){this.selectees.removeClass("ui-selectee").removeData("selectable-item"),this.element.removeClass("ui-selectable ui-selectable-disabled"),this._mouseDestroy()},_mouseStart:function(t){var n=this;this.opos=[t.pageX,t.pageY];if(this.options.disabled)return;var r=this.options;this.selectees=e(r.filter,this.element[0]),this._trigger("start",t),e(r.appendTo).append(this.helper),this.helper.css({left:t.clientX,top:t.clientY,width:0,height:0}),r.autoRefresh&&this.refresh(),this.selectees.filter(".ui-selected").each(function(){var r=e.data(this,"selectable-item");r.startselected=!0,!t.metaKey&&!t.ctrlKey&&(r.$element.removeClass("ui-selected"),r.selected=!1,r.$element.addClass("ui-unselecting"),r.unselecting=!0,n._trigger("unselecting",t,{unselecting:r.element}))}),e(t.target).parents().andSelf().each(function(){var r=e.data(this,"selectable-item");if(r){var i=!t.metaKey&&!t.ctrlKey||!r.$element.hasClass("ui-selected");return r.$element.removeClass(i?"ui-unselecting":"ui-selected").addClass(i?"ui-selecting":"ui-unselecting"),r.unselecting=!i,r.selecting=i,r.selected=i,i?n._trigger("selecting",t,{selecting:r.element}):n._trigger("unselecting",t,{unselecting:r.element}),!1}})},_mouseDrag:function(t){var n=this;this.dragged=!0;if(this.options.disabled)return;var r=this.options,i=this.opos[0],s=this.opos[1],o=t.pageX,u=t.pageY;if(i>o){var a=o;o=i,i=a}if(s>u){var a=u;u=s,s=a}return this.helper.css({left:i,top:s,width:o-i,height:u-s}),this.selectees.each(function(){var a=e.data(this,"selectable-item");if(!a||a.element==n.element[0])return;var f=!1;r.tolerance=="touch"?f=!(a.left>o||a.rightu||a.bottomi&&a.rights&&a.bottom
    ").appendTo(this.element).addClass("ui-slider-range ui-widget-header"+(i.range==="min"||i.range==="max"?" ui-slider-range-"+i.range:""))),r=i.values&&i.values.length||1;for(t=s.length;tn&&(i=n,s=e(this),o=t)}),c.range===!0&&this.values(1)===c.min&&(o+=1,s=e(this.handles[o])),u=this._start(t,o),u===!1?!1:(this._mouseSliding=!0,this._handleIndex=o,s.addClass("ui-state-active").focus(),a=s.offset(),f=!e(t.target).parents().andSelf().is(".ui-slider-handle"),this._clickOffset=f?{left:0,top:0}:{left:t.pageX-a.left-s.width()/2,top:t.pageY-a.top-s.height()/2-(parseInt(s.css("borderTopWidth"),10)||0)-(parseInt(s.css("borderBottomWidth"),10)||0)+(parseInt(s.css("marginTop"),10)||0)},this.handles.hasClass("ui-state-hover")||this._slide(t,o,r),this._animateOff=!0,!0))},_mouseStart:function(){return!0},_mouseDrag:function(e){var t={x:e.pageX,y:e.pageY},n=this._normValueFromMouse(t);return this._slide(e,this._handleIndex,n),!1},_mouseStop:function(e){return this.handles.removeClass("ui-state-active"),this._mouseSliding=!1,this._stop(e,this._handleIndex),this._change(e,this._handleIndex),this._handleIndex=null,this._clickOffset=null,this._animateOff=!1,!1},_detectOrientation:function(){this.orientation=this.options.orientation==="vertical"?"vertical":"horizontal"},_normValueFromMouse:function(e){var t,n,r,i,s;return this.orientation==="horizontal"?(t=this.elementSize.width,n=e.x-this.elementOffset.left-(this._clickOffset?this._clickOffset.left:0)):(t=this.elementSize.height,n=e.y-this.elementOffset.top-(this._clickOffset?this._clickOffset.top:0)),r=n/t,r>1&&(r=1),r<0&&(r=0),this.orientation==="vertical"&&(r=1-r),i=this._valueMax()-this._valueMin(),s=this._valueMin()+r*i,this._trimAlignValue(s)},_start:function(e,t){var n={handle:this.handles[t],value:this.value()};return this.options.values&&this.options.values.length&&(n.value=this.values(t),n.values=this.values()),this._trigger("start",e,n)},_slide:function(e,t,n){var r,i,s;this.options.values&&this.options.values.length?(r=this.values(t?0:1),this.options.values.length===2&&this.options.range===!0&&(t===0&&n>r||t===1&&n1){this.options.values[t]=this._trimAlignValue(n),this._refreshValue(),this._change(null,t);return}if(!arguments.length)return this._values();if(!e.isArray(arguments[0]))return this.options.values&&this.options.values.length?this._values(t):this.value();r=this.options.values,i=arguments[0];for(s=0;s=this._valueMax())return this._valueMax();var t=this.options.step>0?this.options.step:1,n=(e-this._valueMin())%t,r=e-n;return Math.abs(n)*2>=t&&(r+=n>0?t:-t),parseFloat(r.toFixed(5))},_valueMin:function(){return this.options.min},_valueMax:function(){return this.options.max},_refreshValue:function(){var 
t,n,r,i,s,o=this.options.range,u=this.options,a=this,f=this._animateOff?!1:u.animate,l={};this.options.values&&this.options.values.length?this.handles.each(function(r){n=(a.values(r)-a._valueMin())/(a._valueMax()-a._valueMin())*100,l[a.orientation==="horizontal"?"left":"bottom"]=n+"%",e(this).stop(1,1)[f?"animate":"css"](l,u.animate),a.options.range===!0&&(a.orientation==="horizontal"?(r===0&&a.range.stop(1,1)[f?"animate":"css"]({left:n+"%"},u.animate),r===1&&a.range[f?"animate":"css"]({width:n-t+"%"},{queue:!1,duration:u.animate})):(r===0&&a.range.stop(1,1)[f?"animate":"css"]({bottom:n+"%"},u.animate),r===1&&a.range[f?"animate":"css"]({height:n-t+"%"},{queue:!1,duration:u.animate}))),t=n}):(r=this.value(),i=this._valueMin(),s=this._valueMax(),n=s!==i?(r-i)/(s-i)*100:0,l[this.orientation==="horizontal"?"left":"bottom"]=n+"%",this.handle.stop(1,1)[f?"animate":"css"](l,u.animate),o==="min"&&this.orientation==="horizontal"&&this.range.stop(1,1)[f?"animate":"css"]({width:n+"%"},u.animate),o==="max"&&this.orientation==="horizontal"&&this.range[f?"animate":"css"]({width:100-n+"%"},{queue:!1,duration:u.animate}),o==="min"&&this.orientation==="vertical"&&this.range.stop(1,1)[f?"animate":"css"]({height:n+"%"},u.animate),o==="max"&&this.orientation==="vertical"&&this.range[f?"animate":"css"]({height:100-n+"%"},{queue:!1,duration:u.animate}))}})})(jQuery);(function(e,t){e.widget("ui.sortable",e.ui.mouse,{version:"1.9.1",widgetEventPrefix:"sort",ready:!1,options:{appendTo:"parent",axis:!1,connectWith:!1,containment:!1,cursor:"auto",cursorAt:!1,dropOnEmpty:!0,forcePlaceholderSize:!1,forceHelperSize:!1,grid:!1,handle:!1,helper:"original",items:"> *",opacity:!1,placeholder:!1,revert:!1,scroll:!0,scrollSensitivity:20,scrollSpeed:20,scope:"default",tolerance:"intersect",zIndex:1e3},_create:function(){var e=this.options;this.containerCache={},this.element.addClass("ui-sortable"),this.refresh(),this.floating=this.items.length?e.axis==="x"||/left|right/.test(this.items[0].item.css("float"))||/inline|table-cell/.test(this.items[0].item.css("display")):!1,this.offset=this.element.offset(),this._mouseInit(),this.ready=!0},_destroy:function(){this.element.removeClass("ui-sortable ui-sortable-disabled"),this._mouseDestroy();for(var e=this.items.length-1;e>=0;e--)this.items[e].item.removeData(this.widgetName+"-item");return this},_setOption:function(t,n){t==="disabled"?(this.options[t]=n,this.widget().toggleClass("ui-sortable-disabled",!!n)):e.Widget.prototype._setOption.apply(this,arguments)},_mouseCapture:function(t,n){var r=this;if(this.reverting)return!1;if(this.options.disabled||this.options.type=="static")return!1;this._refreshItems(t);var i=null,s=e(t.target).parents().each(function(){if(e.data(this,r.widgetName+"-item")==r)return i=e(this),!1});e.data(t.target,r.widgetName+"-item")==r&&(i=e(t.target));if(!i)return!1;if(this.options.handle&&!n){var o=!1;e(this.options.handle,i).find("*").andSelf().each(function(){this==t.target&&(o=!0)});if(!o)return!1}return this.currentItem=i,this._removeCurrentsFromItems(),!0},_mouseStart:function(t,n,r){var 
i=this.options;this.currentContainer=this,this.refreshPositions(),this.helper=this._createHelper(t),this._cacheHelperProportions(),this._cacheMargins(),this.scrollParent=this.helper.scrollParent(),this.offset=this.currentItem.offset(),this.offset={top:this.offset.top-this.margins.top,left:this.offset.left-this.margins.left},e.extend(this.offset,{click:{left:t.pageX-this.offset.left,top:t.pageY-this.offset.top},parent:this._getParentOffset(),relative:this._getRelativeOffset()}),this.helper.css("position","absolute"),this.cssPosition=this.helper.css("position"),this.originalPosition=this._generatePosition(t),this.originalPageX=t.pageX,this.originalPageY=t.pageY,i.cursorAt&&this._adjustOffsetFromHelper(i.cursorAt),this.domPosition={prev:this.currentItem.prev()[0],parent:this.currentItem.parent()[0]},this.helper[0]!=this.currentItem[0]&&this.currentItem.hide(),this._createPlaceholder(),i.containment&&this._setContainment(),i.cursor&&(e("body").css("cursor")&&(this._storedCursor=e("body").css("cursor")),e("body").css("cursor",i.cursor)),i.opacity&&(this.helper.css("opacity")&&(this._storedOpacity=this.helper.css("opacity")),this.helper.css("opacity",i.opacity)),i.zIndex&&(this.helper.css("zIndex")&&(this._storedZIndex=this.helper.css("zIndex")),this.helper.css("zIndex",i.zIndex)),this.scrollParent[0]!=document&&this.scrollParent[0].tagName!="HTML"&&(this.overflowOffset=this.scrollParent.offset()),this._trigger("start",t,this._uiHash()),this._preserveHelperProportions||this._cacheHelperProportions();if(!r)for(var s=this.containers.length-1;s>=0;s--)this.containers[s]._trigger("activate",t,this._uiHash(this));return e.ui.ddmanager&&(e.ui.ddmanager.current=this),e.ui.ddmanager&&!i.dropBehaviour&&e.ui.ddmanager.prepareOffsets(this,t),this.dragging=!0,this.helper.addClass("ui-sortable-helper"),this._mouseDrag(t),!0},_mouseDrag:function(t){this.position=this._generatePosition(t),this.positionAbs=this._convertPositionTo("absolute"),this.lastPositionAbs||(this.lastPositionAbs=this.positionAbs);if(this.options.scroll){var n=this.options,r=!1;this.scrollParent[0]!=document&&this.scrollParent[0].tagName!="HTML"?(this.overflowOffset.top+this.scrollParent[0].offsetHeight-t.pageY=0;i--){var s=this.items[i],o=s.item[0],u=this._intersectsWithPointer(s);if(!u)continue;if(s.instance!==this.currentContainer)continue;if(o!=this.currentItem[0]&&this.placeholder[u==1?"next":"prev"]()[0]!=o&&!e.contains(this.placeholder[0],o)&&(this.options.type=="semi-dynamic"?!e.contains(this.element[0],o):!0)){this.direction=u==1?"down":"up";if(this.options.tolerance!="pointer"&&!this._intersectsWithSides(s))break;this._rearrange(t,s),this._trigger("change",t,this._uiHash());break}}return this._contactContainers(t),e.ui.ddmanager&&e.ui.ddmanager.drag(this,t),this._trigger("sort",t,this._uiHash()),this.lastPositionAbs=this.positionAbs,!1},_mouseStop:function(t,n){if(!t)return;e.ui.ddmanager&&!this.options.dropBehaviour&&e.ui.ddmanager.drop(this,t);if(this.options.revert){var r=this,i=this.placeholder.offset();this.reverting=!0,e(this.helper).animate({left:i.left-this.offset.parent.left-this.margins.left+(this.offsetParent[0]==document.body?0:this.offsetParent[0].scrollLeft),top:i.top-this.offset.parent.top-this.margins.top+(this.offsetParent[0]==document.body?0:this.offsetParent[0].scrollTop)},parseInt(this.options.revert,10)||500,function(){r._clear(t)})}else 
this._clear(t,n);return!1},cancel:function(){if(this.dragging){this._mouseUp({target:null}),this.options.helper=="original"?this.currentItem.css(this._storedCSS).removeClass("ui-sortable-helper"):this.currentItem.show();for(var t=this.containers.length-1;t>=0;t--)this.containers[t]._trigger("deactivate",null,this._uiHash(this)),this.containers[t].containerCache.over&&(this.containers[t]._trigger("out",null,this._uiHash(this)),this.containers[t].containerCache.over=0)}return this.placeholder&&(this.placeholder[0].parentNode&&this.placeholder[0].parentNode.removeChild(this.placeholder[0]),this.options.helper!="original"&&this.helper&&this.helper[0].parentNode&&this.helper.remove(),e.extend(this,{helper:null,dragging:!1,reverting:!1,_noFinalSort:null}),this.domPosition.prev?e(this.domPosition.prev).after(this.currentItem):e(this.domPosition.parent).prepend(this.currentItem)),this},serialize:function(t){var n=this._getItemsAsjQuery(t&&t.connected),r=[];return t=t||{},e(n).each(function(){var n=(e(t.item||this).attr(t.attribute||"id")||"").match(t.expression||/(.+)[-=_](.+)/);n&&r.push((t.key||n[1]+"[]")+"="+(t.key&&t.expression?n[1]:n[2]))}),!r.length&&t.key&&r.push(t.key+"="),r.join("&")},toArray:function(t){var n=this._getItemsAsjQuery(t&&t.connected),r=[];return t=t||{},n.each(function(){r.push(e(t.item||this).attr(t.attribute||"id")||"")}),r},_intersectsWith:function(e){var t=this.positionAbs.left,n=t+this.helperProportions.width,r=this.positionAbs.top,i=r+this.helperProportions.height,s=e.left,o=s+e.width,u=e.top,a=u+e.height,f=this.offset.click.top,l=this.offset.click.left,c=r+f>u&&r+fs&&t+le[this.floating?"width":"height"]?c:s0?"down":"up")},_getDragHorizontalDirection:function(){var e=this.positionAbs.left-this.lastPositionAbs.left;return e!=0&&(e>0?"right":"left")},refresh:function(e){return this._refreshItems(e),this.refreshPositions(),this},_connectWith:function(){var e=this.options;return e.connectWith.constructor==String?[e.connectWith]:e.connectWith},_getItemsAsjQuery:function(t){var n=[],r=[],i=this._connectWith();if(i&&t)for(var s=i.length-1;s>=0;s--){var o=e(i[s]);for(var u=o.length-1;u>=0;u--){var a=e.data(o[u],this.widgetName);a&&a!=this&&!a.options.disabled&&r.push([e.isFunction(a.options.items)?a.options.items.call(a.element):e(a.options.items,a.element).not(".ui-sortable-helper").not(".ui-sortable-placeholder"),a])}}r.push([e.isFunction(this.options.items)?this.options.items.call(this.element,null,{options:this.options,item:this.currentItem}):e(this.options.items,this.element).not(".ui-sortable-helper").not(".ui-sortable-placeholder"),this]);for(var s=r.length-1;s>=0;s--)r[s][0].each(function(){n.push(this)});return e(n)},_removeCurrentsFromItems:function(){var t=this.currentItem.find(":data("+this.widgetName+"-item)");this.items=e.grep(this.items,function(e){for(var n=0;n=0;s--){var o=e(i[s]);for(var u=o.length-1;u>=0;u--){var a=e.data(o[u],this.widgetName);a&&a!=this&&!a.options.disabled&&(r.push([e.isFunction(a.options.items)?a.options.items.call(a.element[0],t,{item:this.currentItem}):e(a.options.items,a.element),a]),this.containers.push(a))}}for(var s=r.length-1;s>=0;s--){var f=r[s][1],l=r[s][0];for(var u=0,c=l.length;u=0;n--){var r=this.items[n];if(r.instance!=this.currentContainer&&this.currentContainer&&r.item[0]!=this.currentItem[0])continue;var i=this.options.toleranceElement?e(this.options.toleranceElement,r.item):r.item;t||(r.width=i.outerWidth(),r.height=i.outerHeight());var 
s=i.offset();r.left=s.left,r.top=s.top}if(this.options.custom&&this.options.custom.refreshContainers)this.options.custom.refreshContainers.call(this);else for(var n=this.containers.length-1;n>=0;n--){var s=this.containers[n].element.offset();this.containers[n].containerCache.left=s.left,this.containers[n].containerCache.top=s.top,this.containers[n].containerCache.width=this.containers[n].element.outerWidth(),this.containers[n].containerCache.height=this.containers[n].element.outerHeight()}return this},_createPlaceholder:function(t){t=t||this;var n=t.options;if(!n.placeholder||n.placeholder.constructor==String){var r=n.placeholder;n.placeholder={element:function(){var n=e(document.createElement(t.currentItem[0].nodeName)).addClass(r||t.currentItem[0].className+" ui-sortable-placeholder").removeClass("ui-sortable-helper")[0];return r||(n.style.visibility="hidden"),n},update:function(e,i){if(r&&!n.forcePlaceholderSize)return;i.height()||i.height(t.currentItem.innerHeight()-parseInt(t.currentItem.css("paddingTop")||0,10)-parseInt(t.currentItem.css("paddingBottom")||0,10)),i.width()||i.width(t.currentItem.innerWidth()-parseInt(t.currentItem.css("paddingLeft")||0,10)-parseInt(t.currentItem.css("paddingRight")||0,10))}}}t.placeholder=e(n.placeholder.element.call(t.element,t.currentItem)),t.currentItem.after(t.placeholder),n.placeholder.update(t,t.placeholder)},_contactContainers:function(t){var n=null,r=null;for(var i=this.containers.length-1;i>=0;i--){if(e.contains(this.currentItem[0],this.containers[i].element[0]))continue;if(this._intersectsWith(this.containers[i].containerCache)){if(n&&e.contains(this.containers[i].element[0],n.element[0]))continue;n=this.containers[i],r=i}else this.containers[i].containerCache.over&&(this.containers[i]._trigger("out",t,this._uiHash(this)),this.containers[i].containerCache.over=0)}if(!n)return;if(this.containers.length===1)this.containers[r]._trigger("over",t,this._uiHash(this)),this.containers[r].containerCache.over=1;else{var s=1e4,o=null,u=this.containers[r].floating?"left":"top",a=this.containers[r].floating?"width":"height",f=this.positionAbs[u]+this.offset.click[u];for(var l=this.items.length-1;l>=0;l--){if(!e.contains(this.containers[r].element[0],this.items[l].item[0]))continue;if(this.items[l].item[0]==this.currentItem[0])continue;var c=this.items[l].item.offset()[u],h=!1;Math.abs(c-f)>Math.abs(c+this.items[l][a]-f)&&(h=!0,c+=this.items[l][a]),Math.abs(c-f)this.containment[2]&&(s=this.containment[2]+this.offset.click.left),t.pageY-this.offset.click.top>this.containment[3]&&(o=this.containment[3]+this.offset.click.top));if(n.grid){var u=this.originalPageY+Math.round((o-this.originalPageY)/n.grid[1])*n.grid[1];o=this.containment?u-this.offset.click.topthis.containment[3]?u-this.offset.click.topthis.containment[2]?a-this.offset.click.left=0;i--)n||r.push(function(e){return function(t){e._trigger("deactivate",t,this._uiHash(this))}}.call(this,this.containers[i])),this.containers[i].containerCache.over&&(r.push(function(e){return function(t){e._trigger("out",t,this._uiHash(this))}}.call(this,this.containers[i])),this.containers[i].containerCache.over=0);this._storedCursor&&e("body").css("cursor",this._storedCursor),this._storedOpacity&&this.helper.css("opacity",this._storedOpacity),this._storedZIndex&&this.helper.css("zIndex",this._storedZIndex=="auto"?"":this._storedZIndex),this.dragging=!1;if(this.cancelHelperRemoval){if(!n){this._trigger("beforeStop",t,this._uiHash());for(var 
i=0;i",widgetEventPrefix:"spin",options:{culture:null,icons:{down:"ui-icon-triangle-1-s",up:"ui-icon-triangle-1-n"},incremental:!0,max:null,min:null,numberFormat:null,page:10,step:1,change:null,spin:null,start:null,stop:null},_create:function(){this._setOption("max",this.options.max),this._setOption("min",this.options.min),this._setOption("step",this.options.step),this._value(this.element.val(),!0),this._draw(),this._on(this._events),this._refresh(),this._on(this.window,{beforeunload:function(){this.element.removeAttr("autocomplete")}})},_getCreateOptions:function(){var t={},n=this.element;return e.each(["min","max","step"],function(e,r){var i=n.attr(r);i!==undefined&&i.length&&(t[r]=i)}),t},_events:{keydown:function(e){this._start(e)&&this._keydown(e)&&e.preventDefault()},keyup:"_stop",focus:function(){this.previous=this.element.val()},blur:function(e){if(this.cancelBlur){delete this.cancelBlur;return}this._refresh(),this.previous!==this.element.val()&&this._trigger("change",e)},mousewheel:function(e,t){if(!t)return;if(!this.spinning&&!this._start(e))return!1;this._spin((t>0?1:-1)*this.options.step,e),clearTimeout(this.mousewheelTimer),this.mousewheelTimer=this._delay(function(){this.spinning&&this._stop(e)},100),e.preventDefault()},"mousedown .ui-spinner-button":function(t){function r(){var e=this.element[0]===this.document[0].activeElement;e||(this.element.focus(),this.previous=n,this._delay(function(){this.previous=n}))}var n;n=this.element[0]===this.document[0].activeElement?this.previous:this.element.val(),t.preventDefault(),r.call(this),this.cancelBlur=!0,this._delay(function(){delete this.cancelBlur,r.call(this)});if(this._start(t)===!1)return;this._repeat(null,e(t.currentTarget).hasClass("ui-spinner-up")?1:-1,t)},"mouseup .ui-spinner-button":"_stop","mouseenter .ui-spinner-button":function(t){if(!e(t.currentTarget).hasClass("ui-state-active"))return;if(this._start(t)===!1)return!1;this._repeat(null,e(t.currentTarget).hasClass("ui-spinner-up")?1:-1,t)},"mouseleave .ui-spinner-button":"_stop"},_draw:function(){var e=this.uiSpinner=this.element.addClass("ui-spinner-input").attr("autocomplete","off").wrap(this._uiSpinnerHtml()).parent().append(this._buttonHtml());this.element.attr("role","spinbutton"),this.buttons=e.find(".ui-spinner-button").attr("tabIndex",-1).button().removeClass("ui-corner-all"),this.buttons.height()>Math.ceil(e.height()*.5)&&e.height()>0&&e.height(e.height()),this.options.disabled&&this.disable()},_keydown:function(t){var n=this.options,r=e.ui.keyCode;switch(t.keyCode){case r.UP:return this._repeat(null,1,t),!0;case r.DOWN:return this._repeat(null,-1,t),!0;case r.PAGE_UP:return this._repeat(null,n.page,t),!0;case r.PAGE_DOWN:return this._repeat(null,-n.page,t),!0}return!1},_uiSpinnerHtml:function(){return""},_buttonHtml:function(){return""+""+""+""+""},_start:function(e){return!this.spinning&&this._trigger("start",e)===!1?!1:(this.counter||(this.counter=1),this.spinning=!0,!0)},_repeat:function(e,t,n){e=e||500,clearTimeout(this.timer),this.timer=this._delay(function(){this._repeat(40,t,n)},e),this._spin(t*this.options.step,n)},_spin:function(e,t){var n=this.value()||0;this.counter||(this.counter=1),n=this._adjustValue(n+e*this._increment(this.counter));if(!this.spinning||this._trigger("spin",t,{value:n})!==!1)this._value(n),this.counter++},_increment:function(t){var n=this.options.incremental;return n?e.isFunction(n)?n(t):Math.floor(t*t*t/5e4-t*t/500+17*t/200+1):1},_precision:function(){var e=this._precisionOf(this.options.step);return 
this.options.min!==null&&(e=Math.max(e,this._precisionOf(this.options.min))),e},_precisionOf:function(e){var t=e.toString(),n=t.indexOf(".");return n===-1?0:t.length-n-1},_adjustValue:function(e){var t,n,r=this.options;return t=r.min!==null?r.min:0,n=e-t,n=Math.round(n/r.step)*r.step,e=t+n,e=parseFloat(e.toFixed(this._precision())),r.max!==null&&e>r.max?r.max:r.min!==null&&e1&&e.href.replace(r,"")===location.href.replace(r,"")}var n=0,r=/#.*$/;e.widget("ui.tabs",{version:"1.9.1",delay:300,options:{active:null,collapsible:!1,event:"click",heightStyle:"content",hide:null,show:null,activate:null,beforeActivate:null,beforeLoad:null,load:null},_create:function(){var t=this,n=this.options,r=n.active,i=location.hash.substring(1);this.running=!1,this.element.addClass("ui-tabs ui-widget ui-widget-content ui-corner-all").toggleClass("ui-tabs-collapsible",n.collapsible).delegate(".ui-tabs-nav > li","mousedown"+this.eventNamespace,function(t){e(this).is(".ui-state-disabled")&&t.preventDefault()}).delegate(".ui-tabs-anchor","focus"+this.eventNamespace,function(){e(this).closest("li").is(".ui-state-disabled")&&this.blur()}),this._processTabs();if(r===null){i&&this.tabs.each(function(t,n){if(e(n).attr("aria-controls")===i)return r=t,!1}),r===null&&(r=this.tabs.index(this.tabs.filter(".ui-tabs-active")));if(r===null||r===-1)r=this.tabs.length?0:!1}r!==!1&&(r=this.tabs.index(this.tabs.eq(r)),r===-1&&(r=n.collapsible?!1:0)),n.active=r,!n.collapsible&&n.active===!1&&this.anchors.length&&(n.active=0),e.isArray(n.disabled)&&(n.disabled=e.unique(n.disabled.concat(e.map(this.tabs.filter(".ui-state-disabled"),function(e){return t.tabs.index(e)}))).sort()),this.options.active!==!1&&this.anchors.length?this.active=this._findActive(this.options.active):this.active=e(),this._refresh(),this.active.length&&this.load(n.active)},_getCreateEventData:function(){return{tab:this.active,panel:this.active.length?this._getPanelForTab(this.active):e()}},_tabKeydown:function(t){var n=e(this.document[0].activeElement).closest("li"),r=this.tabs.index(n),i=!0;if(this._handlePageNav(t))return;switch(t.keyCode){case e.ui.keyCode.RIGHT:case e.ui.keyCode.DOWN:r++;break;case e.ui.keyCode.UP:case e.ui.keyCode.LEFT:i=!1,r--;break;case e.ui.keyCode.END:r=this.anchors.length-1;break;case e.ui.keyCode.HOME:r=0;break;case e.ui.keyCode.SPACE:t.preventDefault(),clearTimeout(this.activating),this._activate(r);return;case e.ui.keyCode.ENTER:t.preventDefault(),clearTimeout(this.activating),this._activate(r===this.options.active?!1:r);return;default:return}t.preventDefault(),clearTimeout(this.activating),r=this._focusNextTab(r,i),t.ctrlKey||(n.attr("aria-selected","false"),this.tabs.eq(r).attr("aria-selected","true"),this.activating=this._delay(function(){this.option("active",r)},this.delay))},_panelKeydown:function(t){if(this._handlePageNav(t))return;t.ctrlKey&&t.keyCode===e.ui.keyCode.UP&&(t.preventDefault(),this.active.focus())},_handlePageNav:function(t){if(t.altKey&&t.keyCode===e.ui.keyCode.PAGE_UP)return this._activate(this._focusNextTab(this.options.active-1,!1)),!0;if(t.altKey&&t.keyCode===e.ui.keyCode.PAGE_DOWN)return this._activate(this._focusNextTab(this.options.active+1,!0)),!0},_findNextTab:function(t,n){function i(){return t>r&&(t=0),t<0&&(t=r),t}var r=this.tabs.length-1;while(e.inArray(i(),this.options.disabled)!==-1)t=n?t+1:t-1;return t},_focusNextTab:function(e,t){return 
e=this._findNextTab(e,t),this.tabs.eq(e).focus(),e},_setOption:function(e,t){if(e==="active"){this._activate(t);return}if(e==="disabled"){this._setupDisabled(t);return}this._super(e,t),e==="collapsible"&&(this.element.toggleClass("ui-tabs-collapsible",t),!t&&this.options.active===!1&&this._activate(0)),e==="event"&&this._setupEvents(t),e==="heightStyle"&&this._setupHeightStyle(t)},_tabId:function(e){return e.attr("aria-controls")||"ui-tabs-"+i()},_sanitizeSelector:function(e){return e?e.replace(/[!"$%&'()*+,.\/:;<=>?@\[\]\^`{|}~]/g,"\\$&"):""},refresh:function(){var t=this.options,n=this.tablist.children(":has(a[href])");t.disabled=e.map(n.filter(".ui-state-disabled"),function(e){return n.index(e)}),this._processTabs(),t.active===!1||!this.anchors.length?(t.active=!1,this.active=e()):this.active.length&&!e.contains(this.tablist[0],this.active[0])?this.tabs.length===t.disabled.length?(t.active=!1,this.active=e()):this._activate(this._findNextTab(Math.max(0,t.active-1),!1)):t.active=this.tabs.index(this.active),this._refresh()},_refresh:function(){this._setupDisabled(this.options.disabled),this._setupEvents(this.options.event),this._setupHeightStyle(this.options.heightStyle),this.tabs.not(this.active).attr({"aria-selected":"false",tabIndex:-1}),this.panels.not(this._getPanelForTab(this.active)).hide().attr({"aria-expanded":"false","aria-hidden":"true"}),this.active.length?(this.active.addClass("ui-tabs-active ui-state-active").attr({"aria-selected":"true",tabIndex:0}),this._getPanelForTab(this.active).show().attr({"aria-expanded":"true","aria-hidden":"false"})):this.tabs.eq(0).attr("tabIndex",0)},_processTabs:function(){var t=this;this.tablist=this._getList().addClass("ui-tabs-nav ui-helper-reset ui-helper-clearfix ui-widget-header ui-corner-all").attr("role","tablist"),this.tabs=this.tablist.find("> li:has(a[href])").addClass("ui-state-default ui-corner-top").attr({role:"tab",tabIndex:-1}),this.anchors=this.tabs.map(function(){return e("a",this)[0]}).addClass("ui-tabs-anchor").attr({role:"presentation",tabIndex:-1}),this.panels=e(),this.anchors.each(function(n,r){var i,o,u,a=e(r).uniqueId().attr("id"),f=e(r).closest("li"),l=f.attr("aria-controls");s(r)?(i=r.hash,o=t.element.find(t._sanitizeSelector(i))):(u=t._tabId(f),i="#"+u,o=t.element.find(i),o.length||(o=t._createPanel(u),o.insertAfter(t.panels[n-1]||t.tablist)),o.attr("aria-live","polite")),o.length&&(t.panels=t.panels.add(o)),l&&f.data("ui-tabs-aria-controls",l),f.attr({"aria-controls":i.substring(1),"aria-labelledby":a}),o.attr("aria-labelledby",a)}),this.panels.addClass("ui-tabs-panel ui-widget-content ui-corner-bottom").attr("role","tabpanel")},_getList:function(){return this.element.find("ol,ul").eq(0)},_createPanel:function(t){return e("
    ").attr("id",t).addClass("ui-tabs-panel ui-widget-content ui-corner-bottom").data("ui-tabs-destroy",!0)},_setupDisabled:function(t){e.isArray(t)&&(t.length?t.length===this.anchors.length&&(t=!0):t=!1);for(var n=0,r;r=this.tabs[n];n++)t===!0||e.inArray(n,t)!==-1?e(r).addClass("ui-state-disabled").attr("aria-disabled","true"):e(r).removeClass("ui-state-disabled").removeAttr("aria-disabled");this.options.disabled=t},_setupEvents:function(t){var n={click:function(e){e.preventDefault()}};t&&e.each(t.split(" "),function(e,t){n[t]="_eventHandler"}),this._off(this.anchors.add(this.tabs).add(this.panels)),this._on(this.anchors,n),this._on(this.tabs,{keydown:"_tabKeydown"}),this._on(this.panels,{keydown:"_panelKeydown"}),this._focusable(this.tabs),this._hoverable(this.tabs)},_setupHeightStyle:function(t){var n,r,i=this.element.parent();t==="fill"?(e.support.minHeight||(r=i.css("overflow"),i.css("overflow","hidden")),n=i.height(),this.element.siblings(":visible").each(function(){var t=e(this),r=t.css("position");if(r==="absolute"||r==="fixed")return;n-=t.outerHeight(!0)}),r&&i.css("overflow",r),this.element.children().not(this.panels).each(function(){n-=e(this).outerHeight(!0)}),this.panels.each(function(){e(this).height(Math.max(0,n-e(this).innerHeight()+e(this).height()))}).css("overflow","auto")):t==="auto"&&(n=0,this.panels.each(function(){n=Math.max(n,e(this).height("").height())}).height(n))},_eventHandler:function(t){var n=this.options,r=this.active,i=e(t.currentTarget),s=i.closest("li"),o=s[0]===r[0],u=o&&n.collapsible,a=u?e():this._getPanelForTab(s),f=r.length?this._getPanelForTab(r):e(),l={oldTab:r,oldPanel:f,newTab:u?e():s,newPanel:a};t.preventDefault();if(s.hasClass("ui-state-disabled")||s.hasClass("ui-tabs-loading")||this.running||o&&!n.collapsible||this._trigger("beforeActivate",t,l)===!1)return;n.active=u?!1:this.tabs.index(s),this.active=o?e():s,this.xhr&&this.xhr.abort(),!f.length&&!a.length&&e.error("jQuery UI Tabs: Mismatching fragment identifier."),a.length&&this.load(this.tabs.index(s),t),this._toggle(t,l)},_toggle:function(t,n){function o(){r.running=!1,r._trigger("activate",t,n)}function u(){n.newTab.closest("li").addClass("ui-tabs-active ui-state-active"),i.length&&r.options.show?r._show(i,r.options.show,o):(i.show(),o())}var r=this,i=n.newPanel,s=n.oldPanel;this.running=!0,s.length&&this.options.hide?this._hide(s,this.options.hide,function(){n.oldTab.closest("li").removeClass("ui-tabs-active ui-state-active"),u()}):(n.oldTab.closest("li").removeClass("ui-tabs-active ui-state-active"),s.hide(),u()),s.attr({"aria-expanded":"false","aria-hidden":"true"}),n.oldTab.attr("aria-selected","false"),i.length&&s.length?n.oldTab.attr("tabIndex",-1):i.length&&this.tabs.filter(function(){return e(this).attr("tabIndex")===0}).attr("tabIndex",-1),i.attr({"aria-expanded":"true","aria-hidden":"false"}),n.newTab.attr({"aria-selected":"true",tabIndex:0})},_activate:function(t){var n,r=this._findActive(t);if(r[0]===this.active[0])return;r.length||(r=this.active),n=r.find(".ui-tabs-anchor")[0],this._eventHandler({target:n,currentTarget:n,preventDefault:e.noop})},_findActive:function(t){return t===!1?e():this.tabs.eq(t)},_getIndex:function(e){return typeof e=="string"&&(e=this.anchors.index(this.anchors.filter("[href$='"+e+"']"))),e},_destroy:function(){this.xhr&&this.xhr.abort(),this.element.removeClass("ui-tabs ui-widget ui-widget-content ui-corner-all ui-tabs-collapsible"),this.tablist.removeClass("ui-tabs-nav ui-helper-reset ui-helper-clearfix ui-widget-header 
ui-corner-all").removeAttr("role"),this.anchors.removeClass("ui-tabs-anchor").removeAttr("role").removeAttr("tabIndex").removeData("href.tabs").removeData("load.tabs").removeUniqueId(),this.tabs.add(this.panels).each(function(){e.data(this,"ui-tabs-destroy")?e(this).remove():e(this).removeClass("ui-state-default ui-state-active ui-state-disabled ui-corner-top ui-corner-bottom ui-widget-content ui-tabs-active ui-tabs-panel").removeAttr("tabIndex").removeAttr("aria-live").removeAttr("aria-busy").removeAttr("aria-selected").removeAttr("aria-labelledby").removeAttr("aria-hidden").removeAttr("aria-expanded").removeAttr("role")}),this.tabs.each(function(){var t=e(this),n=t.data("ui-tabs-aria-controls");n?t.attr("aria-controls",n):t.removeAttr("aria-controls")}),this.options.heightStyle!=="content"&&this.panels.css("height","")},enable:function(n){var r=this.options.disabled;if(r===!1)return;n===t?r=!1:(n=this._getIndex(n),e.isArray(r)?r=e.map(r,function(e){return e!==n?e:null}):r=e.map(this.tabs,function(e,t){return t!==n?t:null})),this._setupDisabled(r)},disable:function(n){var r=this.options.disabled;if(r===!0)return;if(n===t)r=!0;else{n=this._getIndex(n);if(e.inArray(n,r)!==-1)return;e.isArray(r)?r=e.merge([n],r).sort():r=[n]}this._setupDisabled(r)},load:function(t,n){t=this._getIndex(t);var r=this,i=this.tabs.eq(t),o=i.find(".ui-tabs-anchor"),u=this._getPanelForTab(i),a={tab:i,panel:u};if(s(o[0]))return;this.xhr=e.ajax(this._ajaxSettings(o,n,a)),this.xhr&&this.xhr.statusText!=="canceled"&&(i.addClass("ui-tabs-loading"),u.attr("aria-busy","true"),this.xhr.success(function(e){setTimeout(function(){u.html(e),r._trigger("load",n,a)},1)}).complete(function(e,t){setTimeout(function(){t==="abort"&&r.panels.stop(!1,!0),i.removeClass("ui-tabs-loading"),u.removeAttr("aria-busy"),e===r.xhr&&delete r.xhr},1)}))},_ajaxSettings:function(t,n,r){var i=this;return{url:t.attr("href"),beforeSend:function(t,s){return i._trigger("beforeLoad",n,e.extend({jqXHR:t,ajaxSettings:s},r))}}},_getPanelForTab:function(t){var n=e(t).attr("aria-controls");return this.element.find(this._sanitizeSelector("#"+n))}}),e.uiBackCompat!==!1&&(e.ui.tabs.prototype._ui=function(e,t){return{tab:e,panel:t,index:this.anchors.index(e)}},e.widget("ui.tabs",e.ui.tabs,{url:function(e,t){this.anchors.eq(e).attr("href",t)}}),e.widget("ui.tabs",e.ui.tabs,{options:{ajaxOptions:null,cache:!1},_create:function(){this._super();var t=this;this._on({tabsbeforeload:function(n,r){if(e.data(r.tab[0],"cache.tabs")){n.preventDefault();return}r.jqXHR.success(function(){t.options.cache&&e.data(r.tab[0],"cache.tabs",!0)})}})},_ajaxSettings:function(t,n,r){var i=this.options.ajaxOptions;return e.extend({},i,{error:function(e,t){try{i.error(e,t,r.tab.closest("li").index(),r.tab[0])}catch(n){}}},this._superApply(arguments))},_setOption:function(e,t){e==="cache"&&t===!1&&this.anchors.removeData("cache.tabs"),this._super(e,t)},_destroy:function(){this.anchors.removeData("cache.tabs"),this._super()},url:function(e){this.anchors.eq(e).removeData("cache.tabs"),this._superApply(arguments)}}),e.widget("ui.tabs",e.ui.tabs,{abort:function(){this.xhr&&this.xhr.abort()}}),e.widget("ui.tabs",e.ui.tabs,{options:{spinner:"Loading…"},_create:function(){this._super(),this._on({tabsbeforeload:function(e,t){if(e.target!==this.element[0]||!this.options.spinner)return;var n=t.tab.find("span"),r=n.html();n.html(this.options.spinner),t.jqXHR.complete(function(){n.html(r)})}})}}),e.widget("ui.tabs",e.ui.tabs,{options:{enable:null,disable:null},enable:function(t){var 
n=this.options,r;if(t&&n.disabled===!0||e.isArray(n.disabled)&&e.inArray(t,n.disabled)!==-1)r=!0;this._superApply(arguments),r&&this._trigger("enable",null,this._ui(this.anchors[t],this.panels[t]))},disable:function(t){var n=this.options,r;if(t&&n.disabled===!1||e.isArray(n.disabled)&&e.inArray(t,n.disabled)===-1)r=!0;this._superApply(arguments),r&&this._trigger("disable",null,this._ui(this.anchors[t],this.panels[t]))}}),e.widget("ui.tabs",e.ui.tabs,{options:{add:null,remove:null,tabTemplate:"
  • #{label}
  • "},add:function(n,r,i){i===t&&(i=this.anchors.length);var s,o,u=this.options,a=e(u.tabTemplate.replace(/#\{href\}/g,n).replace(/#\{label\}/g,r)),f=n.indexOf("#")?this._tabId(a):n.replace("#","");return a.addClass("ui-state-default ui-corner-top").data("ui-tabs-destroy",!0),a.attr("aria-controls",f),s=i>=this.tabs.length,o=this.element.find("#"+f),o.length||(o=this._createPanel(f),s?i>0?o.insertAfter(this.panels.eq(-1)):o.appendTo(this.element):o.insertBefore(this.panels[i])),o.addClass("ui-tabs-panel ui-widget-content ui-corner-bottom").hide(),s?a.appendTo(this.tablist):a.insertBefore(this.tabs[i]),u.disabled=e.map(u.disabled,function(e){return e>=i?++e:e}),this.refresh(),this.tabs.length===1&&u.active===!1&&this.option("active",0),this._trigger("add",null,this._ui(this.anchors[i],this.panels[i])),this},remove:function(t){t=this._getIndex(t);var n=this.options,r=this.tabs.eq(t).remove(),i=this._getPanelForTab(r).remove();return r.hasClass("ui-tabs-active")&&this.anchors.length>2&&this._activate(t+(t+1=t?--e:e}),this.refresh(),this._trigger("remove",null,this._ui(r.find("a")[0],i[0])),this}}),e.widget("ui.tabs",e.ui.tabs,{length:function(){return this.anchors.length}}),e.widget("ui.tabs",e.ui.tabs,{options:{idPrefix:"ui-tabs-"},_tabId:function(t){var n=t.is("li")?t.find("a[href]"):t;return n=n[0],e(n).closest("li").attr("aria-controls")||n.title&&n.title.replace(/\s/g,"_").replace(/[^\w\u00c0-\uFFFF\-]/g,"")||this.options.idPrefix+i()}}),e.widget("ui.tabs",e.ui.tabs,{options:{panelTemplate:"
    "},_createPanel:function(t){return e(this.options.panelTemplate).attr("id",t).addClass("ui-tabs-panel ui-widget-content ui-corner-bottom").data("ui-tabs-destroy",!0)}}),e.widget("ui.tabs",e.ui.tabs,{_create:function(){var e=this.options;e.active===null&&e.selected!==t&&(e.active=e.selected===-1?!1:e.selected),this._super(),e.selected=e.active,e.selected===!1&&(e.selected=-1)},_setOption:function(e,t){if(e!=="selected")return this._super(e,t);var n=this.options;this._super("active",t===-1?!1:t),n.selected=n.active,n.selected===!1&&(n.selected=-1)},_eventHandler:function(){this._superApply(arguments),this.options.selected=this.options.active,this.options.selected===!1&&(this.options.selected=-1)}}),e.widget("ui.tabs",e.ui.tabs,{options:{show:null,select:null},_create:function(){this._super(),this.options.active!==!1&&this._trigger("show",null,this._ui(this.active.find(".ui-tabs-anchor")[0],this._getPanelForTab(this.active)[0]))},_trigger:function(e,t,n){var r=this._superApply(arguments);return r?(e==="beforeActivate"&&n.newTab.length?r=this._super("select",t,{tab:n.newTab.find(".ui-tabs-anchor")[0],panel:n.newPanel[0],index:n.newTab.closest("li").index()}):e==="activate"&&n.newTab.length&&(r=this._super("show",t,{tab:n.newTab.find(".ui-tabs-anchor")[0],panel:n.newPanel[0],index:n.newTab.closest("li").index()})),r):!1}}),e.widget("ui.tabs",e.ui.tabs,{select:function(e){e=this._getIndex(e);if(e===-1){if(!this.options.collapsible||this.options.selected===-1)return;e=this.options.selected}this.anchors.eq(e).trigger(this.options.event+this.eventNamespace)}}),function(){var t=0;e.widget("ui.tabs",e.ui.tabs,{options:{cookie:null},_create:function(){var e=this.options,t;e.active==null&&e.cookie&&(t=parseInt(this._cookie(),10),t===-1&&(t=!1),e.active=t),this._super()},_cookie:function(n){var r=[this.cookie||(this.cookie=this.options.cookie.name||"ui-tabs-"+ ++t)];return arguments.length&&(r.push(n===!1?-1:n),r.push(this.options.cookie)),e.cookie.apply(null,r)},_refresh:function(){this._super(),this.options.cookie&&this._cookie(this.options.active,this.options.cookie)},_eventHandler:function(){this._superApply(arguments),this.options.cookie&&this._cookie(this.options.active,this.options.cookie)},_destroy:function(){this._super(),this.options.cookie&&this._cookie(null,this.options.cookie)}})}(),e.widget("ui.tabs",e.ui.tabs,{_trigger:function(t,n,r){var i=e.extend({},r);return t==="load"&&(i.panel=i.panel[0],i.tab=i.tab.find(".ui-tabs-anchor")[0]),this._super(t,n,i)}}),e.widget("ui.tabs",e.ui.tabs,{options:{fx:null},_getFx:function(){var t,n,r=this.options.fx;return r&&(e.isArray(r)?(t=r[0],n=r[1]):t=n=r),r?{show:n,hide:t}:null},_toggle:function(e,t){function o(){n.running=!1,n._trigger("activate",e,t)}function u(){t.newTab.closest("li").addClass("ui-tabs-active ui-state-active"),r.length&&s.show?r.animate(s.show,s.show.duration,function(){o()}):(r.show(),o())}var n=this,r=t.newPanel,i=t.oldPanel,s=this._getFx();if(!s)return this._super(e,t);n.running=!0,i.length&&s.hide?i.animate(s.hide,s.hide.duration,function(){t.oldTab.closest("li").removeClass("ui-tabs-active ui-state-active"),u()}):(t.oldTab.closest("li").removeClass("ui-tabs-active ui-state-active"),i.hide(),u())}}))})(jQuery);(function(e){function n(t,n){var r=(t.attr("aria-describedby")||"").split(/\s+/);r.push(n),t.data("ui-tooltip-id",n).attr("aria-describedby",e.trim(r.join(" ")))}function r(t){var 
n=t.data("ui-tooltip-id"),r=(t.attr("aria-describedby")||"").split(/\s+/),i=e.inArray(n,r);i!==-1&&r.splice(i,1),t.removeData("ui-tooltip-id"),r=e.trim(r.join(" ")),r?t.attr("aria-describedby",r):t.removeAttr("aria-describedby")}var t=0;e.widget("ui.tooltip",{version:"1.9.1",options:{content:function(){return e(this).attr("title")},hide:!0,items:"[title]:not([disabled])",position:{my:"left top+15",at:"left bottom",collision:"flipfit flipfit"},show:!0,tooltipClass:null,track:!1,close:null,open:null},_create:function(){this._on({mouseover:"open",focusin:"open"}),this.tooltips={},this.parents={},this.options.disabled&&this._disable()},_setOption:function(t,n){var r=this;if(t==="disabled"){this[n?"_disable":"_enable"](),this.options[t]=n;return}this._super(t,n),t==="content"&&e.each(this.tooltips,function(e,t){r._updateContent(t)})},_disable:function(){var t=this;e.each(this.tooltips,function(n,r){var i=e.Event("blur");i.target=i.currentTarget=r[0],t.close(i,!0)}),this.element.find(this.options.items).andSelf().each(function(){var t=e(this);t.is("[title]")&&t.data("ui-tooltip-title",t.attr("title")).attr("title","")})},_enable:function(){this.element.find(this.options.items).andSelf().each(function(){var t=e(this);t.data("ui-tooltip-title")&&t.attr("title",t.data("ui-tooltip-title"))})},open:function(t){var n=this,r=e(t?t.target:this.element).closest(this.options.items);if(!r.length)return;if(this.options.track&&r.data("ui-tooltip-id")){this._find(r).position(e.extend({of:r},this.options.position)),this._off(this.document,"mousemove");return}r.attr("title")&&r.data("ui-tooltip-title",r.attr("title")),r.data("tooltip-open",!0),t&&t.type==="mouseover"&&r.parents().each(function(){var t;e(this).data("tooltip-open")&&(t=e.Event("blur"),t.target=t.currentTarget=this,n.close(t,!0)),this.title&&(e(this).uniqueId(),n.parents[this.id]={element:this,title:this.title},this.title="")}),this._updateContent(r,t)},_updateContent:function(e,t){var n,r=this.options.content,i=this;if(typeof r=="string")return this._open(t,e,r);n=r.call(e[0],function(n){if(!e.data("tooltip-open"))return;i._delay(function(){this._open(t,e,n)})}),n&&this._open(t,e,n)},_open:function(t,r,i){function f(e){a.of=e;if(s.is(":hidden"))return;s.position(a)}var s,o,u,a=e.extend({},this.options.position);if(!i)return;s=this._find(r);if(s.length){s.find(".ui-tooltip-content").html(i);return}r.is("[title]")&&(t&&t.type==="mouseover"?r.attr("title",""):r.removeAttr("title")),s=this._tooltip(r),n(r,s.attr("id")),s.find(".ui-tooltip-content").html(i),this.options.track&&t&&/^mouse/.test(t.originalEvent.type)?(this._on(this.document,{mousemove:f}),f(t)):s.position(e.extend({of:r},this.options.position)),s.hide(),this._show(s,this.options.show),this.options.show&&this.options.show.delay&&(u=setInterval(function(){s.is(":visible")&&(f(a.of),clearInterval(u))},e.fx.interval)),this._trigger("open",t,{tooltip:s}),o={keyup:function(t){if(t.keyCode===e.ui.keyCode.ESCAPE){var n=e.Event(t);n.currentTarget=r[0],this.close(n,!0)}},remove:function(){this._removeTooltip(s)}};if(!t||t.type==="mouseover")o.mouseleave="close";if(!t||t.type==="focusin")o.focusout="close";this._on(r,o)},close:function(t){var n=this,i=e(t?t.currentTarget:this.element),s=this._find(i);if(this.closing)return;i.data("ui-tooltip-title")&&i.attr("title",i.data("ui-tooltip-title")),r(i),s.stop(!0),this._hide(s,this.options.hide,function(){n._removeTooltip(e(this))}),i.removeData("tooltip-open"),this._off(i,"mouseleave focusout 
keyup"),i[0]!==this.element[0]&&this._off(i,"remove"),this._off(this.document,"mousemove"),t&&t.type==="mouseleave"&&e.each(this.parents,function(e,t){t.element.title=t.title,delete n.parents[e]}),this.closing=!0,this._trigger("close",t,{tooltip:s}),this.closing=!1},_tooltip:function(n){var r="ui-tooltip-"+t++,i=e("
    ").attr({id:r,role:"tooltip"}).addClass("ui-tooltip ui-widget ui-corner-all ui-widget-content "+(this.options.tooltipClass||""));return e("
    ").addClass("ui-tooltip-content").appendTo(i),i.appendTo(this.document[0].body),e.fn.bgiframe&&i.bgiframe(),this.tooltips[r]=n,i},_find:function(t){var n=t.data("ui-tooltip-id");return n?e("#"+n):e()},_removeTooltip:function(e){e.remove(),delete this.tooltips[e.attr("id")]},_destroy:function(){var t=this;e.each(this.tooltips,function(n,r){var i=e.Event("blur");i.target=i.currentTarget=r[0],t.close(i,!0),e("#"+n).remove(),r.data("ui-tooltip-title")&&(r.attr("title",r.data("ui-tooltip-title")),r.removeData("ui-tooltip-title"))})}})})(jQuery); \ No newline at end of file From 5d7449d2b8bcd0963d172fc30df784279671176f Mon Sep 17 00:00:00 2001 From: Inigo Goiri Date: Thu, 14 Jun 2018 13:43:14 -0700 Subject: [PATCH 04/70] HDFS-13675. Speed up TestDFSAdminWithHA. Contributed by Lukas Majercak. --- .../org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java index c6139c13e4..b85a8d8b18 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.HAUtil; import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.namenode.ha.BootstrapStandby; @@ -104,6 +105,10 @@ private void setUpHaCluster(boolean security) throws Exception { conf.setInt( CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_RETRY_INTERVAL_KEY, 500); + conf.setInt(HdfsClientConfigKeys.Failover.MAX_ATTEMPTS_KEY, 2); + conf.setInt(HdfsClientConfigKeys.Retry.MAX_ATTEMPTS_KEY, 2); + conf.setInt(HdfsClientConfigKeys.Failover.SLEEPTIME_BASE_KEY, 0); + conf.setInt(HdfsClientConfigKeys.Failover.SLEEPTIME_MAX_KEY, 0); } @After From 020dd61988b1d47971e328174135d54baf5d41aa Mon Sep 17 00:00:00 2001 From: Anu Engineer Date: Thu, 14 Jun 2018 14:14:24 -0700 Subject: [PATCH 05/70] HDDS-146. Refactor the structure of the acceptance tests. Contributed by Elek, Marton. 
--- .../dev-support/bin/robot-all.sh | 2 +- .../dev-support/bin/robot-dnd-all.sh | 63 +++++ .../acceptance-test/dev-support/bin/robot.sh | 9 +- .../dev-support/docker/Dockerfile | 21 ++ .../dev-support/docker/docker-compose.yaml | 23 ++ hadoop-ozone/acceptance-test/pom.xml | 1 + .../test/{compose => acceptance/basic}/.env | 2 +- .../src/test/acceptance/basic/basic.robot | 50 ++++ .../basic}/docker-compose.yaml | 0 .../basic}/docker-config | 4 +- .../test/acceptance/basic/ozone-shell.robot | 85 ++++++ .../commonlib.robot} | 67 ++--- .../src/test/acceptance/ozonefs/.env | 17 ++ .../acceptance/ozone-shell.robot | 256 ------------------ start-build-env.sh | 8 +- 15 files changed, 296 insertions(+), 312 deletions(-) create mode 100755 hadoop-ozone/acceptance-test/dev-support/bin/robot-dnd-all.sh create mode 100644 hadoop-ozone/acceptance-test/dev-support/docker/Dockerfile create mode 100644 hadoop-ozone/acceptance-test/dev-support/docker/docker-compose.yaml rename hadoop-ozone/acceptance-test/src/test/{compose => acceptance/basic}/.env (93%) create mode 100644 hadoop-ozone/acceptance-test/src/test/acceptance/basic/basic.robot rename hadoop-ozone/acceptance-test/src/test/{compose => acceptance/basic}/docker-compose.yaml (100%) rename hadoop-ozone/acceptance-test/src/test/{compose => acceptance/basic}/docker-config (93%) create mode 100644 hadoop-ozone/acceptance-test/src/test/acceptance/basic/ozone-shell.robot rename hadoop-ozone/acceptance-test/src/test/{robotframework/acceptance/ozone.robot => acceptance/commonlib.robot} (54%) create mode 100644 hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/.env delete mode 100644 hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone-shell.robot diff --git a/hadoop-ozone/acceptance-test/dev-support/bin/robot-all.sh b/hadoop-ozone/acceptance-test/dev-support/bin/robot-all.sh index 0e212a2834..ee9c6b80a5 100755 --- a/hadoop-ozone/acceptance-test/dev-support/bin/robot-all.sh +++ b/hadoop-ozone/acceptance-test/dev-support/bin/robot-all.sh @@ -15,4 +15,4 @@ # limitations under the License. DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" -$DIR/robot.sh $DIR/../../src/test/robotframework/acceptance \ No newline at end of file +$DIR/robot.sh $DIR/../../src/test/acceptance diff --git a/hadoop-ozone/acceptance-test/dev-support/bin/robot-dnd-all.sh b/hadoop-ozone/acceptance-test/dev-support/bin/robot-dnd-all.sh new file mode 100755 index 0000000000..9f1d367141 --- /dev/null +++ b/hadoop-ozone/acceptance-test/dev-support/bin/robot-dnd-all.sh @@ -0,0 +1,63 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +set -x + +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +#Dir of the definition of the dind-based test execution container +DOCKERDIR="$DIR/../docker" + +#Dir to save the results +TARGETDIR="$DIR/../../target/dnd" + +#Dir to mount the distribution from +OZONEDIST="$DIR/../../../../hadoop-dist/target/ozone" + +#Name and image name of the temporary, dind-based test containers +DOCKER_IMAGE_NAME=ozoneacceptance +DOCKER_INSTANCE_NAME="${DOCKER_INSTANCE_NAME:-ozoneacceptance}" + +teardown() { + docker stop "$DOCKER_INSTANCE_NAME" +} + +trap teardown EXIT + +#Make sure it will work even if ozone is built by another user. We +# enable running the distribution as another user +mkdir -p "$TARGETDIR" +mkdir -p "$OZONEDIST/logs" +chmod o+w "$OZONEDIST/logs" || true +chmod -R o+w "$OZONEDIST/etc/hadoop" || true +chmod o+w "$OZONEDIST" || true + +rm "$TARGETDIR/docker-compose.log" +docker rm "$DOCKER_INSTANCE_NAME" || true +docker build -t "$DOCKER_IMAGE_NAME" $DIR/../docker + +#Starting the dind-based environment +docker run --rm -v $DIR/../../../..:/opt/hadoop --privileged -d --name "$DOCKER_INSTANCE_NAME" $DOCKER_IMAGE_NAME +sleep 5 + +#Starting the tests +docker exec "$DOCKER_INSTANCE_NAME" /opt/hadoop/hadoop-ozone/acceptance-test/dev-support/bin/robot-all.sh +RESULT=$? + +docker cp "$DOCKER_INSTANCE_NAME:/root/log.html" "$TARGETDIR/" +docker cp "$DOCKER_INSTANCE_NAME:/root/junit-results.xml" "$TARGETDIR/" +docker cp "$DOCKER_INSTANCE_NAME:/root/docker-compose.log" "$TARGETDIR/" +exit $RESULT diff --git a/hadoop-ozone/acceptance-test/dev-support/bin/robot.sh b/hadoop-ozone/acceptance-test/dev-support/bin/robot.sh index b651f76d2f..ef2a111066 100755 --- a/hadoop-ozone/acceptance-test/dev-support/bin/robot.sh +++ b/hadoop-ozone/acceptance-test/dev-support/bin/robot.sh @@ -14,10 +14,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +set -x + DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" -#basedir is the directory of the whole hadoop project. Used to calculate the -#exact path to the hadoop-dist project -BASEDIR=${DIR}/../../../.. if [ ! "$(which robot)" ] ; then echo "" @@ -29,10 +28,10 @@ if [ ! "$(which robot)" ] ; then exit -1 fi -OZONEDISTDIR="$BASEDIR/hadoop-dist/target/ozone" +OZONEDISTDIR="$DIR/../../../../hadoop-dist/target/ozone" if [ ! -d "$OZONEDISTDIR" ]; then echo "Ozone can't be found in the $OZONEDISTDIR." echo "You may need a full build with -Phdds and -Pdist profiles" exit -1 fi -robot -v basedir:$BASEDIR $@ +robot -x junit-results.xml "$@" diff --git a/hadoop-ozone/acceptance-test/dev-support/docker/Dockerfile b/hadoop-ozone/acceptance-test/dev-support/docker/Dockerfile new file mode 100644 index 0000000000..06feda6221 --- /dev/null +++ b/hadoop-ozone/acceptance-test/dev-support/docker/Dockerfile @@ -0,0 +1,21 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and +# limitations under the License. + +FROM docker:18-dind +RUN apk add --update python3 bash curl jq sudo +RUN pip3 install robotframework docker-compose +WORKDIR /root +USER root diff --git a/hadoop-ozone/acceptance-test/dev-support/docker/docker-compose.yaml b/hadoop-ozone/acceptance-test/dev-support/docker/docker-compose.yaml new file mode 100644 index 0000000000..6f16b0ac3b --- /dev/null +++ b/hadoop-ozone/acceptance-test/dev-support/docker/docker-compose.yaml @@ -0,0 +1,23 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +version: "3" +services: + robotenv: + build: . + privileged: true + volumes: + - ../../../..:/opt/hadoop diff --git a/hadoop-ozone/acceptance-test/pom.xml b/hadoop-ozone/acceptance-test/pom.xml index ef45c443e6..fee41f1d49 100644 --- a/hadoop-ozone/acceptance-test/pom.xml +++ b/hadoop-ozone/acceptance-test/pom.xml @@ -43,6 +43,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> run + src/test/acceptance basedir:${project.basedir}/../.. diff --git a/hadoop-ozone/acceptance-test/src/test/compose/.env b/hadoop-ozone/acceptance-test/src/test/acceptance/basic/.env similarity index 93% rename from hadoop-ozone/acceptance-test/src/test/compose/.env rename to hadoop-ozone/acceptance-test/src/test/acceptance/basic/.env index cf22168909..98234cb112 100644 --- a/hadoop-ozone/acceptance-test/src/test/compose/.env +++ b/hadoop-ozone/acceptance-test/src/test/acceptance/basic/.env @@ -14,4 +14,4 @@ # See the License for the specific language governing permissions and # limitations under the License. -OZONEDIR=../../../hadoop-dist/target/ozone +OZONEDIR=../../../../../../hadoop-dist/target/ozone diff --git a/hadoop-ozone/acceptance-test/src/test/acceptance/basic/basic.robot b/hadoop-ozone/acceptance-test/src/test/acceptance/basic/basic.robot new file mode 100644 index 0000000000..c741588c19 --- /dev/null +++ b/hadoop-ozone/acceptance-test/src/test/acceptance/basic/basic.robot @@ -0,0 +1,50 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +*** Settings *** +Documentation Smoketest ozone cluster startup +Library OperatingSystem +Suite Setup Startup Ozone cluster with size 5 +Suite Teardown Teardown Ozone cluster +Resource ../commonlib.robot + +*** Variables *** +${COMMON_RESTHEADER} -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE root" +${COMPOSEFILE} ${CURDIR}/docker-compose.yaml +${PROJECTDIR} ${CURDIR}/../../../../../.. + + +*** Test Cases *** + +Test rest interface + ${result} = Execute on datanode curl -i -X POST ${COMMON_RESTHEADER} "http://localhost:9880/volume1" + Should contain ${result} 201 Created + ${result} = Execute on datanode curl -i -X POST ${COMMON_RESTHEADER} "http://localhost:9880/volume1/bucket1" + Should contain ${result} 201 Created + ${result} = Execute on datanode curl -i -X DELETE ${COMMON_RESTHEADER} "http://localhost:9880/volume1/bucket1" + Should contain ${result} 200 OK + ${result} = Execute on datanode curl -i -X DELETE ${COMMON_RESTHEADER} "http://localhost:9880/volume1" + Should contain ${result} 200 OK + +Check webui static resources + ${result} = Execute on scm curl -s -I http://localhost:9876/static/bootstrap-3.3.7/js/bootstrap.min.js + Should contain ${result} 200 + ${result} = Execute on ksm curl -s -I http://localhost:9874/static/bootstrap-3.3.7/js/bootstrap.min.js + Should contain ${result} 200 + +Start freon testing + ${result} = Execute on ksm ozone freon -numOfVolumes 5 -numOfBuckets 5 -numOfKeys 5 -numOfThreads 10 + Wait Until Keyword Succeeds 3min 10sec Should contain ${result} Number of Keys added: 125 + Should Not Contain ${result} ERROR diff --git a/hadoop-ozone/acceptance-test/src/test/compose/docker-compose.yaml b/hadoop-ozone/acceptance-test/src/test/acceptance/basic/docker-compose.yaml similarity index 100% rename from hadoop-ozone/acceptance-test/src/test/compose/docker-compose.yaml rename to hadoop-ozone/acceptance-test/src/test/acceptance/basic/docker-compose.yaml diff --git a/hadoop-ozone/acceptance-test/src/test/compose/docker-config b/hadoop-ozone/acceptance-test/src/test/acceptance/basic/docker-config similarity index 93% rename from hadoop-ozone/acceptance-test/src/test/compose/docker-config rename to hadoop-ozone/acceptance-test/src/test/acceptance/basic/docker-config index 0591a7aac2..180dc8ef49 100644 --- a/hadoop-ozone/acceptance-test/src/test/compose/docker-config +++ b/hadoop-ozone/acceptance-test/src/test/acceptance/basic/docker-config @@ -25,12 +25,14 @@ OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata OZONE-SITE.XML_ozone.handler.type=distributed OZONE-SITE.XML_ozone.scm.client.address=scm OZONE-SITE.XML_hdds.datanode.plugins=org.apache.hadoop.ozone.web.OzoneHddsDatanodeService +OZONE-SITE.XML_ozone.scm.heartbeat.interval=3s HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000 HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode HDFS-SITE.XML_rpc.metrics.quantile.enable=true HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300 HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.HddsDatanodeService LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout -LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout +LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n +LOG4J.PROPERTIES_log4j.category.org.apache.hadoop.util.NativeCodeLoader=ERROR diff --git
a/hadoop-ozone/acceptance-test/src/test/acceptance/basic/ozone-shell.robot b/hadoop-ozone/acceptance-test/src/test/acceptance/basic/ozone-shell.robot new file mode 100644 index 0000000000..9521ad60be --- /dev/null +++ b/hadoop-ozone/acceptance-test/src/test/acceptance/basic/ozone-shell.robot @@ -0,0 +1,85 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +*** Settings *** +Documentation Test ozone shell CLI usage +Library OperatingSystem +Suite Setup Startup Ozone cluster with size 5 +Suite Teardown Teardown Ozone cluster +Resource ../commonlib.robot +Test Timeout 2 minute + +*** Variables *** +${basedir} +${COMPOSEFILE} ${CURDIR}/docker-compose.yaml +${PROJECTDIR} ${CURDIR}/../../../../../.. + +*** Test Cases *** +RestClient without http port + Test ozone shell http:// ksm restwoport True + +RestClient with http port + Test ozone shell http:// ksm:9874 restwport True + +RestClient without host name + Test ozone shell http:// ${EMPTY} restwohost True + +RpcClient with port + Test ozone shell o3:// ksm:9862 rpcwoport False + +RpcClient without host + Test ozone shell o3:// ${EMPTY} rpcwport False + +RpcClient without scheme + Test ozone shell ${EMPTY} ${EMPTY} rpcwoscheme False + + +*** Keywords *** +Test ozone shell + [arguments] ${protocol} ${server} ${volume} ${withkeytest} + ${result} = Execute on datanode ozone oz -createVolume ${protocol}${server}/${volume} -user bilbo -quota 100TB -root + Should not contain ${result} Failed + Should contain ${result} Creating Volume: ${volume} + ${result} = Execute on datanode ozone oz -listVolume o3://ksm -user bilbo | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.volumeName=="${volume}")' + Should contain ${result} createdOn + Execute on datanode ozone oz -updateVolume ${protocol}${server}/${volume} -user bill -quota 10TB + ${result} = Execute on datanode ozone oz -infoVolume ${protocol}${server}/${volume} | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="${volume}") | .owner | .name' + Should Be Equal ${result} bill + ${result} = Execute on datanode ozone oz -infoVolume ${protocol}${server}/${volume} | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="${volume}") | .quota | .size' + Should Be Equal ${result} 10 + Execute on datanode ozone oz -createBucket ${protocol}${server}/${volume}/bb1 + ${result} = Execute on datanode ozone oz -infoBucket ${protocol}${server}/${volume}/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .storageType' + Should Be Equal ${result} DISK + ${result} = Execute on datanode ozone oz -updateBucket ${protocol}${server}/${volume}/bb1 -addAcl user:frodo:rw,group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. 
| select(.bucketName=="bb1") | .acls | .[] | select(.name=="samwise") | .type' + Should Be Equal ${result} GROUP + ${result} = Execute on datanode ozone oz -updateBucket ${protocol}${server}/${volume}/bb1 -removeAcl group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="frodo") | .type' + Should Be Equal ${result} USER + ${result} = Execute on datanode ozone oz -listBucket o3://ksm/${volume}/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.bucketName=="bb1") | .volumeName' + Should Be Equal ${result} ${volume} + Run Keyword and Return If ${withkeytest} Test key handling ${protocol} ${server} ${volume} + Execute on datanode ozone oz -deleteBucket ${protocol}${server}/${volume}/bb1 + Execute on datanode ozone oz -deleteVolume ${protocol}${server}/${volume} -user bilbo + +Test key handling + [arguments] ${protocol} ${server} ${volume} + Execute on datanode ozone oz -putKey ${protocol}${server}/${volume}/bb1/key1 -file NOTICE.txt + Execute on datanode rm -f NOTICE.txt.1 + Execute on datanode ozone oz -getKey ${protocol}${server}/${volume}/bb1/key1 -file NOTICE.txt.1 + Execute on datanode ls -l NOTICE.txt.1 + ${result} = Execute on datanode ozone oz -infoKey ${protocol}${server}/${volume}/bb1/key1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.keyName=="key1")' + Should contain ${result} createdOn + ${result} = Execute on datanode ozone oz -listKey o3://ksm/${volume}/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.keyName=="key1") | .keyName' + Should Be Equal ${result} key1 + Execute on datanode ozone oz -deleteKey ${protocol}${server}/${volume}/bb1/key1 -v diff --git a/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone.robot b/hadoop-ozone/acceptance-test/src/test/acceptance/commonlib.robot similarity index 54% rename from hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone.robot rename to hadoop-ozone/acceptance-test/src/test/acceptance/commonlib.robot index 7bb60b6a8c..01ed302e25 100644 --- a/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone.robot +++ b/hadoop-ozone/acceptance-test/src/test/acceptance/commonlib.robot @@ -13,16 +13,17 @@ # See the License for the specific language governing permissions and # limitations under the License. -*** Settings *** -Documentation Smoke test to start cluster with docker-compose environments. 
-Library OperatingSystem -Suite Setup Startup Ozone Cluster -Suite Teardown Teardown Ozone Cluster +*** Keywords *** -*** Variables *** -${COMMON_REST_HEADER} -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE root" -${basedir} -*** Test Cases *** +Startup Ozone cluster with size + [arguments] ${datanodeno} + ${rc} ${output} = Run docker compose down + Run echo "Starting new docker-compose environment" >> docker-compose.log + ${rc} ${output} = Run docker compose up -d + Should Be Equal As Integers ${rc} 0 + Wait Until Keyword Succeeds 1min 5sec Is Daemon started ksm HTTP server of KSM is listening + Daemons are running without error + Scale datanodes up 5 Daemons are running without error Is daemon running without error ksm @@ -37,38 +38,15 @@ Scale it up to 5 datanodes Scale datanodes up 5 Wait Until Keyword Succeeds 3min 5sec Have healthy datanodes 5 -Test rest interface - ${result} = Execute on datanode curl -i -X POST ${COMMON_RESTHEADER} "http://localhost:9880/volume1" - Should contain ${result} 201 Created - ${result} = Execute on datanode curl -i -X POST ${COMMON_RESTHEADER} "http://localhost:9880/volume1/bucket1" - Should contain ${result} 201 Created - ${result} = Execute on datanode curl -i -X DELETE ${COMMON_RESTHEADER} "http://localhost:9880/volume1/bucket1" - Should contain ${result} 200 OK - ${result} = Execute on datanode curl -i -X DELETE ${COMMON_RESTHEADER} "http://localhost:9880/volume1" - Should contain ${result} 200 OK +Scale datanodes up + [arguments] ${datanodeno} + Run docker compose scale datanode=${datanodeno} + Wait Until Keyword Succeeds 3min 5sec Have healthy datanodes ${datanodeno} -Check webui static resources - ${result} = Execute on scm curl -s -I http://localhost:9876/static/bootstrap-3.3.7/js/bootstrap.min.js - Should contain ${result} 200 - ${result} = Execute on ksm curl -s -I http://localhost:9874/static/bootstrap-3.3.7/js/bootstrap.min.js - Should contain ${result} 200 - -Start freon testing - ${result} = Execute on ksm ozone freon -numOfVolumes 5 -numOfBuckets 5 -numOfKeys 5 -numOfThreads 10 - Wait Until Keyword Succeeds 3min 10sec Should contain ${result} Number of Keys added: 125 - Should Not Contain ${result} ERROR - -*** Keywords *** - -Startup Ozone Cluster - ${rc} ${output} = Run docker compose down - ${rc} ${output} = Run docker compose up -d - Should Be Equal As Integers ${rc} 0 - Wait Until Keyword Succeeds 1min 5sec Is Daemon started ksm HTTP server of KSM is listening - -Teardown Ozone Cluster +Teardown Ozone cluster Run docker compose down - + Run docker compose logs >> docker-compose.log + Is daemon running without error [arguments] ${name} ${result} = Run docker ps @@ -86,19 +64,16 @@ Have healthy datanodes ${result} = Execute on scm curl -s 'http://localhost:9876/jmx?qry=Hadoop:service=SCMNodeManager,name=SCMNodeManagerInfo' | jq -r '.beans[0].NodeCount[] | select(.key=="HEALTHY") | .value' Should Be Equal ${result} ${requirednodes} -Scale datanodes up - [arguments] ${requirednodes} - Run docker compose scale datanode=${requirednodes} - Execute on [arguments] ${componentname} ${command} - ${rc} ${return} = Run docker compose exec ${componentname} ${command} + ${rc} ${return} = Run docker compose exec -T ${componentname} ${command} [return] ${return} Run docker compose [arguments] ${command} - Set Environment Variable OZONEDIR ${basedir}/hadoop-dist/target/ozone - ${rc} ${output} = Run And Return Rc And Output docker-compose -f 
${basedir}/hadoop-ozone/acceptance-test/src/test/compose/docker-compose.yaml ${command} + Set Environment Variable COMPOSE_INTERACTIVE_NO_CLI 1 + Set Environment Variable OZONEDIR ${PROJECTDIR}/hadoop-dist/target/ozone + ${rc} ${output} = Run And Return Rc And Output docker-compose -f ${COMPOSEFILE} ${command} Log ${output} Should Be Equal As Integers ${rc} 0 [return] ${rc} ${output} diff --git a/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/.env b/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/.env new file mode 100644 index 0000000000..98234cb112 --- /dev/null +++ b/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/.env @@ -0,0 +1,17 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +OZONEDIR=../../../../../../hadoop-dist/target/ozone diff --git a/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone-shell.robot b/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone-shell.robot deleted file mode 100644 index 1a91a9388e..0000000000 --- a/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone-shell.robot +++ /dev/null @@ -1,256 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -*** Settings *** -Documentation Smoke test to start cluster with docker-compose environments. 
-Library OperatingSystem -Suite Setup Startup Ozone Cluster -Suite Teardown Teardown Ozone Cluster - -*** Variables *** -${basedir} -*** Test Cases *** - -Daemons are running without error - Is daemon running without error ksm - Is daemon running without error scm - Is daemon running without error namenode - Is daemon running without error datanode - -Check if datanode is connected to the scm - Wait Until Keyword Succeeds 3min 5sec Have healthy datanodes 1 - -Scale it up to 5 datanodes - Scale datanodes up 5 - Wait Until Keyword Succeeds 3min 5sec Have healthy datanodes 5 - -Test ozone shell (RestClient without http port) - Execute on datanode ozone oz -createVolume http://ksm/hive -user bilbo -quota 100TB -root - ${result} = Execute on datanode ozone oz -listVolume http://ksm -user bilbo | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.volumeName=="hive")' - Should contain ${result} createdOn - Execute on datanode ozone oz -updateVolume http://ksm/hive -user bill -quota 10TB - ${result} = Execute on datanode ozone oz -infoVolume http://ksm/hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .owner | .name' - Should Be Equal ${result} bill - ${result} = Execute on datanode ozone oz -infoVolume http://ksm/hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .quota | .size' - Should Be Equal ${result} 10 - Execute on datanode ozone oz -createBucket http://ksm/hive/bb1 - ${result} = Execute on datanode ozone oz -infoBucket http://ksm/hive/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .storageType' - Should Be Equal ${result} DISK - ${result} = Execute on datanode ozone oz -updateBucket http://ksm/hive/bb1 -addAcl user:frodo:rw,group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="samwise") | .type' - Should Be Equal ${result} GROUP - ${result} = Execute on datanode ozone oz -updateBucket http://ksm/hive/bb1 -removeAcl group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="frodo") | .type' - Should Be Equal ${result} USER - ${result} = Execute on datanode ozone oz -listBucket http://ksm/hive/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.bucketName=="bb1") | .volumeName' - Should Be Equal ${result} hive - Execute on datanode ozone oz -putKey http://ksm/hive/bb1/key1 -file NOTICE.txt - Execute on datanode rm -f NOTICE.txt.1 - Execute on datanode ozone oz -getKey http://ksm/hive/bb1/key1 -file NOTICE.txt.1 - Execute on datanode ls -l NOTICE.txt.1 - ${result} = Execute on datanode ozone oz -infoKey http://ksm/hive/bb1/key1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. 
| select(.keyName=="key1")' - Should contain ${result} createdOn - ${result} = Execute on datanode ozone oz -listKey http://ksm/hive/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.keyName=="key1") | .keyName' - Should Be Equal ${result} key1 - Execute on datanode ozone oz -deleteKey http://ksm/hive/bb1/key1 -v - Execute on datanode ozone oz -deleteBucket http://ksm/hive/bb1 - Execute on datanode ozone oz -deleteVolume http://ksm/hive -user bilbo - -Test ozone shell (RestClient with http port) - Execute on datanode ozone oz -createVolume http://ksm:9874/hive -user bilbo -quota 100TB -root - ${result} = Execute on datanode ozone oz -listVolume http://ksm:9874 -user bilbo | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.volumeName=="hive")' - Should contain ${result} createdOn - Execute on datanode ozone oz -updateVolume http://ksm:9874/hive -user bill -quota 10TB - ${result} = Execute on datanode ozone oz -infoVolume http://ksm:9874/hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .owner | .name' - Should Be Equal ${result} bill - ${result} = Execute on datanode ozone oz -infoVolume http://ksm:9874/hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .quota | .size' - Should Be Equal ${result} 10 - Execute on datanode ozone oz -createBucket http://ksm:9874/hive/bb1 - ${result} = Execute on datanode ozone oz -infoBucket http://ksm:9874/hive/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .storageType' - Should Be Equal ${result} DISK - ${result} = Execute on datanode ozone oz -updateBucket http://ksm:9874/hive/bb1 -addAcl user:frodo:rw,group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="samwise") | .type' - Should Be Equal ${result} GROUP - ${result} = Execute on datanode ozone oz -updateBucket http://ksm:9874/hive/bb1 -removeAcl group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="frodo") | .type' - Should Be Equal ${result} USER - ${result} = Execute on datanode ozone oz -listBucket http://ksm:9874/hive/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.bucketName=="bb1") | .volumeName' - Should Be Equal ${result} hive - Execute on datanode ozone oz -putKey http://ksm:9874/hive/bb1/key1 -file NOTICE.txt - Execute on datanode rm -f NOTICE.txt.1 - Execute on datanode ozone oz -getKey http://ksm:9874/hive/bb1/key1 -file NOTICE.txt.1 - Execute on datanode ls -l NOTICE.txt.1 - ${result} = Execute on datanode ozone oz -infoKey http://ksm:9874/hive/bb1/key1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. 
| select(.keyName=="key1")' - Should contain ${result} createdOn - ${result} = Execute on datanode ozone oz -listKey http://ksm:9874/hive/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.keyName=="key1") | .keyName' - Should Be Equal ${result} key1 - Execute on datanode ozone oz -deleteKey http://ksm:9874/hive/bb1/key1 -v - Execute on datanode ozone oz -deleteBucket http://ksm:9874/hive/bb1 - Execute on datanode ozone oz -deleteVolume http://ksm:9874/hive -user bilbo - -Test ozone shell (RestClient without hostname) - Execute on datanode ozone oz -createVolume http:///hive -user bilbo -quota 100TB -root - ${result} = Execute on datanode ozone oz -listVolume http:/// -user bilbo | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.volumeName=="hive")' - Should contain ${result} createdOn - Execute on datanode ozone oz -updateVolume http:///hive -user bill -quota 10TB - ${result} = Execute on datanode ozone oz -infoVolume http:///hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .owner | .name' - Should Be Equal ${result} bill - ${result} = Execute on datanode ozone oz -infoVolume http:///hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .quota | .size' - Should Be Equal ${result} 10 - Execute on datanode ozone oz -createBucket http:///hive/bb1 - ${result} = Execute on datanode ozone oz -infoBucket http:///hive/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .storageType' - Should Be Equal ${result} DISK - ${result} = Execute on datanode ozone oz -updateBucket http:///hive/bb1 -addAcl user:frodo:rw,group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="samwise") | .type' - Should Be Equal ${result} GROUP - ${result} = Execute on datanode ozone oz -updateBucket http:///hive/bb1 -removeAcl group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="frodo") | .type' - Should Be Equal ${result} USER - ${result} = Execute on datanode ozone oz -listBucket http:///hive/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.bucketName=="bb1") | .volumeName' - Should Be Equal ${result} hive - Execute on datanode ozone oz -putKey http:///hive/bb1/key1 -file NOTICE.txt - Execute on datanode rm -f NOTICE.txt.1 - Execute on datanode ozone oz -getKey http:///hive/bb1/key1 -file NOTICE.txt.1 - Execute on datanode ls -l NOTICE.txt.1 - ${result} = Execute on datanode ozone oz -infoKey http:///hive/bb1/key1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. 
| select(.keyName=="key1")' - Should contain ${result} createdOn - ${result} = Execute on datanode ozone oz -listKey http:///hive/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.keyName=="key1") | .keyName' - Should Be Equal ${result} key1 - Execute on datanode ozone oz -deleteKey http:///hive/bb1/key1 -v - Execute on datanode ozone oz -deleteBucket http:///hive/bb1 - Execute on datanode ozone oz -deleteVolume http:///hive -user bilbo - -Test ozone shell (RpcClient without http port) - Execute on datanode ozone oz -createVolume o3://ksm/hive -user bilbo -quota 100TB -root - ${result} = Execute on datanode ozone oz -listVolume o3://ksm -user bilbo | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.volumeName=="hive")' - Should contain ${result} createdOn - Execute on datanode ozone oz -updateVolume o3://ksm/hive -user bill -quota 10TB - ${result} = Execute on datanode ozone oz -infoVolume o3://ksm/hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .owner | .name' - Should Be Equal ${result} bill - ${result} = Execute on datanode ozone oz -infoVolume o3://ksm/hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .quota | .size' - Should Be Equal ${result} 10 - Execute on datanode ozone oz -createBucket o3://ksm/hive/bb1 - ${result} = Execute on datanode ozone oz -infoBucket o3://ksm/hive/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .storageType' - Should Be Equal ${result} DISK - ${result} = Execute on datanode ozone oz -updateBucket o3://ksm/hive/bb1 -addAcl user:frodo:rw,group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="samwise") | .type' - Should Be Equal ${result} GROUP - ${result} = Execute on datanode ozone oz -updateBucket o3://ksm/hive/bb1 -removeAcl group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="frodo") | .type' - Should Be Equal ${result} USER - ${result} = Execute on datanode ozone oz -listBucket o3://ksm/hive/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.bucketName=="bb1") | .volumeName' - Should Be Equal ${result} hive - Execute on datanode ozone oz -deleteBucket o3://ksm/hive/bb1 - Execute on datanode ozone oz -deleteVolume o3://ksm/hive -user bilbo - -Test ozone shell (RpcClient with http port) - Execute on datanode ozone oz -createVolume o3://ksm:9862/hive -user bilbo -quota 100TB -root - ${result} = Execute on datanode ozone oz -listVolume o3://ksm:9862 -user bilbo | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.volumeName=="hive")' - Should contain ${result} createdOn - Execute on datanode ozone oz -updateVolume o3://ksm:9862/hive -user bill -quota 10TB - ${result} = Execute on datanode ozone oz -infoVolume o3://ksm:9862/hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .owner | .name' - Should Be Equal ${result} bill - ${result} = Execute on datanode ozone oz -infoVolume o3://ksm:9862/hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. 
| select(.volumeName=="hive") | .quota | .size' - Should Be Equal ${result} 10 - Execute on datanode ozone oz -createBucket o3://ksm:9862/hive/bb1 - ${result} = Execute on datanode ozone oz -infoBucket o3://ksm:9862/hive/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .storageType' - Should Be Equal ${result} DISK - ${result} = Execute on datanode ozone oz -updateBucket o3://ksm:9862/hive/bb1 -addAcl user:frodo:rw,group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="samwise") | .type' - Should Be Equal ${result} GROUP - ${result} = Execute on datanode ozone oz -updateBucket o3://ksm:9862/hive/bb1 -removeAcl group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="frodo") | .type' - Should Be Equal ${result} USER - ${result} = Execute on datanode ozone oz -listBucket o3://ksm:9862/hive/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.bucketName=="bb1") | .volumeName' - Should Be Equal ${result} hive - Execute on datanode ozone oz -deleteBucket o3://ksm:9862/hive/bb1 - Execute on datanode ozone oz -deleteVolume o3://ksm:9862/hive -user bilbo - -Test ozone shell (RpcClient without hostname) - Execute on datanode ozone oz -createVolume o3:///hive -user bilbo -quota 100TB -root - ${result} = Execute on datanode ozone oz -listVolume o3:/// -user bilbo | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.volumeName=="hive")' - Should contain ${result} createdOn - Execute on datanode ozone oz -updateVolume o3:///hive -user bill -quota 10TB - ${result} = Execute on datanode ozone oz -infoVolume o3:///hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .owner | .name' - Should Be Equal ${result} bill - ${result} = Execute on datanode ozone oz -infoVolume o3:///hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .quota | .size' - Should Be Equal ${result} 10 - Execute on datanode ozone oz -createBucket o3:///hive/bb1 - ${result} = Execute on datanode ozone oz -infoBucket o3:///hive/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .storageType' - Should Be Equal ${result} DISK - ${result} = Execute on datanode ozone oz -updateBucket o3:///hive/bb1 -addAcl user:frodo:rw,group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="samwise") | .type' - Should Be Equal ${result} GROUP - ${result} = Execute on datanode ozone oz -updateBucket o3:///hive/bb1 -removeAcl group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. 
| select(.bucketName=="bb1") | .acls | .[] | select(.name=="frodo") | .type' - Should Be Equal ${result} USER - ${result} = Execute on datanode ozone oz -listBucket o3:///hive/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.bucketName=="bb1") | .volumeName' - Should Be Equal ${result} hive - Execute on datanode ozone oz -deleteBucket o3:///hive/bb1 - Execute on datanode ozone oz -deleteVolume o3:///hive -user bilbo - -Test ozone shell (no scheme - RpcClient used by default) - Execute on datanode ozone oz -createVolume /hive -user bilbo -quota 100TB -root - ${result} = Execute on datanode ozone oz -listVolume / -user bilbo | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.volumeName=="hive")' - Should contain ${result} createdOn - Execute on datanode ozone oz -updateVolume /hive -user bill -quota 10TB - ${result} = Execute on datanode ozone oz -infoVolume /hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .owner | .name' - Should Be Equal ${result} bill - ${result} = Execute on datanode ozone oz -infoVolume /hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .quota | .size' - Should Be Equal ${result} 10 - Execute on datanode ozone oz -createBucket /hive/bb1 - ${result} = Execute on datanode ozone oz -infoBucket /hive/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .storageType' - Should Be Equal ${result} DISK - ${result} = Execute on datanode ozone oz -updateBucket /hive/bb1 -addAcl user:frodo:rw,group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="samwise") | .type' - Should Be Equal ${result} GROUP - ${result} = Execute on datanode ozone oz -updateBucket /hive/bb1 -removeAcl group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. 
| select(.bucketName=="bb1") | .acls | .[] | select(.name=="frodo") | .type' - Should Be Equal ${result} USER - ${result} = Execute on datanode ozone oz -listBucket /hive/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.bucketName=="bb1") | .volumeName' - Should Be Equal ${result} hive - Execute on datanode ozone oz -deleteBucket /hive/bb1 - Execute on datanode ozone oz -deleteVolume /hive -user bilbo - -*** Keywords *** - -Startup Ozone Cluster - ${rc} ${output} = Run docker compose down - ${rc} ${output} = Run docker compose up -d - Should Be Equal As Integers ${rc} 0 - Wait Until Keyword Succeeds 1min 5sec Is Daemon started ksm HTTP server of KSM is listening - -Teardown Ozone Cluster - Run docker compose down - -Is daemon running without error - [arguments] ${name} - ${result} = Run docker ps - Should contain ${result} _${name}_1 - ${rc} ${result} = Run docker compose logs ${name} - Should not contain ${result} ERROR - -Is Daemon started - [arguments] ${name} ${expression} - ${rc} ${result} = Run docker compose logs - Should contain ${result} ${expression} - -Have healthy datanodes - [arguments] ${requirednodes} - ${result} = Execute on scm curl -s 'http://localhost:9876/jmx?qry=Hadoop:service=SCMNodeManager,name=SCMNodeManagerInfo' | jq -r '.beans[0].NodeCount[] | select(.key=="HEALTHY") | .value' - Should Be Equal ${result} ${requirednodes} - -Scale datanodes up - [arguments] ${requirednodes} - Run docker compose scale datanode=${requirednodes} - -Execute on - [arguments] ${componentname} ${command} - ${rc} ${return} = Run docker compose exec ${componentname} ${command} - [return] ${return} - -Run docker compose - [arguments] ${command} - Set Environment Variable OZONEDIR ${basedir}/hadoop-dist/target/ozone - ${rc} ${output} = Run And Return Rc And Output docker-compose -f ${basedir}/hadoop-ozone/acceptance-test/src/test/compose/docker-compose.yaml ${command} - Log ${output} - Should Be Equal As Integers ${rc} 0 - [return] ${rc} ${output} diff --git a/start-build-env.sh b/start-build-env.sh index 4da55af6f7..c5581ca621 100755 --- a/start-build-env.sh +++ b/start-build-env.sh @@ -66,13 +66,17 @@ ENV HOME /home/${USER_NAME} UserSpecificDocker +#If this env varible is empty, docker will be started +# in non interactive mode +DOCKER_INTERACTIVE_RUN=${DOCKER_INTERACTIVE_RUN-"-i -t"} + # By mapping the .m2 directory you can do an mvn install from # within the container and use the result on your normal # system. And this also is a significant speedup in subsequent # builds because the dependencies are downloaded only once. -docker run --rm=true -t -i \ +docker run --rm=true $DOCKER_INTERACTIVE_RUN \ -v "${PWD}:/home/${USER_NAME}/hadoop${V_OPTS:-}" \ -w "/home/${USER_NAME}/hadoop" \ -v "${HOME}/.m2:/home/${USER_NAME}/.m2${V_OPTS:-}" \ -u "${USER_NAME}" \ - "hadoop-build-${USER_ID}" + "hadoop-build-${USER_ID}" "$@" From 3e37a9a70ba93430da1b47f2a8b50358348396b0 Mon Sep 17 00:00:00 2001 From: Mukul Kumar Singh Date: Fri, 15 Jun 2018 14:58:20 +0530 Subject: [PATCH 06/70] HDFS-13679. Fix Typo in javadoc for ScanInfoPerBlockPool#addAll. Contributed by Shashikant Banerjee. 
--- .../apache/hadoop/hdfs/server/datanode/DirectoryScanner.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java index ab9743cffc..89f7c5d5da 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java @@ -165,7 +165,7 @@ static class ScanInfoPerBlockPool extends /** * Merges {@code that} ScanInfoPerBlockPool into this one * - * @param the ScanInfoPerBlockPool to merge + * @param that ScanInfoPerBlockPool to merge */ public void addAll(ScanInfoPerBlockPool that) { if (that == null) return; From b272b71095eb8929b60128835ef1078fe946b231 Mon Sep 17 00:00:00 2001 From: Inigo Goiri Date: Fri, 15 Jun 2018 09:40:45 -0700 Subject: [PATCH 07/70] HADOOP-15537. Clean up ContainerLaunch and ContainerExecutor pre-HADOOP-15528. Contributed by Giovanni Matteo Fumarola. --- .../server/nodemanager/ContainerExecutor.java | 4 ++- .../launcher/ContainerLaunch.java | 27 +------------------ .../launcher/ContainerRelaunch.java | 1 - .../launcher/TestContainerLaunch.java | 2 -- 4 files changed, 4 insertions(+), 30 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java index 8e33535032..9b604cede5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java @@ -24,6 +24,7 @@ import java.io.PrintStream; import java.net.InetAddress; import java.net.UnknownHostException; +import java.nio.charset.Charset; import java.util.ArrayList; import java.util.Arrays; import java.util.List; @@ -309,7 +310,8 @@ public int reacquireContainer(ContainerReacquisitionContext ctx) } try { - return Integer.parseInt(FileUtils.readFileToString(file).trim()); + return Integer.parseInt( + FileUtils.readFileToString(file, Charset.defaultCharset()).trim()); } catch (NumberFormatException e) { throw new IOException("Error parsing exit code from pid " + pid, e); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java index 57abfc3d0f..bb842af0f9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java @@ -22,8 +22,6 @@ import 
static org.apache.hadoop.fs.CreateFlag.OVERWRITE; import org.apache.hadoop.yarn.server.nodemanager.executor.DeletionAsUserContext; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import java.io.DataOutputStream; import java.io.File; @@ -189,7 +187,6 @@ private Map expandAllEnvironmentVars( } @Override - @SuppressWarnings("unchecked") // dispatcher not typed public Integer call() { if (!validateContainerState()) { return 0; @@ -374,7 +371,6 @@ private void prepareContainer(Map> localResources, .build()); } - @SuppressWarnings("unchecked") protected boolean validateContainerState() { // CONTAINER_KILLED_ON_REQUEST should not be missed if the container // is already at KILLING @@ -486,7 +482,6 @@ protected Map> getLocalizedResources() return localResources; } - @SuppressWarnings("unchecked") protected int launchContainer(ContainerStartContext ctx) throws IOException, ConfigurationException { int launchPrep = prepareForLaunch(ctx); @@ -496,7 +491,6 @@ protected int launchContainer(ContainerStartContext ctx) return launchPrep; } - @SuppressWarnings("unchecked") protected int relaunchContainer(ContainerStartContext ctx) throws IOException, ConfigurationException { int launchPrep = prepareForLaunch(ctx); @@ -546,7 +540,6 @@ protected void setContainerCompletedStatus(int exitCode) { } } - @SuppressWarnings("unchecked") protected void handleContainerExitCode(int exitCode, Path containerLogDir) { ContainerId containerId = container.getContainerId(); @@ -592,7 +585,6 @@ protected void handleContainerExitCode(int exitCode, Path containerLogDir) { * @param containerLogDir * @param diagnosticInfo */ - @SuppressWarnings("unchecked") protected void handleContainerExitWithFailure(ContainerId containerID, int ret, Path containerLogDir, StringBuilder diagnosticInfo) { LOG.warn("Container launch failed : " + diagnosticInfo.toString()); @@ -727,7 +719,6 @@ protected String getPidFileSubpath(String appIdStr, String containerIdStr) { * the process id is available. * @throws IOException */ - @SuppressWarnings("unchecked") // dispatcher not typed public void cleanupContainer() throws IOException { ContainerId containerId = container.getContainerId(); String containerIdStr = containerId.toString(); @@ -855,7 +846,6 @@ public void cleanupContainer() throws IOException { * * @throws IOException */ - @SuppressWarnings("unchecked") // dispatcher not typed public void signalContainer(SignalContainerCommand command) throws IOException { ContainerId containerId = @@ -994,7 +984,6 @@ public static Signal translateCommandToSignal( * executor to pause the container. * @throws IOException in case of errors. */ - @SuppressWarnings("unchecked") // dispatcher not typed public void pauseContainer() throws IOException { ContainerId containerId = container.getContainerId(); String containerIdStr = containerId.toString(); @@ -1044,7 +1033,6 @@ public void pauseContainer() throws IOException { * executor to pause the container. * @throws IOException in case of error. 
*/ - @SuppressWarnings("unchecked") // dispatcher not typed public void resumeContainer() throws IOException { ContainerId containerId = container.getContainerId(); String containerIdStr = containerId.toString(); @@ -1357,6 +1345,7 @@ void resolve() { } private static final class UnixShellScriptBuilder extends ShellScriptBuilder { + @SuppressWarnings("unused") private void errorCheck() { line("hadoop_shell_errorcode=$?"); line("if [[ \"$hadoop_shell_errorcode\" -ne 0 ]]"); @@ -1651,20 +1640,6 @@ public Set getEnvDependencies(final String envVal) { } } - private static void putEnvIfNotNull( - Map environment, String variable, String value) { - if (value != null) { - environment.put(variable, value); - } - } - - private static void putEnvIfAbsent( - Map environment, String variable) { - if (environment.get(variable) == null) { - putEnvIfNotNull(environment, variable, System.getenv(variable)); - } - } - private static void addToEnvMap( Map envMap, Set envSet, String envName, String envValue) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerRelaunch.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerRelaunch.java index f69cf967ae..a34ed62505 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerRelaunch.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerRelaunch.java @@ -58,7 +58,6 @@ public ContainerRelaunch(Context context, Configuration configuration, } @Override - @SuppressWarnings("unchecked") public Integer call() { if (!validateContainerState()) { return 0; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java index ebdceead35..ddf46a63ac 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java @@ -776,8 +776,6 @@ public void handle(Event event) { String testKey3 = "MOUNT_LIST"; String testVal3 = "/home/a/b/c,/home/d/e/f,/home/g/e/h"; conf.set("yarn.nodemanager.admin-env." + testKey3, testVal3); - Map environment = new HashMap<>(); - LinkedHashSet nmVars = new LinkedHashSet<>(); ContainerLaunch launch = new ContainerLaunch(distContext, conf, dispatcher, exec, null, container, dirsHandler, containerManager); String testDir = System.getProperty("test.build.data", From 43d994e4a6dfd1c24eafb909d6f8a0663b20769a Mon Sep 17 00:00:00 2001 From: Inigo Goiri Date: Fri, 15 Jun 2018 10:33:28 -0700 Subject: [PATCH 08/70] HDFS-13673. TestNameNodeMetrics fails on Windows. Contributed by Zuoming Zhang. 
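[Editor's note - not part of the patch stream: platform-specific test failures such as the one addressed by HDFS-13673 (TestNameNodeMetrics on Windows, diff follows) are typically reproduced by running the single test class through Maven Surefire before and after the change. A hedged sketch, assuming the usual hadoop-hdfs module layout; the -Dtest filter is stock Surefire usage and the test selection is only an example:

    # Reproduce or verify one HDFS test class locally (module path assumed).
    cd hadoop-hdfs-project/hadoop-hdfs
    mvn test -Dtest=TestNameNodeMetrics#testVolumeFailures
]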
--- .../hdfs/server/namenode/metrics/TestNameNodeMetrics.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java index e34deead95..05cf2ea622 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java @@ -71,6 +71,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; +import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; @@ -271,7 +272,8 @@ public void testVolumeFailures() throws Exception { File dataDir = new File(fsVolume.getBaseURI()); long capacity = fsVolume.getCapacity(); volumeReferences.close(); - DataNodeTestUtils.injectDataDirFailure(dataDir); + File storageDir = new File(dataDir, Storage.STORAGE_DIR_CURRENT); + DataNodeTestUtils.injectDataDirFailure(storageDir); DataNodeTestUtils.waitForDiskError(dn, fsVolume); DataNodeTestUtils.triggerHeartbeat(dn); BlockManagerTestUtil.checkHeartbeat(bm); From eebeb6033fd09791fcbff626f128a98e393f0a88 Mon Sep 17 00:00:00 2001 From: Inigo Goiri Date: Fri, 15 Jun 2018 13:07:54 -0700 Subject: [PATCH 09/70] HDFS-13676. TestEditLogRace fails on Windows. Contributed by Zuoming Zhang. --- .../apache/hadoop/hdfs/server/namenode/TestEditLogRace.java | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java index 46010e078d..10f571c4d0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java @@ -84,6 +84,8 @@ public TestEditLogRace(boolean useAsyncEditLog) { TestEditLogRace.useAsyncEditLog = useAsyncEditLog; } + private static final String NAME_DIR = MiniDFSCluster.getBaseDirectory() + "name-0-1"; + private static final Log LOG = LogFactory.getLog(TestEditLogRace.class); // This test creates NUM_THREADS threads and each thread continuously writes @@ -363,8 +365,8 @@ private Configuration getConf() { useAsyncEditLog); FileSystem.setDefaultUri(conf, "hdfs://localhost:0"); conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0"); - //conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, NAME_DIR); - //conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, NAME_DIR); + conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, NAME_DIR); + conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, NAME_DIR); conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false); return conf; } From c966a3837af1c1a1c4a441f491b0d76d5c9e5d78 Mon Sep 17 00:00:00 2001 From: Wei-Chiu Chuang Date: Fri, 15 Jun 2018 13:35:50 -0700 Subject: [PATCH 10/70] HDFS-13174. 
hdfs mover -p /path times out after 20 min. Contributed by Istvan Fajth. --- .../org/apache/hadoop/hdfs/DFSConfigKeys.java | 4 +- .../hadoop/hdfs/server/balancer/Balancer.java | 6 +- .../hdfs/server/balancer/Dispatcher.java | 30 ++++--- .../src/main/resources/hdfs-default.xml | 10 +++ .../hdfs/server/balancer/TestBalancer.java | 79 +++++++++++++++++++ .../hadoop/hdfs/server/mover/TestMover.java | 46 +++++++++++ 6 files changed, 163 insertions(+), 12 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index bc8e81f976..dde7eb79c2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -581,7 +581,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final String DFS_BALANCER_BLOCK_MOVE_TIMEOUT = "dfs.balancer.block-move.timeout"; public static final int DFS_BALANCER_BLOCK_MOVE_TIMEOUT_DEFAULT = 0; public static final String DFS_BALANCER_MAX_NO_MOVE_INTERVAL_KEY = "dfs.balancer.max-no-move-interval"; - public static final int DFS_BALANCER_MAX_NO_MOVE_INTERVAL_DEFAULT = 60*1000; // One minute + public static final int DFS_BALANCER_MAX_NO_MOVE_INTERVAL_DEFAULT = 60*1000; // One minute + public static final String DFS_BALANCER_MAX_ITERATION_TIME_KEY = "dfs.balancer.max-iteration-time"; + public static final long DFS_BALANCER_MAX_ITERATION_TIME_DEFAULT = 20 * 60 * 1000L; // 20 mins public static final String DFS_MOVER_MOVEDWINWIDTH_KEY = "dfs.mover.movedWinWidth"; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java index 13d584644d..426c7ab074 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java @@ -289,13 +289,17 @@ static int getInt(Configuration conf, String key, int defaultValue) { final int maxNoMoveInterval = conf.getInt( DFSConfigKeys.DFS_BALANCER_MAX_NO_MOVE_INTERVAL_KEY, DFSConfigKeys.DFS_BALANCER_MAX_NO_MOVE_INTERVAL_DEFAULT); + final long maxIterationTime = conf.getLong( + DFSConfigKeys.DFS_BALANCER_MAX_ITERATION_TIME_KEY, + DFSConfigKeys.DFS_BALANCER_MAX_ITERATION_TIME_DEFAULT); this.nnc = theblockpool; this.dispatcher = new Dispatcher(theblockpool, p.getIncludedNodes(), p.getExcludedNodes(), movedWinWidth, moverThreads, dispatcherThreads, maxConcurrentMovesPerNode, getBlocksSize, - getBlocksMinBlockSize, blockMoveTimeout, maxNoMoveInterval, conf); + getBlocksMinBlockSize, blockMoveTimeout, maxNoMoveInterval, + maxIterationTime, conf); this.threshold = p.getThreshold(); this.policy = p.getBalancingPolicy(); this.sourceNodes = p.getSourceNodes(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java index 349ced13f3..060c013e37 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java @@ -138,6 +138,8 @@ public class Dispatcher { 
private final boolean connectToDnViaHostname; private BlockPlacementPolicies placementPolicies; + private long maxIterationTime; + static class Allocator { private final int max; private int count = 0; @@ -346,13 +348,19 @@ private boolean addTo(StorageGroup g) { /** Dispatch the move to the proxy source & wait for the response. */ private void dispatch() { - LOG.info("Start moving " + this); - assert !(reportedBlock instanceof DBlockStriped); - Socket sock = new Socket(); DataOutputStream out = null; DataInputStream in = null; try { + if (source.isIterationOver()){ + LOG.info("Cancel moving " + this + + " as iteration is already cancelled due to" + + " dfs.balancer.max-iteration-time is passed."); + throw new IOException("Block move cancelled."); + } + LOG.info("Start moving " + this); + assert !(reportedBlock instanceof DBlockStriped); + sock.connect( NetUtils.createSocketAddr(target.getDatanodeInfo(). getXferAddr(Dispatcher.this.connectToDnViaHostname)), @@ -760,7 +768,10 @@ private Source(StorageType storageType, long maxSize2Move, DDatanode dn) { * Check if the iteration is over */ public boolean isIterationOver() { - return (Time.monotonicNow()-startTime > MAX_ITERATION_TIME); + if (maxIterationTime < 0){ + return false; + } + return (Time.monotonicNow()-startTime > maxIterationTime); } /** Add a task */ @@ -908,8 +919,6 @@ private boolean shouldFetchMoreBlocks() { return blocksToReceive > 0; } - private static final long MAX_ITERATION_TIME = 20 * 60 * 1000L; // 20 mins - /** * This method iteratively does the following: it first selects a block to * move, then sends a request to the proxy source to start the block move @@ -990,7 +999,7 @@ private void dispatchBlocks(long delay) { } if (isIterationOver()) { - LOG.info("The maximum iteration time (" + MAX_ITERATION_TIME/1000 + LOG.info("The maximum iteration time (" + maxIterationTime/1000 + " seconds) has been reached. 
Stopping " + this); } } @@ -1013,14 +1022,14 @@ public Dispatcher(NameNodeConnector nnc, Set includedNodes, int maxNoMoveInterval, Configuration conf) { this(nnc, includedNodes, excludedNodes, movedWinWidth, moverThreads, dispatcherThreads, maxConcurrentMovesPerNode, - 0L, 0L, 0, maxNoMoveInterval, conf); + 0L, 0L, 0, maxNoMoveInterval, -1, conf); } Dispatcher(NameNodeConnector nnc, Set includedNodes, Set excludedNodes, long movedWinWidth, int moverThreads, int dispatcherThreads, int maxConcurrentMovesPerNode, - long getBlocksSize, long getBlocksMinBlockSize, - int blockMoveTimeout, int maxNoMoveInterval, Configuration conf) { + long getBlocksSize, long getBlocksMinBlockSize, int blockMoveTimeout, + int maxNoMoveInterval, long maxIterationTime, Configuration conf) { this.nnc = nnc; this.excludedNodes = excludedNodes; this.includedNodes = includedNodes; @@ -1047,6 +1056,7 @@ public Dispatcher(NameNodeConnector nnc, Set includedNodes, HdfsClientConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME, HdfsClientConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT); placementPolicies = new BlockPlacementPolicies(conf, null, cluster, null); + this.maxIterationTime = maxIterationTime; } public DistributedFileSystem getDistributedFileSystem() { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml index b55421c162..146ae6c9c6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml @@ -3540,6 +3540,16 @@ + + dfs.balancer.max-iteration-time + 1200000 + + Maximum amount of time while an iteration can be run by the Balancer. After + this time the Balancer will stop the iteration, and reevaluate the work + needs to be done to Balance the cluster. The default value is 20 minutes. + + + dfs.block.invalidate.limit 1000 diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java index 9579b82c09..fa026f0499 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java @@ -1580,6 +1580,85 @@ public void testBalancerCliWithIncludeListWithPortsInAFile() throws Exception { CAPACITY, RACK2, new PortNumberBasedNodes(3, 0, 1), true, true); } + + @Test(timeout = 100000) + public void testMaxIterationTime() throws Exception { + final Configuration conf = new HdfsConfiguration(); + initConf(conf); + int blockSize = 10*1024*1024; // 10MB block size + conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); + conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, blockSize); + // limit the worker thread count of Balancer to have only 1 queue per DN + conf.setInt(DFSConfigKeys.DFS_BALANCER_MOVERTHREADS_KEY, 1); + // limit the bandwitdh to 1 packet per sec to emulate slow block moves + conf.setLong(DFSConfigKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY, + 64 * 1024); + // set client socket timeout to have an IN_PROGRESS notification back from + // the DataNode about the copy in every second. 
+ conf.setLong(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 2000L); + // set max iteration time to 2 seconds to timeout before moving any block + conf.setLong(DFSConfigKeys.DFS_BALANCER_MAX_ITERATION_TIME_KEY, 2000L); + // setup the cluster + final long capacity = 10L * blockSize; + final long[] dnCapacities = new long[] {capacity, capacity}; + final short rep = 1; + final long seed = 0xFAFAFA; + cluster = new MiniDFSCluster.Builder(conf) + .numDataNodes(0) + .build(); + try { + cluster.getConfiguration(0).setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1); + conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1); + cluster.startDataNodes(conf, 1, true, null, null, dnCapacities); + cluster.waitClusterUp(); + cluster.waitActive(); + final Path path = new Path("/testMaxIterationTime.dat"); + DistributedFileSystem fs = cluster.getFileSystem(); + // fill the DN to 40% + DFSTestUtil.createFile(fs, path, 4L * blockSize, rep, seed); + // start a new DN + cluster.startDataNodes(conf, 1, true, null, null, dnCapacities); + cluster.triggerHeartbeats(); + // setup Balancer and run one iteration + List connectors = Collections.emptyList(); + try { + BalancerParameters bParams = BalancerParameters.DEFAULT; + connectors = NameNodeConnector.newNameNodeConnectors( + DFSUtil.getInternalNsRpcUris(conf), Balancer.class.getSimpleName(), + Balancer.BALANCER_ID_PATH, conf, bParams.getMaxIdleIteration()); + for (NameNodeConnector nnc : connectors) { + LOG.info("NNC to work on: " + nnc); + Balancer b = new Balancer(nnc, bParams, conf); + long startTime = Time.monotonicNow(); + Result r = b.runOneIteration(); + long runtime = Time.monotonicNow() - startTime; + assertEquals("We expect ExitStatus.IN_PROGRESS to be reported.", + ExitStatus.IN_PROGRESS, r.exitStatus); + // accept runtime if it is under 3.5 seconds, as we need to wait for + // IN_PROGRESS report from DN, and some spare to be able to finish. + // NOTE: This can be a source of flaky tests, if the box is busy, + // assertion here is based on the following: Balancer is already set + // up, iteration gets the blocks from the NN, and makes the decision + // to move 2 blocks. After that the PendingMoves are scheduled, and + // DataNode heartbeats in for the Balancer every second, iteration is + // two seconds long. This means that it will fail if the setup and the + // heartbeat from the DataNode takes more than 500ms, as the iteration + // should end at the 3rd second from start. As the number of + // operations seems to be pretty low, and all comm happens locally, I + // think the possibility of a failure due to node busyness is low. + assertTrue("Unexpected iteration runtime: " + runtime + "ms > 3.5s", + runtime < 3500); + } + } finally { + for (NameNodeConnector nnc : connectors) { + IOUtils.cleanupWithLogger(null, nnc); + } + } + } finally { + cluster.shutdown(true, true); + } + } + /* * Test Balancer with Ram_Disk configured * One DN has two files on RAM_DISK, other DN has no files on RAM_DISK. 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java index 36e7bb9840..62c91bf9e3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java @@ -685,6 +685,52 @@ public void testMoverFailedRetry() throws Exception { } } + @Test(timeout=100000) + public void testBalancerMaxIterationTimeNotAffectMover() throws Exception { + long blockSize = 10*1024*1024; + final Configuration conf = new HdfsConfiguration(); + initConf(conf); + conf.setInt(DFSConfigKeys.DFS_MOVER_MOVERTHREADS_KEY, 1); + conf.setInt( + DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY, 1); + // set a fairly large block size to run into the limitation + conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); + conf.setLong(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, blockSize); + // set a somewhat grater than zero max iteration time to have the move time + // to surely exceed it + conf.setLong(DFSConfigKeys.DFS_BALANCER_MAX_ITERATION_TIME_KEY, 200L); + conf.setInt(DFSConfigKeys.DFS_MOVER_RETRY_MAX_ATTEMPTS_KEY, 1); + // set client socket timeout to have an IN_PROGRESS notification back from + // the DataNode about the copy in every second. + conf.setLong(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 1000L); + + final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) + .numDataNodes(2) + .storageTypes( + new StorageType[][] {{StorageType.DISK, StorageType.DISK}, + {StorageType.ARCHIVE, StorageType.ARCHIVE}}) + .build(); + try { + cluster.waitActive(); + final DistributedFileSystem fs = cluster.getFileSystem(); + final String file = "/testMaxIterationTime.dat"; + final Path path = new Path(file); + short rep_factor = 1; + int seed = 0xFAFAFA; + // write to DISK + DFSTestUtil.createFile(fs, path, 4L * blockSize, rep_factor, seed); + + // move to ARCHIVE + fs.setStoragePolicy(new Path(file), "COLD"); + int rc = ToolRunner.run(conf, new Mover.Cli(), + new String[] {"-p", file}); + Assert.assertEquals("Retcode expected to be ExitStatus.SUCCESS (0).", + ExitStatus.SUCCESS.getExitCode(), rc); + } finally { + cluster.shutdown(); + } + } + private final ErasureCodingPolicy ecPolicy = StripedFileTestUtil.getDefaultECPolicy(); private final int dataBlocks = ecPolicy.getNumDataUnits(); From 308a1591f9f41597f4e7cc17bca06c66d6efc0a2 Mon Sep 17 00:00:00 2001 From: Anu Engineer Date: Fri, 15 Jun 2018 10:23:58 -0700 Subject: [PATCH 11/70] HDDS-172. The numbers of operation should be integer in KSM UI. Contributed by Takanobu Asanuma. --- hadoop-ozone/ozone-manager/src/main/webapps/ksm/ksm.js | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/hadoop-ozone/ozone-manager/src/main/webapps/ksm/ksm.js b/hadoop-ozone/ozone-manager/src/main/webapps/ksm/ksm.js index 7fb52b1292..ab6f73bfc9 100644 --- a/hadoop-ozone/ozone-manager/src/main/webapps/ksm/ksm.js +++ b/hadoop-ozone/ozone-manager/src/main/webapps/ksm/ksm.js @@ -48,7 +48,9 @@ labelType: 'value', duration: 500, labelThreshold: 0.01, - labelSunbeamLayout: true, + valueFormat: function(d) { + return d3.format('d')(d); + }, legend: { margin: { top: 5, From 1da3b556591fffaae0751c4f7fceda34d2314fda Mon Sep 17 00:00:00 2001 From: Sean Mackrory Date: Tue, 1 May 2018 08:56:10 -0600 Subject: [PATCH 12/70] HADOOP-15504. 
Upgrade Maven Core and Maven Wagon dependencies. --- hadoop-maven-plugins/pom.xml | 38 ++++++++++++++----- .../plugin/resourcegz/ResourceGzMojo.java | 2 +- pom.xml | 2 +- 3 files changed, 31 insertions(+), 11 deletions(-) diff --git a/hadoop-maven-plugins/pom.xml b/hadoop-maven-plugins/pom.xml index b31d158c13..d6b18b4e35 100644 --- a/hadoop-maven-plugins/pom.xml +++ b/hadoop-maven-plugins/pom.xml @@ -26,7 +26,7 @@ maven-plugin Apache Hadoop Maven Plugins - 3.0 + 3.0.5 3.5.1 @@ -45,6 +45,14 @@ maven-plugin-annotations ${maven.plugin-tools.version} provided + + + + org.apache.maven + maven-artifact + + commons-io @@ -60,16 +68,28 @@ ${maven-shade-plugin.version} provided - + - org.apache.maven.shared - maven-dependency-tree + org.apache.maven + maven-artifact + + + org.apache.maven + maven-compat + + + org.apache.maven + maven-core + + + org.apache.maven + maven-model + + + org.apache.maven + maven-plugin-api - org.vafer jdependency diff --git a/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/resourcegz/ResourceGzMojo.java b/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/resourcegz/ResourceGzMojo.java index e7ab663e42..5bf84c21fe 100644 --- a/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/resourcegz/ResourceGzMojo.java +++ b/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/resourcegz/ResourceGzMojo.java @@ -13,7 +13,7 @@ */ package org.apache.hadoop.maven.plugin.resourcegz; -import com.google.inject.internal.util.Lists; +import com.google.common.collect.Lists; import org.apache.commons.io.IOUtils; import org.apache.maven.plugin.AbstractMojo; import org.apache.maven.plugin.MojoExecutionException; diff --git a/pom.xml b/pom.xml index 3695be05af..a250e64b8e 100644 --- a/pom.xml +++ b/pom.xml @@ -106,7 +106,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 1.5 3.0.1 0.12 - 1.0 + 2.4 3.3.0 2.5.0 1.0.0 From d31a3ce767d3bb68bdbb4f36d45600eab9f4f8b7 Mon Sep 17 00:00:00 2001 From: Inigo Goiri Date: Fri, 15 Jun 2018 15:59:19 -0700 Subject: [PATCH 13/70] HDFS-13686. Add overall metrics for FSNamesystemLock. Contributed by Lukas Majercak. --- .../hdfs/server/namenode/FSNamesystemLock.java | 17 ++++++++++++----- .../server/namenode/TestFSNamesystemLock.java | 10 ++++++++-- 2 files changed, 20 insertions(+), 7 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java index 900f8a2291..f8e69e288e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java @@ -107,6 +107,8 @@ public Long initialValue() { private static final String WRITE_LOCK_METRIC_PREFIX = "FSNWriteLock"; private static final String LOCK_METRIC_SUFFIX = "Nanos"; + private static final String OVERALL_METRIC_NAME = "Overall"; + FSNamesystemLock(Configuration conf, MutableRatesWithAggregation detailedHoldTimeMetrics) { this(conf, detailedHoldTimeMetrics, new Timer()); @@ -320,12 +322,17 @@ public int getQueueLength() { */ private void addMetric(String operationName, long value, boolean isWrite) { if (metricsEnabled) { - String metricName = - (isWrite ? 
WRITE_LOCK_METRIC_PREFIX : READ_LOCK_METRIC_PREFIX) + - org.apache.commons.lang.StringUtils.capitalize(operationName) + - LOCK_METRIC_SUFFIX; - detailedHoldTimeMetrics.add(metricName, value); + String opMetric = getMetricName(operationName, isWrite); + detailedHoldTimeMetrics.add(opMetric, value); + + String overallMetric = getMetricName(OVERALL_METRIC_NAME, isWrite); + detailedHoldTimeMetrics.add(overallMetric, value); } } + private static String getMetricName(String operationName, boolean isWrite) { + return (isWrite ? WRITE_LOCK_METRIC_PREFIX : READ_LOCK_METRIC_PREFIX) + + org.apache.commons.lang.StringUtils.capitalize(operationName) + + LOCK_METRIC_SUFFIX; + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java index 2daf5c2cf6..49506fe54d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java @@ -333,7 +333,7 @@ public void testDetailedHoldMetrics() throws Exception { FSNamesystemLock fsLock = new FSNamesystemLock(conf, rates, timer); fsLock.readLock(); - timer.advanceNanos(1200000); + timer.advanceNanos(1300000); fsLock.readUnlock("foo"); fsLock.readLock(); timer.advanceNanos(2400000); @@ -353,12 +353,18 @@ public void testDetailedHoldMetrics() throws Exception { MetricsRecordBuilder rb = MetricsAsserts.mockMetricsRecordBuilder(); rates.snapshot(rb, true); - assertGauge("FSNReadLockFooNanosAvgTime", 1800000.0, rb); + assertGauge("FSNReadLockFooNanosAvgTime", 1850000.0, rb); assertCounter("FSNReadLockFooNanosNumOps", 2L, rb); assertGauge("FSNReadLockBarNanosAvgTime", 2000000.0, rb); assertCounter("FSNReadLockBarNanosNumOps", 1L, rb); assertGauge("FSNWriteLockBazNanosAvgTime", 1000000.0, rb); assertCounter("FSNWriteLockBazNanosNumOps", 1L, rb); + + // Overall + assertGauge("FSNReadLockOverallNanosAvgTime", 1900000.0, rb); + assertCounter("FSNReadLockOverallNanosNumOps", 3L, rb); + assertGauge("FSNWriteLockOverallNanosAvgTime", 1000000.0, rb); + assertCounter("FSNWriteLockOverallNanosNumOps", 1L, rb); } /** From 8762e9cf10fa100dd5f7fd695f5e52b75a94c5d4 Mon Sep 17 00:00:00 2001 From: Inigo Goiri Date: Fri, 15 Jun 2018 16:49:06 -0700 Subject: [PATCH 14/70] HDFS-13681. Fix TestStartup.testNNFailToStartOnReadOnlyNNDir test failure on Windows. Contributed by Xiao Liang. 
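The diff below swaps the java.io.File permission calls for org.apache.hadoop.fs.FileUtil helpers, which the subject attributes to Windows-specific failures. A minimal illustrative sketch of that pattern, not taken from the patch (the wrapper method and Runnable body are hypothetical):

    import java.io.File;
    import org.apache.hadoop.fs.FileUtil;

    public final class ReadOnlyDirExample {
      // Make a directory read-only for the duration of "body", then restore it
      // so later cleanup can delete it; FileUtil.setWritable is the
      // platform-aware call the patch uses in place of File#setReadOnly.
      public static void withReadOnlyDir(File dir, Runnable body) {
        FileUtil.setWritable(dir, false);
        try {
          body.run();
        } finally {
          FileUtil.setWritable(dir, true);
        }
      }
    }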
--- .../apache/hadoop/hdfs/server/namenode/TestStartup.java | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java index d5f548736f..24016087da 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java @@ -728,8 +728,8 @@ public void testNNFailToStartOnReadOnlyNNDir() throws Exception { assertTrue(nnDirs.iterator().hasNext()); assertEquals( "NN dir should be created after NN startup.", - nnDirStr, - nnDirs.iterator().next().getPath()); + new File(nnDirStr), + new File(nnDirs.iterator().next().getPath())); final File nnDir = new File(nnDirStr); assertTrue(nnDir.exists()); assertTrue(nnDir.isDirectory()); @@ -738,7 +738,7 @@ public void testNNFailToStartOnReadOnlyNNDir() throws Exception { /* set read only */ assertTrue( "Setting NN dir read only should succeed.", - nnDir.setReadOnly()); + FileUtil.setWritable(nnDir, false)); cluster.restartNameNodes(); fail("Restarting NN should fail on read only NN dir."); } catch (InconsistentFSStateException e) { @@ -750,7 +750,8 @@ public void testNNFailToStartOnReadOnlyNNDir() throws Exception { "storage directory does not exist or is not accessible.")))); } finally { /* set back to writable in order to clean it */ - assertTrue("Setting NN dir should succeed.", nnDir.setWritable(true)); + assertTrue("Setting NN dir should succeed.", + FileUtil.setWritable(nnDir, true)); } } } From 3905fdb793e6370243d05d0c3036ca69898fe3fb Mon Sep 17 00:00:00 2001 From: Surendra Singh Lilhore Date: Sun, 17 Jun 2018 12:12:01 +0530 Subject: [PATCH 15/70] HADOOP-15523. Shell command timeout given is in seconds whereas it is taken as millisec while scheduling. Contributed by Bilwa S T. 
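Per the diff that follows, the timeout was being read with TimeUnit.SECONDS but consumed as milliseconds when the shell command was scheduled; the fix reads it in milliseconds instead. A short illustrative sketch of how hadoop.security.groups.shell.command.timeout values resolve after the change (the sample value is mine; the suffixed form is standard Configuration time-duration syntax, and a bare number is taken in the default unit, i.e. milliseconds here):

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.conf.Configuration;

    public class ShellTimeoutExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("hadoop.security.groups.shell.command.timeout", "45s");
        // "45s" resolves to 45000, "1m" to 60000, and a bare "45" to 45.
        long timeoutMs = conf.getTimeDuration(
            "hadoop.security.groups.shell.command.timeout", 0L,
            TimeUnit.MILLISECONDS);
        System.out.println("shell group lookup timeout (ms): " + timeoutMs);
      }
    }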
--- .../fs/CommonConfigurationKeysPublic.java | 4 +- .../security/ShellBasedUnixGroupsMapping.java | 10 ++--- .../java/org/apache/hadoop/util/Shell.java | 2 +- .../TestShellBasedUnixGroupsMapping.java | 39 +++++++++++++++++-- 4 files changed, 43 insertions(+), 12 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java index 9e0ba20c28..c7f32f92a6 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java @@ -542,7 +542,7 @@ public class CommonConfigurationKeysPublic { * * core-default.xml */ - public static final String HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_SECS = + public static final String HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_KEY = "hadoop.security.groups.shell.command.timeout"; /** * @see @@ -550,7 +550,7 @@ public class CommonConfigurationKeysPublic { * core-default.xml */ public static final long - HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_SECS_DEFAULT = + HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_DEFAULT = 0L; /** * @see diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java index 94698d8446..976ddba84d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java @@ -18,7 +18,6 @@ package org.apache.hadoop.security; import java.io.IOException; -import java.util.Arrays; import java.util.LinkedList; import java.util.List; import java.util.StringTokenizer; @@ -52,7 +51,8 @@ public class ShellBasedUnixGroupsMapping extends Configured protected static final Logger LOG = LoggerFactory.getLogger(ShellBasedUnixGroupsMapping.class); - private long timeout = 0L; + private long timeout = CommonConfigurationKeys. + HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_DEFAULT; private static final List EMPTY_GROUPS = new LinkedList<>(); @Override @@ -61,10 +61,10 @@ public void setConf(Configuration conf) { if (conf != null) { timeout = conf.getTimeDuration( CommonConfigurationKeys. - HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_SECS, + HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_KEY, CommonConfigurationKeys. - HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_SECS_DEFAULT, - TimeUnit.SECONDS); + HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_DEFAULT, + TimeUnit.MILLISECONDS); } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java index 0b76f0df2a..46a0fccd41 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java @@ -1191,7 +1191,7 @@ public ShellCommandExecutor(String[] execString, File dir, /** * Returns the timeout value set for the executor's sub-commands. 
- * @return The timeout value in seconds + * @return The timeout value in milliseconds */ @VisibleForTesting public long getTimeoutInterval() { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedUnixGroupsMapping.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedUnixGroupsMapping.java index d3c9538641..8c1339d38d 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedUnixGroupsMapping.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedUnixGroupsMapping.java @@ -173,6 +173,37 @@ public void testGetNumericGroupsResolvable() throws Exception { assertTrue(groups.contains("zzz")); } + public long getTimeoutInterval(String timeout) { + Configuration conf = new Configuration(); + String userName = "foobarnonexistinguser"; + conf.set( + CommonConfigurationKeys.HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_KEY, + timeout); + TestDelayedGroupCommand mapping = ReflectionUtils + .newInstance(TestDelayedGroupCommand.class, conf); + ShellCommandExecutor executor = mapping.createGroupExecutor(userName); + return executor.getTimeoutInterval(); + } + + @Test + public void testShellTimeOutConf() { + + // Test a 1 second max-runtime timeout + assertEquals( + "Expected the group names executor to carry the configured timeout", + 1000L, getTimeoutInterval("1s")); + + // Test a 1 minute max-runtime timeout + assertEquals( + "Expected the group names executor to carry the configured timeout", + 60000L, getTimeoutInterval("1m")); + + // Test a 1 millisecond max-runtime timeout + assertEquals( + "Expected the group names executor to carry the configured timeout", + 1L, getTimeoutInterval("1")); + } + private class TestGroupResolvable extends ShellBasedUnixGroupsMapping { /** @@ -222,7 +253,7 @@ public void testGetGroupsResolvable() throws Exception { private static class TestDelayedGroupCommand extends ShellBasedUnixGroupsMapping { - private Long timeoutSecs = 2L; + private Long timeoutSecs = 1L; TestDelayedGroupCommand() { super(); @@ -249,12 +280,12 @@ public void testFiniteGroupResolutionTime() throws Exception { String userName = "foobarnonexistinguser"; String commandTimeoutMessage = "ran longer than the configured timeout limit"; - long testTimeout = 1L; + long testTimeout = 500L; // Test a 1 second max-runtime timeout conf.setLong( CommonConfigurationKeys. - HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_SECS, + HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_KEY, testTimeout); TestDelayedGroupCommand mapping = @@ -306,7 +337,7 @@ public void testFiniteGroupResolutionTime() throws Exception { conf = new Configuration(); long defaultTimeout = CommonConfigurationKeys. - HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_SECS_DEFAULT; + HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_DEFAULT; mapping = ReflectionUtils.newInstance(TestDelayedGroupCommand.class, conf); From 980031bb043dd026a6bf42b0e71d304ac89294a5 Mon Sep 17 00:00:00 2001 From: Chris Douglas Date: Sun, 17 Jun 2018 11:54:26 -0700 Subject: [PATCH 16/70] HADOOP-13186. Multipart Uploader API. 
Contributed by Ewan Higgs --- .../org/apache/hadoop/fs/BBPartHandle.java | 58 +++++++ .../org/apache/hadoop/fs/BBUploadHandle.java | 57 +++++++ .../fs/FileSystemMultipartUploader.java | 132 +++++++++++++++ .../hadoop/fs/LocalFileSystemPathHandle.java | 100 ++++++++++++ .../apache/hadoop/fs/MultipartUploader.java | 90 +++++++++++ .../hadoop/fs/MultipartUploaderFactory.java | 65 ++++++++ .../java/org/apache/hadoop/fs/PartHandle.java | 45 ++++++ .../apache/hadoop/fs/RawLocalFileSystem.java | 61 ++++++- ...UnsupportedMultipartUploaderException.java | 41 +++++ .../org/apache/hadoop/fs/UploadHandle.java | 47 ++++++ .../src/main/proto/FSProtos.proto | 8 + ....apache.hadoop.fs.MultipartUploaderFactory | 16 ++ .../AbstractSystemMultipartUploaderTest.java | 143 +++++++++++++++++ .../TestLocalFileSystemMultipartUploader.java | 65 ++++++++ .../AbstractContractPathHandleTest.java | 6 + .../TestRawlocalContractPathHandle.java | 40 +++++ .../src/test/resources/contract/rawlocal.xml | 5 + .../hdfs/DFSMultipartUploaderFactory.java | 40 +++++ ....apache.hadoop.fs.MultipartUploaderFactory | 16 ++ .../hadoop/fs/TestHDFSMultipartUploader.java | 76 +++++++++ .../hadoop/fs/s3a/S3AMultipartUploader.java | 150 ++++++++++++++++++ ....apache.hadoop.fs.MultipartUploaderFactory | 15 ++ .../org.apache.hadoop.fs.MultipartUploader | 16 ++ 23 files changed, 1290 insertions(+), 2 deletions(-) create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BBPartHandle.java create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BBUploadHandle.java create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemMultipartUploader.java create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystemPathHandle.java create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploader.java create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploaderFactory.java create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PartHandle.java create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/UnsupportedMultipartUploaderException.java create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/UploadHandle.java create mode 100644 hadoop-common-project/hadoop-common/src/main/resources/META-INF/services/org.apache.hadoop.fs.MultipartUploaderFactory create mode 100644 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/AbstractSystemMultipartUploaderTest.java create mode 100644 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystemMultipartUploader.java create mode 100644 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/rawlocal/TestRawlocalContractPathHandle.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSMultipartUploaderFactory.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs-client/src/main/resources/META-INF/services/org.apache.hadoop.fs.MultipartUploaderFactory create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSMultipartUploader.java create mode 100644 hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AMultipartUploader.java create mode 100644 
hadoop-tools/hadoop-aws/src/main/resources/META-INF/org.apache.hadoop.fs.MultipartUploaderFactory create mode 100644 hadoop-tools/hadoop-aws/src/main/resources/META-INF/services/org.apache.hadoop.fs.MultipartUploader diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BBPartHandle.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BBPartHandle.java new file mode 100644 index 0000000000..e1336b8085 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BBPartHandle.java @@ -0,0 +1,58 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs; + +import java.nio.ByteBuffer; +import java.util.Arrays; + +/** + * Byte array backed part handle. + */ +public final class BBPartHandle implements PartHandle { + + private static final long serialVersionUID = 0x23ce3eb1; + + private final byte[] bytes; + + private BBPartHandle(ByteBuffer byteBuffer){ + this.bytes = byteBuffer.array(); + } + + public static PartHandle from(ByteBuffer byteBuffer) { + return new BBPartHandle(byteBuffer); + } + + @Override + public ByteBuffer bytes() { + return ByteBuffer.wrap(bytes); + } + + @Override + public int hashCode() { + return Arrays.hashCode(bytes); + } + + @Override + public boolean equals(Object other) { + if (!(other instanceof PartHandle)) { + return false; + + } + PartHandle o = (PartHandle) other; + return bytes().equals(o.bytes()); + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BBUploadHandle.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BBUploadHandle.java new file mode 100644 index 0000000000..6430c145e2 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BBUploadHandle.java @@ -0,0 +1,57 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs; + +import java.nio.ByteBuffer; +import java.util.Arrays; + +/** + * Byte array backed upload handle. 
+ */ +public final class BBUploadHandle implements UploadHandle { + + private static final long serialVersionUID = 0x69d5509b; + + private final byte[] bytes; + + private BBUploadHandle(ByteBuffer byteBuffer){ + this.bytes = byteBuffer.array(); + } + + public static UploadHandle from(ByteBuffer byteBuffer) { + return new BBUploadHandle(byteBuffer); + } + + @Override + public int hashCode() { + return Arrays.hashCode(bytes); + } + + @Override + public ByteBuffer bytes() { + return ByteBuffer.wrap(bytes); + } + + @Override + public boolean equals(Object other) { + if (!(other instanceof UploadHandle)) { + return false; + } + UploadHandle o = (UploadHandle) other; + return bytes().equals(o.bytes()); + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemMultipartUploader.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemMultipartUploader.java new file mode 100644 index 0000000000..b57ff3dc3a --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemMultipartUploader.java @@ -0,0 +1,132 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs; + +import com.google.common.base.Charsets; +import org.apache.commons.compress.utils.IOUtils; +import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.permission.FsPermission; + +import java.io.IOException; +import java.io.InputStream; +import java.nio.ByteBuffer; +import java.util.Comparator; +import java.util.List; +import java.util.stream.Collectors; + +/** + * A MultipartUploader that uses the basic FileSystem commands. + * This is done in three stages: + * Init - create a temp _multipart directory. + * PutPart - copying the individual parts of the file to the temp directory. + * Complete - use {@link FileSystem#concat} to merge the files; and then delete + * the temp directory. 
+ */ +public class FileSystemMultipartUploader extends MultipartUploader { + + private final FileSystem fs; + + public FileSystemMultipartUploader(FileSystem fs) { + this.fs = fs; + } + + @Override + public UploadHandle initialize(Path filePath) throws IOException { + Path collectorPath = createCollectorPath(filePath); + fs.mkdirs(collectorPath, FsPermission.getDirDefault()); + + ByteBuffer byteBuffer = ByteBuffer.wrap( + collectorPath.toString().getBytes(Charsets.UTF_8)); + return BBUploadHandle.from(byteBuffer); + } + + @Override + public PartHandle putPart(Path filePath, InputStream inputStream, + int partNumber, UploadHandle uploadId, long lengthInBytes) + throws IOException { + + byte[] uploadIdByteArray = uploadId.toByteArray(); + Path collectorPath = new Path(new String(uploadIdByteArray, 0, + uploadIdByteArray.length, Charsets.UTF_8)); + Path partPath = + Path.mergePaths(collectorPath, Path.mergePaths(new Path(Path.SEPARATOR), + new Path(Integer.toString(partNumber) + ".part"))); + FSDataOutputStreamBuilder outputStream = fs.createFile(partPath); + FSDataOutputStream fsDataOutputStream = outputStream.build(); + IOUtils.copy(inputStream, fsDataOutputStream, 4096); + fsDataOutputStream.close(); + return BBPartHandle.from(ByteBuffer.wrap( + partPath.toString().getBytes(Charsets.UTF_8))); + } + + private Path createCollectorPath(Path filePath) { + return Path.mergePaths(filePath.getParent(), + Path.mergePaths(new Path(filePath.getName().split("\\.")[0]), + Path.mergePaths(new Path("_multipart"), + new Path(Path.SEPARATOR)))); + } + + @Override + @SuppressWarnings("deprecation") // rename w/ OVERWRITE + public PathHandle complete(Path filePath, + List> handles, UploadHandle multipartUploadId) + throws IOException { + handles.sort(Comparator.comparing(Pair::getKey)); + List partHandles = handles + .stream() + .map(pair -> { + byte[] byteArray = pair.getValue().toByteArray(); + return new Path(new String(byteArray, 0, byteArray.length, + Charsets.UTF_8)); + }) + .collect(Collectors.toList()); + + Path collectorPath = createCollectorPath(filePath); + Path filePathInsideCollector = Path.mergePaths(collectorPath, + new Path(Path.SEPARATOR + filePath.getName())); + fs.create(filePathInsideCollector).close(); + fs.concat(filePathInsideCollector, + partHandles.toArray(new Path[handles.size()])); + fs.rename(filePathInsideCollector, filePath, Options.Rename.OVERWRITE); + fs.delete(collectorPath, true); + FileStatus status = fs.getFileStatus(filePath); + return fs.getPathHandle(status); + } + + @Override + public void abort(Path filePath, UploadHandle uploadId) throws IOException { + byte[] uploadIdByteArray = uploadId.toByteArray(); + Path collectorPath = new Path(new String(uploadIdByteArray, 0, + uploadIdByteArray.length, Charsets.UTF_8)); + fs.delete(collectorPath, true); + } + + /** + * Factory for creating MultipartUploaderFactory objects for file:// + * filesystems. 
+ */ + public static class Factory extends MultipartUploaderFactory { + protected MultipartUploader createMultipartUploader(FileSystem fs, + Configuration conf) { + if (fs.getScheme().equals("file")) { + return new FileSystemMultipartUploader(fs); + } + return null; + } + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystemPathHandle.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystemPathHandle.java new file mode 100644 index 0000000000..a6b37b32bb --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystemPathHandle.java @@ -0,0 +1,100 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs; + +import com.google.protobuf.ByteString; +import org.apache.hadoop.fs.FSProtos.LocalFileSystemPathHandleProto; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.Objects; +import java.util.Optional; + +/** + * Opaque handle to an entity in a FileSystem. + */ +public class LocalFileSystemPathHandle implements PathHandle { + + private final String path; + private final Long mtime; + + public LocalFileSystemPathHandle(String path, Optional mtime) { + this.path = path; + this.mtime = mtime.orElse(null); + } + + public LocalFileSystemPathHandle(ByteBuffer bytes) throws IOException { + if (null == bytes) { + throw new IOException("Missing PathHandle"); + } + LocalFileSystemPathHandleProto p = + LocalFileSystemPathHandleProto.parseFrom(ByteString.copyFrom(bytes)); + path = p.hasPath() ? p.getPath() : null; + mtime = p.hasMtime() ? 
p.getMtime() : null; + } + + public String getPath() { + return path; + } + + public void verify(FileStatus stat) throws InvalidPathHandleException { + if (null == stat) { + throw new InvalidPathHandleException("Could not resolve handle"); + } + if (mtime != null && mtime != stat.getModificationTime()) { + throw new InvalidPathHandleException("Content changed"); + } + } + + @Override + public ByteBuffer bytes() { + LocalFileSystemPathHandleProto.Builder b = + LocalFileSystemPathHandleProto.newBuilder(); + b.setPath(path); + if (mtime != null) { + b.setMtime(mtime); + } + return b.build().toByteString().asReadOnlyByteBuffer(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + LocalFileSystemPathHandle that = (LocalFileSystemPathHandle) o; + return Objects.equals(path, that.path) && + Objects.equals(mtime, that.mtime); + } + + @Override + public int hashCode() { + return Objects.hash(path, mtime); + } + + @Override + public String toString() { + return "LocalFileSystemPathHandle{" + + "path='" + path + '\'' + + ", mtime=" + mtime + + '}'; + } + +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploader.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploader.java new file mode 100644 index 0000000000..24a92169a2 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploader.java @@ -0,0 +1,90 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs; + +import java.io.IOException; +import java.io.InputStream; +import java.util.List; + +import org.apache.commons.lang3.tuple.Pair; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * MultipartUploader is an interface for copying files multipart and across + * multiple nodes. Users should: + * 1. Initialize an upload + * 2. Upload parts in any order + * 3. Complete the upload in order to have it materialize in the destination FS. + * + * Implementers should make sure that the complete function should make sure + * that 'complete' will reorder parts if the destination FS doesn't already + * do it for them. + */ +public abstract class MultipartUploader { + public static final Logger LOG = + LoggerFactory.getLogger(MultipartUploader.class); + + /** + * Initialize a multipart upload. + * @param filePath Target path for upload. + * @return unique identifier associating part uploads. + * @throws IOException + */ + public abstract UploadHandle initialize(Path filePath) throws IOException; + + /** + * Put part as part of a multipart upload. It should be possible to have + * parts uploaded in any order (or in parallel). 
+ * @param filePath Target path for upload (same as {@link #initialize(Path)}). + * @param inputStream Data for this part. + * @param partNumber Index of the part relative to others. + * @param uploadId Identifier from {@link #initialize(Path)}. + * @param lengthInBytes Target length to read from the stream. + * @return unique PartHandle identifier for the uploaded part. + * @throws IOException + */ + public abstract PartHandle putPart(Path filePath, InputStream inputStream, + int partNumber, UploadHandle uploadId, long lengthInBytes) + throws IOException; + + /** + * Complete a multipart upload. + * @param filePath Target path for upload (same as {@link #initialize(Path)}. + * @param handles Identifiers with associated part numbers from + * {@link #putPart(Path, InputStream, int, UploadHandle, long)}. + * Depending on the backend, the list order may be significant. + * @param multipartUploadId Identifier from {@link #initialize(Path)}. + * @return unique PathHandle identifier for the uploaded file. + * @throws IOException + */ + public abstract PathHandle complete(Path filePath, + List> handles, UploadHandle multipartUploadId) + throws IOException; + + /** + * Aborts a multipart upload. + * @param filePath Target path for upload (same as {@link #initialize(Path)}. + * @param multipartuploadId Identifier from {@link #initialize(Path)}. + * @throws IOException + */ + public abstract void abort(Path filePath, UploadHandle multipartuploadId) + throws IOException; + +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploaderFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploaderFactory.java new file mode 100644 index 0000000000..b0fa798ee2 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploaderFactory.java @@ -0,0 +1,65 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs; + +import org.apache.hadoop.conf.Configuration; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.Iterator; +import java.util.ServiceLoader; + +/** + * {@link ServiceLoader}-driven uploader API for storage services supporting + * multipart uploads. + */ +public abstract class MultipartUploaderFactory { + public static final Logger LOG = + LoggerFactory.getLogger(MultipartUploaderFactory.class); + + /** + * Multipart Uploaders listed as services. + */ + private static ServiceLoader serviceLoader = + ServiceLoader.load(MultipartUploaderFactory.class, + MultipartUploaderFactory.class.getClassLoader()); + + // Iterate through the serviceLoader to avoid lazy loading. + // Lazy loading would require synchronization in concurrent use cases. 
+ static { + Iterator iterServices = serviceLoader.iterator(); + while (iterServices.hasNext()) { + iterServices.next(); + } + } + + public static MultipartUploader get(FileSystem fs, Configuration conf) + throws IOException { + MultipartUploader mpu = null; + for (MultipartUploaderFactory factory : serviceLoader) { + mpu = factory.createMultipartUploader(fs, conf); + if (mpu != null) { + break; + } + } + return mpu; + } + + protected abstract MultipartUploader createMultipartUploader(FileSystem fs, + Configuration conf) throws IOException; +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PartHandle.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PartHandle.java new file mode 100644 index 0000000000..df70b746cc --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PartHandle.java @@ -0,0 +1,45 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +import java.io.Serializable; +import java.nio.ByteBuffer; + +/** + * Opaque, serializable reference to an part id for multipart uploads. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public interface PartHandle extends Serializable { + /** + * @return Serialized from in bytes. 
+ */ + default byte[] toByteArray() { + ByteBuffer bb = bytes(); + byte[] ret = new byte[bb.remaining()]; + bb.get(ret); + return ret; + } + + ByteBuffer bytes(); + + @Override + boolean equals(Object other); +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java index c0f81997b8..bd003ae90a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java @@ -40,6 +40,7 @@ import java.nio.file.attribute.FileTime; import java.util.Arrays; import java.util.EnumSet; +import java.util.Optional; import java.util.StringTokenizer; import org.apache.hadoop.classification.InterfaceAudience; @@ -212,7 +213,19 @@ public FSDataInputStream open(Path f, int bufferSize) throws IOException { return new FSDataInputStream(new BufferedFSInputStream( new LocalFSFileInputStream(f), bufferSize)); } - + + @Override + public FSDataInputStream open(PathHandle fd, int bufferSize) + throws IOException { + if (!(fd instanceof LocalFileSystemPathHandle)) { + fd = new LocalFileSystemPathHandle(fd.bytes()); + } + LocalFileSystemPathHandle id = (LocalFileSystemPathHandle) fd; + id.verify(getFileStatus(new Path(id.getPath()))); + return new FSDataInputStream(new BufferedFSInputStream( + new LocalFSFileInputStream(new Path(id.getPath())), bufferSize)); + } + /********************************************************* * For create()'s FSOutputStream. *********************************************************/ @@ -246,7 +259,7 @@ private LocalFSFileOutputStream(Path f, boolean append, } } } - + /* * Just forward to the fos */ @@ -350,6 +363,18 @@ public FSDataOutputStream createNonRecursive(Path f, FsPermission permission, return out; } + @Override + public void concat(final Path trg, final Path [] psrcs) throws IOException { + final int bufferSize = 4096; + try(FSDataOutputStream out = create(trg)) { + for (Path src : psrcs) { + try(FSDataInputStream in = open(src)) { + IOUtils.copyBytes(in, out, bufferSize, false); + } + } + } + } + @Override public boolean rename(Path src, Path dst) throws IOException { // Attempt rename using Java API. @@ -863,6 +888,38 @@ public void setTimes(Path p, long mtime, long atime) throws IOException { } } + /** + * Hook to implement support for {@link PathHandle} operations. + * @param stat Referent in the target FileSystem + * @param opts Constraints that determine the validity of the + * {@link PathHandle} reference. + */ + protected PathHandle createPathHandle(FileStatus stat, + Options.HandleOpt... 
opts) { + if (stat.isDirectory() || stat.isSymlink()) { + throw new IllegalArgumentException("PathHandle only available for files"); + } + String authority = stat.getPath().toUri().getAuthority(); + if (authority != null && !authority.equals("file://")) { + throw new IllegalArgumentException("Wrong FileSystem: " + stat.getPath()); + } + Options.HandleOpt.Data data = + Options.HandleOpt.getOpt(Options.HandleOpt.Data.class, opts) + .orElse(Options.HandleOpt.changed(false)); + Options.HandleOpt.Location loc = + Options.HandleOpt.getOpt(Options.HandleOpt.Location.class, opts) + .orElse(Options.HandleOpt.moved(false)); + if (loc.allowChange()) { + throw new UnsupportedOperationException("Tracking file movement in " + + "basic FileSystem is not supported"); + } + final Path p = stat.getPath(); + final Optional mtime = !data.allowChange() + ? Optional.of(stat.getModificationTime()) + : Optional.empty(); + return new LocalFileSystemPathHandle(p.toString(), mtime); + } + @Override public boolean supportsSymlinks() { return true; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/UnsupportedMultipartUploaderException.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/UnsupportedMultipartUploaderException.java new file mode 100644 index 0000000000..5606a80dec --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/UnsupportedMultipartUploaderException.java @@ -0,0 +1,41 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs; + +import java.io.IOException; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * MultipartUploader for a given file system name/scheme is not supported. + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class UnsupportedMultipartUploaderException extends IOException { + private static final long serialVersionUID = 1L; + + /** + * Constructs exception with the specified detail message. + * + * @param message exception message. + */ + public UnsupportedMultipartUploaderException(final String message) { + super(message); + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/UploadHandle.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/UploadHandle.java new file mode 100644 index 0000000000..143b4d1584 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/UploadHandle.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +import java.io.Serializable; +import java.nio.ByteBuffer; + +/** + * Opaque, serializable reference to an uploadId for multipart uploads. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public interface UploadHandle extends Serializable { + + /** + * @return Serialized from in bytes. + */ + default byte[] toByteArray() { + ByteBuffer bb = bytes(); + byte[] ret = new byte[bb.remaining()]; + bb.get(ret); + return ret; + } + + ByteBuffer bytes(); + + @Override + boolean equals(Object other); + +} diff --git a/hadoop-common-project/hadoop-common/src/main/proto/FSProtos.proto b/hadoop-common-project/hadoop-common/src/main/proto/FSProtos.proto index 5b8c45d0ad..c3b768ab67 100644 --- a/hadoop-common-project/hadoop-common/src/main/proto/FSProtos.proto +++ b/hadoop-common-project/hadoop-common/src/main/proto/FSProtos.proto @@ -68,3 +68,11 @@ message FileStatusProto { optional bytes ec_data = 17; optional uint32 flags = 18 [default = 0]; } + +/** + * Placeholder type for consistent basic FileSystem operations. + */ +message LocalFileSystemPathHandleProto { + optional uint64 mtime = 1; + optional string path = 2; +} diff --git a/hadoop-common-project/hadoop-common/src/main/resources/META-INF/services/org.apache.hadoop.fs.MultipartUploaderFactory b/hadoop-common-project/hadoop-common/src/main/resources/META-INF/services/org.apache.hadoop.fs.MultipartUploaderFactory new file mode 100644 index 0000000000..f0054fedb8 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/resources/META-INF/services/org.apache.hadoop.fs.MultipartUploaderFactory @@ -0,0 +1,16 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +org.apache.hadoop.fs.FileSystemMultipartUploader$Factory diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/AbstractSystemMultipartUploaderTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/AbstractSystemMultipartUploaderTest.java new file mode 100644 index 0000000000..f132089a9e --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/AbstractSystemMultipartUploaderTest.java @@ -0,0 +1,143 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs; + +import java.io.IOException; +import java.io.InputStream; +import java.util.ArrayList; +import java.util.List; + +import org.apache.commons.io.IOUtils; +import org.apache.commons.lang3.tuple.Pair; + +import org.junit.Test; +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; + +public abstract class AbstractSystemMultipartUploaderTest { + + abstract FileSystem getFS() throws IOException; + + abstract Path getBaseTestPath(); + + @Test + public void testMultipartUpload() throws Exception { + FileSystem fs = getFS(); + Path file = new Path(getBaseTestPath(), "some-file"); + MultipartUploader mpu = MultipartUploaderFactory.get(fs, null); + UploadHandle uploadHandle = mpu.initialize(file); + List> partHandles = new ArrayList<>(); + StringBuilder sb = new StringBuilder(); + for (int i = 1; i <= 100; ++i) { + String contents = "ThisIsPart" + i + "\n"; + sb.append(contents); + int len = contents.getBytes().length; + InputStream is = IOUtils.toInputStream(contents, "UTF-8"); + PartHandle partHandle = mpu.putPart(file, is, i, uploadHandle, len); + partHandles.add(Pair.of(i, partHandle)); + } + PathHandle fd = mpu.complete(file, partHandles, uploadHandle); + byte[] fdData = IOUtils.toByteArray(fs.open(fd)); + byte[] fileData = IOUtils.toByteArray(fs.open(file)); + String readString = new String(fdData); + assertEquals(sb.toString(), readString); + assertArrayEquals(fdData, fileData); + } + + @Test + public void testMultipartUploadReverseOrder() throws Exception { + FileSystem fs = getFS(); + Path file = new Path(getBaseTestPath(), "some-file"); + MultipartUploader mpu = MultipartUploaderFactory.get(fs, null); + UploadHandle uploadHandle = mpu.initialize(file); + List> partHandles = new ArrayList<>(); + StringBuilder sb = new StringBuilder(); + for (int i = 1; i <= 100; ++i) { + String contents = "ThisIsPart" + i + "\n"; + sb.append(contents); + } + for (int i = 100; i > 0; --i) { + String contents = "ThisIsPart" + i + "\n"; + int len = contents.getBytes().length; + InputStream is = IOUtils.toInputStream(contents, "UTF-8"); + PartHandle partHandle = mpu.putPart(file, is, i, uploadHandle, len); + 
partHandles.add(Pair.of(i, partHandle)); + } + PathHandle fd = mpu.complete(file, partHandles, uploadHandle); + byte[] fdData = IOUtils.toByteArray(fs.open(fd)); + byte[] fileData = IOUtils.toByteArray(fs.open(file)); + String readString = new String(fdData); + assertEquals(sb.toString(), readString); + assertArrayEquals(fdData, fileData); + } + + @Test + public void testMultipartUploadReverseOrderNoNContiguousPartNumbers() + throws Exception { + FileSystem fs = getFS(); + Path file = new Path(getBaseTestPath(), "some-file"); + MultipartUploader mpu = MultipartUploaderFactory.get(fs, null); + UploadHandle uploadHandle = mpu.initialize(file); + List> partHandles = new ArrayList<>(); + StringBuilder sb = new StringBuilder(); + for (int i = 2; i <= 200; i += 2) { + String contents = "ThisIsPart" + i + "\n"; + sb.append(contents); + } + for (int i = 200; i > 0; i -= 2) { + String contents = "ThisIsPart" + i + "\n"; + int len = contents.getBytes().length; + InputStream is = IOUtils.toInputStream(contents, "UTF-8"); + PartHandle partHandle = mpu.putPart(file, is, i, uploadHandle, len); + partHandles.add(Pair.of(i, partHandle)); + } + PathHandle fd = mpu.complete(file, partHandles, uploadHandle); + byte[] fdData = IOUtils.toByteArray(fs.open(fd)); + byte[] fileData = IOUtils.toByteArray(fs.open(file)); + String readString = new String(fdData); + assertEquals(sb.toString(), readString); + assertArrayEquals(fdData, fileData); + } + + @Test + public void testMultipartUploadAbort() throws Exception { + FileSystem fs = getFS(); + Path file = new Path(getBaseTestPath(), "some-file"); + MultipartUploader mpu = MultipartUploaderFactory.get(fs, null); + UploadHandle uploadHandle = mpu.initialize(file); + for (int i = 100; i >= 50; --i) { + String contents = "ThisIsPart" + i + "\n"; + int len = contents.getBytes().length; + InputStream is = IOUtils.toInputStream(contents, "UTF-8"); + PartHandle partHandle = mpu.putPart(file, is, i, uploadHandle, len); + } + mpu.abort(file, uploadHandle); + + String contents = "ThisIsPart49\n"; + int len = contents.getBytes().length; + InputStream is = IOUtils.toInputStream(contents, "UTF-8"); + + try { + mpu.putPart(file, is, 49, uploadHandle, len); + fail("putPart should have thrown an exception"); + } catch (IOException ok) { + // ignore + } + } +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystemMultipartUploader.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystemMultipartUploader.java new file mode 100644 index 0000000000..21d01b6cdb --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystemMultipartUploader.java @@ -0,0 +1,65 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs; + +import org.apache.hadoop.conf.Configuration; +import static org.apache.hadoop.test.GenericTestUtils.getRandomizedTestDir; + +import org.junit.After; +import org.junit.Before; +import org.junit.BeforeClass; + +import java.io.File; +import java.io.IOException; + +/** + * Test the FileSystemMultipartUploader on local file system. + */ +public class TestLocalFileSystemMultipartUploader + extends AbstractSystemMultipartUploaderTest { + + private static FileSystem fs; + private File tmp; + + @BeforeClass + public static void init() throws IOException { + fs = LocalFileSystem.getLocal(new Configuration()); + } + + @Before + public void setup() throws IOException { + tmp = getRandomizedTestDir(); + tmp.mkdirs(); + } + + @After + public void tearDown() throws IOException { + tmp.delete(); + } + + @Override + public FileSystem getFS() { + return fs; + } + + @Override + public Path getBaseTestPath() { + return new Path(tmp.getAbsolutePath()); + } + +} \ No newline at end of file diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractPathHandleTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractPathHandleTest.java index fbe28c3c24..36cfa6ccda 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractPathHandleTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractPathHandleTest.java @@ -123,6 +123,12 @@ public void testChanged() throws IOException { HandleOpt.Data data = HandleOpt.getOpt(HandleOpt.Data.class, opts) .orElseThrow(IllegalArgumentException::new); FileStatus stat = testFile(B1); + try { + // Temporary workaround while RawLocalFS supports only second precision + Thread.sleep(1000); + } catch (InterruptedException e) { + throw new IOException(e); + } // modify the file by appending data appendFile(getFileSystem(), stat.getPath(), B2); byte[] b12 = Arrays.copyOf(B1, B1.length + B2.length); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/rawlocal/TestRawlocalContractPathHandle.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/rawlocal/TestRawlocalContractPathHandle.java new file mode 100644 index 0000000000..3c088d278e --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/rawlocal/TestRawlocalContractPathHandle.java @@ -0,0 +1,40 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.fs.contract.rawlocal; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Options; +import org.apache.hadoop.fs.contract.AbstractContractPathHandleTest; +import org.apache.hadoop.fs.contract.AbstractFSContract; +import org.apache.hadoop.fs.contract.localfs.LocalFSContract; +import org.apache.hadoop.fs.contract.rawlocal.RawlocalFSContract; + +public class TestRawlocalContractPathHandle + extends AbstractContractPathHandleTest { + + public TestRawlocalContractPathHandle(String testname, + Options.HandleOpt[] opts, boolean serialized) { + super(testname, opts, serialized); + } + + @Override + protected AbstractFSContract createContract(Configuration conf) { + return new RawlocalFSContract(conf); + } + +} diff --git a/hadoop-common-project/hadoop-common/src/test/resources/contract/rawlocal.xml b/hadoop-common-project/hadoop-common/src/test/resources/contract/rawlocal.xml index a0d1d21a94..8cbd4a0abc 100644 --- a/hadoop-common-project/hadoop-common/src/test/resources/contract/rawlocal.xml +++ b/hadoop-common-project/hadoop-common/src/test/resources/contract/rawlocal.xml @@ -122,4 +122,9 @@ true + + fs.contract.supports-content-check + true + + diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSMultipartUploaderFactory.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSMultipartUploaderFactory.java new file mode 100644 index 0000000000..e9959c192d --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSMultipartUploaderFactory.java @@ -0,0 +1,40 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileSystemMultipartUploader; +import org.apache.hadoop.fs.MultipartUploader; +import org.apache.hadoop.fs.MultipartUploaderFactory; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; + +/** + * Support for HDFS multipart uploads, built on + * {@link FileSystem#concat(Path, Path[])}. 
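Not part of the patch: a minimal usage sketch of the MultipartUploader API these tests exercise, assuming a FileSystem whose uploader factory is registered (LocalFileSystem, HDFS or S3A after this patch). The path, part contents and class name are placeholders.

import java.io.InputStream;
import java.util.ArrayList;
import java.util.List;

import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.MultipartUploader;
import org.apache.hadoop.fs.MultipartUploaderFactory;
import org.apache.hadoop.fs.PartHandle;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathHandle;
import org.apache.hadoop.fs.UploadHandle;

public class MultipartUploadSketch {
  /** Upload two small parts to the given path and return a handle to the completed file. */
  public static PathHandle upload(FileSystem fs, Path path) throws Exception {
    // The tests pass null for the Configuration argument.
    MultipartUploader mpu = MultipartUploaderFactory.get(fs, null);
    UploadHandle upload = mpu.initialize(path);
    List<Pair<Integer, PartHandle>> parts = new ArrayList<>();
    for (int i = 1; i <= 2; i++) {
      String contents = "part-" + i + "\n";
      byte[] bytes = contents.getBytes("UTF-8");
      InputStream in = IOUtils.toInputStream(contents, "UTF-8");
      // Part numbers start at 1; the declared length must match the stream contents.
      parts.add(Pair.of(i, mpu.putPart(path, in, i, upload, bytes.length)));
    }
    // complete() stitches the parts together (via FileSystem#concat on HDFS).
    return mpu.complete(path, parts, upload);
  }
}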
+ */ +public class DFSMultipartUploaderFactory extends MultipartUploaderFactory { + protected MultipartUploader createMultipartUploader(FileSystem fs, + Configuration conf) { + if (fs.getScheme().equals(HdfsConstants.HDFS_URI_SCHEME)) { + return new FileSystemMultipartUploader(fs); + } + return null; + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/resources/META-INF/services/org.apache.hadoop.fs.MultipartUploaderFactory b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/resources/META-INF/services/org.apache.hadoop.fs.MultipartUploaderFactory new file mode 100644 index 0000000000..b153fd9924 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/resources/META-INF/services/org.apache.hadoop.fs.MultipartUploaderFactory @@ -0,0 +1,16 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +org.apache.hadoop.hdfs.DFSMultipartUploaderFactory diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSMultipartUploader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSMultipartUploader.java new file mode 100644 index 0000000000..96c50938b3 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSMultipartUploader.java @@ -0,0 +1,76 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
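DFSMultipartUploaderFactory above is discovered through the META-INF/services entry for org.apache.hadoop.fs.MultipartUploaderFactory. A sketch of the same pattern for a hypothetical file system; the class name and the "myfs" scheme are placeholders, not part of the patch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemMultipartUploader;
import org.apache.hadoop.fs.MultipartUploader;
import org.apache.hadoop.fs.MultipartUploaderFactory;

public class MyFsMultipartUploaderFactory extends MultipartUploaderFactory {
  @Override
  protected MultipartUploader createMultipartUploader(FileSystem fs,
      Configuration conf) {
    // Return an uploader only for the schemes this factory supports, null otherwise.
    if ("myfs".equals(fs.getScheme())) {
      // FileSystemMultipartUploader relies on the FileSystem supporting concat().
      return new FileSystemMultipartUploader(fs);
    }
    return null;
  }
}

Such a class would also need its own line in META-INF/services/org.apache.hadoop.fs.MultipartUploaderFactory, as the HDFS and S3A modules add in this patch.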
+ */ +package org.apache.hadoop.fs; + +import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.test.GenericTestUtils; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Rule; +import org.junit.rules.TestName; + +import java.io.IOException; + +public class TestHDFSMultipartUploader + extends AbstractSystemMultipartUploaderTest { + + private static MiniDFSCluster cluster; + private Path tmp; + + @Rule + public TestName name = new TestName(); + + @BeforeClass + public static void init() throws IOException { + HdfsConfiguration conf = new HdfsConfiguration(); + cluster = new MiniDFSCluster.Builder(conf, + GenericTestUtils.getRandomizedTestDir()) + .numDataNodes(1) + .build(); + cluster.waitClusterUp(); + } + + @AfterClass + public static void cleanup() throws IOException { + if (cluster != null) { + cluster.shutdown(); + cluster = null; + } + } + + @Before + public void setup() throws IOException { + tmp = new Path(cluster.getFileSystem().getWorkingDirectory(), + name.getMethodName()); + cluster.getFileSystem().mkdirs(tmp); + } + + @Override + public FileSystem getFS() throws IOException { + return cluster.getFileSystem(); + } + + @Override + public Path getBaseTestPath() { + return tmp; + } + +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AMultipartUploader.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AMultipartUploader.java new file mode 100644 index 0000000000..34c88d43f6 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AMultipartUploader.java @@ -0,0 +1,150 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
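The S3AMultipartUploader that follows carries the S3 uploadId (and, analogously, the part ETags) inside the opaque handles as UTF-8 bytes. A small sketch of that round trip, assuming only the handle types added by this patch; the class name is a placeholder.

import java.nio.ByteBuffer;

import com.google.common.base.Charsets;
import org.apache.hadoop.fs.BBUploadHandle;
import org.apache.hadoop.fs.UploadHandle;

public class UploadHandleCodec {
  /** Wrap an S3 uploadId in an opaque UploadHandle, as initialize() does. */
  public static UploadHandle encode(String uploadId) {
    return BBUploadHandle.from(
        ByteBuffer.wrap(uploadId.getBytes(Charsets.UTF_8)));
  }

  /** Recover the uploadId from the handle, as putPart(), complete() and abort() do. */
  public static String decode(UploadHandle handle) {
    byte[] raw = handle.toByteArray();
    return new String(raw, 0, raw.length, Charsets.UTF_8);
  }
}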
+ */ +package org.apache.hadoop.fs.s3a; + +import com.amazonaws.services.s3.model.AbortMultipartUploadRequest; +import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest; +import com.amazonaws.services.s3.model.CompleteMultipartUploadResult; +import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest; +import com.amazonaws.services.s3.model.InitiateMultipartUploadResult; +import com.amazonaws.services.s3.model.PartETag; +import com.amazonaws.services.s3.model.UploadPartRequest; +import com.amazonaws.services.s3.model.UploadPartResult; +import com.google.common.base.Charsets; +import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.BBPartHandle; +import org.apache.hadoop.fs.BBUploadHandle; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.MultipartUploader; +import org.apache.hadoop.fs.MultipartUploaderFactory; +import org.apache.hadoop.fs.PartHandle; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.PathHandle; +import org.apache.hadoop.fs.UploadHandle; +import org.apache.hadoop.hdfs.DFSUtilClient; + +import java.io.IOException; +import java.io.InputStream; +import java.nio.ByteBuffer; +import java.util.List; +import java.util.stream.Collectors; + +/** + * MultipartUploader for S3AFileSystem. This uses the S3 multipart + * upload mechanism. + */ +public class S3AMultipartUploader extends MultipartUploader { + + private final S3AFileSystem s3a; + + public S3AMultipartUploader(FileSystem fs, Configuration conf) { + if (!(fs instanceof S3AFileSystem)) { + throw new IllegalArgumentException( + "S3A MultipartUploads must use S3AFileSystem"); + } + s3a = (S3AFileSystem) fs; + } + + @Override + public UploadHandle initialize(Path filePath) throws IOException { + String key = s3a.pathToKey(filePath); + InitiateMultipartUploadRequest request = + new InitiateMultipartUploadRequest(s3a.getBucket(), key); + LOG.debug("initialize request: {}", request); + InitiateMultipartUploadResult result = s3a.initiateMultipartUpload(request); + String uploadId = result.getUploadId(); + return BBUploadHandle.from(ByteBuffer.wrap( + uploadId.getBytes(Charsets.UTF_8))); + } + + @Override + public PartHandle putPart(Path filePath, InputStream inputStream, + int partNumber, UploadHandle uploadId, long lengthInBytes) { + String key = s3a.pathToKey(filePath); + UploadPartRequest request = new UploadPartRequest(); + byte[] uploadIdBytes = uploadId.toByteArray(); + request.setUploadId(new String(uploadIdBytes, 0, uploadIdBytes.length, + Charsets.UTF_8)); + request.setInputStream(inputStream); + request.setPartSize(lengthInBytes); + request.setPartNumber(partNumber); + request.setBucketName(s3a.getBucket()); + request.setKey(key); + LOG.debug("putPart request: {}", request); + UploadPartResult result = s3a.uploadPart(request); + String eTag = result.getETag(); + return BBPartHandle.from(ByteBuffer.wrap(eTag.getBytes(Charsets.UTF_8))); + } + + @Override + public PathHandle complete(Path filePath, + List> handles, UploadHandle uploadId) { + String key = s3a.pathToKey(filePath); + CompleteMultipartUploadRequest request = + new CompleteMultipartUploadRequest(); + request.setBucketName(s3a.getBucket()); + request.setKey(key); + byte[] uploadIdBytes = uploadId.toByteArray(); + request.setUploadId(new String(uploadIdBytes, 0, uploadIdBytes.length, + Charsets.UTF_8)); + List eTags = handles + .stream() + .map(handle -> { + byte[] partEtagBytes = handle.getRight().toByteArray(); + return new 
PartETag(handle.getLeft(), + new String(partEtagBytes, 0, partEtagBytes.length, + Charsets.UTF_8)); + }) + .collect(Collectors.toList()); + request.setPartETags(eTags); + LOG.debug("Complete request: {}", request); + CompleteMultipartUploadResult completeMultipartUploadResult = + s3a.getAmazonS3Client().completeMultipartUpload(request); + + byte[] eTag = DFSUtilClient.string2Bytes( + completeMultipartUploadResult.getETag()); + return (PathHandle) () -> ByteBuffer.wrap(eTag); + } + + @Override + public void abort(Path filePath, UploadHandle uploadId) { + String key = s3a.pathToKey(filePath); + byte[] uploadIdBytes = uploadId.toByteArray(); + String uploadIdString = new String(uploadIdBytes, 0, uploadIdBytes.length, + Charsets.UTF_8); + AbortMultipartUploadRequest request = new AbortMultipartUploadRequest(s3a + .getBucket(), key, uploadIdString); + LOG.debug("Abort request: {}", request); + s3a.getAmazonS3Client().abortMultipartUpload(request); + } + + /** + * Factory for creating MultipartUploader objects for s3a:// FileSystems. + */ + public static class Factory extends MultipartUploaderFactory { + @Override + protected MultipartUploader createMultipartUploader(FileSystem fs, + Configuration conf) { + if (fs.getScheme().equals("s3a")) { + return new S3AMultipartUploader(fs, conf); + } + return null; + } + } +} diff --git a/hadoop-tools/hadoop-aws/src/main/resources/META-INF/org.apache.hadoop.fs.MultipartUploaderFactory b/hadoop-tools/hadoop-aws/src/main/resources/META-INF/org.apache.hadoop.fs.MultipartUploaderFactory new file mode 100644 index 0000000000..2e4bc241d0 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/resources/META-INF/org.apache.hadoop.fs.MultipartUploaderFactory @@ -0,0 +1,15 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +org.apache.hadoop.fs.s3a.S3AMultipartUploader$Factory diff --git a/hadoop-tools/hadoop-aws/src/main/resources/META-INF/services/org.apache.hadoop.fs.MultipartUploader b/hadoop-tools/hadoop-aws/src/main/resources/META-INF/services/org.apache.hadoop.fs.MultipartUploader new file mode 100644 index 0000000000..d16846b25b --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/resources/META-INF/services/org.apache.hadoop.fs.MultipartUploader @@ -0,0 +1,16 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +org.apache.hadoop.fs.s3a.S3AMultipartUploader From f34744603ee93e082e7ba148df1400af5ac7c30c Mon Sep 17 00:00:00 2001 From: Chris Douglas Date: Sun, 17 Jun 2018 23:12:18 -0700 Subject: [PATCH 17/70] HADOOP-15533. Make WASB listStatus messages consistent. Contributed by Esfandiar Manii --- .../java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java index e05327e4b3..dfc881ae5c 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java @@ -2886,7 +2886,7 @@ public FileStatus[] listStatus(Path f) throws FileNotFoundException, IOException // There is no metadata found for the path. LOG.debug("Did not find any metadata for path: {}", key); - throw new FileNotFoundException("File" + f + " does not exist."); + throw new FileNotFoundException(f + " is not found"); } return status.toArray(new FileStatus[0]); From 2a4632d3d7b82980d10cc90cdfc52afd866cebb8 Mon Sep 17 00:00:00 2001 From: Mukul Kumar Singh Date: Sun, 17 Jun 2018 23:48:33 -0700 Subject: [PATCH 18/70] HDDS-141. Remove PipeLine Class from SCM and move the data field in the Pipeline to ContainerInfo. Contributed by Shashikant Banerjee. 
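HDDS-141 removes PipelineChannel and moves its fields (leader id, lifecycle state, replication type and factor, name) onto Pipeline itself. A sketch of the construction pattern the updated tests in this patch use; the class name and the pipeline name are placeholders.

import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;

public class PipelineSketch {
  /** Build a single-member standalone pipeline led by the given datanode. */
  public static Pipeline singleNodePipeline(DatanodeDetails leader) {
    Pipeline pipeline = new Pipeline(leader.getUuidString(),
        HddsProtos.LifeCycleState.OPEN,
        HddsProtos.ReplicationType.STAND_ALONE,
        HddsProtos.ReplicationFactor.ONE,
        "sketch-pipeline");
    pipeline.addMember(leader);
    return pipeline;
  }
}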
--- .../common/helpers/ContainerInfo.java | 32 ++++ .../container/common/helpers/Pipeline.java | 148 +++++++++++------- .../common/helpers/PipelineChannel.java | 124 --------------- hadoop-hdds/common/src/main/proto/hdds.proto | 8 +- .../scm/container/closer/ContainerCloser.java | 6 +- .../hdds/scm/pipelines/PipelineManager.java | 67 ++++---- .../hdds/scm/pipelines/PipelineSelector.java | 11 +- .../scm/pipelines/ratis/RatisManagerImpl.java | 13 +- .../standalone/StandaloneManagerImpl.java | 8 +- .../hdds/scm/block/TestDeletedBlockLog.java | 8 +- .../hadoop/ozone/TestMiniOzoneCluster.java | 8 +- .../ozone/container/ContainerTestHelper.java | 19 +-- .../genesis/BenchMarkContainerStateMap.java | 11 +- .../genesis/BenchMarkDatanodeDispatcher.java | 6 +- .../apache/hadoop/ozone/scm/cli/SQLCLI.java | 4 +- 15 files changed, 194 insertions(+), 279 deletions(-) delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/PipelineChannel.java diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java index 2c38d45728..ee05c8768a 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hdds.scm.container.common.helpers; import com.fasterxml.jackson.annotation.JsonAutoDetect; +import com.fasterxml.jackson.annotation.JsonIgnore; import com.fasterxml.jackson.annotation.PropertyAccessor; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.ObjectWriter; @@ -30,6 +31,7 @@ import org.apache.hadoop.util.Time; import java.io.IOException; +import java.util.Arrays; import java.util.Comparator; import static java.lang.Math.max; @@ -63,6 +65,13 @@ public class ContainerInfo private String owner; private long containerID; private long deleteTransactionId; + /** + * Allows you to maintain private data on ContainerInfo. This is not + * serialized via protobuf, just allows us to maintain some private data. + */ + @JsonIgnore + private byte[] data; + ContainerInfo( long containerID, HddsProtos.LifeCycleState state, @@ -295,6 +304,29 @@ public String toJsonString() throws IOException { return WRITER.writeValueAsString(this); } + /** + * Returns private data that is set on this containerInfo. + * + * @return blob, the user can interpret it any way they like. + */ + public byte[] getData() { + if (this.data != null) { + return Arrays.copyOf(this.data, this.data.length); + } else { + return null; + } + } + + /** + * Set private data on ContainerInfo object. + * + * @param data -- private data. + */ + public void setData(byte[] data) { + if (data != null) { + this.data = Arrays.copyOf(data, data.length); + } + } /** * Builder class for ContainerInfo. 
*/ diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java index 87408385ec..c5794f4c03 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java @@ -27,14 +27,14 @@ import com.fasterxml.jackson.databind.ser.FilterProvider; import com.fasterxml.jackson.databind.ser.impl.SimpleBeanPropertyFilter; import com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider; -import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; +import java.util.Map; +import java.util.TreeMap; import java.util.List; /** @@ -46,7 +46,7 @@ public class Pipeline { static { ObjectMapper mapper = new ObjectMapper(); - String[] ignorableFieldNames = {"data"}; + String[] ignorableFieldNames = {"leaderID", "datanodes"}; FilterProvider filters = new SimpleFilterProvider() .addFilter(PIPELINE_INFO, SimpleBeanPropertyFilter .serializeAllExcept(ignorableFieldNames)); @@ -57,38 +57,66 @@ public class Pipeline { WRITER = mapper.writer(filters); } - private PipelineChannel pipelineChannel; - /** - * Allows you to maintain private data on pipelines. This is not serialized - * via protobuf, just allows us to maintain some private data. - */ @JsonIgnore - private byte[] data; + private String leaderID; + @JsonIgnore + private Map datanodes; + private HddsProtos.LifeCycleState lifeCycleState; + private HddsProtos.ReplicationType type; + private HddsProtos.ReplicationFactor factor; + private String name; + // TODO: change to long based id + //private long id; + /** * Constructs a new pipeline data structure. * - * @param pipelineChannel - transport information for this container + * @param leaderID - Leader datanode id + * @param lifeCycleState - Pipeline State + * @param replicationType - Replication protocol + * @param replicationFactor - replication count on datanodes + * @param name - pipelineName */ - public Pipeline(PipelineChannel pipelineChannel) { - this.pipelineChannel = pipelineChannel; - data = null; + public Pipeline(String leaderID, HddsProtos.LifeCycleState lifeCycleState, + HddsProtos.ReplicationType replicationType, + HddsProtos.ReplicationFactor replicationFactor, String name) { + this.leaderID = leaderID; + this.lifeCycleState = lifeCycleState; + this.type = replicationType; + this.factor = replicationFactor; + this.name = name; + datanodes = new TreeMap<>(); } /** * Gets pipeline object from protobuf. * - * @param pipeline - ProtoBuf definition for the pipeline. + * @param pipelineProto - ProtoBuf definition for the pipeline. 
* @return Pipeline Object */ - public static Pipeline getFromProtoBuf(HddsProtos.Pipeline pipeline) { - Preconditions.checkNotNull(pipeline); - PipelineChannel pipelineChannel = - PipelineChannel.getFromProtoBuf(pipeline.getPipelineChannel()); - return new Pipeline(pipelineChannel); + public static Pipeline getFromProtoBuf( + HddsProtos.Pipeline pipelineProto) { + Preconditions.checkNotNull(pipelineProto); + Pipeline pipeline = + new Pipeline(pipelineProto.getLeaderID(), + pipelineProto.getState(), + pipelineProto.getType(), + pipelineProto.getFactor(), + pipelineProto.getName()); + + for (HddsProtos.DatanodeDetailsProto dataID : + pipelineProto.getMembersList()) { + pipeline.addMember(DatanodeDetails.getFromProtoBuf(dataID)); + } + return pipeline; } + /** + * returns the replication count. + * @return Replication Factor + */ public HddsProtos.ReplicationFactor getFactor() { - return pipelineChannel.getFactor(); + return factor; } /** @@ -98,19 +126,34 @@ public HddsProtos.ReplicationFactor getFactor() { */ @JsonIgnore public DatanodeDetails getLeader() { - return pipelineChannel.getDatanodes().get(pipelineChannel.getLeaderID()); + return getDatanodes().get(leaderID); } + public void addMember(DatanodeDetails datanodeDetails) { + datanodes.put(datanodeDetails.getUuid().toString(), + datanodeDetails); + } + + public Map getDatanodes() { + return datanodes; + } /** * Returns the leader host. * * @return First Machine. */ public String getLeaderHost() { - return pipelineChannel.getDatanodes() - .get(pipelineChannel.getLeaderID()).getHostName(); + return getDatanodes() + .get(leaderID).getHostName(); } + /** + * + * @return lead + */ + public String getLeaderID() { + return leaderID; + } /** * Returns all machines that make up this pipeline. * @@ -118,7 +161,7 @@ public String getLeaderHost() { */ @JsonIgnore public List getMachines() { - return new ArrayList<>(pipelineChannel.getDatanodes().values()); + return new ArrayList<>(getDatanodes().values()); } /** @@ -128,7 +171,7 @@ public List getMachines() { */ public List getDatanodeHosts() { List dataHosts = new ArrayList<>(); - for (DatanodeDetails id : pipelineChannel.getDatanodes().values()) { + for (DatanodeDetails id :getDatanodes().values()) { dataHosts.add(id.getHostName()); } return dataHosts; @@ -143,46 +186,31 @@ public List getDatanodeHosts() { public HddsProtos.Pipeline getProtobufMessage() { HddsProtos.Pipeline.Builder builder = HddsProtos.Pipeline.newBuilder(); - builder.setPipelineChannel(this.pipelineChannel.getProtobufMessage()); + for (DatanodeDetails datanode : datanodes.values()) { + builder.addMembers(datanode.getProtoBufMessage()); + } + builder.setLeaderID(leaderID); + + if (this.getLifeCycleState() != null) { + builder.setState(this.getLifeCycleState()); + } + if (this.getType() != null) { + builder.setType(this.getType()); + } + + if (this.getFactor() != null) { + builder.setFactor(this.getFactor()); + } return builder.build(); } - /** - * Returns private data that is set on this pipeline. - * - * @return blob, the user can interpret it any way they like. - */ - public byte[] getData() { - if (this.data != null) { - return Arrays.copyOf(this.data, this.data.length); - } else { - return null; - } - } - - @VisibleForTesting - public PipelineChannel getPipelineChannel() { - return pipelineChannel; - } - - /** - * Set private data on pipeline. - * - * @param data -- private data. 
- */ - public void setData(byte[] data) { - if (data != null) { - this.data = Arrays.copyOf(data, data.length); - } - } - /** * Gets the State of the pipeline. * * @return - LifeCycleStates. */ public HddsProtos.LifeCycleState getLifeCycleState() { - return pipelineChannel.getLifeCycleState(); + return lifeCycleState; } /** @@ -191,7 +219,7 @@ public HddsProtos.LifeCycleState getLifeCycleState() { * @return - Name of the pipeline */ public String getPipelineName() { - return pipelineChannel.getName(); + return name; } /** @@ -200,16 +228,16 @@ public String getPipelineName() { * @return type - Standalone, Ratis, Chained. */ public HddsProtos.ReplicationType getType() { - return pipelineChannel.getType(); + return type; } @Override public String toString() { final StringBuilder b = new StringBuilder(getClass().getSimpleName()) .append("["); - pipelineChannel.getDatanodes().keySet().stream() + getDatanodes().keySet().stream() .forEach(id -> b. - append(id.endsWith(pipelineChannel.getLeaderID()) ? "*" + id : id)); + append(id.endsWith(getLeaderID()) ? "*" + id : id)); b.append(" name:").append(getPipelineName()); if (getType() != null) { b.append(" type:").append(getType().toString()); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/PipelineChannel.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/PipelineChannel.java deleted file mode 100644 index 655751d737..0000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/PipelineChannel.java +++ /dev/null @@ -1,124 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.container.common.helpers; - -import com.fasterxml.jackson.annotation.JsonIgnore; -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; - -import java.util.Map; -import java.util.TreeMap; - -/** - * PipelineChannel information for a {@link Pipeline}. - */ -public class PipelineChannel { - @JsonIgnore - private String leaderID; - @JsonIgnore - private Map datanodes; - private LifeCycleState lifeCycleState; - private ReplicationType type; - private ReplicationFactor factor; - private String name; - // TODO: change to long based id - //private long id; - - public PipelineChannel(String leaderID, LifeCycleState lifeCycleState, - ReplicationType replicationType, ReplicationFactor replicationFactor, - String name) { - this.leaderID = leaderID; - this.lifeCycleState = lifeCycleState; - this.type = replicationType; - this.factor = replicationFactor; - this.name = name; - datanodes = new TreeMap<>(); - } - - public String getLeaderID() { - return leaderID; - } - - public Map getDatanodes() { - return datanodes; - } - - public LifeCycleState getLifeCycleState() { - return lifeCycleState; - } - - public ReplicationType getType() { - return type; - } - - public ReplicationFactor getFactor() { - return factor; - } - - public String getName() { - return name; - } - - public void addMember(DatanodeDetails datanodeDetails) { - datanodes.put(datanodeDetails.getUuid().toString(), - datanodeDetails); - } - - @JsonIgnore - public HddsProtos.PipelineChannel getProtobufMessage() { - HddsProtos.PipelineChannel.Builder builder = - HddsProtos.PipelineChannel.newBuilder(); - for (DatanodeDetails datanode : datanodes.values()) { - builder.addMembers(datanode.getProtoBufMessage()); - } - builder.setLeaderID(leaderID); - - if (this.getLifeCycleState() != null) { - builder.setState(this.getLifeCycleState()); - } - if (this.getType() != null) { - builder.setType(this.getType()); - } - - if (this.getFactor() != null) { - builder.setFactor(this.getFactor()); - } - return builder.build(); - } - - public static PipelineChannel getFromProtoBuf( - HddsProtos.PipelineChannel transportProtos) { - Preconditions.checkNotNull(transportProtos); - PipelineChannel pipelineChannel = - new PipelineChannel(transportProtos.getLeaderID(), - transportProtos.getState(), - transportProtos.getType(), - transportProtos.getFactor(), - transportProtos.getName()); - - for (HddsProtos.DatanodeDetailsProto dataID : - transportProtos.getMembersList()) { - pipelineChannel.addMember(DatanodeDetails.getFromProtoBuf(dataID)); - } - return pipelineChannel; - } -} diff --git a/hadoop-hdds/common/src/main/proto/hdds.proto b/hadoop-hdds/common/src/main/proto/hdds.proto index a9a703eb00..816efa7c25 100644 --- a/hadoop-hdds/common/src/main/proto/hdds.proto +++ b/hadoop-hdds/common/src/main/proto/hdds.proto @@ -40,7 +40,7 @@ message Port { required uint32 value = 2; } 
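The proto change below merges the PipelineChannel message into Pipeline, so serialization becomes a single step on the Java side. A sketch of the round trip using only methods this patch keeps on Pipeline; the class name is a placeholder.

import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;

public class PipelineProtoRoundTrip {
  /** Serialize a Pipeline and rebuild an equivalent one, members included. */
  public static Pipeline roundTrip(Pipeline pipeline) {
    HddsProtos.Pipeline proto = pipeline.getProtobufMessage();
    return Pipeline.getFromProtoBuf(proto);
  }
}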
-message PipelineChannel { +message Pipeline { required string leaderID = 1; repeated DatanodeDetailsProto members = 2; optional LifeCycleState state = 3 [default = OPEN]; @@ -49,12 +49,6 @@ message PipelineChannel { optional string name = 6; } -// A pipeline is composed of PipelineChannel (Ratis/StandAlone) that back a -// container. -message Pipeline { - required PipelineChannel pipelineChannel = 2; -} - message KeyValue { required string key = 1; optional string value = 2; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/closer/ContainerCloser.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/closer/ContainerCloser.java index 937076cfb7..cbb2ba75c2 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/closer/ContainerCloser.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/closer/ContainerCloser.java @@ -127,12 +127,12 @@ public void close(HddsProtos.SCMContainerInfo info) { // to SCM. In that case also, data node will ignore this command. HddsProtos.Pipeline pipeline = info.getPipeline(); - for (HddsProtos.DatanodeDetailsProto datanodeDetails : pipeline - .getPipelineChannel().getMembersList()) { + for (HddsProtos.DatanodeDetailsProto datanodeDetails : + pipeline.getMembersList()) { nodeManager.addDatanodeCommand( DatanodeDetails.getFromProtoBuf(datanodeDetails).getUuid(), new CloseContainerCommand(info.getContainerID(), - pipeline.getPipelineChannel().getType())); + pipeline.getType())); } if (!commandIssued.containsKey(info.getContainerID())) { commandIssued.put(info.getContainerID(), diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineManager.java index 832fcc669a..48affa4112 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineManager.java @@ -17,7 +17,6 @@ package org.apache.hadoop.hdds.scm.pipelines; import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; -import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineChannel; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; @@ -36,12 +35,12 @@ public abstract class PipelineManager { private static final Logger LOG = LoggerFactory.getLogger(PipelineManager.class); - private final List activePipelineChannels; - private final AtomicInteger conduitsIndex; + private final List activePipelines; + private final AtomicInteger pipelineIndex; public PipelineManager() { - activePipelineChannels = new LinkedList<>(); - conduitsIndex = new AtomicInteger(0); + activePipelines = new LinkedList<>(); + pipelineIndex = new AtomicInteger(0); } /** @@ -59,9 +58,9 @@ public synchronized final Pipeline getPipeline( /** * In the Ozone world, we have a very simple policy. * - * 1. Try to create a pipelineChannel if there are enough free nodes. + * 1. Try to create a pipeline if there are enough free nodes. * - * 2. This allows all nodes to part of a pipelineChannel quickly. + * 2. This allows all nodes to part of a pipeline quickly. * * 3. if there are not enough free nodes, return conduits in a * round-robin fashion. 
@@ -70,28 +69,28 @@ public synchronized final Pipeline getPipeline( * Create a new placement policy that returns conduits in round robin * fashion. */ - PipelineChannel pipelineChannel = - allocatePipelineChannel(replicationFactor); - if (pipelineChannel != null) { - LOG.debug("created new pipelineChannel:{} for container with " + + Pipeline pipeline = + allocatePipeline(replicationFactor); + if (pipeline != null) { + LOG.debug("created new pipeline:{} for container with " + "replicationType:{} replicationFactor:{}", - pipelineChannel.getName(), replicationType, replicationFactor); - activePipelineChannels.add(pipelineChannel); + pipeline.getPipelineName(), replicationType, replicationFactor); + activePipelines.add(pipeline); } else { - pipelineChannel = - findOpenPipelineChannel(replicationType, replicationFactor); - if (pipelineChannel != null) { - LOG.debug("re-used pipelineChannel:{} for container with " + + pipeline = + findOpenPipeline(replicationType, replicationFactor); + if (pipeline != null) { + LOG.debug("re-used pipeline:{} for container with " + "replicationType:{} replicationFactor:{}", - pipelineChannel.getName(), replicationType, replicationFactor); + pipeline.getPipelineName(), replicationType, replicationFactor); } } - if (pipelineChannel == null) { - LOG.error("Get pipelineChannel call failed. We are not able to find" + - "free nodes or operational pipelineChannel."); + if (pipeline == null) { + LOG.error("Get pipeline call failed. We are not able to find" + + "free nodes or operational pipeline."); return null; } else { - return new Pipeline(pipelineChannel); + return pipeline; } } @@ -106,19 +105,19 @@ protected int getReplicationCount(ReplicationFactor factor) { } } - public abstract PipelineChannel allocatePipelineChannel( + public abstract Pipeline allocatePipeline( ReplicationFactor replicationFactor) throws IOException; /** - * Find a PipelineChannel that is operational. + * Find a Pipeline that is operational. * * @return - Pipeline or null */ - private PipelineChannel findOpenPipelineChannel( + private Pipeline findOpenPipeline( ReplicationType type, ReplicationFactor factor) { - PipelineChannel pipelineChannel = null; + Pipeline pipeline = null; final int sentinal = -1; - if (activePipelineChannels.size() == 0) { + if (activePipelines.size() == 0) { LOG.error("No Operational conduits found. Returning null."); return null; } @@ -126,26 +125,26 @@ private PipelineChannel findOpenPipelineChannel( int nextIndex = sentinal; for (; startIndex != nextIndex; nextIndex = getNextIndex()) { // Just walk the list in a circular way. - PipelineChannel temp = - activePipelineChannels + Pipeline temp = + activePipelines .get(nextIndex != sentinal ? nextIndex : startIndex); - // if we find an operational pipelineChannel just return that. + // if we find an operational pipeline just return that. if ((temp.getLifeCycleState() == LifeCycleState.OPEN) && (temp.getFactor() == factor) && (temp.getType() == type)) { - pipelineChannel = temp; + pipeline = temp; break; } } - return pipelineChannel; + return pipeline; } /** - * gets the next index of the PipelineChannel to get. + * gets the next index of the Pipeline to get. * * @return index in the link list to get. 
*/ private int getNextIndex() { - return conduitsIndex.incrementAndGet() % activePipelineChannels.size(); + return pipelineIndex.incrementAndGet() % activePipelines.size(); } /** diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java index 2e56043c6b..508ca9bd3b 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java @@ -20,7 +20,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; -import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineChannel; import org.apache.hadoop.hdds.scm.container.placement.algorithms .ContainerPlacementPolicy; import org.apache.hadoop.hdds.scm.container.placement.algorithms @@ -85,20 +84,20 @@ public PipelineSelector(NodeManager nodeManager, Configuration conf) { * The first of the list will be the leader node. * @return pipeline corresponding to nodes */ - public static PipelineChannel newPipelineFromNodes( + public static Pipeline newPipelineFromNodes( List nodes, LifeCycleState state, ReplicationType replicationType, ReplicationFactor replicationFactor, String name) { Preconditions.checkNotNull(nodes); Preconditions.checkArgument(nodes.size() > 0); String leaderId = nodes.get(0).getUuidString(); - PipelineChannel - pipelineChannel = new PipelineChannel(leaderId, state, replicationType, + Pipeline + pipeline = new Pipeline(leaderId, state, replicationType, replicationFactor, name); for (DatanodeDetails node : nodes) { - pipelineChannel.addMember(node); + pipeline.addMember(node); } - return pipelineChannel; + return pipeline; } /** diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/RatisManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/RatisManagerImpl.java index 70489b9253..ace8758234 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/RatisManagerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/RatisManagerImpl.java @@ -20,7 +20,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.scm.XceiverClientRatis; import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; -import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineChannel; import org.apache.hadoop.hdds.scm.container.placement.algorithms .ContainerPlacementPolicy; import org.apache.hadoop.hdds.scm.node.NodeManager; @@ -68,12 +67,12 @@ public RatisManagerImpl(NodeManager nodeManager, } /** - * Allocates a new ratis PipelineChannel from the free nodes. + * Allocates a new ratis Pipeline from the free nodes. * * @param factor - One or Three * @return PipelineChannel. 
*/ - public PipelineChannel allocatePipelineChannel(ReplicationFactor factor) { + public Pipeline allocatePipeline(ReplicationFactor factor) { List newNodesList = new LinkedList<>(); List datanodes = nodeManager.getNodes(NodeState.HEALTHY); int count = getReplicationCount(factor); @@ -87,22 +86,20 @@ public PipelineChannel allocatePipelineChannel(ReplicationFactor factor) { // once a datanode has been added to a pipeline, exclude it from // further allocations ratisMembers.addAll(newNodesList); - LOG.info("Allocating a new ratis pipelineChannel of size: {}", count); + LOG.info("Allocating a new ratis pipeline of size: {}", count); // Start all channel names with "Ratis", easy to grep the logs. String conduitName = PREFIX + UUID.randomUUID().toString().substring(PREFIX.length()); - PipelineChannel pipelineChannel = + Pipeline pipeline= PipelineSelector.newPipelineFromNodes(newNodesList, LifeCycleState.OPEN, ReplicationType.RATIS, factor, conduitName); - Pipeline pipeline = - new Pipeline(pipelineChannel); try (XceiverClientRatis client = XceiverClientRatis.newXceiverClientRatis(pipeline, conf)) { client.createPipeline(pipeline.getPipelineName(), newNodesList); } catch (IOException e) { return null; } - return pipelineChannel; + return pipeline; } } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/standalone/StandaloneManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/standalone/StandaloneManagerImpl.java index 8268329351..e76027fb2b 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/standalone/StandaloneManagerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/standalone/StandaloneManagerImpl.java @@ -17,7 +17,7 @@ package org.apache.hadoop.hdds.scm.pipelines.standalone; import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineChannel; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; import org.apache.hadoop.hdds.scm.container.placement.algorithms .ContainerPlacementPolicy; import org.apache.hadoop.hdds.scm.node.NodeManager; @@ -67,12 +67,12 @@ public StandaloneManagerImpl(NodeManager nodeManager, /** - * Allocates a new standalone PipelineChannel from the free nodes. + * Allocates a new standalone Pipeline from the free nodes. * * @param factor - One - * @return PipelineChannel. + * @return Pipeline. 
*/ - public PipelineChannel allocatePipelineChannel(ReplicationFactor factor) { + public Pipeline allocatePipeline(ReplicationFactor factor) { List newNodesList = new LinkedList<>(); List datanodes = nodeManager.getNodes(NodeState.HEALTHY); int count = getReplicationCount(factor); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java index adb212a409..d06d568ae0 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java @@ -23,7 +23,6 @@ import org.apache.hadoop.hdds.scm.container.Mapping; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; -import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineChannel; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; @@ -357,11 +356,10 @@ public void testDeletedBlockTransactions() throws IOException { private void mockContainerInfo(Mapping mappingService, long containerID, DatanodeDetails dd) throws IOException { - PipelineChannel pipelineChannel = - new PipelineChannel("fake", LifeCycleState.OPEN, + Pipeline pipeline = + new Pipeline("fake", LifeCycleState.OPEN, ReplicationType.STAND_ALONE, ReplicationFactor.ONE, "fake"); - pipelineChannel.addMember(dd); - Pipeline pipeline = new Pipeline(pipelineChannel); + pipeline.addMember(dd); ContainerInfo.Builder builder = new ContainerInfo.Builder(); builder.setPipeline(pipeline); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java index 0254984d23..50cdd54858 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java @@ -28,7 +28,6 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.TestUtils; import org.apache.hadoop.hdds.scm.XceiverClient; -import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineChannel; import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.test.TestGenericTestUtils; @@ -92,13 +91,12 @@ public void testStartMultipleDatanodes() throws Exception { for(HddsDatanodeService dn : datanodes) { // Create a single member pipe line DatanodeDetails datanodeDetails = dn.getDatanodeDetails(); - final PipelineChannel pipelineChannel = - new PipelineChannel(datanodeDetails.getUuidString(), + final Pipeline pipeline = + new Pipeline(datanodeDetails.getUuidString(), HddsProtos.LifeCycleState.OPEN, HddsProtos.ReplicationType.STAND_ALONE, HddsProtos.ReplicationFactor.ONE, "test"); - pipelineChannel.addMember(datanodeDetails); - Pipeline pipeline = new Pipeline(pipelineChannel); + pipeline.addMember(datanodeDetails); // Verify client is able to connect to the container try (XceiverClient client = new XceiverClient(pipeline, conf)){ diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java index 7046132f6f..459da2ebf8 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java @@ -37,7 +37,6 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; import org.apache.hadoop.ozone.container.common.helpers.KeyData; -import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineChannel; import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Time; @@ -136,14 +135,14 @@ public static Pipeline createPipeline( Preconditions.checkArgument(i.hasNext()); final DatanodeDetails leader = i.next(); String pipelineName = "TEST-" + UUID.randomUUID().toString().substring(3); - final PipelineChannel pipelineChannel = - new PipelineChannel(leader.getUuidString(), LifeCycleState.OPEN, + final Pipeline pipeline = + new Pipeline(leader.getUuidString(), LifeCycleState.OPEN, ReplicationType.STAND_ALONE, ReplicationFactor.ONE, pipelineName); - pipelineChannel.addMember(leader); + pipeline.addMember(leader); for(; i.hasNext();) { - pipelineChannel.addMember(i.next()); + pipeline.addMember(i.next()); } - return new Pipeline(pipelineChannel); + return pipeline; } /** @@ -207,8 +206,6 @@ public static ContainerCommandRequestProto getWriteChunkRequest( ContainerProtos.WriteChunkRequestProto .newBuilder(); - Pipeline newPipeline = - new Pipeline(pipeline.getPipelineChannel()); writeRequest.setBlockID(blockID.getDatanodeBlockIDProtobuf()); byte[] data = getData(datalen); @@ -223,7 +220,7 @@ public static ContainerCommandRequestProto getWriteChunkRequest( request.setCmdType(ContainerProtos.Type.WriteChunk); request.setWriteChunk(writeRequest); request.setTraceID(UUID.randomUUID().toString()); - request.setDatanodeUuid(newPipeline.getLeader().getUuidString()); + request.setDatanodeUuid(pipeline.getLeader().getUuidString()); return request.build(); } @@ -241,8 +238,6 @@ public static ContainerCommandRequestProto getWriteSmallFileRequest( throws Exception { ContainerProtos.PutSmallFileRequestProto.Builder smallFileRequest = ContainerProtos.PutSmallFileRequestProto.newBuilder(); - Pipeline newPipeline = - new Pipeline(pipeline.getPipelineChannel()); byte[] data = getData(dataLen); ChunkInfo info = getChunk(blockID.getLocalID(), 0, 0, dataLen); setDataChecksum(info, data); @@ -266,7 +261,7 @@ public static ContainerCommandRequestProto getWriteSmallFileRequest( request.setCmdType(ContainerProtos.Type.PutSmallFile); request.setPutSmallFile(smallFileRequest); request.setTraceID(UUID.randomUUID().toString()); - request.setDatanodeUuid(newPipeline.getLeader().getUuidString()); + request.setDatanodeUuid(pipeline.getLeader().getUuidString()); return request.build(); } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkContainerStateMap.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkContainerStateMap.java index 1b1153b18a..375450ca09 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkContainerStateMap.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkContainerStateMap.java @@ -24,7 +24,6 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails; import 
org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; -import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineChannel; import org.apache.hadoop.hdds.scm.container.states.ContainerStateMap; import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.util.Time; @@ -150,14 +149,14 @@ public static Pipeline createPipeline(String containerName, Preconditions.checkArgument(i.hasNext()); final DatanodeDetails leader = i.next(); String pipelineName = "TEST-" + UUID.randomUUID().toString().substring(5); - final PipelineChannel pipelineChannel = - new PipelineChannel(leader.getUuidString(), OPEN, + final Pipeline pipeline = + new Pipeline(leader.getUuidString(), OPEN, ReplicationType.STAND_ALONE, ReplicationFactor.ONE, pipelineName); - pipelineChannel.addMember(leader); + pipeline.addMember(leader); for (; i.hasNext();) { - pipelineChannel.addMember(i.next()); + pipeline.addMember(i.next()); } - return new Pipeline(pipelineChannel); + return pipeline; } @Benchmark diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java index 13b04c31ee..3d4426f82b 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.ozone.genesis; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; import org.apache.ratis.shaded.com.google.protobuf.ByteString; import org.apache.commons.codec.digest.DigestUtils; import org.apache.commons.io.FileUtils; @@ -32,7 +33,6 @@ import org.apache.hadoop.ozone.container.common.impl.KeyManagerImpl; import org.apache.hadoop.ozone.container.common.interfaces.ContainerManager; -import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineChannel; import org.apache.hadoop.util.Time; import org.openjdk.jmh.annotations.Benchmark; import org.openjdk.jmh.annotations.Level; @@ -78,7 +78,7 @@ public class BenchMarkDatanodeDispatcher { private String baseDir; private String datanodeUuid; private Dispatcher dispatcher; - private PipelineChannel pipelineChannel; + private Pipeline pipeline; private ByteString data; private Random random; private AtomicInteger containerCount; @@ -96,7 +96,7 @@ public class BenchMarkDatanodeDispatcher { @Setup(Level.Trial) public void initialize() throws IOException { datanodeUuid = UUID.randomUUID().toString(); - pipelineChannel = new PipelineChannel("127.0.0.1", + pipeline = new Pipeline("127.0.0.1", LifeCycleState.OPEN, ReplicationType.STAND_ALONE, ReplicationFactor.ONE, "SA-" + UUID.randomUUID()); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java index d4ac994cff..2bd43fb93a 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java @@ -519,11 +519,11 @@ private void insertContainerDB(Connection conn, long containerID, LOG.info("Insert to sql container db, for container {}", containerID); String insertContainerInfo = String.format( INSERT_CONTAINER_INFO, containerID, - pipeline.getPipelineChannel().getLeaderID()); + pipeline.getLeaderID()); 
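Callers such as SQLCLI above now read the leader and the member list straight off the Pipeline instead of going through getPipelineChannel(). A sketch using the accessors this patch provides; the class name is a placeholder.

import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;

public class PipelineMembersSketch {
  /** Print the leader id and the host name of every pipeline member. */
  public static void printMembers(Pipeline pipeline) {
    System.out.println("leader=" + pipeline.getLeaderID());
    for (DatanodeDetails dn : pipeline.getMachines()) {
      System.out.println("member=" + dn.getHostName());
    }
  }
}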
executeSQL(conn, insertContainerInfo); for (HddsProtos.DatanodeDetailsProto dd : - pipeline.getPipelineChannel().getMembersList()) { + pipeline.getMembersList()) { String uuid = dd.getUuid(); if (!uuidChecked.contains(uuid)) { // we may also not use this checked set, but catch exception instead From fba9d7cd746cd7b659d2fd9d2bfa23266be9009b Mon Sep 17 00:00:00 2001 From: Akira Ajisaka Date: Mon, 18 Jun 2018 10:17:07 -0700 Subject: [PATCH 19/70] HDFS-13621. Upgrade commons-lang version to 3.7 in hadoop-hdfs-project. Contributed by Takanobu Asanuma. --- .../src/main/java/org/apache/hadoop/fs/XAttr.java | 4 ++-- .../java/org/apache/hadoop/hdfs/ExtendedBlockId.java | 4 ++-- .../hadoop/hdfs/client/impl/BlockReaderFactory.java | 2 +- .../protocol/AddErasureCodingPolicyResponse.java | 4 ++-- .../hadoop/hdfs/protocol/CacheDirectiveInfo.java | 4 ++-- .../apache/hadoop/hdfs/protocol/CachePoolInfo.java | 4 ++-- .../apache/hadoop/hdfs/protocol/EncryptionZone.java | 4 ++-- .../hadoop/hdfs/protocol/ErasureCodingPolicy.java | 4 ++-- .../hdfs/protocol/ErasureCodingPolicyInfo.java | 4 ++-- .../apache/hadoop/hdfs/protocol/ExtendedBlock.java | 2 +- .../hdfs/shortcircuit/DfsClientShmManager.java | 2 +- .../hadoop/hdfs/shortcircuit/ShortCircuitCache.java | 2 +- .../hadoop/hdfs/shortcircuit/ShortCircuitShm.java | 4 ++-- .../namenode/ha/TestRequestHedgingProxyProvider.java | 2 +- hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml | 5 ----- .../server/federation/router/ConnectionPoolId.java | 2 +- .../federation/router/RemoteLocationContext.java | 2 +- .../store/driver/impl/StateStoreFileImpl.java | 2 +- .../server/federation/store/records/MountTable.java | 2 +- hadoop-hdfs-project/hadoop-hdfs/pom.xml | 5 ----- .../apache/hadoop/hdfs/protocol/CacheDirective.java | 2 +- .../hdfs/qjournal/server/GetJournalEditServlet.java | 4 ++-- .../apache/hadoop/hdfs/qjournal/server/Journal.java | 12 ++++++------ .../hdfs/server/datanode/DirectoryScanner.java | 2 +- .../datanode/fsdataset/impl/FsDatasetCache.java | 2 +- .../hdfs/server/diskbalancer/command/Command.java | 4 ++-- .../server/diskbalancer/command/PlanCommand.java | 4 ++-- .../server/diskbalancer/command/ReportCommand.java | 4 ++-- .../hdfs/server/namenode/EncryptionZoneManager.java | 4 ++-- .../hdfs/server/namenode/FSDirErasureCodingOp.java | 2 +- .../hadoop/hdfs/server/namenode/FSNamesystem.java | 6 +++--- .../hdfs/server/namenode/INodeAttributeProvider.java | 2 +- .../hdfs/server/namenode/startupprogress/Step.java | 6 +++--- .../hdfs/server/namenode/top/metrics/TopMetrics.java | 2 +- .../org/apache/hadoop/hdfs/tools/CacheAdmin.java | 2 +- .../org/apache/hadoop/hdfs/util/EnumCounters.java | 2 +- .../hadoop/fs/TestEnhancedByteBufferAccess.java | 4 ++-- .../java/org/apache/hadoop/fs/TestGlobPaths.java | 2 +- .../fs/TestWebHdfsFileContextMainOperations.java | 2 +- .../java/org/apache/hadoop/hdfs/DFSTestUtil.java | 4 ++-- .../java/org/apache/hadoop/hdfs/TestDFSShell.java | 2 +- .../org/apache/hadoop/hdfs/TestDecommission.java | 2 +- .../apache/hadoop/hdfs/TestFsShellPermission.java | 2 +- .../apache/hadoop/hdfs/TestHDFSPolicyProvider.java | 2 +- .../apache/hadoop/hdfs/TestLeaseRecoveryStriped.java | 2 +- .../datatransfer/sasl/SaslDataTransferTestCase.java | 2 +- .../hadoop/hdfs/server/balancer/TestBalancer.java | 2 +- .../hdfs/server/datanode/SimulatedFSDataset.java | 2 +- .../command/TestDiskBalancerCommand.java | 2 +- .../hadoop/hdfs/server/namenode/FSImageTestUtil.java | 2 +- .../hadoop/hdfs/server/namenode/NameNodeAdapter.java | 2 +- 
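Most of the HDFS-13621 patch that follows is a straight package rename from org.apache.commons.lang to org.apache.commons.lang3; the builder APIs keep the same shape. A minimal sketch of the post-upgrade equals/hashCode idiom is below; the BlockKey class and its fields are illustrative only and do not appear in the patch.

    import org.apache.commons.lang3.builder.EqualsBuilder;
    import org.apache.commons.lang3.builder.HashCodeBuilder;

    /** Illustrative only: the lang3 builder idiom used throughout this patch. */
    final class BlockKey {
      private final long blockId;
      private final String poolId;

      BlockKey(long blockId, String poolId) {
        this.blockId = blockId;
        this.poolId = poolId;
      }

      @Override
      public boolean equals(Object o) {
        if (o == null || getClass() != o.getClass()) {
          return false;
        }
        BlockKey other = (BlockKey) o;
        // Same builder API as commons-lang 2.x, only the package changes.
        return new EqualsBuilder()
            .append(blockId, other.blockId)
            .append(poolId, other.poolId)
            .isEquals();
      }

      @Override
      public int hashCode() {
        return new HashCodeBuilder(17, 37)
            .append(blockId)
            .append(poolId)
            .toHashCode();
      }
    }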
.../hadoop/hdfs/server/namenode/TestAuditLogger.java | 2 +- .../hdfs/server/namenode/TestCacheDirectives.java | 2 +- .../server/namenode/TestEditLogJournalFailures.java | 2 +- .../hdfs/shortcircuit/TestShortCircuitCache.java | 2 +- .../org/apache/hadoop/hdfs/tools/TestDFSAdmin.java | 2 +- .../java/org/apache/hadoop/tracing/TestTracing.java | 2 +- 57 files changed, 80 insertions(+), 90 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/XAttr.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/XAttr.java index ad7b0569f4..de9bbdab7f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/XAttr.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/XAttr.java @@ -19,8 +19,8 @@ import java.util.Arrays; -import org.apache.commons.lang.builder.EqualsBuilder; -import org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.commons.lang3.builder.EqualsBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.hadoop.classification.InterfaceAudience; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ExtendedBlockId.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ExtendedBlockId.java index fe39df6305..5dfcc736b9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ExtendedBlockId.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ExtendedBlockId.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hdfs; -import org.apache.commons.lang.builder.EqualsBuilder; -import org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.commons.lang3.builder.EqualsBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java index e83c8ae92b..a8c73a4220 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java @@ -31,7 +31,7 @@ import com.google.common.io.ByteArrayDataOutput; import com.google.common.io.ByteStreams; -import org.apache.commons.lang.mutable.MutableBoolean; +import org.apache.commons.lang3.mutable.MutableBoolean; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.StorageType; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/AddErasureCodingPolicyResponse.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/AddErasureCodingPolicyResponse.java index dc77a47a94..f873b84c8b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/AddErasureCodingPolicyResponse.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/AddErasureCodingPolicyResponse.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hdfs.protocol; -import org.apache.commons.lang.builder.EqualsBuilder; -import 
org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.commons.lang3.builder.EqualsBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.hadoop.HadoopIllegalArgumentException; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveInfo.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveInfo.java index d8a7de2b7b..e80f12aa0e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveInfo.java @@ -19,8 +19,8 @@ import java.util.Date; -import org.apache.commons.lang.builder.EqualsBuilder; -import org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.commons.lang3.builder.EqualsBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.Path; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java index daa77be118..6c9f27796a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java @@ -22,8 +22,8 @@ import javax.annotation.Nullable; -import org.apache.commons.lang.builder.EqualsBuilder; -import org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.commons.lang3.builder.EqualsBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.InvalidRequestException; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZone.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZone.java index f1441b5727..0b851caff8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZone.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZone.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hdfs.protocol; -import org.apache.commons.lang.builder.EqualsBuilder; -import org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.commons.lang3.builder.EqualsBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.crypto.CipherSuite; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java index 39489b479c..3559ab97d8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java @@ -18,8 +18,8 @@ package 
org.apache.hadoop.hdfs.protocol; import com.google.common.base.Preconditions; -import org.apache.commons.lang.builder.EqualsBuilder; -import org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.commons.lang3.builder.EqualsBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.io.erasurecode.ECSchema; import org.apache.hadoop.io.erasurecode.ErasureCodeConstants; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicyInfo.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicyInfo.java index a5b95cb217..c8a2722621 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicyInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicyInfo.java @@ -18,8 +18,8 @@ package org.apache.hadoop.hdfs.protocol; import com.google.common.base.Preconditions; -import org.apache.commons.lang.builder.EqualsBuilder; -import org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.commons.lang3.builder.EqualsBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.hadoop.classification.InterfaceAudience; import java.io.Serializable; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java index 7939662ee3..8413c84df9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.protocol; -import org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java index 6f8a8fa8f2..2262003112 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java @@ -29,7 +29,7 @@ import java.util.concurrent.locks.Condition; import java.util.concurrent.locks.ReentrantLock; -import org.apache.commons.lang.mutable.MutableBoolean; +import org.apache.commons.lang3.mutable.MutableBoolean; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.ExtendedBlockId; import org.apache.hadoop.hdfs.net.DomainPeer; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java index c2f0350bc3..9c2d2e0ecb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java @@ -34,7 +34,7 @@ import java.util.concurrent.locks.ReentrantLock; import org.apache.commons.collections.map.LinkedMap; -import org.apache.commons.lang.mutable.MutableBoolean; +import org.apache.commons.lang3.mutable.MutableBoolean; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.ExtendedBlockId; import org.apache.hadoop.hdfs.client.impl.DfsClientConf.ShortCircuitConf; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java index fb0e06f4ac..b9fcadae52 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java @@ -25,8 +25,8 @@ import java.util.NoSuchElementException; import java.util.Random; -import org.apache.commons.lang.builder.EqualsBuilder; -import org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.commons.lang3.builder.EqualsBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.hadoop.fs.InvalidRequestException; import org.apache.hadoop.hdfs.ExtendedBlockId; import org.apache.hadoop.io.nativeio.NativeIO; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java index 8913f1a5ea..5c33ef6f6b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java @@ -28,7 +28,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; -import org.apache.commons.lang.RandomStringUtils; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; import org.apache.hadoop.hdfs.protocol.ClientProtocol; diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml index e9525e21b5..42d2c008a6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml @@ -123,11 +123,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> commons-io compile - - commons-lang - commons-lang - compile - commons-logging commons-logging diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPoolId.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPoolId.java index 458fec203f..868476a826 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPoolId.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPoolId.java @@ -24,7 +24,7 @@ import java.util.List; import com.google.common.annotations.VisibleForTesting; -import 
org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.security.UserGroupInformation; diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RemoteLocationContext.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RemoteLocationContext.java index 0959eaa34a..cf78be3190 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RemoteLocationContext.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RemoteLocationContext.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.federation.router; -import org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; /** * Base class for objects that are unique to a namespace. diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileImpl.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileImpl.java index 6b288b3555..60dbcdc10e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileImpl.java @@ -30,7 +30,7 @@ import java.util.Collections; import java.util.List; -import org.apache.commons.lang.ArrayUtils; +import org.apache.commons.lang3.ArrayUtils; import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys; import org.apache.hadoop.hdfs.server.federation.store.records.BaseRecord; import org.slf4j.Logger; diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MountTable.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MountTable.java index 005882ebdf..49cdf10364 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MountTable.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MountTable.java @@ -26,7 +26,7 @@ import java.util.SortedMap; import java.util.TreeMap; -import org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.protocol.HdfsConstants; diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml index eaf9361e9f..fcd5ae1940 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml @@ -113,11 +113,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> commons-io compile - - commons-lang - commons-lang - compile - commons-logging commons-logging diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirective.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirective.java index 89cf641a02..f8987a367b 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirective.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirective.java @@ -21,7 +21,7 @@ import java.util.Date; -import org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DFSUtil; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/GetJournalEditServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/GetJournalEditServlet.java index e96fd4da60..64ac11ca23 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/GetJournalEditServlet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/GetJournalEditServlet.java @@ -31,7 +31,7 @@ import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; -import org.apache.commons.lang.StringEscapeUtils; +import org.apache.commons.lang3.StringEscapeUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; @@ -157,7 +157,7 @@ private boolean checkStorageInfoOrSendError(JNStorage storage, int myNsId = storage.getNamespaceID(); String myClusterId = storage.getClusterID(); - String theirStorageInfoString = StringEscapeUtils.escapeHtml( + String theirStorageInfoString = StringEscapeUtils.escapeHtml4( request.getParameter(STORAGEINFO_PARAM)); if (theirStorageInfoString != null) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java index 452664a947..8f25d260b6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java @@ -31,7 +31,7 @@ import java.util.List; import java.util.concurrent.TimeUnit; -import org.apache.commons.lang.math.LongRange; +import org.apache.commons.lang3.Range; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -842,8 +842,8 @@ public synchronized void acceptRecovery(RequestInfo reqInfo, // Paranoid sanity check: if the new log is shorter than the log we // currently have, we should not end up discarding any transactions // which are already Committed. - if (txnRange(currentSegment).containsLong(committedTxnId.get()) && - !txnRange(segment).containsLong(committedTxnId.get())) { + if (txnRange(currentSegment).contains(committedTxnId.get()) && + !txnRange(segment).contains(committedTxnId.get())) { throw new AssertionError( "Cannot replace segment " + TextFormat.shortDebugString(currentSegment) + @@ -862,7 +862,7 @@ public synchronized void acceptRecovery(RequestInfo reqInfo, // If we're shortening the log, update our highest txid // used for lag metrics. 
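Unlike the pure package renames, the GetJournalEditServlet and Journal hunks also change API names: StringEscapeUtils.escapeHtml becomes escapeHtml4, and LongRange is replaced by the generic Range. A minimal sketch of both call patterns, assuming only the commons-lang3 3.7 API (the values and class name are illustrative):

    import org.apache.commons.lang3.Range;
    import org.apache.commons.lang3.StringEscapeUtils;

    /** Illustrative only: the two commons-lang3 API renames used in this patch. */
    final class Lang3MigrationSketch {
      public static void main(String[] args) {
        // lang:  StringEscapeUtils.escapeHtml(s)
        // lang3: StringEscapeUtils.escapeHtml4(s)
        String escaped = StringEscapeUtils.escapeHtml4("<storageInfo>");

        // lang:  new LongRange(lo, hi).containsLong(x)
        // lang3: Range.between(lo, hi).contains(x)
        Range<Long> txnRange = Range.between(100L, 200L);
        boolean committed = txnRange.contains(150L);

        System.out.println(escaped + " " + committed);
      }
    }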
- if (txnRange(currentSegment).containsLong(highestWrittenTxId)) { + if (txnRange(currentSegment).contains(highestWrittenTxId)) { updateHighestWrittenTxId(segment.getEndTxId()); } } @@ -906,10 +906,10 @@ public synchronized void acceptRecovery(RequestInfo reqInfo, TextFormat.shortDebugString(newData) + " ; journal id: " + journalId); } - private LongRange txnRange(SegmentStateProto seg) { + private Range txnRange(SegmentStateProto seg) { Preconditions.checkArgument(seg.hasEndTxId(), "invalid segment: %s ; journal id: %s", seg, journalId); - return new LongRange(seg.getStartTxId(), seg.getEndTxId()); + return Range.between(seg.getStartTxId(), seg.getEndTxId()); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java index 89f7c5d5da..39665e3e95 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java @@ -38,7 +38,7 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; -import org.apache.commons.lang.time.FastDateFormat; +import org.apache.commons.lang3.time.FastDateFormat; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java index f70d4afe29..767b150e1f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java @@ -43,7 +43,7 @@ import java.util.concurrent.atomic.AtomicLong; import org.apache.commons.io.IOUtils; -import org.apache.commons.lang.time.DurationFormatUtils; +import org.apache.commons.lang3.time.DurationFormatUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.ChecksumException; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java index 8eacdecf7b..968a5a77f0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java @@ -26,8 +26,8 @@ import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.Option; -import org.apache.commons.lang.StringUtils; -import org.apache.commons.lang.text.StrBuilder; +import org.apache.commons.lang3.StringUtils; +import org.apache.commons.lang3.text.StrBuilder; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.CommonConfigurationKeys; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java index b765885e0f..90cc0c4800 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java @@ -22,8 +22,8 @@ import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.HelpFormatter; -import org.apache.commons.lang.StringUtils; -import org.apache.commons.lang.text.StrBuilder; +import org.apache.commons.lang3.StringUtils; +import org.apache.commons.lang3.text.StrBuilder; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ReportCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ReportCommand.java index 58ef5ce51a..5f4e0f716f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ReportCommand.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ReportCommand.java @@ -24,8 +24,8 @@ import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.HelpFormatter; -import org.apache.commons.lang.StringUtils; -import org.apache.commons.lang.text.StrBuilder; +import org.apache.commons.lang3.StringUtils; +import org.apache.commons.lang3.text.StrBuilder; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException; import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java index d06cd1cdef..5604a218d0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java @@ -34,8 +34,8 @@ import com.google.common.collect.Lists; import com.google.common.util.concurrent.ThreadFactoryBuilder; import com.google.protobuf.InvalidProtocolBufferException; -import org.apache.commons.lang.builder.EqualsBuilder; -import org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.commons.lang3.builder.EqualsBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.crypto.CipherSuite; import org.apache.hadoop.crypto.CryptoProtocolVersion; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java index 7160b861f7..769c13757b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java @@ -19,7 +19,7 @@ import com.google.common.base.Preconditions; import com.google.common.collect.Lists; -import org.apache.commons.lang.StringUtils; 
+import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.XAttr; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index a8c1926051..f94f6d072b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import static org.apache.commons.lang.StringEscapeUtils.escapeJava; +import static org.apache.commons.lang3.StringEscapeUtils.escapeJava; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_DEFAULT; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_CALLER_CONTEXT_ENABLED_DEFAULT; @@ -1827,7 +1827,7 @@ public BatchedListEntries getFilesBlockingDecom(long prevId, INodeFile inodeFile = ucFile.asFile(); String fullPathName = inodeFile.getFullPathName(); - if (org.apache.commons.lang.StringUtils.isEmpty(path) + if (org.apache.commons.lang3.StringUtils.isEmpty(path) || fullPathName.startsWith(path)) { openFileEntries.add(new OpenFileEntry(inodeFile.getId(), inodeFile.getFullPathName(), @@ -2383,7 +2383,7 @@ private HdfsFileStatus startFileInt(String src, boolean shouldReplicate = flag.contains(CreateFlag.SHOULD_REPLICATE); if (shouldReplicate && - (!org.apache.commons.lang.StringUtils.isEmpty(ecPolicyName))) { + (!org.apache.commons.lang3.StringUtils.isEmpty(ecPolicyName))) { throw new HadoopIllegalArgumentException("SHOULD_REPLICATE flag and " + "ecPolicyName are exclusive parameters. 
Set both is not allowed!"); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributeProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributeProvider.java index 2f9bc370da..8392463d94 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributeProvider.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributeProvider.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.Path; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/Step.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/Step.java index 9b23e09e49..0baf99d994 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/Step.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/Step.java @@ -18,9 +18,9 @@ import java.util.concurrent.atomic.AtomicInteger; -import org.apache.commons.lang.builder.CompareToBuilder; -import org.apache.commons.lang.builder.EqualsBuilder; -import org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.commons.lang3.builder.CompareToBuilder; +import org.apache.commons.lang3.builder.EqualsBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.hadoop.classification.InterfaceAudience; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java index 2719c8857e..4d61d0f95b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs.server.namenode.top.metrics; import com.google.common.collect.Lists; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java index d8cbfc6b2e..9781ea14dc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java @@ -22,7 +22,7 @@ import java.util.LinkedList; import java.util.List; -import org.apache.commons.lang.WordUtils; +import org.apache.commons.lang3.text.WordUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java index bec44a99e9..280a2d775c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs.util; import com.google.common.base.Preconditions; -import org.apache.commons.lang.ArrayUtils; +import org.apache.commons.lang3.ArrayUtils; import java.util.Arrays; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java index 9cd46c191d..417d31ba52 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java @@ -35,8 +35,8 @@ import java.util.concurrent.TimeoutException; import org.apache.commons.collections.map.LinkedMap; -import org.apache.commons.lang.SystemUtils; -import org.apache.commons.lang.mutable.MutableBoolean; +import org.apache.commons.lang3.SystemUtils; +import org.apache.commons.lang3.mutable.MutableBoolean; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hdfs.client.impl.BlockReaderTestUtil; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java index 32d960ad6f..7027f3bc6d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java @@ -26,7 +26,7 @@ import java.util.regex.Pattern; import com.google.common.collect.Ordering; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.HdfsConfiguration; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestWebHdfsFileContextMainOperations.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestWebHdfsFileContextMainOperations.java index 72fc6e6274..7544835c7a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestWebHdfsFileContextMainOperations.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestWebHdfsFileContextMainOperations.java @@ -18,7 +18,7 @@ package org.apache.hadoop.fs; -import org.apache.commons.lang.RandomStringUtils; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java index 63199f31dd..e6a2a00252 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java @@ -81,7 +81,6 @@ import com.google.common.collect.Maps; import org.apache.commons.io.FileUtils; -import 
org.apache.commons.lang.UnhandledException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -2278,7 +2277,8 @@ public Boolean get() { ", current value = " + currentValue); return currentValue == expectedValue; } catch (Exception e) { - throw new UnhandledException("Test failed due to unexpected exception", e); + throw new RuntimeException( + "Test failed due to unexpected exception", e); } } }, 1000, 60000); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java index c352dc99a2..b19bdeab57 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java @@ -37,7 +37,7 @@ import com.google.common.base.Supplier; import com.google.common.collect.Lists; -import org.apache.commons.lang.RandomStringUtils; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.log4j.Level; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java index c0a595bcb7..42b4257d71 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java @@ -38,7 +38,7 @@ import com.google.common.base.Supplier; import com.google.common.collect.Lists; -import org.apache.commons.lang.text.StrBuilder; +import org.apache.commons.lang3.text.StrBuilder; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.FSDataOutputStream; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFsShellPermission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFsShellPermission.java index cc456b244f..7aa9f2362d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFsShellPermission.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFsShellPermission.java @@ -28,7 +28,7 @@ import java.util.ArrayList; import java.util.Arrays; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystemTestHelper; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSPolicyProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSPolicyProvider.java index e2426907ca..3463f57379 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSPolicyProvider.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSPolicyProvider.java @@ -28,7 +28,7 @@ import java.util.Set; import com.google.common.collect.Sets; -import org.apache.commons.lang.ClassUtils; +import org.apache.commons.lang3.ClassUtils; import org.apache.hadoop.hdfs.qjournal.server.JournalNodeRpcServer; import org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer; import org.apache.hadoop.hdfs.server.datanode.DataNode; diff 
--git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java index 0b6bc6adfd..c87a6d17e8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java @@ -19,7 +19,7 @@ import com.google.common.base.Preconditions; import com.google.common.base.Supplier; -import org.apache.commons.lang.builder.ToStringBuilder; +import org.apache.commons.lang3.builder.ToStringBuilder; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferTestCase.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferTestCase.java index d03d095399..63ce45b72d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferTestCase.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferTestCase.java @@ -35,7 +35,7 @@ import java.io.File; import java.util.Properties; -import org.apache.commons.lang.RandomStringUtils; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.http.HttpConfig; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java index fa026f0499..35ebe781ff 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java @@ -73,7 +73,7 @@ import java.util.Set; import java.util.concurrent.TimeoutException; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.HadoopIllegalArgumentException; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java index 987ba97d64..eb9461f746 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java @@ -38,7 +38,7 @@ import javax.management.StandardMBean; import com.google.common.math.LongMath; -import org.apache.commons.lang.ArrayUtils; +import org.apache.commons.lang3.ArrayUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.DF; import org.apache.hadoop.fs.StorageType; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java index 
dee2a905c8..f2e998e20e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java @@ -43,7 +43,7 @@ import java.util.List; import java.util.Scanner; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java index 7be645f36e..f990c5eb6c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java @@ -42,7 +42,7 @@ import java.util.Properties; import java.util.Set; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java index 3b601d5b4c..b85527a948 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java @@ -24,7 +24,7 @@ import java.io.IOException; import java.util.concurrent.locks.ReentrantReadWriteLock; -import org.apache.commons.lang.reflect.FieldUtils; +import org.apache.commons.lang3.reflect.FieldUtils; import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.PermissionStatus; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java index 5b4f1f491b..76cc9063da 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java @@ -19,7 +19,7 @@ package org.apache.hadoop.hdfs.server.namenode; import com.google.common.collect.Lists; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java index c58e090333..551670e1d7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java @@ -41,7 +41,7 @@ import 
java.util.LinkedList; import java.util.List; -import org.apache.commons.lang.time.DateUtils; +import org.apache.commons.lang3.time.DateUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogJournalFailures.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogJournalFailures.java index 28169bbf16..1e8ee9c555 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogJournalFailures.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogJournalFailures.java @@ -29,7 +29,7 @@ import java.util.ArrayList; import java.util.Collection; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java index 5da6a25055..4e2cedef56 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java @@ -36,7 +36,7 @@ import net.jcip.annotations.NotThreadSafe; import org.apache.commons.collections.map.LinkedMap; -import org.apache.commons.lang.mutable.MutableBoolean; +import org.apache.commons.lang3.mutable.MutableBoolean; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java index 647327cc33..12452473d0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java @@ -27,7 +27,7 @@ import com.google.common.collect.Lists; import org.apache.commons.io.FileUtils; -import org.apache.commons.lang.text.StrBuilder; +import org.apache.commons.lang3.text.StrBuilder; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java index bdad46a4e6..04c85a12a9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java @@ -21,7 +21,7 @@ import java.nio.ByteBuffer; import java.util.List; import java.util.Map; -import org.apache.commons.lang.RandomStringUtils; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.FSDataInputStream; From 2c87ec5affefeb1dc794c4eaae685a4e544f1841 Mon Sep 17 00:00:00 2001 From: Eric Yang 
Date: Mon, 18 Jun 2018 14:28:22 -0400 Subject: [PATCH 20/70] HADOOP-15527. Improve delay check for stopping processes. Contributed by Vinod Kumar Vavilapalli --- .../test/scripts/process_with_sigterm_trap.sh | 24 +++++++++++++++++++ 1 file changed, 24 insertions(+) create mode 100644 hadoop-common/src/test/scripts/process_with_sigterm_trap.sh diff --git a/hadoop-common/src/test/scripts/process_with_sigterm_trap.sh b/hadoop-common/src/test/scripts/process_with_sigterm_trap.sh new file mode 100644 index 0000000000..d7c7427b70 --- /dev/null +++ b/hadoop-common/src/test/scripts/process_with_sigterm_trap.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +trap "echo SIGTERM trapped!" SIGTERM +trap "echo SIGINT trapped!" SIGINT + +echo "$$" > "$1" + +while true; do + sleep 1.3 +done From 1e94e5977f9075af2f74e30a3b8e52f7ded67863 Mon Sep 17 00:00:00 2001 From: Vidura Mudalige Date: Thu, 14 Jun 2018 21:23:01 +0530 Subject: [PATCH 21/70] MAPREDUCE-7063. Fix log level inconsistency in CombineFileInputFormat.java Signed-off-by: Akira Ajisaka --- .../hadoop/mapreduce/lib/input/CombineFileInputFormat.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/CombineFileInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/CombineFileInputFormat.java index c7a737cf87..b16e127292 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/CombineFileInputFormat.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/CombineFileInputFormat.java @@ -425,8 +425,8 @@ void createSplits(Map> nodeToBlocks, if (completedNodes.size() == totalNodes || totalLength == 0) { // All nodes have been walked over and marked as completed or all blocks // have been assigned. The rest should be handled via rackLock assignment. - LOG.info("DEBUG: Terminated node allocation with : CompletedNodes: " - + completedNodes.size() + ", size left: " + totalLength); + LOG.debug("Terminated node allocation with : CompletedNodes: {}, size left: {}", + completedNodes.size(), totalLength); break; } } From f386e78a4bc74e7c247b179c7d4ec27310fda4d3 Mon Sep 17 00:00:00 2001 From: Dedunu Dhananjaya Date: Mon, 23 Apr 2018 09:34:57 +0000 Subject: [PATCH 22/70] YARN-7668. 
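The CombineFileInputFormat change above replaces an info-level message prefixed with "DEBUG:" by a genuine debug-level, parameterized log call. A minimal sketch of that idiom follows, assuming an SLF4J logger as used in the patched class; the class and method names here are illustrative only.

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    /** Illustrative only: parameterized debug logging instead of concatenation. */
    final class LoggingSketch {
      private static final Logger LOG = LoggerFactory.getLogger(LoggingSketch.class);

      static void reportAllocation(int completedNodes, long totalLength) {
        // Placeholders are rendered only when debug logging is enabled,
        // so no message string is built when the level is higher.
        LOG.debug("Terminated node allocation with : CompletedNodes: {}, size left: {}",
            completedNodes, totalLength);
      }
    }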
Remove unused variables from ContainerLocalizer This closes #364 Signed-off-by: Akira Ajisaka --- .../containermanager/localizer/ContainerLocalizer.java | 2 -- 1 file changed, 2 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java index 639a69d318..6a384aeff4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java @@ -93,9 +93,7 @@ public class ContainerLocalizer { public static final String FILECACHE = "filecache"; public static final String APPCACHE = "appcache"; public static final String USERCACHE = "usercache"; - public static final String OUTPUTDIR = "output"; public static final String TOKEN_FILE_NAME_FMT = "%s.tokens"; - public static final String WORKDIR = "work"; private static final String APPCACHE_CTXT_FMT = "%s.app.cache.dirs"; private static final String USERCACHE_CTXT_FMT = "%s.user.cache.dirs"; private static final FsPermission FILECACHE_PERMS = From 2d87592fc6a56bfe77dd3c11953caea2b701c846 Mon Sep 17 00:00:00 2001 From: Eric Yang Date: Tue, 19 Jun 2018 13:38:13 -0400 Subject: [PATCH 23/70] HADOOP-15527. Improve delay check for stopping processes. Fixed script location. Contributed by Vinod Kumar Vavilapalli --- .../hadoop-common}/src/test/scripts/process_with_sigterm_trap.sh | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename {hadoop-common => hadoop-common-project/hadoop-common}/src/test/scripts/process_with_sigterm_trap.sh (100%) diff --git a/hadoop-common/src/test/scripts/process_with_sigterm_trap.sh b/hadoop-common-project/hadoop-common/src/test/scripts/process_with_sigterm_trap.sh similarity index 100% rename from hadoop-common/src/test/scripts/process_with_sigterm_trap.sh rename to hadoop-common-project/hadoop-common/src/test/scripts/process_with_sigterm_trap.sh From 4939ffedb151ce1550fcdd7ac04c79d8d0195891 Mon Sep 17 00:00:00 2001 From: Haibo Chen Date: Wed, 20 Jun 2018 10:35:52 -0700 Subject: [PATCH 24/70] YARN-8437. Build oom-listener fails on older versions. 
(Miklos Szegedi via Haibo Chen) --- .../src/CMakeLists.txt | 4 +-- .../test/oom_listener_test_main.cc | 33 ++++++++++--------- 2 files changed, 19 insertions(+), 18 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt index a614f80482..300bb65c32 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt @@ -188,5 +188,5 @@ add_executable(test-oom-listener main/native/oom-listener/impl/oom_listener.h main/native/oom-listener/test/oom_listener_test_main.cc ) -target_link_libraries(test-oom-listener gtest) -output_directory(test-oom-listener test) \ No newline at end of file +target_link_libraries(test-oom-listener gtest rt) +output_directory(test-oom-listener test) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/oom-listener/test/oom_listener_test_main.cc b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/oom-listener/test/oom_listener_test_main.cc index 9627632a96..421c21e067 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/oom-listener/test/oom_listener_test_main.cc +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/oom-listener/test/oom_listener_test_main.cc @@ -20,6 +20,7 @@ extern "C" { #include "oom_listener.h" +#include } #include @@ -49,10 +50,10 @@ int main(int argc, char **argv) { class OOMListenerTest : public ::testing::Test { private: - char cgroup[PATH_MAX] = {}; - const char* cgroup_root = nullptr; + char cgroup[PATH_MAX]; + const char* cgroup_root; public: - OOMListenerTest() = default; + OOMListenerTest() : cgroup_root(NULL) {} virtual ~OOMListenerTest() = default; virtual const char* GetCGroup() { return cgroup; } @@ -99,7 +100,7 @@ public: if (cgroup[0] != '\0') { rmdir(cgroup); } - if (cgroup_root != nullptr && + if (cgroup_root != NULL && cgroup_root != cgroup_candidates[0]) { rmdir(cgroup_root); } @@ -184,7 +185,7 @@ TEST_F(OOMListenerTest, test_oom) { std::cout << "Consuming too much memory" << std::endl; for (;;) { auto buffer = (char *) malloc(bufferSize); - if (buffer != nullptr) { + if (buffer != NULL) { for (int i = 0; i < bufferSize; ++i) { buffer[i] = (char) std::rand(); } @@ -213,15 +214,15 @@ TEST_F(OOMListenerTest, test_oom) { if (listener == 0) { // child listener forwarding cgroup events _oom_listener_descriptors descriptors = { - .command = "test", - .event_fd = mock_oom_event_as_user, - .event_control_fd = -1, - .oom_control_fd = -1, - .event_control_path = {0}, - .oom_control_path = {0}, - .oom_command = {0}, - .oom_command_len = 0, - .watch_timeout = 100 + "test", + mock_oom_event_as_user, + -1, + -1, + {0}, + {0}, + {0}, + 0, + 100 }; int ret = oom_listener(&descriptors, GetCGroup(), test_pipe[1]); cleanup(&descriptors); @@ -256,7 +257,7 @@ TEST_F(OOMListenerTest, test_oom) { __pid_t exited0 = wait(mem_hog_status); ASSERT_EQ(mem_hog_pid, exited0) << "Wrong process exited"; - ASSERT_EQ(nullptr, mem_hog_status) + ASSERT_EQ(NULL, mem_hog_status) << "Test process killed with invalid status"; if (mock_oom_event_as_user != -1) { @@ -275,7 +276,7 @@ TEST_F(OOMListenerTest, test_oom) { __pid_t exited1 = 
wait(oom_listener_status); ASSERT_EQ(listener, exited1) << "Wrong process exited"; - ASSERT_EQ(nullptr, oom_listener_status) + ASSERT_EQ(NULL, oom_listener_status) << "Listener process exited with invalid status"; } } From 9a9e969570f23b627f9571819f388916d8fd7ec9 Mon Sep 17 00:00:00 2001 From: Miklos Szegedi Date: Wed, 20 Jun 2018 10:29:12 -0700 Subject: [PATCH 25/70] YARN-8391. Investigate AllocationFileLoaderService.reloadListener locking issue. Contributed by Szilard Nemeth. --- .../scheduler/fair/AllocationFileLoaderService.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java index 56cc8873f6..3300948ce7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java @@ -114,7 +114,9 @@ public void serviceInit(Configuration conf) throws Exception { reloadThread = new Thread(() -> { while (running) { try { - reloadListener.onCheck(); + synchronized (this) { + reloadListener.onCheck(); + } long time = clock.getTime(); long lastModified = fs.getFileStatus(allocFile).getModificationTime(); From 55432b09810b59ee361d0d4a8958efabb49fab3c Mon Sep 17 00:00:00 2001 From: Miklos Szegedi Date: Wed, 20 Jun 2018 11:36:12 -0700 Subject: [PATCH 26/70] YARN-8440. Typo in YarnConfiguration javadoc: "Miniumum request grant-able..". Contributed by Szilard Nemeth. --- .../java/org/apache/hadoop/yarn/conf/YarnConfiguration.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index 5292a25053..5842d64357 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -240,7 +240,7 @@ private static void addDeprecatedKeys() { public static final String DEFAULT_RM_SCHEDULER_ADDRESS = "0.0.0.0:" + DEFAULT_RM_SCHEDULER_PORT; - /** Miniumum request grant-able by the RM scheduler. */ + /** Minimum request grant-able by the RM scheduler. */ public static final String RM_SCHEDULER_MINIMUM_ALLOCATION_MB = YARN_PREFIX + "scheduler.minimum-allocation-mb"; public static final int DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB = 1024; From bbbc7cc426f71ad0fe4174efcd25e5ac3f62b501 Mon Sep 17 00:00:00 2001 From: Miklos Szegedi Date: Wed, 20 Jun 2018 11:40:56 -0700 Subject: [PATCH 27/70] YARN-7449. Split up class TestYarnClient to TestYarnClient and TestYarnClientImpl. Contributed by Szilard Nemeth. 
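The hunks below move the YarnClientImpl-specific tests into the new TestYarnClientImpl. Those tests share one Mockito pattern: spy a YarnClientImpl subclass whose package-private createTimelineClient() returns a stub instead of a real timeline client. A minimal sketch of that pattern, assuming Hadoop 3.x and Mockito on the test classpath; the helper class and method names here are invented for illustration and are not part of the patch:

package org.apache.hadoop.yarn.client.api.impl;

import static org.mockito.Mockito.spy;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.client.api.TimelineClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

/** Illustrative test helper; lives in the impl package to reach the package-private hooks. */
final class TimelineStubbingSketch {

  private TimelineStubbingSketch() {
  }

  /** Build a spied client whose timeline client is the supplied stub. */
  static YarnClientImpl clientWithStubbedTimeline(final TimelineClient stub) {
    YarnClientImpl client = spy(new YarnClientImpl() {
      @Override
      TimelineClient createTimelineClient() {
        // Hand the stub to the client at the exact point where the real
        // factory method would build a TimelineClient, so no test ever
        // reaches a live timeline service.
        timelineClient = stub;
        return timelineClient;
      }
    });
    Configuration conf = new Configuration();
    conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
    client.init(conf);
    return client;
  }
}

A test would then drive clientWithStubbedTimeline(mock(TimelineClient.class)) through start()/stop() or getTimelineDelegationToken(), which is essentially what testBestEffortTimelineDelegationToken and testAutomaticTimelineDelegationTokenLoading do after the move.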
--- .../yarn/client/api/impl/TestYarnClient.java | 335 ++++-------------- .../client/api/impl/TestYarnClientImpl.java | 254 +++++++++++++ 2 files changed, 323 insertions(+), 266 deletions(-) create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClientImpl.java diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java index 70ff47b746..17e43cacda 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java @@ -18,41 +18,9 @@ package org.apache.hadoop.yarn.client.api.impl; -import static org.mockito.Matchers.any; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import java.io.IOException; -import java.lang.Thread.State; -import java.nio.ByteBuffer; -import java.security.PrivilegedExceptionAction; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.EnumSet; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Set; - -import org.apache.commons.io.IOUtils; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.io.DataInputByteBuffer; -import org.apache.hadoop.io.DataOutputBuffer; -import org.apache.hadoop.io.Text; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; -import org.apache.hadoop.security.Credentials; -import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; -import org.apache.hadoop.security.token.Token; -import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.yarn.api.ApplicationClientProtocol; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest; @@ -74,7 +42,6 @@ import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest; import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse; import org.apache.hadoop.yarn.api.protocolrecords.SignalContainerRequest; -import org.apache.hadoop.yarn.api.records.ApplicationAccessType; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -92,7 +59,6 @@ import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState; import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.client.api.AHSClient; -import org.apache.hadoop.yarn.client.api.TimelineClient; import org.apache.hadoop.yarn.client.api.YarnClient; import org.apache.hadoop.yarn.client.api.YarnClientApplication; import org.apache.hadoop.yarn.conf.YarnConfiguration; @@ -100,7 +66,6 @@ import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException; import 
org.apache.hadoop.yarn.exceptions.ContainerNotFoundException; import org.apache.hadoop.yarn.exceptions.YarnException; -import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier; import org.apache.hadoop.yarn.server.MiniYARNCluster; import org.apache.hadoop.yarn.server.resourcemanager.MockRM; import org.apache.hadoop.yarn.server.resourcemanager.ParameterizedSchedulerTestBase; @@ -115,8 +80,28 @@ import org.mockito.ArgumentCaptor; import org.slf4j.event.Level; +import java.io.IOException; +import java.lang.Thread.State; +import java.security.PrivilegedExceptionAction; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + /** - * This class is to test class {@link YarnClient) and {@link YarnClientImpl}. + * This class is to test class {@link YarnClient). */ public class TestYarnClient extends ParameterizedSchedulerTestBase { @@ -146,17 +131,6 @@ public void testClientStop() { rm.stop(); } - @Test - public void testStartWithTimelineV15() throws Exception { - Configuration conf = getConf(); - conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true); - conf.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 1.5f); - YarnClientImpl client = (YarnClientImpl) YarnClient.createYarnClient(); - client.init(conf); - client.start(); - client.stop(); - } - @Test public void testStartTimelineClientWithErrors() throws Exception { @@ -413,7 +387,7 @@ public void testApplicationType() throws Exception { RMApp app = rm.submitApp(2000); RMApp app1 = rm.submitApp(200, "name", "user", - new HashMap(), false, "default", -1, + new HashMap<>(), false, "default", -1, null, "MAPREDUCE"); Assert.assertEquals("YARN", app.getApplicationType()); Assert.assertEquals("MAPREDUCE", app1.getApplicationType()); @@ -427,7 +401,7 @@ public void testApplicationTypeLimit() throws Exception { rm.start(); RMApp app1 = rm.submitApp(200, "name", "user", - new HashMap(), false, "default", -1, + new HashMap<>(), false, "default", -1, null, "MAPREDUCE-LENGTH-IS-20"); Assert.assertEquals("MAPREDUCE-LENGTH-IS-", app1.getApplicationType()); rm.stop(); @@ -444,7 +418,7 @@ public void testGetApplications() throws YarnException, IOException { List reports = client.getApplications(); Assert.assertEquals(reports, expectedReports); - Set appTypes = new HashSet(); + Set appTypes = new HashSet<>(); appTypes.add("YARN"); appTypes.add("NON-YARN"); @@ -601,7 +575,7 @@ public void testGetLabelsToNodes() throws YarnException, IOException { Assert.assertEquals(labelsToNodes.size(), 3); // Get labels to nodes for selected labels - Set setLabels = new HashSet(Arrays.asList("x", "z")); + Set setLabels = new HashSet<>(Arrays.asList("x", "z")); expectedLabelsToNodes = ((MockYarnClient)client).getLabelsToNodesMap(setLabels); labelsToNodes = client.getLabelsToNodes(setLabels); @@ -633,12 +607,12 @@ private static class MockYarnClient extends YarnClientImpl { private ApplicationReport mockReport; private List reports; - private HashMap> attempts = - new HashMap>(); - private HashMap> containers = - new HashMap>(); + private HashMap> attempts = + new HashMap<>(); + private HashMap> containers = + new 
HashMap<>(); private HashMap> containersFromAHS = - new HashMap>(); + new HashMap<>(); GetApplicationsResponse mockAppResponse = mock(GetApplicationsResponse.class); @@ -739,9 +713,9 @@ private List createAppReports() { "user", "queue", "appname", "host", 124, null, YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0, 0, FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN", null); - List applicationReports = new ArrayList(); + List applicationReports = new ArrayList<>(); applicationReports.add(newApplicationReport); - List appAttempts = new ArrayList(); + List appAttempts = new ArrayList<>(); ApplicationAttemptReport attempt = ApplicationAttemptReport.newInstance( ApplicationAttemptId.newInstance(applicationId, 1), "host", @@ -767,7 +741,7 @@ private List createAppReports() { appAttempts.add(attempt1); attempts.put(applicationId, appAttempts); - List containerReports = new ArrayList(); + List containerReports = new ArrayList<>(); ContainerReport container = ContainerReport.newInstance( ContainerId.newContainerId(attempt.getApplicationAttemptId(), 1), null, NodeId.newInstance("host", 1234), Priority.UNDEFINED, 1234, 5678, @@ -785,7 +759,7 @@ private List createAppReports() { //add containers to be sent from AHS List containerReportsForAHS = - new ArrayList(); + new ArrayList<>(); container = ContainerReport.newInstance( ContainerId.newContainerId(attempt.getApplicationAttemptId(), 1), null, @@ -843,7 +817,7 @@ private List getApplicationReports( List applicationReports, Set applicationTypes, EnumSet applicationStates) { - List appReports = new ArrayList(); + List appReports = new ArrayList<>(); for (ApplicationReport appReport : applicationReports) { if (applicationTypes != null && !applicationTypes.isEmpty()) { if (!applicationTypes.contains(appReport.getApplicationType())) { @@ -878,9 +852,9 @@ public Map> getLabelsToNodes(Set labels) } public Map> getLabelsToNodesMap() { - Map> map = new HashMap>(); + Map> map = new HashMap<>(); Set setNodeIds = - new HashSet(Arrays.asList( + new HashSet<>(Arrays.asList( NodeId.newInstance("host1", 0), NodeId.newInstance("host2", 0))); map.put("x", setNodeIds); map.put("y", setNodeIds); @@ -889,8 +863,8 @@ public Map> getLabelsToNodesMap() { } public Map> getLabelsToNodesMap(Set labels) { - Map> map = new HashMap>(); - Set setNodeIds = new HashSet(Arrays.asList( + Map> map = new HashMap<>(); + Set setNodeIds = new HashSet<>(Arrays.asList( NodeId.newInstance("host1", 0), NodeId.newInstance("host2", 0))); for (String label : labels) { map.put(label, setNodeIds); @@ -907,8 +881,8 @@ public Map> getNodeToLabels() throws YarnException, } public Map> getNodeToLabelsMap() { - Map> map = new HashMap>(); - Set setNodeLabels = new HashSet(Arrays.asList("x", "y")); + Map> map = new HashMap<>(); + Set setNodeLabels = new HashSet<>(Arrays.asList("x", "y")); map.put(NodeId.newInstance("host", 0), setNodeLabels); return map; } @@ -985,7 +959,7 @@ public List getContainersReport( private ContainerReport getContainer( ContainerId containerId, HashMap> containersToAppAttemptMapping) - throws YarnException, IOException { + throws YarnException { List containersForAppAttempt = containersToAppAttemptMapping.get(containerId .getApplicationAttemptId()); @@ -1119,174 +1093,6 @@ private void waitTillAccepted(YarnClient rmClient, ApplicationId appId, Assert.assertEquals(unmanagedApplication, report.isUnmanagedApp()); } - @Test - public void testAsyncAPIPollTimeout() { - testAsyncAPIPollTimeoutHelper(null, false); - testAsyncAPIPollTimeoutHelper(0L, true); - 
testAsyncAPIPollTimeoutHelper(1L, true); - } - - private void testAsyncAPIPollTimeoutHelper(Long valueForTimeout, - boolean expectedTimeoutEnforcement) { - YarnClientImpl client = new YarnClientImpl(); - try { - Configuration conf = getConf(); - if (valueForTimeout != null) { - conf.setLong( - YarnConfiguration.YARN_CLIENT_APPLICATION_CLIENT_PROTOCOL_POLL_TIMEOUT_MS, - valueForTimeout); - } - - client.init(conf); - - Assert.assertEquals( - expectedTimeoutEnforcement, client.enforceAsyncAPITimeout()); - } finally { - IOUtils.closeQuietly(client); - } - } - - @Test - public void testBestEffortTimelineDelegationToken() - throws Exception { - Configuration conf = getConf(); - conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true); - SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, conf); - - YarnClientImpl client = spy(new YarnClientImpl() { - - @Override - TimelineClient createTimelineClient() throws IOException, YarnException { - timelineClient = mock(TimelineClient.class); - when(timelineClient.getDelegationToken(any(String.class))) - .thenThrow(new RuntimeException("Best effort test exception")); - return timelineClient; - } - }); - - client.init(conf); - conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_CLIENT_BEST_EFFORT, - true); - client.serviceInit(conf); - client.getTimelineDelegationToken(); - - try { - conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_CLIENT_BEST_EFFORT, false); - client.serviceInit(conf); - client.getTimelineDelegationToken(); - Assert.fail("Get delegation token should have thrown an exception"); - } catch (IOException e) { - // Success - } - } - - @Test - public void testAutomaticTimelineDelegationTokenLoading() - throws Exception { - Configuration conf = getConf(); - conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true); - SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, conf); - TimelineDelegationTokenIdentifier timelineDT = - new TimelineDelegationTokenIdentifier(); - final Token dToken = - new Token( - timelineDT.getBytes(), new byte[0], timelineDT.getKind(), new Text()); - // create a mock client - YarnClientImpl client = spy(new YarnClientImpl() { - - @Override - TimelineClient createTimelineClient() throws IOException, YarnException { - timelineClient = mock(TimelineClient.class); - when(timelineClient.getDelegationToken(any(String.class))) - .thenReturn(dToken); - return timelineClient; - } - - - @Override - protected void serviceStart() throws Exception { - rmClient = mock(ApplicationClientProtocol.class); - } - - @Override - protected void serviceStop() throws Exception { - } - - @Override - public ApplicationReport getApplicationReport(ApplicationId appId) { - ApplicationReport report = mock(ApplicationReport.class); - when(report.getYarnApplicationState()) - .thenReturn(YarnApplicationState.RUNNING); - return report; - } - - @Override - public boolean isSecurityEnabled() { - return true; - } - }); - client.init(conf); - client.start(); - try { - // when i == 0, timeline DT already exists, no need to get one more - // when i == 1, timeline DT doesn't exist, need to get one more - for (int i = 0; i < 2; ++i) { - ApplicationSubmissionContext context = - mock(ApplicationSubmissionContext.class); - ApplicationId applicationId = ApplicationId.newInstance(0, i + 1); - when(context.getApplicationId()).thenReturn(applicationId); - DataOutputBuffer dob = new DataOutputBuffer(); - Credentials credentials = new Credentials(); - if (i == 0) { - credentials.addToken(client.timelineService, dToken); - } 
- credentials.writeTokenStorageToStream(dob); - ByteBuffer tokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength()); - ContainerLaunchContext clc = ContainerLaunchContext.newInstance( - null, null, null, null, tokens, null); - when(context.getAMContainerSpec()).thenReturn(clc); - client.submitApplication(context); - if (i == 0) { - // GetTimelineDelegationToken shouldn't be called - verify(client, never()).getTimelineDelegationToken(); - } - // In either way, token should be there - credentials = new Credentials(); - DataInputByteBuffer dibb = new DataInputByteBuffer(); - tokens = clc.getTokens(); - if (tokens != null) { - dibb.reset(tokens); - credentials.readTokenStorageStream(dibb); - tokens.rewind(); - } - Collection> dTokens = - credentials.getAllTokens(); - Assert.assertEquals(1, dTokens.size()); - Assert.assertEquals(dToken, dTokens.iterator().next()); - } - } finally { - client.stop(); - } - } - - @Test - public void testParseTimelineDelegationTokenRenewer() throws Exception { - // Client side - YarnClientImpl client = (YarnClientImpl) YarnClient.createYarnClient(); - Configuration conf = getConf(); - conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true); - conf.set(YarnConfiguration.RM_PRINCIPAL, "rm/_HOST@EXAMPLE.COM"); - conf.set( - YarnConfiguration.RM_ADDRESS, "localhost:8188"); - try { - client.init(conf); - client.start(); - Assert.assertEquals("rm/localhost@EXAMPLE.COM", client.timelineDTRenewer); - } finally { - client.stop(); - } - } - @Test(timeout = 30000, expected = ApplicationNotFoundException.class) public void testShouldNotRetryForeverForNonNetworkExceptions() throws Exception { YarnConfiguration conf = getConf(); @@ -1353,38 +1159,35 @@ private void testCreateTimelineClientWithError( timelineClientBestEffort); conf.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, timelineVersion); - YarnClient client = new MockYarnClient(); - if (client instanceof YarnClientImpl) { - YarnClientImpl impl = (YarnClientImpl) client; - YarnClientImpl spyClient = spy(impl); - when(spyClient.createTimelineClient()).thenThrow(mockErr); - CreateTimelineClientErrorVerifier verifier = spy(errVerifier); - spyClient.init(conf); - spyClient.start(); + MockYarnClient client = new MockYarnClient(); + MockYarnClient spyClient = spy(client); + when(spyClient.createTimelineClient()).thenThrow(mockErr); + CreateTimelineClientErrorVerifier verifier = spy(errVerifier); + spyClient.init(conf); + spyClient.start(); - ApplicationSubmissionContext context = - mock(ApplicationSubmissionContext.class); - ContainerLaunchContext containerContext = - mock(ContainerLaunchContext.class); - ApplicationId applicationId = - ApplicationId.newInstance(System.currentTimeMillis(), 1); - when(containerContext.getTokens()).thenReturn(null); - when(context.getApplicationId()).thenReturn(applicationId); - when(spyClient.isSecurityEnabled()).thenReturn(true); - when(context.getAMContainerSpec()).thenReturn(containerContext); + ApplicationSubmissionContext context = + mock(ApplicationSubmissionContext.class); + ContainerLaunchContext containerContext = + mock(ContainerLaunchContext.class); + ApplicationId applicationId = + ApplicationId.newInstance(System.currentTimeMillis(), 1); + when(containerContext.getTokens()).thenReturn(null); + when(context.getApplicationId()).thenReturn(applicationId); + when(spyClient.isSecurityEnabled()).thenReturn(true); + when(context.getAMContainerSpec()).thenReturn(containerContext); - try { - spyClient.submitApplication(context); - } catch (Throwable e) { - 
verifier.verifyError(e); - } finally { - // Make sure the verifier runs with expected times - // This is required because in case throwable is swallowed - // and verifyError never gets the chance to run - verify(verifier, times(verifier.getExpectedTimes())) - .verifyError(any(Throwable.class)); - spyClient.stop(); - } + try { + spyClient.submitApplication(context); + } catch (Throwable e) { + verifier.verifyError(e); + } finally { + // Make sure the verifier runs with expected times + // This is required because in case throwable is swallowed + // and verifyError never gets the chance to run + verify(verifier, times(verifier.getExpectedTimes())) + .verifyError(any(Throwable.class)); + spyClient.stop(); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClientImpl.java new file mode 100644 index 0000000000..dd0aa5c1a0 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClientImpl.java @@ -0,0 +1,254 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.client.api.impl; + +import org.apache.commons.io.IOUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.io.DataInputByteBuffer; +import org.apache.hadoop.io.DataOutputBuffer; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; +import org.apache.hadoop.security.Credentials; +import org.apache.hadoop.security.SecurityUtil; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.security.token.TokenIdentifier; +import org.apache.hadoop.yarn.api.ApplicationClientProtocol; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ApplicationReport; +import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; +import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; +import org.apache.hadoop.yarn.api.records.YarnApplicationState; +import org.apache.hadoop.yarn.client.api.TimelineClient; +import org.apache.hadoop.yarn.client.api.YarnClient; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier; +import org.apache.hadoop.yarn.server.resourcemanager + .ParameterizedSchedulerTestBase; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.Collection; + +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +/** + * This class is to test class {@link YarnClientImpl ). 
+ */ +public class TestYarnClientImpl extends ParameterizedSchedulerTestBase { + + public TestYarnClientImpl(SchedulerType type) throws IOException { + super(type); + } + + @Before + public void setup() { + QueueMetrics.clearQueueMetrics(); + DefaultMetricsSystem.setMiniClusterMode(true); + } + + @Test + public void testStartWithTimelineV15() { + Configuration conf = getConf(); + conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true); + conf.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 1.5f); + YarnClientImpl client = (YarnClientImpl) YarnClient.createYarnClient(); + client.init(conf); + client.start(); + client.stop(); + } + + @Test + public void testAsyncAPIPollTimeout() { + testAsyncAPIPollTimeoutHelper(null, false); + testAsyncAPIPollTimeoutHelper(0L, true); + testAsyncAPIPollTimeoutHelper(1L, true); + } + + private void testAsyncAPIPollTimeoutHelper(Long valueForTimeout, + boolean expectedTimeoutEnforcement) { + YarnClientImpl client = new YarnClientImpl(); + try { + Configuration conf = getConf(); + if (valueForTimeout != null) { + conf.setLong( + YarnConfiguration.YARN_CLIENT_APPLICATION_CLIENT_PROTOCOL_POLL_TIMEOUT_MS, + valueForTimeout); + } + + client.init(conf); + + Assert.assertEquals( + expectedTimeoutEnforcement, client.enforceAsyncAPITimeout()); + } finally { + IOUtils.closeQuietly(client); + } + } + + @Test + public void testBestEffortTimelineDelegationToken() + throws Exception { + Configuration conf = getConf(); + conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true); + SecurityUtil.setAuthenticationMethod(UserGroupInformation.AuthenticationMethod.KERBEROS, conf); + + YarnClientImpl client = spy(new YarnClientImpl() { + + @Override + TimelineClient createTimelineClient() throws IOException, YarnException { + timelineClient = mock(TimelineClient.class); + when(timelineClient.getDelegationToken(any(String.class))) + .thenThrow(new RuntimeException("Best effort test exception")); + return timelineClient; + } + }); + + client.init(conf); + conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_CLIENT_BEST_EFFORT, + true); + client.serviceInit(conf); + client.getTimelineDelegationToken(); + + try { + conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_CLIENT_BEST_EFFORT, false); + client.serviceInit(conf); + client.getTimelineDelegationToken(); + Assert.fail("Get delegation token should have thrown an exception"); + } catch (IOException e) { + // Success + } + } + + @Test + public void testAutomaticTimelineDelegationTokenLoading() + throws Exception { + Configuration conf = getConf(); + conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true); + SecurityUtil.setAuthenticationMethod(UserGroupInformation.AuthenticationMethod.KERBEROS, conf); + TimelineDelegationTokenIdentifier timelineDT = + new TimelineDelegationTokenIdentifier(); + final Token dToken = + new Token<>( + timelineDT.getBytes(), new byte[0], timelineDT.getKind(), new Text()); + // create a mock client + YarnClientImpl client = spy(new YarnClientImpl() { + + @Override + TimelineClient createTimelineClient() throws IOException, YarnException { + timelineClient = mock(TimelineClient.class); + when(timelineClient.getDelegationToken(any(String.class))) + .thenReturn(dToken); + return timelineClient; + } + + + @Override + protected void serviceStart() { + rmClient = mock(ApplicationClientProtocol.class); + } + + @Override + protected void serviceStop() { + } + + @Override + public ApplicationReport getApplicationReport(ApplicationId appId) { + ApplicationReport report = 
mock(ApplicationReport.class); + when(report.getYarnApplicationState()) + .thenReturn(YarnApplicationState.RUNNING); + return report; + } + + @Override + public boolean isSecurityEnabled() { + return true; + } + }); + client.init(conf); + client.start(); + try { + // when i == 0, timeline DT already exists, no need to get one more + // when i == 1, timeline DT doesn't exist, need to get one more + for (int i = 0; i < 2; ++i) { + ApplicationSubmissionContext context = + mock(ApplicationSubmissionContext.class); + ApplicationId applicationId = ApplicationId.newInstance(0, i + 1); + when(context.getApplicationId()).thenReturn(applicationId); + DataOutputBuffer dob = new DataOutputBuffer(); + Credentials credentials = new Credentials(); + if (i == 0) { + credentials.addToken(client.timelineService, dToken); + } + credentials.writeTokenStorageToStream(dob); + ByteBuffer tokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength()); + ContainerLaunchContext clc = ContainerLaunchContext.newInstance( + null, null, null, null, tokens, null); + when(context.getAMContainerSpec()).thenReturn(clc); + client.submitApplication(context); + if (i == 0) { + // GetTimelineDelegationToken shouldn't be called + verify(client, never()).getTimelineDelegationToken(); + } + // In either way, token should be there + credentials = new Credentials(); + DataInputByteBuffer dibb = new DataInputByteBuffer(); + tokens = clc.getTokens(); + if (tokens != null) { + dibb.reset(tokens); + credentials.readTokenStorageStream(dibb); + tokens.rewind(); + } + Collection> dTokens = + credentials.getAllTokens(); + Assert.assertEquals(1, dTokens.size()); + Assert.assertEquals(dToken, dTokens.iterator().next()); + } + } finally { + client.stop(); + } + } + + @Test + public void testParseTimelineDelegationTokenRenewer() { + // Client side + YarnClientImpl client = (YarnClientImpl) YarnClient.createYarnClient(); + Configuration conf = getConf(); + conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true); + conf.set(YarnConfiguration.RM_PRINCIPAL, "rm/_HOST@EXAMPLE.COM"); + conf.set( + YarnConfiguration.RM_ADDRESS, "localhost:8188"); + try { + client.init(conf); + client.start(); + Assert.assertEquals("rm/localhost@EXAMPLE.COM", client.timelineDTRenewer); + } finally { + client.stop(); + } + } +} From 388fafa004dc405a4e10f4487cff7c5a714af32f Mon Sep 17 00:00:00 2001 From: Miklos Szegedi Date: Wed, 20 Jun 2018 11:55:43 -0700 Subject: [PATCH 28/70] YARN-8442. Strange characters and missing spaces in FairScheduler documentation. Contributed by Szilard Nemeth. --- .../hadoop-yarn-site/src/site/markdown/FairScheduler.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md index e253d0db06..269f5b40f4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md @@ -148,7 +148,7 @@ The allocation file must be in XML format. The format contains five types of ele * **secondaryGroupExistingQueue**: the app is placed into a queue with a name that matches a secondary group of the user who submitted it. The first secondary group that matches a configured queue will be selected. Periods in group names will be replaced with "\_dot\_", i.e. 
a user with "one.two" as one of their secondary groups would be placed into the "one\_dot\_two" queue, if such a queue exists. - * **nestedUserQueue**: the app is placed into a queue with the name of the user under the queue suggested by the nested rule. This is similar to ‘user’ rule,the difference being in 'nestedUserQueue' rule,user queues can be created under any parent queue, while 'user' rule creates user queues only under root queue. Note that nestedUserQueue rule would be applied only if the nested rule returns a parent queue.One can configure a parent queue either by setting 'type' attribute of queue to 'parent' or by configuring at least one leaf under that queue which makes it a parent. See example allocation for a sample use case. + * **nestedUserQueue**: the app is placed into a queue with the name of the user under the queue suggested by the nested rule. This is similar to the 'user' rule, the difference being in 'nestedUserQueue' rule, user queues can be created under any parent queue, while 'user' rule creates user queues only under root queue. Note that nestedUserQueue rule would be applied only if the nested rule returns a parent queue. One can configure a parent queue either by setting 'type' attribute of queue to 'parent' or by configuring at least one leaf under that queue which makes it a parent. See example allocation for a sample use case. * **default**: the app is placed into the queue specified in the 'queue' attribute of the default rule. If 'queue' attribute is not specified, the app is placed into 'root.default' queue. From 46f90581641feec37e285964df983d221bee5e1d Mon Sep 17 00:00:00 2001 From: Miklos Szegedi Date: Wed, 20 Jun 2018 11:58:18 -0700 Subject: [PATCH 29/70] YARN-8441. Typo in CSQueueUtils local variable names: queueGuranteedResource. Contributed by Szilard Nemeth. 
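The CSQueueUtils hunks below only rename queueGuranteedResource to queueGuaranteedResource, but the value it carries is the denominator of the queue's used-capacity ratio. A rough standalone sketch of that calculation, assuming the YARN Resources/ResourceCalculator utilities; the class name and the sample numbers are invented for illustration:

import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
import org.apache.hadoop.yarn.util.resource.Resources;

public class QueueCapacityRatioSketch {
  public static void main(String[] args) {
    ResourceCalculator rc = new DefaultResourceCalculator();
    Resource cluster = Resource.newInstance(100 * 1024, 100);        // total partition resource
    Resource queueGuaranteed = Resource.newInstance(20 * 1024, 20);  // queue's effective capacity
    Resource minimumAllocation = Resource.newInstance(1024, 1);
    Resource used = Resource.newInstance(5 * 1024, 5);

    // Clamp the guarantee to at least the minimum allocation so the division
    // below never sees a zero denominator, as CSQueueUtils does.
    queueGuaranteed = Resources.max(rc, cluster, queueGuaranteed, minimumAllocation);

    // usedCapacity: the fraction of the queue's guaranteed share that is in use.
    float usedCapacity = Resources.divide(rc, cluster, used, queueGuaranteed);
    System.out.println("usedCapacity = " + usedCapacity);  // 0.25 with these numbers
  }
}

With DefaultResourceCalculator only memory enters the ratio; DominantResourceCalculator would instead take the dominant share across memory and vcores.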
--- .../scheduler/capacity/CSQueueUtils.java | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java index 0dfce83413..b5edbf7c61 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java @@ -184,7 +184,7 @@ public static void updateUsedCapacity(final ResourceCalculator rc, if (Resources.greaterThan(rc, totalPartitionResource, totalPartitionResource, Resources.none())) { - Resource queueGuranteedResource = childQueue + Resource queueGuaranteedResource = childQueue .getEffectiveCapacity(nodePartition); //TODO : Modify below code to support Absolute Resource configurations @@ -204,14 +204,14 @@ public static void updateUsedCapacity(final ResourceCalculator rc, QueueCapacities leafQueueTemplateCapacities = parentQueue .getLeafQueueTemplate() .getQueueCapacities(); - queueGuranteedResource = Resources.multiply(totalPartitionResource, + queueGuaranteedResource = Resources.multiply(totalPartitionResource, leafQueueTemplateCapacities.getAbsoluteCapacity (nodePartition)); } // make queueGuranteed >= minimum_allocation to avoid divided by 0. - queueGuranteedResource = - Resources.max(rc, totalPartitionResource, queueGuranteedResource, + queueGuaranteedResource = + Resources.max(rc, totalPartitionResource, queueGuaranteedResource, minimumAllocation); Resource usedResource = queueResourceUsage.getUsed(nodePartition); @@ -220,12 +220,12 @@ public static void updateUsedCapacity(final ResourceCalculator rc, totalPartitionResource); usedCapacity = Resources.divide(rc, totalPartitionResource, usedResource, - queueGuranteedResource); + queueGuaranteedResource); Resource resResource = queueResourceUsage.getReserved(nodePartition); reservedCapacity = Resources.divide(rc, totalPartitionResource, resResource, - queueGuranteedResource); + queueGuaranteedResource); absoluteReservedCapacity = Resources.divide(rc, totalPartitionResource, resResource, totalPartitionResource); @@ -258,16 +258,16 @@ private static Resource getMaxAvailableResourceToQueue( for (String partition : nodeLabels) { // Calculate guaranteed resource for a label in a queue by below logic. // (total label resource) * (absolute capacity of label in that queue) - Resource queueGuranteedResource = queue.getEffectiveCapacity(partition); + Resource queueGuaranteedResource = queue.getEffectiveCapacity(partition); // Available resource in queue for a specific label will be calculated as // {(guaranteed resource for a label in a queue) - // (resource usage of that label in the queue)} // Finally accumulate this available resource to get total. Resource available = (Resources.greaterThan(rc, cluster, - queueGuranteedResource, + queueGuaranteedResource, queue.getQueueResourceUsage().getUsed(partition))) ? 
Resources - .componentwiseMax(Resources.subtractFrom(queueGuranteedResource, + .componentwiseMax(Resources.subtractFrom(queueGuaranteedResource, queue.getQueueResourceUsage().getUsed(partition)), Resources .none()) : Resources.none(); Resources.addTo(totalAvailableResource, available); From d6ee4290df10ad3e0b087c21accd602508e0a197 Mon Sep 17 00:00:00 2001 From: Miklos Szegedi Date: Wed, 20 Jun 2018 12:04:44 -0700 Subject: [PATCH 30/70] MAPREDUCE-7113. Typos in test names in TestTaskAttempt: "testAppDiognostic". Contributed by Szilard Nemeth. --- .../hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java index 609923f6a1..b1b7b8f72d 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java @@ -853,7 +853,7 @@ public void testDoubleTooManyFetchFailure() throws Exception { @Test - public void testAppDiognosticEventOnUnassignedTask() throws Exception { + public void testAppDiagnosticEventOnUnassignedTask() { ApplicationId appId = ApplicationId.newInstance(1, 2); ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance( appId, 0); @@ -978,7 +978,7 @@ public void testTooManyFetchFailureAfterKill() throws Exception { } @Test - public void testAppDiognosticEventOnNewTask() throws Exception { + public void testAppDiagnosticEventOnNewTask() { ApplicationId appId = ApplicationId.newInstance(1, 2); ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance( appId, 0); From b089a06793d94d42b7da1b7566e366ceb748e081 Mon Sep 17 00:00:00 2001 From: Sean Mackrory Date: Wed, 20 Jun 2018 16:10:36 -0600 Subject: [PATCH 31/70] HADOOP-14918. Remove the Local Dynamo DB test option. Contributed by Gabor Bota. 
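Beyond deleting the DynamoDBLocal plumbing, the Constants.java hunk below introduces fs.s3a.s3guard.ddb.test.table and notes that destructive S3Guard integration tests should not run when it is unset. A small sketch of how a test could honour that contract with a JUnit assumption; this guard helper is illustrative only and is not part of the patch:

import org.apache.hadoop.conf.Configuration;
import org.junit.Assume;

public final class DynamoDBTestTableGuardSketch {
  /** Key added by the patch; the guard logic around it is hypothetical. */
  private static final String TEST_TABLE_KEY = "fs.s3a.s3guard.ddb.test.table";

  private DynamoDBTestTableGuardSketch() {
  }

  /** Return the configured test table, or skip the calling test if none is set. */
  public static String requireTestTable(Configuration conf) {
    String table = conf.getTrimmed(TEST_TABLE_KEY, "");
    // Assume.assumeTrue marks the test as skipped rather than failed when no
    // dedicated (and expendable) DynamoDB table has been configured.
    Assume.assumeTrue("No DynamoDB test table set under " + TEST_TABLE_KEY,
        !table.isEmpty());
    return table;
  }
}

Skipping via an assumption keeps the suite green for developers without a real DynamoDB table while still exercising the destructive paths when one is supplied.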
--- hadoop-project/pom.xml | 5 - hadoop-tools/hadoop-aws/pom.xml | 42 +- .../org/apache/hadoop/fs/s3a/Constants.java | 11 + .../fs/s3a/s3guard/DynamoDBMetadataStore.java | 1 + .../hadoop/fs/s3a/AbstractS3ATestBase.java | 21 +- .../hadoop/fs/s3a/S3ATestConstants.java | 1 - .../apache/hadoop/fs/s3a/S3ATestUtils.java | 34 +- .../s3a/commit/staging/StagingTestBase.java | 30 +- .../s3guard/DynamoDBLocalClientFactory.java | 160 ----- .../fs/s3a/s3guard/MetadataStoreTestBase.java | 4 +- .../s3guard/TestDynamoDBMetadataStore.java | 589 ------------------ 11 files changed, 73 insertions(+), 825 deletions(-) delete mode 100644 hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBLocalClientFactory.java delete mode 100644 hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestDynamoDBMetadataStore.java diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index 8cb5bfc48b..ed0187b533 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -2060,10 +2060,5 @@ - - dynamodb-local-oregon - DynamoDB Local Release Repository - https://s3-us-west-2.amazonaws.com/dynamodb-local/release - diff --git a/hadoop-tools/hadoop-aws/pom.xml b/hadoop-tools/hadoop-aws/pom.xml index 24ed11dee4..c6dddb0223 100644 --- a/hadoop-tools/hadoop-aws/pom.xml +++ b/hadoop-tools/hadoop-aws/pom.xml @@ -36,7 +36,6 @@ true ${project.build.directory}/test - 1.11.86 unset @@ -49,6 +48,8 @@ false false local + + 200000 @@ -162,6 +163,7 @@ ${fs.s3a.s3guard.test.authoritative} ${fs.s3a.s3guard.test.implementation} + ${test.integration.timeout} @@ -299,23 +301,10 @@ - - - dynamodblocal - - - dynamodblocal - - - - dynamodblocal - - - - non-auth + auth auth @@ -346,6 +335,9 @@ maven-surefire-plugin 3600 + + ${test.integration.timeout} + @@ -417,26 +409,6 @@ aws-java-sdk-bundle compile - - com.amazonaws - DynamoDBLocal - ${dynamodb.local.version} - test - - - org.hamcrest - hamcrest-core - - - org.eclipse.jetty - jetty-http - - - org.apache.commons - commons-lang3 - - - junit junit diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java index 4c958439b0..c52193698f 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java @@ -401,6 +401,17 @@ private Constants() { public static final String S3GUARD_DDB_TABLE_NAME_KEY = "fs.s3a.s3guard.ddb.table"; + /** + * Test table name to use during DynamoDB integration test. + * + * The table will be modified, and deleted in the end of the tests. + * If this value is not set, the integration tests that would be destructive + * won't run. + */ + @InterfaceStability.Unstable + public static final String S3GUARD_DDB_TEST_TABLE_NAME_KEY = + "fs.s3a.s3guard.ddb.test.table"; + /** * Whether to create the DynamoDB table if the table does not exist. 
*/ diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java index b942ed7349..0ab86962a9 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java @@ -261,6 +261,7 @@ private static DynamoDB createDynamoDB(Configuration conf, String s3Region) @Override @Retries.OnceRaw public void initialize(FileSystem fs) throws IOException { + Preconditions.checkNotNull(fs, "Null filesystem"); Preconditions.checkArgument(fs instanceof S3AFileSystem, "DynamoDBMetadataStore only supports S3A filesystem."); owner = (S3AFileSystem) fs; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/AbstractS3ATestBase.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/AbstractS3ATestBase.java index 73e71f41fd..f22af49635 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/AbstractS3ATestBase.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/AbstractS3ATestBase.java @@ -29,13 +29,10 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.File; import java.io.IOException; import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset; import static org.apache.hadoop.fs.contract.ContractTestUtils.writeDataset; -import static org.apache.hadoop.fs.s3a.S3ATestUtils.maybeEnableS3Guard; -import static org.apache.hadoop.fs.s3a.commit.CommitConstants.MAGIC_COMMITTER_ENABLED; /** * An extension of the contract test base set up for S3A tests. @@ -78,23 +75,7 @@ protected int getTestTimeoutMillis() { */ @Override protected Configuration createConfiguration() { - Configuration conf = super.createConfiguration(); - // patch in S3Guard options - maybeEnableS3Guard(conf); - // set hadoop temp dir to a default value - String testUniqueForkId = - System.getProperty(TEST_UNIQUE_FORK_ID); - String tmpDir = conf.get(Constants.HADOOP_TMP_DIR, "target/build/test"); - if (testUniqueForkId != null) { - // patch temp dir for the specific branch - tmpDir = tmpDir + File.pathSeparatorChar + testUniqueForkId; - conf.set(Constants.HADOOP_TMP_DIR, tmpDir); - } - conf.set(Constants.BUFFER_DIR, tmpDir); - // add this so that even on tests where the FS is shared, - // the FS is always "magic" - conf.setBoolean(MAGIC_COMMITTER_ENABLED, true); - return conf; + return S3ATestUtils.prepareTestConfiguration(super.createConfiguration()); } protected Configuration getConfiguration() { diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestConstants.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestConstants.java index 7f7802d24d..0f7b418c1e 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestConstants.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestConstants.java @@ -143,7 +143,6 @@ public interface S3ATestConstants { String TEST_S3GUARD_IMPLEMENTATION = TEST_S3GUARD_PREFIX + ".implementation"; String TEST_S3GUARD_IMPLEMENTATION_LOCAL = "local"; String TEST_S3GUARD_IMPLEMENTATION_DYNAMO = "dynamo"; - String TEST_S3GUARD_IMPLEMENTATION_DYNAMODBLOCAL = "dynamodblocal"; String TEST_S3GUARD_IMPLEMENTATION_NONE = "none"; /** diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java index 4414746f96..d259bf1d1e 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java @@ -30,9 +30,6 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.s3a.commit.CommitConstants; -import org.apache.hadoop.fs.s3a.s3guard.DynamoDBClientFactory; -import org.apache.hadoop.fs.s3a.s3guard.DynamoDBLocalClientFactory; -import org.apache.hadoop.fs.s3a.s3guard.S3Guard; import org.hamcrest.core.Is; import org.junit.Assert; @@ -42,6 +39,7 @@ import org.slf4j.LoggerFactory; import java.io.Closeable; +import java.io.File; import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; @@ -56,6 +54,7 @@ import static org.apache.hadoop.fs.s3a.Constants.*; import static org.apache.hadoop.fs.s3a.S3AUtils.propagateBucketOptions; import static org.apache.hadoop.test.LambdaTestUtils.intercept; +import static org.apache.hadoop.fs.s3a.commit.CommitConstants.MAGIC_COMMITTER_ENABLED; import static org.junit.Assert.*; /** @@ -393,9 +392,6 @@ public static void maybeEnableS3Guard(Configuration conf) { case TEST_S3GUARD_IMPLEMENTATION_LOCAL: implClass = S3GUARD_METASTORE_LOCAL; break; - case TEST_S3GUARD_IMPLEMENTATION_DYNAMODBLOCAL: - conf.setClass(S3Guard.S3GUARD_DDB_CLIENT_FACTORY_IMPL, - DynamoDBLocalClientFactory.class, DynamoDBClientFactory.class); case TEST_S3GUARD_IMPLEMENTATION_DYNAMO: implClass = S3GUARD_METASTORE_DYNAMO; break; @@ -489,6 +485,32 @@ public static E interceptClosing( }); } + /** + * Patch a configuration for testing. + * This includes possibly enabling s3guard, setting up the local + * FS temp dir and anything else needed for test runs. + * @param conf configuration to patch + * @return the now-patched configuration + */ + public static Configuration prepareTestConfiguration(final Configuration conf) { + // patch in S3Guard options + maybeEnableS3Guard(conf); + // set hadoop temp dir to a default value + String testUniqueForkId = + System.getProperty(TEST_UNIQUE_FORK_ID); + String tmpDir = conf.get(HADOOP_TMP_DIR, "target/build/test"); + if (testUniqueForkId != null) { + // patch temp dir for the specific branch + tmpDir = tmpDir + File.pathSeparatorChar + testUniqueForkId; + conf.set(HADOOP_TMP_DIR, tmpDir); + } + conf.set(BUFFER_DIR, tmpDir); + // add this so that even on tests where the FS is shared, + // the FS is always "magic" + conf.setBoolean(MAGIC_COMMITTER_ENABLED, true); + return conf; + } + /** * Helper class to do diffs of metrics. */ diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/StagingTestBase.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/StagingTestBase.java index 38d5156ea0..d81c747fce 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/StagingTestBase.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/StagingTestBase.java @@ -49,6 +49,7 @@ import org.junit.Assert; import org.junit.Before; import org.junit.BeforeClass; +import org.mockito.invocation.InvocationOnMock; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -514,6 +515,21 @@ public boolean isRecover() { } } + /** + * InvocationOnMock.getArgumentAt comes and goes with Mockito versions; this + * helper method is designed to be resilient to change. 
+ * @param invocation invocation to query + * @param index argument index + * @param clazz class of return type + * @param type of return + * @return the argument of the invocation, cast to the given type. + */ + @SuppressWarnings("unchecked") + private static T getArgumentAt(InvocationOnMock invocation, int index, + Class clazz) { + return (T)invocation.getArguments()[index]; + } + /** * Instantiate mock client with the results and errors requested. * @param results results to accrue @@ -539,7 +555,7 @@ public static AmazonS3 newMockS3Client(final ClientResults results, "Mock Fail on init " + results.requests.size()); } String uploadId = UUID.randomUUID().toString(); - InitiateMultipartUploadRequest req = invocation.getArgumentAt( + InitiateMultipartUploadRequest req = getArgumentAt(invocation, 0, InitiateMultipartUploadRequest.class); results.requests.put(uploadId, req); results.activeUploads.put(uploadId, req.getKey()); @@ -561,7 +577,7 @@ public static AmazonS3 newMockS3Client(final ClientResults results, throw new AmazonClientException( "Mock Fail on upload " + results.parts.size()); } - UploadPartRequest req = invocation.getArgumentAt( + UploadPartRequest req = getArgumentAt(invocation, 0, UploadPartRequest.class); results.parts.add(req); String etag = UUID.randomUUID().toString(); @@ -588,7 +604,7 @@ public static AmazonS3 newMockS3Client(final ClientResults results, throw new AmazonClientException( "Mock Fail on commit " + results.commits.size()); } - CompleteMultipartUploadRequest req = invocation.getArgumentAt( + CompleteMultipartUploadRequest req = getArgumentAt(invocation, 0, CompleteMultipartUploadRequest.class); results.commits.add(req); results.activeUploads.remove(req.getUploadId()); @@ -608,7 +624,7 @@ public static AmazonS3 newMockS3Client(final ClientResults results, throw new AmazonClientException( "Mock Fail on abort " + results.aborts.size()); } - AbortMultipartUploadRequest req = invocation.getArgumentAt( + AbortMultipartUploadRequest req = getArgumentAt(invocation, 0, AbortMultipartUploadRequest.class); String id = req.getUploadId(); String p = results.activeUploads.remove(id); @@ -630,7 +646,7 @@ public static AmazonS3 newMockS3Client(final ClientResults results, doAnswer(invocation -> { LOG.debug("deleteObject for {}", mockClient); synchronized (lock) { - results.deletes.add(invocation.getArgumentAt( + results.deletes.add(getArgumentAt(invocation, 0, DeleteObjectRequest.class)); return null; } @@ -643,8 +659,8 @@ public static AmazonS3 newMockS3Client(final ClientResults results, LOG.debug("deleteObject for {}", mockClient); synchronized (lock) { results.deletes.add(new DeleteObjectRequest( - invocation.getArgumentAt(0, String.class), - invocation.getArgumentAt(1, String.class) + getArgumentAt(invocation, 0, String.class), + getArgumentAt(invocation, 1, String.class) )); return null; } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBLocalClientFactory.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBLocalClientFactory.java deleted file mode 100644 index 9894ac4347..0000000000 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBLocalClientFactory.java +++ /dev/null @@ -1,160 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.fs.s3a.s3guard; - -import java.io.File; -import java.io.IOException; - -import com.amazonaws.ClientConfiguration; -import com.amazonaws.auth.AWSCredentialsProvider; -import com.amazonaws.client.builder.AwsClientBuilder; -import com.amazonaws.services.dynamodbv2.AmazonDynamoDB; -import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClientBuilder; -import com.amazonaws.services.dynamodbv2.local.main.ServerRunner; -import com.amazonaws.services.dynamodbv2.local.server.DynamoDBProxyServer; -import org.apache.commons.lang3.StringUtils; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.conf.Configured; -import org.apache.hadoop.fs.s3a.DefaultS3ClientFactory; -import org.apache.hadoop.net.ServerSocketUtil; - -import static org.apache.hadoop.fs.s3a.Constants.AWS_CREDENTIALS_PROVIDER; -import static org.apache.hadoop.fs.s3a.S3AUtils.createAWSCredentialProviderSet; -import static org.apache.hadoop.fs.s3a.s3guard.DynamoDBClientFactory.DefaultDynamoDBClientFactory.getRegion; - -/** - * A DynamoDBClientFactory implementation that creates AmazonDynamoDB clients - * against an in-memory DynamoDBLocal server instance. - * - * You won't be charged bills for issuing any DynamoDB requests. However, the - * DynamoDBLocal is considered a simulator of the DynamoDB web service, so it - * may be stale or different. For example, the throttling is not yet supported - * in DynamoDBLocal. This is for testing purpose only. - * - * To use this for creating DynamoDB client in tests: - *

 - * 1. As all DynamoDBClientFactory implementations, this should be configured.
 - * 2. The singleton DynamoDBLocal server instance is started automatically when
 - *    creating the AmazonDynamoDB client for the first time. It still merits to
 - *    launch the server before all the tests and fail fast if error happens.
 - * 3. The server can be stopped explicitly, which is not actually needed in
 - *    tests as JVM termination will do that.
    - * - * @see DefaultDynamoDBClientFactory - */ -public class DynamoDBLocalClientFactory extends Configured - implements DynamoDBClientFactory { - - /** The DynamoDBLocal dynamoDBLocalServer instance for testing. */ - private static DynamoDBProxyServer dynamoDBLocalServer; - private static String ddbEndpoint; - - private static final String SYSPROP_SQLITE_LIB = "sqlite4java.library.path"; - - @Override - public AmazonDynamoDB createDynamoDBClient(String defaultRegion) - throws IOException { - startSingletonServer(); - - final Configuration conf = getConf(); - // use the default credential provider chain - conf.unset(AWS_CREDENTIALS_PROVIDER); - final AWSCredentialsProvider credentials = - createAWSCredentialProviderSet(null, conf); - final ClientConfiguration awsConf = - DefaultS3ClientFactory.createAwsConf(conf); - // fail fast in case of service errors - awsConf.setMaxErrorRetry(3); - - final String region = getRegion(conf, defaultRegion); - LOG.info("Creating DynamoDBLocal client using endpoint {} in region {}", - ddbEndpoint, region); - - return AmazonDynamoDBClientBuilder.standard() - .withCredentials(credentials) - .withClientConfiguration(awsConf) - .withEndpointConfiguration( - new AwsClientBuilder.EndpointConfiguration(ddbEndpoint, region)) - .build(); - } - - /** - * Start a singleton in-memory DynamoDBLocal server if not started yet. - * @throws IOException if any error occurs - */ - public synchronized static void startSingletonServer() throws IOException { - if (dynamoDBLocalServer != null) { - return; - } - - // Set this property if it has not been set elsewhere - if (StringUtils.isEmpty(System.getProperty(SYSPROP_SQLITE_LIB))) { - String projectBuildDir = System.getProperty("project.build.directory"); - if (StringUtils.isEmpty(projectBuildDir)) { - projectBuildDir = "target"; - } - // sqlite4java lib should have been copied to $projectBuildDir/native-libs - System.setProperty(SYSPROP_SQLITE_LIB, - projectBuildDir + File.separator + "native-libs"); - LOG.info("Setting {} -> {}", - SYSPROP_SQLITE_LIB, System.getProperty(SYSPROP_SQLITE_LIB)); - } - - try { - // Start an in-memory local DynamoDB instance - final String port = String.valueOf(ServerSocketUtil.getPort(0, 100)); - ddbEndpoint = "http://localhost:" + port; - dynamoDBLocalServer = ServerRunner.createServerFromCommandLineArgs( - new String[]{"-inMemory", "-port", port}); - dynamoDBLocalServer.start(); - LOG.info("DynamoDBLocal singleton server was started at {}", ddbEndpoint); - } catch (Exception t) { - String msg = "Error starting DynamoDBLocal server at " + ddbEndpoint - + " " + t; - LOG.error(msg, t); - throw new IOException(msg, t); - } - } - - /** - * Stop the in-memory DynamoDBLocal server if it is started. 
- * @throws IOException if any error occurs - */ - public synchronized static void stopSingletonServer() throws IOException { - if (dynamoDBLocalServer != null) { - LOG.info("Shutting down the in-memory DynamoDBLocal server"); - try { - dynamoDBLocalServer.stop(); - } catch (Throwable t) { - String msg = "Error stopping DynamoDBLocal server at " + ddbEndpoint; - LOG.error(msg, t); - throw new IOException(msg, t); - } - } - } - -} diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/MetadataStoreTestBase.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/MetadataStoreTestBase.java index 806940bb7b..56618cb233 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/MetadataStoreTestBase.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/MetadataStoreTestBase.java @@ -28,7 +28,6 @@ import com.google.common.collect.Sets; import org.junit.After; -import org.junit.Assert; import org.junit.Assume; import org.junit.Before; import org.junit.Test; @@ -43,6 +42,7 @@ import org.apache.hadoop.fs.s3a.S3ATestUtils; import org.apache.hadoop.fs.s3a.Tristate; import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.test.HadoopTestBase; /** * Main test class for MetadataStore implementations. @@ -51,7 +51,7 @@ * If your implementation may return missing results for recently set paths, * override {@link MetadataStoreTestBase#allowMissing()}. */ -public abstract class MetadataStoreTestBase extends Assert { +public abstract class MetadataStoreTestBase extends HadoopTestBase { private static final Logger LOG = LoggerFactory.getLogger(MetadataStoreTestBase.class); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestDynamoDBMetadataStore.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestDynamoDBMetadataStore.java deleted file mode 100644 index 5763b8336e..0000000000 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestDynamoDBMetadataStore.java +++ /dev/null @@ -1,589 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.fs.s3a.s3guard; - -import java.io.IOException; -import java.net.URI; -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; - -import com.amazonaws.AmazonServiceException; -import com.amazonaws.services.dynamodbv2.document.DynamoDB; -import com.amazonaws.services.dynamodbv2.document.Item; -import com.amazonaws.services.dynamodbv2.document.PrimaryKey; -import com.amazonaws.services.dynamodbv2.document.Table; -import com.amazonaws.services.dynamodbv2.model.ProvisionedThroughputDescription; -import com.amazonaws.services.dynamodbv2.model.ResourceNotFoundException; -import com.amazonaws.services.dynamodbv2.model.TableDescription; - -import com.google.common.collect.Lists; -import org.apache.commons.collections.CollectionUtils; -import org.apache.hadoop.fs.s3a.Tristate; - -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.Timeout; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.CommonConfigurationKeysPublic; -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.s3a.MockS3ClientFactory; -import org.apache.hadoop.fs.s3a.S3AFileStatus; -import org.apache.hadoop.fs.s3a.S3AFileSystem; -import org.apache.hadoop.fs.s3a.S3ClientFactory; -import org.apache.hadoop.security.UserGroupInformation; - -import static org.apache.hadoop.fs.s3a.Constants.*; -import static org.apache.hadoop.fs.s3a.s3guard.PathMetadataDynamoDBTranslation.*; -import static org.apache.hadoop.fs.s3a.s3guard.DynamoDBMetadataStore.*; -import static org.apache.hadoop.test.LambdaTestUtils.*; - -/** - * Test that {@link DynamoDBMetadataStore} implements {@link MetadataStore}. - * - * In this unit test, we use an in-memory DynamoDBLocal server instead of real - * AWS DynamoDB. An {@link S3AFileSystem} object is created and shared for - * initializing {@link DynamoDBMetadataStore} objects. There are no real S3 - * request issued as the underlying AWS S3Client is mocked. You won't be - * charged bills for AWS S3 or DynamoDB when you run this test. - * - * According to the base class, every test case will have independent contract - * to create a new {@link DynamoDBMetadataStore} instance and initializes it. - * A table will be created for each test by the test contract, and will be - * destroyed after the test case finishes. - */ -public class TestDynamoDBMetadataStore extends MetadataStoreTestBase { - private static final Logger LOG = - LoggerFactory.getLogger(TestDynamoDBMetadataStore.class); - private static final String BUCKET = "TestDynamoDBMetadataStore"; - private static final String S3URI = - URI.create(FS_S3A + "://" + BUCKET + "/").toString(); - public static final PrimaryKey - VERSION_MARKER_PRIMARY_KEY = createVersionMarkerPrimaryKey( - DynamoDBMetadataStore.VERSION_MARKER); - - /** The DynamoDB instance that can issue requests directly to server. */ - private static DynamoDB dynamoDB; - - @Rule - public final Timeout timeout = new Timeout(60 * 1000); - - /** - * Start the in-memory DynamoDBLocal server and initializes s3 file system. 
- */ - @BeforeClass - public static void setUpBeforeClass() throws Exception { - DynamoDBLocalClientFactory.startSingletonServer(); - try { - dynamoDB = new DynamoDBMSContract().getMetadataStore().getDynamoDB(); - } catch (AmazonServiceException e) { - final String msg = "Cannot initialize a DynamoDBMetadataStore instance " - + "against the local DynamoDB server. Perhaps the DynamoDBLocal " - + "server is not configured correctly. "; - LOG.error(msg, e); - // fail fast if the DynamoDBLocal server can not work - throw e; - } - } - - @AfterClass - public static void tearDownAfterClass() throws Exception { - if (dynamoDB != null) { - dynamoDB.shutdown(); - } - DynamoDBLocalClientFactory.stopSingletonServer(); - } - - /** - * Each contract has its own S3AFileSystem and DynamoDBMetadataStore objects. - */ - private static class DynamoDBMSContract extends AbstractMSContract { - private final S3AFileSystem s3afs; - private final DynamoDBMetadataStore ms = new DynamoDBMetadataStore(); - - DynamoDBMSContract() throws IOException { - this(new Configuration()); - } - - DynamoDBMSContract(Configuration conf) throws IOException { - // using mocked S3 clients - conf.setClass(S3_CLIENT_FACTORY_IMPL, MockS3ClientFactory.class, - S3ClientFactory.class); - conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, S3URI); - // setting config for creating a DynamoDBClient against local server - conf.set(ACCESS_KEY, "dummy-access-key"); - conf.set(SECRET_KEY, "dummy-secret-key"); - conf.setBoolean(S3GUARD_DDB_TABLE_CREATE_KEY, true); - conf.setClass(S3Guard.S3GUARD_DDB_CLIENT_FACTORY_IMPL, - DynamoDBLocalClientFactory.class, DynamoDBClientFactory.class); - - // always create new file system object for a test contract - s3afs = (S3AFileSystem) FileSystem.newInstance(conf); - ms.initialize(s3afs); - } - - @Override - public S3AFileSystem getFileSystem() { - return s3afs; - } - - @Override - public DynamoDBMetadataStore getMetadataStore() { - return ms; - } - } - - @Override - public DynamoDBMSContract createContract() throws IOException { - return new DynamoDBMSContract(); - } - - @Override - public DynamoDBMSContract createContract(Configuration conf) throws - IOException { - return new DynamoDBMSContract(conf); - } - - @Override - FileStatus basicFileStatus(Path path, int size, boolean isDir) - throws IOException { - String owner = UserGroupInformation.getCurrentUser().getShortUserName(); - return isDir - ? new S3AFileStatus(true, path, owner) - : new S3AFileStatus(size, getModTime(), path, BLOCK_SIZE, owner); - } - - private DynamoDBMetadataStore getDynamoMetadataStore() throws IOException { - return (DynamoDBMetadataStore) getContract().getMetadataStore(); - } - - private S3AFileSystem getFileSystem() throws IOException { - return (S3AFileSystem) getContract().getFileSystem(); - } - - /** - * This tests that after initialize() using an S3AFileSystem object, the - * instance should have been initialized successfully, and tables are ACTIVE. 
- */ - @Test - public void testInitialize() throws IOException { - final String tableName = "testInitializeWithFileSystem"; - final S3AFileSystem s3afs = getFileSystem(); - final Configuration conf = s3afs.getConf(); - conf.set(S3GUARD_DDB_TABLE_NAME_KEY, tableName); - try (DynamoDBMetadataStore ddbms = new DynamoDBMetadataStore()) { - ddbms.initialize(s3afs); - verifyTableInitialized(tableName); - assertNotNull(ddbms.getTable()); - assertEquals(tableName, ddbms.getTable().getTableName()); - String expectedRegion = conf.get(S3GUARD_DDB_REGION_KEY, - s3afs.getBucketLocation(tableName)); - assertEquals("DynamoDB table should be in configured region or the same" + - " region as S3 bucket", - expectedRegion, - ddbms.getRegion()); - } - } - - /** - * This tests that after initialize() using a Configuration object, the - * instance should have been initialized successfully, and tables are ACTIVE. - */ - @Test - public void testInitializeWithConfiguration() throws IOException { - final String tableName = "testInitializeWithConfiguration"; - final Configuration conf = getFileSystem().getConf(); - conf.unset(S3GUARD_DDB_TABLE_NAME_KEY); - String savedRegion = conf.get(S3GUARD_DDB_REGION_KEY, - getFileSystem().getBucketLocation()); - conf.unset(S3GUARD_DDB_REGION_KEY); - try (DynamoDBMetadataStore ddbms = new DynamoDBMetadataStore()) { - ddbms.initialize(conf); - fail("Should have failed because the table name is not set!"); - } catch (IllegalArgumentException ignored) { - } - // config table name - conf.set(S3GUARD_DDB_TABLE_NAME_KEY, tableName); - try (DynamoDBMetadataStore ddbms = new DynamoDBMetadataStore()) { - ddbms.initialize(conf); - fail("Should have failed because as the region is not set!"); - } catch (IllegalArgumentException ignored) { - } - // config region - conf.set(S3GUARD_DDB_REGION_KEY, savedRegion); - try (DynamoDBMetadataStore ddbms = new DynamoDBMetadataStore()) { - ddbms.initialize(conf); - verifyTableInitialized(tableName); - assertNotNull(ddbms.getTable()); - assertEquals(tableName, ddbms.getTable().getTableName()); - assertEquals("Unexpected key schema found!", - keySchema(), - ddbms.getTable().describe().getKeySchema()); - } - } - - /** - * Test that for a large batch write request, the limit is handled correctly. - */ - @Test - public void testBatchWrite() throws IOException { - final int[] numMetasToDeleteOrPut = { - -1, // null - 0, // empty collection - 1, // one path - S3GUARD_DDB_BATCH_WRITE_REQUEST_LIMIT, // exact limit of a batch request - S3GUARD_DDB_BATCH_WRITE_REQUEST_LIMIT + 1 // limit + 1 - }; - for (int numOldMetas : numMetasToDeleteOrPut) { - for (int numNewMetas : numMetasToDeleteOrPut) { - doTestBatchWrite(numOldMetas, numNewMetas); - } - } - } - - private void doTestBatchWrite(int numDelete, int numPut) throws IOException { - final String root = S3URI + "/testBatchWrite_" + numDelete + '_' + numPut; - final Path oldDir = new Path(root, "oldDir"); - final Path newDir = new Path(root, "newDir"); - LOG.info("doTestBatchWrite: oldDir={}, newDir={}", oldDir, newDir); - - DynamoDBMetadataStore ms = getDynamoMetadataStore(); - ms.put(new PathMetadata(basicFileStatus(oldDir, 0, true))); - ms.put(new PathMetadata(basicFileStatus(newDir, 0, true))); - - final List oldMetas = - numDelete < 0 ? null : new ArrayList(numDelete); - for (int i = 0; i < numDelete; i++) { - oldMetas.add(new PathMetadata( - basicFileStatus(new Path(oldDir, "child" + i), i, true))); - } - final List newMetas = - numPut < 0 ? 
null : new ArrayList(numPut); - for (int i = 0; i < numPut; i++) { - newMetas.add(new PathMetadata( - basicFileStatus(new Path(newDir, "child" + i), i, false))); - } - - Collection pathsToDelete = null; - if (oldMetas != null) { - // put all metadata of old paths and verify - ms.put(new DirListingMetadata(oldDir, oldMetas, false)); - assertEquals(0, ms.listChildren(newDir).withoutTombstones().numEntries()); - assertTrue(CollectionUtils.isEqualCollection(oldMetas, - ms.listChildren(oldDir).getListing())); - - pathsToDelete = new ArrayList<>(oldMetas.size()); - for (PathMetadata meta : oldMetas) { - pathsToDelete.add(meta.getFileStatus().getPath()); - } - } - - // move the old paths to new paths and verify - ms.move(pathsToDelete, newMetas); - assertEquals(0, ms.listChildren(oldDir).withoutTombstones().numEntries()); - if (newMetas != null) { - assertTrue(CollectionUtils.isEqualCollection(newMetas, - ms.listChildren(newDir).getListing())); - } - } - - @Test - public void testInitExistingTable() throws IOException { - final DynamoDBMetadataStore ddbms = getDynamoMetadataStore(); - final String tableName = ddbms.getTable().getTableName(); - verifyTableInitialized(tableName); - // create existing table - ddbms.initTable(); - verifyTableInitialized(tableName); - } - - /** - * Test the low level version check code. - */ - @Test - public void testItemVersionCompatibility() throws Throwable { - verifyVersionCompatibility("table", - createVersionMarker(VERSION_MARKER, VERSION, 0)); - } - - /** - * Test that a version marker entry without the version number field - * is rejected as incompatible with a meaningful error message. - */ - @Test - public void testItemLacksVersion() throws Throwable { - intercept(IOException.class, E_NOT_VERSION_MARKER, - new VoidCallable() { - @Override - public void call() throws Exception { - verifyVersionCompatibility("table", - new Item().withPrimaryKey( - createVersionMarkerPrimaryKey(VERSION_MARKER))); - } - }); - } - - /** - * Delete the version marker and verify that table init fails. - */ - @Test - public void testTableVersionRequired() throws Exception { - Configuration conf = getFileSystem().getConf(); - int maxRetries = conf.getInt(S3GUARD_DDB_MAX_RETRIES, - S3GUARD_DDB_MAX_RETRIES_DEFAULT); - conf.setInt(S3GUARD_DDB_MAX_RETRIES, 3); - - final DynamoDBMetadataStore ddbms = createContract(conf).getMetadataStore(); - String tableName = conf.get(S3GUARD_DDB_TABLE_NAME_KEY, BUCKET); - Table table = verifyTableInitialized(tableName); - table.deleteItem(VERSION_MARKER_PRIMARY_KEY); - - // create existing table - intercept(IOException.class, E_NO_VERSION_MARKER, - new VoidCallable() { - @Override - public void call() throws Exception { - ddbms.initTable(); - } - }); - - conf.setInt(S3GUARD_DDB_MAX_RETRIES, maxRetries); - } - - /** - * Set the version value to a different number and verify that - * table init fails. 
- */ - @Test - public void testTableVersionMismatch() throws Exception { - final DynamoDBMetadataStore ddbms = createContract().getMetadataStore(); - String tableName = getFileSystem().getConf() - .get(S3GUARD_DDB_TABLE_NAME_KEY, BUCKET); - Table table = verifyTableInitialized(tableName); - table.deleteItem(VERSION_MARKER_PRIMARY_KEY); - Item v200 = createVersionMarker(VERSION_MARKER, 200, 0); - table.putItem(v200); - - // create existing table - intercept(IOException.class, E_INCOMPATIBLE_VERSION, - new VoidCallable() { - @Override - public void call() throws Exception { - ddbms.initTable(); - } - }); - } - - /** - * Test that initTable fails with IOException when table does not exist and - * table auto-creation is disabled. - */ - @Test - public void testFailNonexistentTable() throws IOException { - final String tableName = "testFailNonexistentTable"; - final S3AFileSystem s3afs = getFileSystem(); - final Configuration conf = s3afs.getConf(); - conf.set(S3GUARD_DDB_TABLE_NAME_KEY, tableName); - conf.unset(S3GUARD_DDB_TABLE_CREATE_KEY); - try (DynamoDBMetadataStore ddbms = new DynamoDBMetadataStore()) { - ddbms.initialize(s3afs); - fail("Should have failed as table does not exist and table auto-creation" - + " is disabled"); - } catch (IOException ignored) { - } - } - - /** - * Test cases about root directory as it is not in the DynamoDB table. - */ - @Test - public void testRootDirectory() throws IOException { - final DynamoDBMetadataStore ddbms = getDynamoMetadataStore(); - Path rootPath = new Path(S3URI); - verifyRootDirectory(ddbms.get(rootPath), true); - - ddbms.put(new PathMetadata(new S3AFileStatus(true, - new Path(rootPath, "foo"), - UserGroupInformation.getCurrentUser().getShortUserName()))); - verifyRootDirectory(ddbms.get(new Path(S3URI)), false); - } - - private void verifyRootDirectory(PathMetadata rootMeta, boolean isEmpty) { - assertNotNull(rootMeta); - final FileStatus status = rootMeta.getFileStatus(); - assertNotNull(status); - assertTrue(status.isDirectory()); - // UNKNOWN is always a valid option, but true / false should not contradict - if (isEmpty) { - assertNotSame("Should not be marked non-empty", - Tristate.FALSE, - rootMeta.isEmptyDirectory()); - } else { - assertNotSame("Should not be marked empty", - Tristate.TRUE, - rootMeta.isEmptyDirectory()); - } - } - - /** - * Test that when moving nested paths, all its ancestors up to destination - * root will also be created. - * Here is the directory tree before move: - *
    -   * testMovePopulateAncestors
    -   * ├── a
    -   * │   └── b
    -   * │       └── src
    -   * │           ├── dir1
    -   * │           │   └── dir2
    -   * │           └── file1.txt
    -   * └── c
    -   *     └── d
    -   *         └── dest
    -   *
    - * As part of rename(a/b/src, d/c/dest), S3A will enumerate the subtree at - * a/b/src. This test verifies that after the move, the new subtree at - * 'dest' is reachable from the root (i.e. c/ and c/d exist in the table. - * DynamoDBMetadataStore depends on this property to do recursive delete - * without a full table scan. - */ - @Test - public void testMovePopulatesAncestors() throws IOException { - final DynamoDBMetadataStore ddbms = getDynamoMetadataStore(); - final String testRoot = "/testMovePopulatesAncestors"; - final String srcRoot = testRoot + "/a/b/src"; - final String destRoot = testRoot + "/c/d/e/dest"; - - final Path nestedPath1 = strToPath(srcRoot + "/file1.txt"); - ddbms.put(new PathMetadata(basicFileStatus(nestedPath1, 1024, false))); - final Path nestedPath2 = strToPath(srcRoot + "/dir1/dir2"); - ddbms.put(new PathMetadata(basicFileStatus(nestedPath2, 0, true))); - - // We don't put the destRoot path here, since put() would create ancestor - // entries, and we want to ensure that move() does it, instead. - - // Build enumeration of src / dest paths and do the move() - final Collection fullSourcePaths = Lists.newArrayList( - strToPath(srcRoot), - strToPath(srcRoot + "/dir1"), - strToPath(srcRoot + "/dir1/dir2"), - strToPath(srcRoot + "/file1.txt") - ); - final Collection pathsToCreate = Lists.newArrayList( - new PathMetadata(basicFileStatus(strToPath(destRoot), - 0, true)), - new PathMetadata(basicFileStatus(strToPath(destRoot + "/dir1"), - 0, true)), - new PathMetadata(basicFileStatus(strToPath(destRoot + "/dir1/dir2"), - 0, true)), - new PathMetadata(basicFileStatus(strToPath(destRoot + "/file1.txt"), - 1024, false)) - ); - - ddbms.move(fullSourcePaths, pathsToCreate); - - // assert that all the ancestors should have been populated automatically - assertCached(testRoot + "/c"); - assertCached(testRoot + "/c/d"); - assertCached(testRoot + "/c/d/e"); - assertCached(destRoot /* /c/d/e/dest */); - - // Also check moved files while we're at it - assertCached(destRoot + "/dir1"); - assertCached(destRoot + "/dir1/dir2"); - assertCached(destRoot + "/file1.txt"); - } - - @Test - public void testProvisionTable() throws IOException { - final DynamoDBMetadataStore ddbms = getDynamoMetadataStore(); - final String tableName = ddbms.getTable().getTableName(); - final ProvisionedThroughputDescription oldProvision = - dynamoDB.getTable(tableName).describe().getProvisionedThroughput(); - ddbms.provisionTable(oldProvision.getReadCapacityUnits() * 2, - oldProvision.getWriteCapacityUnits() * 2); - final ProvisionedThroughputDescription newProvision = - dynamoDB.getTable(tableName).describe().getProvisionedThroughput(); - LOG.info("Old provision = {}, new provision = {}", - oldProvision, newProvision); - assertEquals(oldProvision.getReadCapacityUnits() * 2, - newProvision.getReadCapacityUnits().longValue()); - assertEquals(oldProvision.getWriteCapacityUnits() * 2, - newProvision.getWriteCapacityUnits().longValue()); - } - - @Test - public void testDeleteTable() throws Exception { - final String tableName = "testDeleteTable"; - final S3AFileSystem s3afs = getFileSystem(); - final Configuration conf = s3afs.getConf(); - conf.set(S3GUARD_DDB_TABLE_NAME_KEY, tableName); - try (DynamoDBMetadataStore ddbms = new DynamoDBMetadataStore()) { - ddbms.initialize(s3afs); - // we can list the empty table - ddbms.listChildren(new Path(S3URI)); - - ddbms.destroy(); - verifyTableNotExist(tableName); - - // delete table once more; be ResourceNotFoundException swallowed silently - ddbms.destroy(); - 
verifyTableNotExist(tableName); - try { - // we can no longer list the destroyed table - ddbms.listChildren(new Path(S3URI)); - fail("Should have failed after the table is destroyed!"); - } catch (IOException ignored) { - } - } - } - - /** - * This validates the table is created and ACTIVE in DynamoDB. - * - * This should not rely on the {@link DynamoDBMetadataStore} implementation. - * Return the table - */ - private static Table verifyTableInitialized(String tableName) { - final Table table = dynamoDB.getTable(tableName); - final TableDescription td = table.describe(); - assertEquals(tableName, td.getTableName()); - assertEquals("ACTIVE", td.getTableStatus()); - return table; - } - - /** - * This validates the table is not found in DynamoDB. - * - * This should not rely on the {@link DynamoDBMetadataStore} implementation. - */ - private static void verifyTableNotExist(String tableName) throws Exception{ - intercept(ResourceNotFoundException.class, - () -> dynamoDB.getTable(tableName).describe()); - } - -} From 32f867a6a907c05a312657139d295a92756d98ef Mon Sep 17 00:00:00 2001 From: Xiao Chen Date: Wed, 20 Jun 2018 15:58:01 -0700 Subject: [PATCH 32/70] HDFS-13682. Cannot create encryption zone after KMS auth token expires. --- .../crypto/key/kms/KMSClientProvider.java | 9 +++-- .../hadoop/security/UserGroupInformation.java | 4 ++- .../hdfs/TestSecureEncryptionZoneWithKMS.java | 34 +++++++++++++++++-- 3 files changed, 41 insertions(+), 6 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java index 08787a51bd..edbf897664 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java @@ -32,7 +32,9 @@ import org.apache.hadoop.security.ProviderUtils; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.authentication.client.AuthenticatedURL; import org.apache.hadoop.security.authentication.client.ConnectionConfigurator; +import org.apache.hadoop.security.authentication.client.KerberosAuthenticator; import org.apache.hadoop.security.ssl.SSLFactory; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; @@ -543,7 +545,9 @@ private T call(HttpURLConnection conn, Object jsonOutput, String requestMethod = conn.getRequestMethod(); URL url = conn.getURL(); conn = createConnection(url, requestMethod); - conn.setRequestProperty(CONTENT_TYPE, contentType); + if (contentType != null && !contentType.isEmpty()) { + conn.setRequestProperty(CONTENT_TYPE, contentType); + } return call(conn, jsonOutput, expectedResponse, klass, authRetryCount - 1); } @@ -1087,8 +1091,7 @@ private UserGroupInformation getActualUgi() throws IOException { actualUgi = currentUgi.getRealUser(); } if (UserGroupInformation.isSecurityEnabled() && - !containsKmsDt(actualUgi) && - !actualUgi.hasKerberosCredentials()) { + !containsKmsDt(actualUgi) && !actualUgi.shouldRelogin()) { // Use login user is only necessary when Kerberos is enabled // but the actual user does not have either // Kerberos credential or KMS delegation token for KMS operations diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java index 3872810748..29b9fea424 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java @@ -831,7 +831,9 @@ private long getRefreshTime(KerberosTicket tgt) { return start + (long) ((end - start) * TICKET_RENEW_WINDOW); } - private boolean shouldRelogin() { + @InterfaceAudience.Private + @InterfaceStability.Unstable + public boolean shouldRelogin() { return hasKerberosCredentials() && isHadoopLogin(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSecureEncryptionZoneWithKMS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSecureEncryptionZoneWithKMS.java index 7c4763c13f..db97c02e09 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSecureEncryptionZoneWithKMS.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSecureEncryptionZoneWithKMS.java @@ -107,6 +107,8 @@ public class TestSecureEncryptionZoneWithKMS { // MiniKMS private static MiniKMS miniKMS; private final String testKey = "test_key"; + private static boolean testKeyCreated = false; + private static final long AUTH_TOKEN_VALIDITY = 1; // MiniDFS private MiniDFSCluster cluster; @@ -128,7 +130,7 @@ public static File getTestDir() throws Exception { } @Rule - public Timeout timeout = new Timeout(30000); + public Timeout timeout = new Timeout(120000); @BeforeClass public static void init() throws Exception { @@ -215,6 +217,9 @@ public static void init() throws Exception { "HTTP/localhost"); kmsConf.set("hadoop.kms.authentication.kerberos.name.rules", "DEFAULT"); kmsConf.set("hadoop.kms.acl.GENERATE_EEK", "hdfs"); + // set kms auth token expiration low for testCreateZoneAfterAuthTokenExpiry + kmsConf.setLong("hadoop.kms.authentication.token.validity", + AUTH_TOKEN_VALIDITY); Writer writer = new FileWriter(kmsFile); kmsConf.writeXml(writer); @@ -260,7 +265,10 @@ public void setup() throws Exception { cluster.waitActive(); // Create a test key - DFSTestUtil.createKey(testKey, cluster, conf); + if (!testKeyCreated) { + DFSTestUtil.createKey(testKey, cluster, conf); + testKeyCreated = true; + } } @After @@ -307,4 +315,26 @@ public Void run() throws IOException { } }); } + + @Test + public void testCreateZoneAfterAuthTokenExpiry() throws Exception { + final UserGroupInformation ugi = UserGroupInformation + .loginUserFromKeytabAndReturnUGI(hdfsPrincipal, keytab); + LOG.info("Created ugi: {} ", ugi); + + ugi.doAs((PrivilegedExceptionAction) () -> { + final Path zone = new Path("/expire1"); + fsWrapper.mkdir(zone, FsPermission.getDirDefault(), true); + dfsAdmin.createEncryptionZone(zone, testKey, NO_TRASH); + + final Path zone1 = new Path("/expire2"); + fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true); + final long sleepInterval = (AUTH_TOKEN_VALIDITY + 1) * 1000; + LOG.info("Sleeping {} seconds to wait for kms auth token expiration", + sleepInterval); + Thread.sleep(sleepInterval); + dfsAdmin.createEncryptionZone(zone1, testKey, NO_TRASH); + return null; + }); + } } \ No newline at end of file From 43541a18907d2303b708ae27a9a2cb5df891da4f Mon Sep 17 00:00:00 2001 From: Todd Lipcon Date: Wed, 20 Jun 2018 12:38:59 -0700 Subject: 
[PATCH 33/70] HADOOP-15551. Avoid use of Arrays.stream in Configuration.addTags --- .../main/java/org/apache/hadoop/conf/Configuration.java | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java index 19bd5dab22..b1125e588c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java @@ -3189,25 +3189,25 @@ public void addTags(Properties prop) { if (prop.containsKey(CommonConfigurationKeys.HADOOP_TAGS_SYSTEM)) { String systemTags = prop.getProperty(CommonConfigurationKeys .HADOOP_TAGS_SYSTEM); - Arrays.stream(systemTags.split(",")).forEach(tag -> TAGS.add(tag)); + TAGS.addAll(Arrays.asList(systemTags.split(","))); } // Get all custom tags if (prop.containsKey(CommonConfigurationKeys.HADOOP_TAGS_CUSTOM)) { String customTags = prop.getProperty(CommonConfigurationKeys .HADOOP_TAGS_CUSTOM); - Arrays.stream(customTags.split(",")).forEach(tag -> TAGS.add(tag)); + TAGS.addAll(Arrays.asList(customTags.split(","))); } if (prop.containsKey(CommonConfigurationKeys.HADOOP_SYSTEM_TAGS)) { String systemTags = prop.getProperty(CommonConfigurationKeys .HADOOP_SYSTEM_TAGS); - Arrays.stream(systemTags.split(",")).forEach(tag -> TAGS.add(tag)); + TAGS.addAll(Arrays.asList(systemTags.split(","))); } // Get all custom tags if (prop.containsKey(CommonConfigurationKeys.HADOOP_CUSTOM_TAGS)) { String customTags = prop.getProperty(CommonConfigurationKeys .HADOOP_CUSTOM_TAGS); - Arrays.stream(customTags.split(",")).forEach(tag -> TAGS.add(tag)); + TAGS.addAll(Arrays.asList(customTags.split(","))); } } catch (Exception ex) { From 9f15483c5d7c94251f4c84e0155449188f202779 Mon Sep 17 00:00:00 2001 From: Eric Yang Date: Thu, 21 Jun 2018 11:18:14 -0400 Subject: [PATCH 34/70] YARN-8445. Improved error message for duplicated service and component names. 
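The patch below adds an explicit guard in ServiceApiUtil so that a component whose name collides with the service's own name is rejected up front with a dedicated message. Roughly, from a caller's point of view (a sketch only: the names are made up, a real spec needs more fields, and sfs / CONF_DNS_ENABLED stand for the harness objects used in the test further down):

    // A spec whose only component reuses the service name.
    Service app = new Service();
    app.setName("sleeper");
    app.setVersion("1.0.0");
    Component comp = new Component();
    comp.setName("sleeper");              // collides with the service name
    app.addComponent(comp);

    // With this change, validation fails fast with a clear message:
    //   "Component name sleeper must not be same as service name sleeper"
    ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED);
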
Contributed by Chandni Singh --- .../exceptions/RestApiErrorMessages.java | 2 ++ .../yarn/service/utils/ServiceApiUtil.java | 5 +++++ .../yarn/service/TestServiceApiUtil.java | 18 ++++++++++++++++++ 3 files changed, 25 insertions(+) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java index 1d2d719d32..5b3c72cae4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java @@ -28,6 +28,8 @@ public interface RestApiErrorMessages { "than 63 characters"; String ERROR_COMPONENT_NAME_INVALID = "Component name must be no more than %s characters: %s"; + String ERROR_COMPONENT_NAME_CONFLICTS_WITH_SERVICE_NAME = + "Component name %s must not be same as service name %s"; String ERROR_USER_NAME_INVALID = "User name must be no more than 63 characters"; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java index 549927327d..705e04065c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java @@ -143,6 +143,11 @@ public static void validateAndResolveService(Service service, throw new IllegalArgumentException(String.format(RestApiErrorMessages .ERROR_COMPONENT_NAME_INVALID, maxCompLength, comp.getName())); } + if (service.getName().equals(comp.getName())) { + throw new IllegalArgumentException(String.format(RestApiErrorMessages + .ERROR_COMPONENT_NAME_CONFLICTS_WITH_SERVICE_NAME, + comp.getName(), service.getName())); + } if (componentNames.contains(comp.getName())) { throw new IllegalArgumentException("Component name collision: " + comp.getName()); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceApiUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceApiUtil.java index 243c6b3a61..ae031d4aad 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceApiUtil.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceApiUtil.java @@ -333,6 +333,24 @@ public void 
testDuplicateComponents() throws IOException { } } + @Test + public void testComponentNameSameAsServiceName() throws IOException { + SliderFileSystem sfs = ServiceTestUtils.initMockFs(); + Service app = new Service(); + app.setName("test"); + app.setVersion("v1"); + app.addComponent(createValidComponent("test")); + + //component name same as service name + try { + ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED); + Assert.fail(EXCEPTION_PREFIX + "component name matches service name"); + } catch (IllegalArgumentException e) { + assertEquals("Component name test must not be same as service name test", + e.getMessage()); + } + } + @Test public void testExternalDuplicateComponent() throws IOException { Service ext = createValidApplication("comp1"); From 59de9679540f6d0edfb34cf9f88e52b51d94b4f4 Mon Sep 17 00:00:00 2001 From: Todd Lipcon Date: Thu, 21 Jun 2018 10:32:52 -0700 Subject: [PATCH 35/70] HADOOP-15549. Upgrade to commons-configuration 2.1 regresses task CPU consumption --- .../hadoop/metrics2/impl/MetricsConfig.java | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java index 027450cb65..976f16bedd 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java @@ -37,10 +37,8 @@ import org.apache.commons.configuration2.Configuration; import org.apache.commons.configuration2.PropertiesConfiguration; import org.apache.commons.configuration2.SubsetConfiguration; -import org.apache.commons.configuration2.builder.fluent.Configurations; -import org.apache.commons.configuration2.builder.fluent.Parameters; -import org.apache.commons.configuration2.convert.DefaultListDelimiterHandler; import org.apache.commons.configuration2.ex.ConfigurationException; +import org.apache.commons.configuration2.io.FileHandler; import org.apache.hadoop.metrics2.MetricsFilter; import org.apache.hadoop.metrics2.MetricsPlugin; import org.apache.hadoop.metrics2.filter.GlobFilter; @@ -112,12 +110,11 @@ static MetricsConfig create(String prefix, String... fileNames) { static MetricsConfig loadFirst(String prefix, String... fileNames) { for (String fname : fileNames) { try { - Configuration cf = new Configurations().propertiesBuilder(fname) - .configure(new Parameters().properties() - .setFileName(fname) - .setListDelimiterHandler(new DefaultListDelimiterHandler(','))) - .getConfiguration() - .interpolatedConfiguration(); + PropertiesConfiguration pcf = new PropertiesConfiguration(); + FileHandler fh = new FileHandler(pcf); + fh.setFileName(fname); + fh.load(); + Configuration cf = pcf.interpolatedConfiguration(); LOG.info("Loaded properties from {}", fname); if (LOG.isDebugEnabled()) { LOG.debug("Properties: {}", toString(cf)); From 99948565cb5d5706241d7a8fc591e1617c499e03 Mon Sep 17 00:00:00 2001 From: Inigo Goiri Date: Thu, 21 Jun 2018 18:24:10 -0700 Subject: [PATCH 36/70] YARN-8412. Move ResourceRequest.clone logic everywhere into a proper API. Contributed by Botong Huang. 
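The change consolidates the hand-rolled, field-by-field copies of ResourceRequest scattered across the code base into a single shallow-copy factory. A minimal sketch of the before/after usage, where rr stands in for any existing ResourceRequest:

    // Before: every caller rebuilt the request field by field, so a field
    // added to ResourceRequest later was easy to miss in one of the copies.
    ResourceRequest manualCopy = ResourceRequest.newBuilder()
        .priority(rr.getPriority())
        .resourceName(rr.getResourceName())
        .capability(rr.getCapability())
        .numContainers(rr.getNumContainers())
        .relaxLocality(rr.getRelaxLocality())
        .nodeLabelExpression(rr.getNodeLabelExpression())
        .executionTypeRequest(rr.getExecutionTypeRequest())
        .allocationRequestId(rr.getAllocationRequestId())
        .build();

    // After: one shallow copy, kept up to date in a single place; callers can
    // still adjust individual fields on the copy afterwards.
    ResourceRequest copy = ResourceRequest.clone(rr);
    copy.setNumContainers(1);
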
--- .../yarn/api/records/ResourceRequest.java | 20 +++++++++++++++++++ .../yarn/client/api/impl/AMRMClientImpl.java | 11 +--------- .../api/impl/TestAMRMClientOnRMRestart.java | 6 +----- .../hadoop/yarn/server/AMRMClientRelayer.java | 8 +------- .../LocalityMulticastAMRMProxyPolicy.java | 10 +--------- .../server/scheduler/ResourceRequestSet.java | 14 ++----------- .../yarn/server/utils/BuilderUtils.java | 12 ----------- .../LocalityAppPlacementAllocator.java | 10 ++-------- .../server/resourcemanager/Application.java | 10 ++++------ .../resourcemanager/TestAppManager.java | 18 ++--------------- 10 files changed, 34 insertions(+), 85 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java index eea81fe44d..a863910861 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java @@ -102,6 +102,26 @@ public static ResourceRequest newInstance(Priority priority, String hostName, .build(); } + /** + * Clone a ResourceRequest object (shallow copy). Please keep it loaded with + * all (new) fields + * + * @param rr the object to copy from + * @return the copied object + */ + @Public + @Evolving + public static ResourceRequest clone(ResourceRequest rr) { + // Please keep it loaded with all (new) fields + return ResourceRequest.newBuilder().priority(rr.getPriority()) + .resourceName(rr.getResourceName()).capability(rr.getCapability()) + .numContainers(rr.getNumContainers()) + .relaxLocality(rr.getRelaxLocality()) + .nodeLabelExpression(rr.getNodeLabelExpression()) + .executionTypeRequest(rr.getExecutionTypeRequest()) + .allocationRequestId(rr.getAllocationRequestId()).build(); + } + @Public @Unstable public static ResourceRequestBuilder newBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java index 36c3cf1d4e..7265d24ac0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java @@ -451,16 +451,7 @@ private List cloneAsks() { for(ResourceRequest r : ask) { // create a copy of ResourceRequest as we might change it while the // RPC layer is using it to send info across - ResourceRequest rr = - ResourceRequest.newBuilder().priority(r.getPriority()) - .resourceName(r.getResourceName()).capability(r.getCapability()) - .numContainers(r.getNumContainers()) - .relaxLocality(r.getRelaxLocality()) - .nodeLabelExpression(r.getNodeLabelExpression()) - .executionTypeRequest(r.getExecutionTypeRequest()) - .allocationRequestId(r.getAllocationRequestId()) - .build(); - askList.add(rr); + askList.add(ResourceRequest.clone(r)); } return askList; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientOnRMRestart.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientOnRMRestart.java index 11d703d890..51048660ef 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientOnRMRestart.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientOnRMRestart.java @@ -570,11 +570,7 @@ public synchronized Allocation allocate( ContainerUpdates updateRequests) { List askCopy = new ArrayList(); for (ResourceRequest req : ask) { - ResourceRequest reqCopy = - ResourceRequest.newInstance(req.getPriority(), - req.getResourceName(), req.getCapability(), - req.getNumContainers(), req.getRelaxLocality()); - askCopy.add(reqCopy); + askCopy.add(ResourceRequest.clone(req)); } lastAsk = ask; lastRelease = release; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMRMClientRelayer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMRMClientRelayer.java index c216aceed5..e8a7f64a14 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMRMClientRelayer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMRMClientRelayer.java @@ -220,13 +220,7 @@ public AllocateResponse allocate(AllocateRequest allocateRequest) for (ResourceRequest r : ask) { // create a copy of ResourceRequest as we might change it while the // RPC layer is using it to send info across - askList.add(ResourceRequest.newBuilder().priority(r.getPriority()) - .resourceName(r.getResourceName()).capability(r.getCapability()) - .numContainers(r.getNumContainers()) - .relaxLocality(r.getRelaxLocality()) - .nodeLabelExpression(r.getNodeLabelExpression()) - .executionTypeRequest(r.getExecutionTypeRequest()) - .allocationRequestId(r.getAllocationRequestId()).build()); + askList.add(ResourceRequest.clone(r)); } allocateRequest = AllocateRequest.newBuilder() diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java index d303d6ff14..1481f347fd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java @@ -361,15 +361,7 @@ private void splitIndividualAny(ResourceRequest originalResourceRequest, for (SubClusterId targetId : targetSCs) { // if the calculated request is non-empty add it to the answer if (containerNums.get(i) > 0) { - ResourceRequest out = - ResourceRequest.newInstance(originalResourceRequest.getPriority(), - originalResourceRequest.getResourceName(), - originalResourceRequest.getCapability(), - originalResourceRequest.getNumContainers(), - 
originalResourceRequest.getRelaxLocality(), - originalResourceRequest.getNodeLabelExpression(), - originalResourceRequest.getExecutionTypeRequest()); - out.setAllocationRequestId(allocationId); + ResourceRequest out = ResourceRequest.clone(originalResourceRequest); out.setNumContainers(containerNums.get(i)); if (ResourceRequest.isAnyLocation(out.getResourceName())) { allocationBookkeeper.addAnyRR(targetId, out); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/ResourceRequestSet.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/ResourceRequestSet.java index b1e6b6e211..cf24bbf361 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/ResourceRequestSet.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/ResourceRequestSet.java @@ -165,7 +165,7 @@ public void setNumContainers(int newValue) throws YarnException { // the same numContainers value Map newAsks = new HashMap<>(); for (ResourceRequest rr : this.asks.values()) { - ResourceRequest clone = cloneResourceRequest(rr); + ResourceRequest clone = ResourceRequest.clone(rr); clone.setNumContainers(newValue); newAsks.put(clone.getResourceName(), clone); } @@ -176,22 +176,12 @@ public void setNumContainers(int newValue) throws YarnException { throw new YarnException( "No ANY RR found in requestSet with numContainers=" + oldValue); } - ResourceRequest clone = cloneResourceRequest(rr); + ResourceRequest clone = ResourceRequest.clone(rr); clone.setNumContainers(newValue); this.asks.put(ResourceRequest.ANY, clone); } } - private ResourceRequest cloneResourceRequest(ResourceRequest rr) { - return ResourceRequest.newBuilder().priority(rr.getPriority()) - .resourceName(rr.getResourceName()).capability(rr.getCapability()) - .numContainers(rr.getNumContainers()) - .relaxLocality(rr.getRelaxLocality()) - .nodeLabelExpression(rr.getNodeLabelExpression()) - .executionTypeRequest(rr.getExecutionTypeRequest()) - .allocationRequestId(rr.getAllocationRequestId()).build(); - } - @Override public String toString() { StringBuilder builder = new StringBuilder(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java index e06b55e4c4..b6145c99ef 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java @@ -375,18 +375,6 @@ public static ResourceRequest newResourceRequest(Priority priority, return request; } - public static ResourceRequest newResourceRequest(ResourceRequest r) { - ResourceRequest request = recordFactory - .newRecordInstance(ResourceRequest.class); - request.setPriority(r.getPriority()); - request.setResourceName(r.getResourceName()); - request.setCapability(r.getCapability()); - request.setNumContainers(r.getNumContainers()); - request.setNodeLabelExpression(r.getNodeLabelExpression()); - 
request.setExecutionTypeRequest(r.getExecutionTypeRequest()); - return request; - } - public static ApplicationReport newApplicationReport( ApplicationId applicationId, ApplicationAttemptId applicationAttemptId, String user, String queue, String name, String host, int rpcPort, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/LocalityAppPlacementAllocator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/LocalityAppPlacementAllocator.java index a0358b4ada..e1239a9db3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/LocalityAppPlacementAllocator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/LocalityAppPlacementAllocator.java @@ -251,14 +251,8 @@ private void decrementOutstanding(SchedulerRequestKey schedulerRequestKey, } public ResourceRequest cloneResourceRequest(ResourceRequest request) { - ResourceRequest newRequest = ResourceRequest.newBuilder() - .priority(request.getPriority()) - .allocationRequestId(request.getAllocationRequestId()) - .resourceName(request.getResourceName()) - .capability(request.getCapability()) - .numContainers(1) - .relaxLocality(request.getRelaxLocality()) - .nodeLabelExpression(request.getNodeLabelExpression()).build(); + ResourceRequest newRequest = ResourceRequest.clone(request); + newRequest.setNumContainers(1); return newRequest; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Application.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Application.java index 7d1140d1d7..9178009864 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Application.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Application.java @@ -305,9 +305,8 @@ private synchronized void addResourceRequest( // Note this down for next interaction with ResourceManager ask.remove(request); - ask.add( - org.apache.hadoop.yarn.server.utils.BuilderUtils.newResourceRequest( - request)); // clone to ensure the RM doesn't manipulate the same obj + // clone to ensure the RM doesn't manipulate the same obj + ask.add(ResourceRequest.clone(request)); if (LOG.isDebugEnabled()) { LOG.debug("addResourceRequest: applicationId=" + applicationId.getId() @@ -462,9 +461,8 @@ private void updateResourceRequest(ResourceRequest request) { // Note this for next interaction with ResourceManager ask.remove(request); - ask.add( - org.apache.hadoop.yarn.server.utils.BuilderUtils.newResourceRequest( - request)); // clone to ensure the RM doesn't manipulate the same obj + // clone to ensure the RM doesn't manipulate the same obj + ask.add(ResourceRequest.clone(request)); if(LOG.isDebugEnabled()) { LOG.debug("updateResourceRequest:" + " application=" + applicationId diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java index e79ba08ccd..6a6f9cf027 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java @@ -733,7 +733,7 @@ public void testRMAppSubmitAMContainerResourceRequest() throws Exception { ResourceRequest.newInstance(Priority.newInstance(0), ResourceRequest.ANY, Resources.createResource(1025), 1, true); req.setNodeLabelExpression(RMNodeLabelsManager.NO_LABEL); - asContext.setAMContainerResourceRequest(cloneResourceRequest(req)); + asContext.setAMContainerResourceRequest(ResourceRequest.clone(req)); // getAMContainerResourceRequests uses a singleton list of // getAMContainerResourceRequest Assert.assertEquals(req, asContext.getAMContainerResourceRequest()); @@ -1099,25 +1099,11 @@ private static Resource mockResource() { YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB); } - private static ResourceRequest cloneResourceRequest(ResourceRequest req) { - return ResourceRequest.newInstance( - Priority.newInstance(req.getPriority().getPriority()), - new String(req.getResourceName()), - Resource.newInstance(req.getCapability().getMemorySize(), - req.getCapability().getVirtualCores()), - req.getNumContainers(), - req.getRelaxLocality(), - req.getNodeLabelExpression() != null - ? new String(req.getNodeLabelExpression()) : null, - ExecutionTypeRequest.newInstance( - req.getExecutionTypeRequest().getExecutionType())); - } - private static List cloneResourceRequests( List reqs) { List cloneReqs = new ArrayList<>(); for (ResourceRequest req : reqs) { - cloneReqs.add(cloneResourceRequest(req)); + cloneReqs.add(ResourceRequest.clone(req)); } return cloneReqs; } From 30728aced4a6b05394b3fc8c613f39fade9cf3c2 Mon Sep 17 00:00:00 2001 From: Yiqun Lin Date: Fri, 22 Jun 2018 10:50:54 +0800 Subject: [PATCH 37/70] HDFS-13692. StorageInfoDefragmenter floods log when compacting StorageInfo TreeSet. Contributed by Bharat Viswanadham. --- .../apache/hadoop/hdfs/server/blockmanagement/BlockManager.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index 76a77816d5..72ea1c0692 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -4576,7 +4576,7 @@ private void scanAndCompactStorages() throws InterruptedException { datanodesAndStorages.add(node.getDatanodeUuid()); datanodesAndStorages.add(storage.getStorageID()); } - LOG.info("StorageInfo TreeSet fill ratio {} : {}{}", + LOG.debug("StorageInfo TreeSet fill ratio {} : {}{}", storage.getStorageID(), ratio, (ratio < storageInfoDefragmentRatio) ? 
" (queued for defragmentation)" : ""); From 6432128622d64f3f9dd638b9c254c77cdf5408aa Mon Sep 17 00:00:00 2001 From: Eric E Payne Date: Fri, 22 Jun 2018 17:15:29 +0000 Subject: [PATCH 38/70] YARN-8444: NodeResourceMonitor crashes on bad swapFree value. Contributed by Jim Brennan. --- .../org/apache/hadoop/util/SysInfoLinux.java | 19 +++++- .../apache/hadoop/util/TestSysInfoLinux.java | 60 +++++++++++++++++++ 2 files changed, 77 insertions(+), 2 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoLinux.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoLinux.java index 7fd19907fd..2c2aca3a6b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoLinux.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoLinux.java @@ -216,6 +216,21 @@ private void readProcMemInfoFile() { readProcMemInfoFile(false); } + /** + * + * Wrapper for Long.parseLong() that returns zero if the value is + * invalid. Under some circumstances, swapFree in /proc/meminfo can + * go negative, reported as a very large decimal value. + */ + private long safeParseLong(String strVal) { + long parsedVal; + try { + parsedVal = Long.parseLong(strVal); + } catch (NumberFormatException nfe) { + parsedVal = 0; + } + return parsedVal; + } /** * Read /proc/meminfo, parse and compute memory information. * @param readAgain if false, read only on the first time @@ -252,9 +267,9 @@ private void readProcMemInfoFile(boolean readAgain) { } else if (mat.group(1).equals(SWAPTOTAL_STRING)) { swapSize = Long.parseLong(mat.group(2)); } else if (mat.group(1).equals(MEMFREE_STRING)) { - ramSizeFree = Long.parseLong(mat.group(2)); + ramSizeFree = safeParseLong(mat.group(2)); } else if (mat.group(1).equals(SWAPFREE_STRING)) { - swapSizeFree = Long.parseLong(mat.group(2)); + swapSizeFree = safeParseLong(mat.group(2)); } else if (mat.group(1).equals(INACTIVE_STRING)) { inactiveSize = Long.parseLong(mat.group(2)); } else if (mat.group(1).equals(INACTIVEFILE_STRING)) { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSysInfoLinux.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSysInfoLinux.java index a646a41271..0ae5d3ce8c 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSysInfoLinux.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSysInfoLinux.java @@ -161,6 +161,36 @@ int readDiskBlockInformation(String diskName, int defSector) { "DirectMap2M: 2027520 kB\n" + "DirectMap1G: 132120576 kB\n"; + static final String MEMINFO_FORMAT3 = + "MemTotal: %d kB\n" + + "MemFree: %s kB\n" + + "Buffers: 138244 kB\n" + + "Cached: 947780 kB\n" + + "SwapCached: 142880 kB\n" + + "Active: 3229888 kB\n" + + "Inactive: %d kB\n" + + "SwapTotal: %d kB\n" + + "SwapFree: %s kB\n" + + "Dirty: 122012 kB\n" + + "Writeback: 0 kB\n" + + "AnonPages: 2710792 kB\n" + + "Mapped: 24740 kB\n" + + "Slab: 132528 kB\n" + + "SReclaimable: 105096 kB\n" + + "SUnreclaim: 27432 kB\n" + + "PageTables: 11448 kB\n" + + "NFS_Unstable: 0 kB\n" + + "Bounce: 0 kB\n" + + "CommitLimit: 4125904 kB\n" + + "Committed_AS: 4143556 kB\n" + + "VmallocTotal: 34359738367 kB\n" + + "VmallocUsed: 1632 kB\n" + + "VmallocChunk: 34359736375 kB\n" + + "HugePages_Total: %d\n" + + "HugePages_Free: 0\n" + + "HugePages_Rsvd: 0\n" + + "Hugepagesize: 2048 kB"; + static final String CPUINFO_FORMAT = "processor : 
%s\n" + "vendor_id : AuthenticAMD\n" + @@ -384,6 +414,36 @@ public void parsingProcMemFile2() throws IOException { (nrHugePages * 2048) + swapTotal)); } + /** + * Test parsing /proc/meminfo + * @throws IOException + */ + @Test + public void parsingProcMemFileWithBadValues() throws IOException { + long memTotal = 4058864L; + long memFree = 0L; // bad value should return 0 + long inactive = 567732L; + long swapTotal = 2096472L; + long swapFree = 0L; // bad value should return 0 + int nrHugePages = 10; + String badFreeValue = "18446744073709551596"; + File tempFile = new File(FAKE_MEMFILE); + tempFile.deleteOnExit(); + FileWriter fWriter = new FileWriter(FAKE_MEMFILE); + fWriter.write(String.format(MEMINFO_FORMAT3, + memTotal, badFreeValue, inactive, swapTotal, badFreeValue, nrHugePages)); + + fWriter.close(); + assertEquals(plugin.getAvailablePhysicalMemorySize(), + 1024L * (memFree + inactive)); + assertEquals(plugin.getAvailableVirtualMemorySize(), + 1024L * (memFree + inactive + swapFree)); + assertEquals(plugin.getPhysicalMemorySize(), + 1024L * (memTotal - (nrHugePages * 2048))); + assertEquals(plugin.getVirtualMemorySize(), + 1024L * (memTotal - (nrHugePages * 2048) + swapTotal)); + } + @Test public void testCoreCounts() throws IOException { From 55fad6a3de3125d9e7e2e9a5f8fa5b1b22a1de60 Mon Sep 17 00:00:00 2001 From: Sean Mackrory Date: Fri, 22 Jun 2018 11:36:36 -0600 Subject: [PATCH 39/70] HADOOP-15416. Clear error message in S3Guard diff if source not found. Contributed by Gabor Bota. --- .../java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java index fbffba915d..ac10e0876a 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java @@ -805,7 +805,9 @@ private static void printDiff(FileStatus msStatus, */ private void compareDir(FileStatus msDir, FileStatus s3Dir, PrintStream out) throws IOException { - Preconditions.checkArgument(!(msDir == null && s3Dir == null)); + Preconditions.checkArgument(!(msDir == null && s3Dir == null), + "The path does not exist in metadata store and on s3."); + if (msDir != null && s3Dir != null) { Preconditions.checkArgument(msDir.getPath().equals(s3Dir.getPath()), String.format("The path from metadata store and s3 are different:" + From ae055622edeb3cbf82baa6ed952fc2abc84c021e Mon Sep 17 00:00:00 2001 From: Miklos Szegedi Date: Fri, 22 Jun 2018 13:05:41 -0700 Subject: [PATCH 40/70] MAPREDUCE-7114. Make FrameworkUploader symlink ignore improvement. Contributed by Gergo Repas. 
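Why the added normalize() matters: the uploader skips symlinks that point back into their own directory, but when the jar path still contains a "." segment its parent (for example "/lib/.") never equals the link target's already-normalized parent ("/lib") unless the jar side is normalized too, which is exactly what the one-line fix below does. A minimal, self-contained sketch with invented paths, purely for illustration:

    import java.nio.file.Path;
    import java.nio.file.Paths;

    public class NormalizeParentSketch {
      public static void main(String[] args) {
        // Parent of a path that still carries a "." segment.
        Path jarParent = Paths.get("/lib/./foo.jar").getParent();      // /lib/.
        // Parent of the link target (already a plain path here).
        Path linkParent = Paths.get("/lib/foo.jar").getParent();       // /lib
        System.out.println(jarParent.equals(linkParent));              // false
        System.out.println(jarParent.normalize().equals(linkParent));  // true
      }
    }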
--- .../hadoop/mapred/uploader/FrameworkUploader.java | 2 +- .../mapred/uploader/TestFrameworkUploader.java | 13 +++++++++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/main/java/org/apache/hadoop/mapred/uploader/FrameworkUploader.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/main/java/org/apache/hadoop/mapred/uploader/FrameworkUploader.java index 5316f383b0..d2116c041d 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/main/java/org/apache/hadoop/mapred/uploader/FrameworkUploader.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/main/java/org/apache/hadoop/mapred/uploader/FrameworkUploader.java @@ -409,7 +409,7 @@ boolean checkSymlink(File jar) { linkPath == null ? null : linkPath.getParent(); java.nio.file.Path normalizedLinkPath = linkPathParent == null ? null : linkPathParent.normalize(); - if (normalizedLinkPath != null && jarParent.equals( + if (normalizedLinkPath != null && jarParent.normalize().equals( normalizedLinkPath)) { LOG.info(String.format("Ignoring same directory link %s to %s", jarPath.toString(), link.toString())); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/test/java/org/apache/hadoop/mapred/uploader/TestFrameworkUploader.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/test/java/org/apache/hadoop/mapred/uploader/TestFrameworkUploader.java index c12902c399..9c72f72713 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/test/java/org/apache/hadoop/mapred/uploader/TestFrameworkUploader.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/test/java/org/apache/hadoop/mapred/uploader/TestFrameworkUploader.java @@ -440,6 +440,19 @@ public void testNativeIO() throws IOException { } Assert.assertTrue(uploader.checkSymlink(symlinkToTarget)); + // Create a symlink to the target with /./ in the path + symlinkToTarget = new File(parent.getAbsolutePath() + + "/./symlinkToTarget2.txt"); + try { + Files.createSymbolicLink( + Paths.get(symlinkToTarget.getAbsolutePath()), + Paths.get(targetFile.getAbsolutePath())); + } catch (UnsupportedOperationException e) { + // Symlinks are not supported, so ignore the test + Assume.assumeTrue(false); + } + Assert.assertTrue(uploader.checkSymlink(symlinkToTarget)); + // Create a symlink outside the current directory File symlinkOutside = new File(parent, "symlinkToParent.txt"); try { From 1cdce86d33d4b73ba6dd4136c966eb7e822b6f36 Mon Sep 17 00:00:00 2001 From: Yufei Gu Date: Fri, 22 Jun 2018 14:02:32 -0700 Subject: [PATCH 41/70] YARN-8184. Too many metrics if containerLocalizer/ResourceLocalizationService uses ReadWriteDiskValidator. 
Contributed by Yufei Gu --- .../containermanager/localizer/ContainerLocalizer.java | 5 +---- .../localizer/ResourceLocalizationService.java | 5 +---- 2 files changed, 2 insertions(+), 8 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java index 6a384aeff4..c0343692a0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java @@ -132,10 +132,7 @@ public ContainerLocalizer(FileContext lfs, String user, String appId, this.recordFactory = recordFactory; this.conf = new YarnConfiguration(); this.diskValidator = DiskValidatorFactory.getInstance( - conf.get(YarnConfiguration.DISK_VALIDATOR, - YarnConfiguration.DEFAULT_DISK_VALIDATOR)); - LOG.info("Disk Validator: " + YarnConfiguration.DISK_VALIDATOR + - " is loaded."); + YarnConfiguration.DEFAULT_DISK_VALIDATOR); this.appCacheDirContextName = String.format(APPCACHE_CTXT_FMT, appId); this.pendingResources = new HashMap>(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java index ddae2ae272..3f0a6fb0a4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java @@ -264,10 +264,7 @@ public void serviceInit(Configuration conf) throws Exception { } diskValidator = DiskValidatorFactory.getInstance( - conf.get(YarnConfiguration.DISK_VALIDATOR, - YarnConfiguration.DEFAULT_DISK_VALIDATOR)); - LOG.info("Disk Validator: " + YarnConfiguration.DISK_VALIDATOR + - " is loaded."); + YarnConfiguration.DEFAULT_DISK_VALIDATOR); cacheTargetSize = conf.getLong(YarnConfiguration.NM_LOCALIZER_CACHE_TARGET_SIZE_MB, YarnConfiguration.DEFAULT_NM_LOCALIZER_CACHE_TARGET_SIZE_MB) << 20; cacheCleanupPeriod = From 8a32bc39eb210fca8052c472601e24c2446b4cc2 Mon Sep 17 00:00:00 2001 From: Eric Yang Date: Fri, 22 Jun 2018 19:12:48 -0400 Subject: [PATCH 42/70] YARN-8326. Removed exit code file check for launched container. 
Contributed by Shane Kumpf --- .../launcher/ContainerLaunch.java | 19 ------------------- 1 file changed, 19 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java index bb842af0f9..04295e1367 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java @@ -808,25 +808,6 @@ public void cleanupContainer() throws IOException { } } - final int sleepMsec = 100; - int msecLeft = 2000; - if (pidFilePath != null) { - File file = new File(getExitCodeFile(pidFilePath.toString())); - while (!file.exists() && msecLeft >= 0) { - try { - Thread.sleep(sleepMsec); - } catch (InterruptedException e) { - } - msecLeft -= sleepMsec; - } - if (msecLeft < 0) { - if (LOG.isDebugEnabled()) { - LOG.debug("Timeout while waiting for the exit code file: " - + file.getAbsolutePath()); - } - } - } - // Reap the container boolean result = exec.reapContainer( new ContainerReapContext.Builder() From ca14fec02fb14e1b708f266bc715e84ae9784d6f Mon Sep 17 00:00:00 2001 From: Anu Engineer Date: Sat, 23 Jun 2018 15:49:44 -0700 Subject: [PATCH 43/70] HDDS-184. Upgrade common-langs version to 3.7 in hadoop-tools/hadoop-ozone. Contributed by Takanobu Asanuma. 
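The migration below is mechanical: for the helpers touched in these files (RandomStringUtils, StringUtils) the commons-lang3 classes keep the same method names, so only the package in the import changes. A small before/after sketch; the class name and arguments are chosen only for illustration:

    // Before: import org.apache.commons.lang.RandomStringUtils;   (commons-lang 2.x)
    // After, as in this patch:
    import org.apache.commons.lang3.RandomStringUtils;

    public class Lang3ImportSketch {
      public static void main(String[] args) {
        // Same call as with commons-lang 2.x; only the import's package differs.
        String name = RandomStringUtils.randomAlphanumeric(10).toLowerCase();
        System.out.println(name);
      }
    }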
--- .../ozone/genconf/TestGenerateOzoneRequiredConfigurations.java | 2 +- .../main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java | 2 +- .../java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java | 2 +- .../org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java | 2 +- .../java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java index cfd1159469..c2f5eb7f76 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java @@ -19,7 +19,7 @@ package org.apache.hadoop.ozone.genconf; import org.apache.commons.io.FileUtils; -import org.apache.commons.lang.RandomStringUtils; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.test.GenericTestUtils; import org.hamcrest.CoreMatchers; import org.junit.AfterClass; diff --git a/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java b/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java index 0ff1d50cd6..6906a9dc47 100644 --- a/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java +++ b/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java @@ -53,7 +53,7 @@ import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.http.client.utils.URIBuilder; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java b/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java index b82c4a135d..ad21f28ec4 100644 --- a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java +++ b/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java @@ -18,7 +18,7 @@ package org.apache.hadoop.fs.ozone; -import org.apache.commons.lang.RandomStringUtils; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.FSDataOutputStream; diff --git a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java b/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java index 5a7cb4f6f8..a2257023b6 100644 --- a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java +++ b/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java @@ -31,7 +31,7 @@ import org.junit.runners.Parameterized; import org.junit.runners.Parameterized.Parameters; import org.apache.commons.io.IOUtils; -import org.apache.commons.lang.RandomStringUtils; +import org.apache.commons.lang3.RandomStringUtils; import org.junit.After; 
import org.apache.hadoop.hdds.conf.OzoneConfiguration; diff --git a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java b/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java index 176b614d4d..8417e463f9 100644 --- a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java +++ b/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java @@ -18,7 +18,7 @@ package org.apache.hadoop.fs.ozone.contract; -import org.apache.commons.lang.RandomStringUtils; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.fs.FileSystem; From e16e5b307d6c4404db0698b9d128e5bf4aa16a8a Mon Sep 17 00:00:00 2001 From: Anu Engineer Date: Sun, 24 Jun 2018 01:05:04 -0700 Subject: [PATCH 44/70] HDDS-177. Create a releasable ozonefs artifact Contributed by Marton, Elek. --- dev-support/bin/ozone-dist-layout-stitching | 4 +- hadoop-dist/pom.xml | 5 ++ .../acceptance/ozonefs/docker-compose.yaml | 71 +++++++++++++++++++ .../src/test/acceptance/ozonefs/docker-config | 39 ++++++++++ .../src/test/acceptance/ozonefs/ozonefs.robot | 39 ++++++++++ .../ozonefs}/pom.xml | 55 +++++++++++--- .../org/apache/hadoop/fs/ozone/Constants.java | 0 .../java/org/apache/hadoop/fs/ozone/OzFs.java | 0 .../hadoop/fs/ozone/OzoneFSInputStream.java | 0 .../hadoop/fs/ozone/OzoneFSOutputStream.java | 0 .../hadoop/fs/ozone/OzoneFileSystem.java | 0 .../apache/hadoop/fs/ozone/package-info.java | 0 .../fs/ozone/TestOzoneFSInputStream.java | 0 .../fs/ozone/TestOzoneFileInterfaces.java | 0 .../contract/ITestOzoneContractCreate.java | 0 .../contract/ITestOzoneContractDelete.java | 0 .../contract/ITestOzoneContractDistCp.java | 0 .../ITestOzoneContractGetFileStatus.java | 0 .../contract/ITestOzoneContractMkdir.java | 0 .../contract/ITestOzoneContractOpen.java | 0 .../contract/ITestOzoneContractRename.java | 0 .../contract/ITestOzoneContractRootDir.java | 0 .../contract/ITestOzoneContractSeek.java | 0 .../fs/ozone/contract/OzoneContract.java | 0 .../src/test/resources/contract/ozone.xml | 0 .../src/test/resources/log4j.properties | 0 hadoop-ozone/pom.xml | 1 + hadoop-project/pom.xml | 6 +- hadoop-tools/hadoop-tools-dist/pom.xml | 15 ---- hadoop-tools/pom.xml | 11 --- 30 files changed, 209 insertions(+), 37 deletions(-) create mode 100644 hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-compose.yaml create mode 100644 hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-config create mode 100644 hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/ozonefs.robot rename {hadoop-tools/hadoop-ozone => hadoop-ozone/ozonefs}/pom.xml (70%) rename {hadoop-tools/hadoop-ozone => hadoop-ozone/ozonefs}/src/main/java/org/apache/hadoop/fs/ozone/Constants.java (100%) rename {hadoop-tools/hadoop-ozone => hadoop-ozone/ozonefs}/src/main/java/org/apache/hadoop/fs/ozone/OzFs.java (100%) rename {hadoop-tools/hadoop-ozone => hadoop-ozone/ozonefs}/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java (100%) rename {hadoop-tools/hadoop-ozone => hadoop-ozone/ozonefs}/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSOutputStream.java (100%) rename {hadoop-tools/hadoop-ozone => hadoop-ozone/ozonefs}/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java (100%) rename {hadoop-tools/hadoop-ozone => 
hadoop-ozone/ozonefs}/src/main/java/org/apache/hadoop/fs/ozone/package-info.java (100%) rename {hadoop-tools/hadoop-ozone => hadoop-ozone/ozonefs}/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java (100%) rename {hadoop-tools/hadoop-ozone => hadoop-ozone/ozonefs}/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java (100%) rename {hadoop-tools/hadoop-ozone => hadoop-ozone/ozonefs}/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractCreate.java (100%) rename {hadoop-tools/hadoop-ozone => hadoop-ozone/ozonefs}/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDelete.java (100%) rename {hadoop-tools/hadoop-ozone => hadoop-ozone/ozonefs}/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDistCp.java (100%) rename {hadoop-tools/hadoop-ozone => hadoop-ozone/ozonefs}/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractGetFileStatus.java (100%) rename {hadoop-tools/hadoop-ozone => hadoop-ozone/ozonefs}/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractMkdir.java (100%) rename {hadoop-tools/hadoop-ozone => hadoop-ozone/ozonefs}/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractOpen.java (100%) rename {hadoop-tools/hadoop-ozone => hadoop-ozone/ozonefs}/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRename.java (100%) rename {hadoop-tools/hadoop-ozone => hadoop-ozone/ozonefs}/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRootDir.java (100%) rename {hadoop-tools/hadoop-ozone => hadoop-ozone/ozonefs}/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractSeek.java (100%) rename {hadoop-tools/hadoop-ozone => hadoop-ozone/ozonefs}/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java (100%) rename {hadoop-tools/hadoop-ozone => hadoop-ozone/ozonefs}/src/test/resources/contract/ozone.xml (100%) rename {hadoop-tools/hadoop-ozone => hadoop-ozone/ozonefs}/src/test/resources/log4j.properties (100%) diff --git a/dev-support/bin/ozone-dist-layout-stitching b/dev-support/bin/ozone-dist-layout-stitching index ad8abe294c..be330d5aaa 100755 --- a/dev-support/bin/ozone-dist-layout-stitching +++ b/dev-support/bin/ozone-dist-layout-stitching @@ -145,6 +145,8 @@ run copy "${ROOT}/hadoop-ozone/ozone-manager/target/hadoop-ozone-ozone-manager-$ run copy "${ROOT}/hadoop-ozone/objectstore-service/target/hadoop-ozone-objectstore-service-${HDDS_VERSION}" . run copy "${ROOT}/hadoop-ozone/client/target/hadoop-ozone-client-${HDDS_VERSION}" . run copy "${ROOT}/hadoop-ozone/tools/target/hadoop-ozone-tools-${HDDS_VERSION}" . 
+mkdir -p "./share/hadoop/ozonefs" +cp "${ROOT}/hadoop-ozone/ozonefs/target/hadoop-ozone-filesystem-${HDDS_VERSION}.jar" "./share/hadoop/ozonefs/hadoop-ozone-filesystem.jar" # Optional documentation, could be missing cp -r "${ROOT}/hadoop-ozone/docs/target/classes/webapps/docs" ./share/hadoop/ozone/webapps/ksm/ cp -r "${ROOT}/hadoop-ozone/docs/target/classes/webapps/docs" ./share/hadoop/hdds/webapps/scm/ @@ -153,5 +155,5 @@ cp -r "${ROOT}/hadoop-ozone/docs/target/classes/webapps/docs" ./share/hadoop/hdd mkdir -p ./share/hadoop/mapreduce mkdir -p ./share/hadoop/yarn echo -echo "Hadoop Ozone dist layout available at: ${BASEDIR}/ozone-${HDDS_VERSION}" +echo "Hadoop Ozone dist layout available at: ${BASEDIR}/ozone" echo diff --git a/hadoop-dist/pom.xml b/hadoop-dist/pom.xml index dfbf8184f8..5de6759ce9 100644 --- a/hadoop-dist/pom.xml +++ b/hadoop-dist/pom.xml @@ -265,6 +265,11 @@ hadoop-ozone-docs provided + + org.apache.hadoop + hadoop-ozone-filesystem + provided + diff --git a/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-compose.yaml b/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-compose.yaml new file mode 100644 index 0000000000..3323557511 --- /dev/null +++ b/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-compose.yaml @@ -0,0 +1,71 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +version: "3" +services: + namenode: + image: apache/hadoop-runner + hostname: namenode + volumes: + - ${OZONEDIR}:/opt/hadoop + ports: + - 9870 + environment: + ENSURE_NAMENODE_DIR: /data/namenode + env_file: + - ./docker-config + command: ["/opt/hadoop/bin/hdfs","namenode"] + datanode: + image: apache/hadoop-runner + volumes: + - ${OZONEDIR}:/opt/hadoop + ports: + - 9864 + command: ["/opt/hadoop/bin/ozone","datanode"] + env_file: + - ./docker-config + ksm: + image: apache/hadoop-runner + hostname: ksm + volumes: + - ${OZONEDIR}:/opt/hadoop + ports: + - 9874 + environment: + ENSURE_KSM_INITIALIZED: /data/metadata/ksm/current/VERSION + env_file: + - ./docker-config + command: ["/opt/hadoop/bin/ozone","ksm"] + scm: + image: apache/hadoop-runner + volumes: + - ${OZONEDIR}:/opt/hadoop + ports: + - 9876 + env_file: + - ./docker-config + environment: + ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION + command: ["/opt/hadoop/bin/ozone","scm"] + hadooplast: + image: flokkr/hadoop:3.1.0 + volumes: + - ${OZONEDIR}:/opt/ozone + env_file: + - ./docker-config + environment: + HADOOP_CLASSPATH: /opt/ozone/share/hadoop/ozonefs/hadoop-ozone-filesystem.jar + command: ["watch","-n","100000","ls"] diff --git a/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-config b/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-config new file mode 100644 index 0000000000..dec863e94e --- /dev/null +++ b/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-config @@ -0,0 +1,39 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +CORE-SITE.XML_fs.defaultFS=hdfs://namenode:9000 +CORE-SITE.XML_fs.o3.impl=org.apache.hadoop.fs.ozone.OzoneFileSystem +OZONE-SITE.XML_ozone.ksm.address=ksm +OZONE-SITE.XML_ozone.ksm.http-address=ksm:9874 +OZONE-SITE.XML_ozone.scm.names=scm +OZONE-SITE.XML_ozone.enabled=True +OZONE-SITE.XML_ozone.scm.datanode.id=/data/datanode.id +OZONE-SITE.XML_ozone.scm.block.client.address=scm +OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata +OZONE-SITE.XML_ozone.handler.type=distributed +OZONE-SITE.XML_ozone.scm.client.address=scm +OZONE-SITE.XML_hdds.datanode.plugins=org.apache.hadoop.ozone.web.OzoneHddsDatanodeService +OZONE-SITE.XML_ozone.scm.heartbeat.interval=3s +HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000 +HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode +HDFS-SITE.XML_rpc.metrics.quantile.enable=true +HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300 +HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.HddsDatanodeService +LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout +LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout +LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender +LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n +LOG4J.PROPERTIES_log4j.category.org.apache.hadoop.util.NativeCodeLoader=ERROR diff --git a/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/ozonefs.robot b/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/ozonefs.robot new file mode 100644 index 0000000000..9e8a5d2004 --- /dev/null +++ b/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/ozonefs.robot @@ -0,0 +1,39 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +*** Settings *** +Documentation Ozonefs test +Library OperatingSystem +Suite Setup Startup Ozone cluster with size 5 +Suite Teardown Teardown Ozone cluster +Resource ../commonlib.robot + +*** Variables *** +${COMPOSEFILE} ${CURDIR}/docker-compose.yaml +${PROJECTDIR} ${CURDIR}/../../../../../.. 
+ + +*** Test Cases *** +Create volume and bucket + Execute on datanode ozone oz -createVolume http://ksm/fstest -user bilbo -quota 100TB -root + Execute on datanode ozone oz -createBucket http://ksm/fstest/bucket1 + +Check volume from ozonefs + ${result} = Execute on hadooplast hdfs dfs -ls o3://bucket1.fstest/ + +Create directory from ozonefs + Execute on hadooplast hdfs dfs -mkdir -p o3://bucket1.fstest/testdir/deep + ${result} = Execute on ksm ozone oz -listKey o3://ksm/fstest/bucket1 | grep -v WARN | jq -r '.[].keyName' + Should contain ${result} testdir/deep diff --git a/hadoop-tools/hadoop-ozone/pom.xml b/hadoop-ozone/ozonefs/pom.xml similarity index 70% rename from hadoop-tools/hadoop-ozone/pom.xml rename to hadoop-ozone/ozonefs/pom.xml index a7d0cfaf83..c3de4d1b32 100644 --- a/hadoop-tools/hadoop-ozone/pom.xml +++ b/hadoop-ozone/ozonefs/pom.xml @@ -18,14 +18,13 @@ 4.0.0 org.apache.hadoop - hadoop-project - 3.2.0-SNAPSHOT - ../../hadoop-project + hadoop-ozone + 0.2.1-SNAPSHOT hadoop-ozone-filesystem Apache Hadoop Ozone FileSystem jar - + 0.2.1-SNAPSHOT UTF-8 true @@ -44,6 +43,46 @@ + + org.apache.maven.plugins + maven-shade-plugin + 3.1.1 + + + + com.google.guava:guava:jar + org.slf4j:slf4j-api:jar + com.google.protobuf:protobuf-java + com.nimbusds:nimbus-jose-jwt:jar + com.github.stephenc.jcip:jcip-annotations + com.google.code.findbugs:jsr305:jar + org.apache.hadoop:hadoop-ozone-client + org.apache.hadoop:hadoop-hdds-client + org.apache.hadoop:hadoop-hdds-common + org.fusesource.leveldbjni:leveldbjni-all + org.apache.ratis:ratis-server + org.apache.ratis:ratis-proto-shaded:jar + com.google.auto.value:auto-value-annotations + com.squareup:javapoet:jar + org.jctools:jctools-core + org.apache.ratis:ratis-common + org.apache.ratis:ratis-client + org.apache.ratis:ratis-netty + org.apache.ratis:ratis-grpc + org.rocksdb:rocksdbjni + org.apache.hadoop:hadoop-ozone-common + + + + + + package + + shade + + + + org.apache.maven.plugins maven-dependency-plugin @@ -56,7 +95,9 @@ - ${project.basedir}/target/hadoop-tools-deps/${project.artifactId}.tools-optional.txt + + ${project.basedir}/target/hadoop-tools-deps/${project.artifactId}.tools-optional.txt + @@ -83,7 +124,6 @@ org.apache.hadoop hadoop-hdds-common - provided org.apache.hadoop @@ -108,12 +148,10 @@ org.apache.hadoop hadoop-hdds-client - test org.apache.hadoop hadoop-ozone-common - provided org.apache.hadoop @@ -129,7 +167,6 @@ org.apache.hadoop hadoop-ozone-client - provided org.apache.hadoop diff --git a/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/Constants.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/Constants.java similarity index 100% rename from hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/Constants.java rename to hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/Constants.java diff --git a/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/OzFs.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzFs.java similarity index 100% rename from hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/OzFs.java rename to hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzFs.java diff --git a/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java similarity index 100% rename from hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java rename to 
hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java diff --git a/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSOutputStream.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSOutputStream.java similarity index 100% rename from hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSOutputStream.java rename to hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSOutputStream.java diff --git a/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java similarity index 100% rename from hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java rename to hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java diff --git a/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/package-info.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/package-info.java similarity index 100% rename from hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/package-info.java rename to hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/package-info.java diff --git a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java similarity index 100% rename from hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java rename to hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java diff --git a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java similarity index 100% rename from hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java rename to hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java diff --git a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractCreate.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractCreate.java similarity index 100% rename from hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractCreate.java rename to hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractCreate.java diff --git a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDelete.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDelete.java similarity index 100% rename from hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDelete.java rename to hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDelete.java diff --git a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDistCp.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDistCp.java similarity index 100% rename from hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDistCp.java rename to 
hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDistCp.java diff --git a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractGetFileStatus.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractGetFileStatus.java similarity index 100% rename from hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractGetFileStatus.java rename to hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractGetFileStatus.java diff --git a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractMkdir.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractMkdir.java similarity index 100% rename from hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractMkdir.java rename to hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractMkdir.java diff --git a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractOpen.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractOpen.java similarity index 100% rename from hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractOpen.java rename to hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractOpen.java diff --git a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRename.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRename.java similarity index 100% rename from hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRename.java rename to hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRename.java diff --git a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRootDir.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRootDir.java similarity index 100% rename from hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRootDir.java rename to hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRootDir.java diff --git a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractSeek.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractSeek.java similarity index 100% rename from hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractSeek.java rename to hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractSeek.java diff --git a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java similarity index 100% rename from hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java rename to hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java diff --git a/hadoop-tools/hadoop-ozone/src/test/resources/contract/ozone.xml b/hadoop-ozone/ozonefs/src/test/resources/contract/ozone.xml similarity index 100% rename from 
hadoop-tools/hadoop-ozone/src/test/resources/contract/ozone.xml rename to hadoop-ozone/ozonefs/src/test/resources/contract/ozone.xml diff --git a/hadoop-tools/hadoop-ozone/src/test/resources/log4j.properties b/hadoop-ozone/ozonefs/src/test/resources/log4j.properties similarity index 100% rename from hadoop-tools/hadoop-ozone/src/test/resources/log4j.properties rename to hadoop-ozone/ozonefs/src/test/resources/log4j.properties diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml index cffef14e6e..b655088c57 100644 --- a/hadoop-ozone/pom.xml +++ b/hadoop-ozone/pom.xml @@ -34,6 +34,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> client ozone-manager tools + ozonefs integration-test objectstore-service docs diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index ed0187b533..dfd1eac293 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -586,7 +586,11 @@ hadoop-ozone-docs ${hdds.version} - + + org.apache.hadoop + hadoop-ozone-filesystem + ${hdds.version} + org.apache.hadoop hadoop-hdds-common diff --git a/hadoop-tools/hadoop-tools-dist/pom.xml b/hadoop-tools/hadoop-tools-dist/pom.xml index 21cc7cef8c..42ce94c829 100644 --- a/hadoop-tools/hadoop-tools-dist/pom.xml +++ b/hadoop-tools/hadoop-tools-dist/pom.xml @@ -192,20 +192,5 @@ - - - hdds - - false - - - - org.apache.hadoop - hadoop-ozone-filesystem - compile - ${project.version} - - - diff --git a/hadoop-tools/pom.xml b/hadoop-tools/pom.xml index f421e580ba..dca59d31af 100644 --- a/hadoop-tools/pom.xml +++ b/hadoop-tools/pom.xml @@ -67,15 +67,4 @@ - - - hdds - - false - - - hadoop-ozone - - - From 440140cea6718229094a3d2b97b9b9bd28b95d9b Mon Sep 17 00:00:00 2001 From: Weiwei Yang Date: Mon, 25 Jun 2018 09:15:31 +0800 Subject: [PATCH 45/70] YARN-8443. Total #VCores in cluster metrics is wrong when CapacityScheduler reserved some containers. Contributed by Tao Yang. --- .../server/resourcemanager/webapp/dao/ClusterMetricsInfo.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java index 84f70d9f2b..69d88aacca 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java @@ -101,7 +101,7 @@ public ClusterMetricsInfo(final ResourceScheduler rs) { CapacityScheduler cs = (CapacityScheduler) rs; this.totalMB = availableMB + allocatedMB + reservedMB; this.totalVirtualCores = - availableVirtualCores + allocatedVirtualCores + containersReserved; + availableVirtualCores + allocatedVirtualCores + reservedVirtualCores; // TODO, add support of other schedulers to get total used resources // across partition. if (cs.getRootQueue() != null From 1ba4e62304a70d53f1a4f76995b6e1fac3107922 Mon Sep 17 00:00:00 2001 From: Steve Loughran Date: Mon, 25 Jun 2018 14:38:33 +0100 Subject: [PATCH 46/70] HADOOP-14396. Add builder interface to FileContext. Contributed by Lei (Eddy) Xu. 
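The new FileContext builder mirrors the create-builder pattern FileSystem already exposes; the sketch below follows the cases the added FileContextMainOperationsBaseTest tests exercise (the path and payload are illustrative, not from the patch):

    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileContext;
    import org.apache.hadoop.fs.Path;

    public class FileContextBuilderSketch {
      public static void main(String[] args) throws Exception {
        FileContext fc = FileContext.getFileContext();
        Path p = new Path("/tmp/builder-demo/file1");

        // create() alone fails with FileNotFoundException if the parent
        // directory is missing; recursive() creates the missing parents.
        try (FSDataOutputStream out = fc.create(p).recursive().build()) {
          out.write("hello".getBytes("UTF-8"));
        }

        // Reopen the same file for append, or use overwrite(true) to replace it.
        try (FSDataOutputStream out = fc.create(p).append().build()) {
          out.write(" world".getBytes("UTF-8"));
        }
      }
    }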
--- .../hadoop/fs/FSDataOutputStreamBuilder.java | 22 +++++++ .../org/apache/hadoop/fs/FileContext.java | 66 +++++++++++++++++++ .../java/org/apache/hadoop/fs/Options.java | 3 + .../fs/FileContextMainOperationsBaseTest.java | 44 ++++++++++++- 4 files changed, 134 insertions(+), 1 deletion(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java index 86c284a9e8..d43129388b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java @@ -115,6 +115,27 @@ public abstract class FSDataOutputStreamBuilder */ protected abstract B getThisBuilder(); + /** + * Construct from a {@link FileContext}. + * + * @param fc FileContext + * @param p path. + * @throws IOException + */ + FSDataOutputStreamBuilder(@Nonnull FileContext fc, + @Nonnull Path p) throws IOException { + Preconditions.checkNotNull(fc); + Preconditions.checkNotNull(p); + this.fs = null; + this.path = p; + + AbstractFileSystem afs = fc.getFSofPath(p); + FsServerDefaults defaults = afs.getServerDefaults(p); + bufferSize = defaults.getFileBufferSize(); + replication = defaults.getReplication(); + blockSize = defaults.getBlockSize(); + } + /** * Constructor. */ @@ -131,6 +152,7 @@ protected FSDataOutputStreamBuilder(@Nonnull FileSystem fileSystem, } protected FileSystem getFS() { + Preconditions.checkNotNull(fs); return fs; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java index 6ea69d01b1..5215c3cdee 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java @@ -24,6 +24,7 @@ import java.net.URI; import java.security.PrivilegedExceptionAction; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.EnumSet; import java.util.HashSet; @@ -35,6 +36,8 @@ import java.util.TreeSet; import java.util.Map.Entry; +import javax.annotation.Nonnull; + import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -694,6 +697,69 @@ public FSDataOutputStream next(final AbstractFileSystem fs, final Path p) }.resolve(this, absF); } + /** + * {@link FSDataOutputStreamBuilder} for {@liink FileContext}. 
+ */ + private static final class FCDataOutputStreamBuilder extends + FSDataOutputStreamBuilder< + FSDataOutputStream, FCDataOutputStreamBuilder> { + private final FileContext fc; + + private FCDataOutputStreamBuilder( + @Nonnull FileContext fc, @Nonnull Path p) throws IOException { + super(fc, p); + this.fc = fc; + Preconditions.checkNotNull(fc); + } + + @Override + protected FCDataOutputStreamBuilder getThisBuilder() { + return this; + } + + @Override + public FSDataOutputStream build() throws IOException { + final EnumSet flags = getFlags(); + List createOpts = new ArrayList<>(Arrays.asList( + CreateOpts.blockSize(getBlockSize()), + CreateOpts.bufferSize(getBufferSize()), + CreateOpts.repFac(getReplication()), + CreateOpts.perms(getPermission()) + )); + if (getChecksumOpt() != null) { + createOpts.add(CreateOpts.checksumParam(getChecksumOpt())); + } + if (getProgress() != null) { + createOpts.add(CreateOpts.progress(getProgress())); + } + if (isRecursive()) { + createOpts.add(CreateOpts.createParent()); + } + return fc.create(getPath(), flags, + createOpts.toArray(new CreateOpts[0])); + } + } + + /** + * Create a {@link FSDataOutputStreamBuilder} for creating or overwriting + * a file on indicated path. + * + * @param f the file path to create builder for. + * @return {@link FSDataOutputStreamBuilder} to build a + * {@link FSDataOutputStream}. + * + * Upon {@link FSDataOutputStreamBuilder#build()} being invoked, + * builder parameters will be verified by {@link FileContext} and + * {@link AbstractFileSystem#create}. And filesystem states will be modified. + * + * Client should expect {@link FSDataOutputStreamBuilder#build()} throw the + * same exceptions as create(Path, EnumSet, CreateOpts...). + */ + public FSDataOutputStreamBuilder create(final Path f) + throws IOException { + return new FCDataOutputStreamBuilder(this, f).create(); + } + /** * Make(create) a directory and all the non-existent parents. 
* diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java index 126e754731..5e932864c8 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java @@ -55,6 +55,9 @@ public static ChecksumParam checksumParam( ChecksumOpt csumOpt) { return new ChecksumParam(csumOpt); } + public static Progress progress(Progressable prog) { + return new Progress(prog); + } public static Perms perms(FsPermission perm) { return new Perms(perm); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java index 35ec4ff6b6..62ecd9f13a 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java @@ -810,7 +810,49 @@ public void testCreateFlagAppendCreateOverwrite() throws IOException { fc.create(p, EnumSet.of(CREATE, APPEND, OVERWRITE)); Assert.fail("Excepted exception not thrown"); } - + + @Test + public void testBuilderCreateNonExistingFile() throws IOException { + Path p = getTestRootPath(fc, "test/testBuilderCreateNonExistingFile"); + FSDataOutputStream out = fc.create(p).build(); + writeData(fc, p, out, data, data.length); + } + + @Test + public void testBuilderCreateExistingFile() throws IOException { + Path p = getTestRootPath(fc, "test/testBuilderCreateExistingFile"); + createFile(p); + FSDataOutputStream out = fc.create(p).overwrite(true).build(); + writeData(fc, p, out, data, data.length); + } + + @Test + public void testBuilderCreateAppendNonExistingFile() throws IOException { + Path p = getTestRootPath(fc, "test/testBuilderCreateAppendNonExistingFile"); + FSDataOutputStream out = fc.create(p).append().build(); + writeData(fc, p, out, data, data.length); + } + + @Test + public void testBuilderCreateAppendExistingFile() throws IOException { + Path p = getTestRootPath(fc, "test/testBuilderCreateAppendExistingFile"); + createFile(p); + FSDataOutputStream out = fc.create(p).append().build(); + writeData(fc, p, out, data, 2 * data.length); + } + + @Test + public void testBuilderCreateRecursive() throws IOException { + Path p = getTestRootPath(fc, "test/parent/no/exist/file1"); + try (FSDataOutputStream out = fc.create(p).build()) { + fail("Should throw FileNotFoundException on non-exist directory"); + } catch (FileNotFoundException e) { + } + + FSDataOutputStream out = fc.create(p).recursive().build(); + writeData(fc, p, out, data, data.length); + } + private static void writeData(FileContext fc, Path p, FSDataOutputStream out, byte[] data, long expectedLen) throws IOException { out.write(data, 0, data.length); From abc3e4bad905efde5a4881e8a072c68f6e910ade Mon Sep 17 00:00:00 2001 From: Inigo Goiri Date: Mon, 25 Jun 2018 09:50:27 -0700 Subject: [PATCH 47/70] HADOOP-15458. TestLocalFileSystem#testFSOutputStreamBuilder fails on Windows. Contributed by Xiao Liang. 
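The fix below is the usual Windows test pattern: build() leaves an output stream, and therefore a file handle, open, and Windows (unlike Linux) refuses to delete a file whose handle is still held, so later cleanup of the test directory fails. Wrapping the assertions in try-with-resources releases the handle deterministically. A self-contained sketch of the pattern, with an invented file name:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class CloseBeforeCleanupSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.getLocal(new Configuration());
        Path path = new Path(System.getProperty("java.io.tmpdir"), "builder-demo.txt");
        // Keep the stream scoped so its handle is released before the delete;
        // on Windows an open handle would make the delete below fail.
        try (FSDataOutputStream out = fs.createFile(path).build()) {
          out.write(1);
        }
        fs.delete(path, false);
      }
    }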
--- .../apache/hadoop/fs/TestLocalFileSystem.java | 23 ++++++++++--------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java index 0e337b4736..d5622af085 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java @@ -689,17 +689,18 @@ public void testFSOutputStreamBuilder() throws Exception { // and permission FSDataOutputStreamBuilder builder = fileSys.createFile(path); - builder.build(); - Assert.assertEquals("Should be default block size", - builder.getBlockSize(), fileSys.getDefaultBlockSize()); - Assert.assertEquals("Should be default replication factor", - builder.getReplication(), fileSys.getDefaultReplication()); - Assert.assertEquals("Should be default buffer size", - builder.getBufferSize(), - fileSys.getConf().getInt(IO_FILE_BUFFER_SIZE_KEY, - IO_FILE_BUFFER_SIZE_DEFAULT)); - Assert.assertEquals("Should be default permission", - builder.getPermission(), FsPermission.getFileDefault()); + try (FSDataOutputStream stream = builder.build()) { + Assert.assertEquals("Should be default block size", + builder.getBlockSize(), fileSys.getDefaultBlockSize()); + Assert.assertEquals("Should be default replication factor", + builder.getReplication(), fileSys.getDefaultReplication()); + Assert.assertEquals("Should be default buffer size", + builder.getBufferSize(), + fileSys.getConf().getInt(IO_FILE_BUFFER_SIZE_KEY, + IO_FILE_BUFFER_SIZE_DEFAULT)); + Assert.assertEquals("Should be default permission", + builder.getPermission(), FsPermission.getFileDefault()); + } // Test set 0 to replication, block size and buffer size builder = fileSys.createFile(path); From 4ffe68a6f70ce01a5654da8991b4cdb35ae0bf1f Mon Sep 17 00:00:00 2001 From: Rohith Sharma K S Date: Mon, 25 Jun 2018 10:38:03 -0700 Subject: [PATCH 48/70] YARN-8457. Compilation is broken with -Pyarn-ui. --- .../hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc index 959e1696e7..daf4462c44 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc @@ -1,4 +1,5 @@ { "directory": "bower_components", - "analytics": false + "analytics": false, + "registry": "https://registry.bower.io" } From a55d6bba71c81c1c4e9d8cd11f55c78f10a548b0 Mon Sep 17 00:00:00 2001 From: Anu Engineer Date: Mon, 25 Jun 2018 13:05:22 -0700 Subject: [PATCH 49/70] HDDS-191. Queue SCMCommands via EventQueue in SCM. Contributed by Elek, Marton. 
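The intent of the change below: instead of calling the node manager directly, SCM components can publish a CommandForDatanode event and let SCMNodeManager (which now implements EventHandler for CommandForDatanode and exposes the DATANODE_COMMAND typed event) queue the command for the target datanode's next heartbeat. A rough sketch of the wiring; the EventQueue constructor and the addHandler/fireEvent calls are assumptions about the hdds event API of this era, not verbatim code from the patch:

    import java.util.UUID;

    import org.apache.hadoop.hdds.server.events.EventQueue;
    import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
    import org.apache.hadoop.ozone.protocol.commands.ReregisterCommand;

    public class DatanodeCommandSketch {
      public static void main(String[] args) {
        // Assumed: the hdds event queue with a no-arg constructor.
        EventQueue queue = new EventQueue();

        // In the SCM the handler is the SCMNodeManager instance:
        //   queue.addHandler(SCMNodeManager.DATANODE_COMMAND, scmNodeManager);

        // Any component can then address a command to one datanode by UUID
        // (assuming ReregisterCommand's no-arg constructor).
        CommandForDatanode command =
            new CommandForDatanode(UUID.randomUUID(), new ReregisterCommand());
        //   queue.fireEvent(SCMNodeManager.DATANODE_COMMAND, command);
        System.out.println("queued command for " + command.getDatanodeId());
      }
    }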
--- .../protocol/commands/CommandForDatanode.java | 45 +++++++++++++++++++ .../hadoop/hdds/scm/node/SCMNodeManager.java | 20 ++++++++- .../scm/server/StorageContainerManager.java | 8 +++- .../hadoop/hdds/scm/node/TestNodeManager.java | 39 ++++++++++++++++ 4 files changed, 110 insertions(+), 2 deletions(-) create mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CommandForDatanode.java diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CommandForDatanode.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CommandForDatanode.java new file mode 100644 index 0000000000..0c4964ac4c --- /dev/null +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CommandForDatanode.java @@ -0,0 +1,45 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
    + * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.protocol.commands; + +import java.util.UUID; + +import com.google.protobuf.GeneratedMessage; + +/** + * Command for the datanode with the destination address. + */ +public class CommandForDatanode { + + private final UUID datanodeId; + + private final SCMCommand command; + + public CommandForDatanode(UUID datanodeId, SCMCommand command) { + this.datanodeId = datanodeId; + this.command = command; + } + + public UUID getDatanodeId() { + return datanodeId; + } + + public SCMCommand getCommand() { + return command; + } +} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java index b339fb7ce9..fc8b0137f3 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java @@ -25,6 +25,10 @@ import org.apache.hadoop.hdds.scm.VersionInfo; import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat; +import org.apache.hadoop.hdds.server.events.Event; +import org.apache.hadoop.hdds.server.events.EventHandler; +import org.apache.hadoop.hdds.server.events.EventPublisher; +import org.apache.hadoop.hdds.server.events.TypedEvent; import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; @@ -42,11 +46,14 @@ import org.apache.hadoop.metrics2.util.MBeans; import org.apache.hadoop.ozone.protocol.StorageContainerNodeProtocol; import org.apache.hadoop.ozone.protocol.VersionResponse; +import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode; import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand; import org.apache.hadoop.ozone.protocol.commands.ReregisterCommand; import org.apache.hadoop.ozone.protocol.commands.SCMCommand; import org.apache.hadoop.util.Time; import org.apache.hadoop.util.concurrent.HadoopExecutors; + +import com.google.protobuf.GeneratedMessage; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -100,7 +107,8 @@ * as soon as you read it. */ public class SCMNodeManager - implements NodeManager, StorageContainerNodeProtocol { + implements NodeManager, StorageContainerNodeProtocol, + EventHandler { @VisibleForTesting static final Logger LOG = @@ -154,6 +162,9 @@ public class SCMNodeManager private final SCMNodePoolManager nodePoolManager; private final StorageContainerManager scmManager; + public static final Event DATANODE_COMMAND = + new TypedEvent<>(CommandForDatanode.class, "DATANODE_COMMAND"); + /** * Constructs SCM machine Manager. 
*/ @@ -871,4 +882,11 @@ public void addDatanodeCommand(UUID dnId, SCMCommand command) { public void setStaleNodeIntervalMs(long interval) { this.staleNodeIntervalMs = interval; } + + @Override + public void onMessage(CommandForDatanode commandForDatanode, + EventPublisher publisher) { + addDatanodeCommand(commandForDatanode.getDatanodeId(), + commandForDatanode.getCommand()); + } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java index 78f13cb47c..5725d236ae 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java @@ -43,6 +43,7 @@ import org.apache.hadoop.hdds.scm.node.NodeManager; import org.apache.hadoop.hdds.scm.node.SCMNodeManager; import org.apache.hadoop.hdds.server.ServiceRuntimeInfoImpl; +import org.apache.hadoop.hdds.server.events.EventQueue; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.ipc.RPC; @@ -51,6 +52,7 @@ import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.common.Storage.StorageState; import org.apache.hadoop.ozone.common.StorageInfo; +import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.GenericOptionsParser; import org.apache.hadoop.util.StringUtils; @@ -161,8 +163,12 @@ private StorageContainerManager(OzoneConfiguration conf) throws IOException { throw new SCMException("SCM not initialized.", ResultCodes .SCM_NOT_INITIALIZED); } + EventQueue eventQueue = new EventQueue(); + + SCMNodeManager nm = new SCMNodeManager(conf, scmStorage.getClusterID(), this); + scmNodeManager = nm; + eventQueue.addHandler(SCMNodeManager.DATANODE_COMMAND, nm); - scmNodeManager = new SCMNodeManager(conf, scmStorage.getClusterID(), this); scmContainerManager = new ContainerMapping(conf, getScmNodeManager(), cacheSize); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java index 2b04d6b862..824a135194 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java @@ -19,6 +19,7 @@ import com.google.common.base.Supplier; import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.SCMCommandProto; import org.apache.hadoop.hdds.scm.ScmConfigKeys; @@ -29,7 +30,10 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.StorageReportProto; +import org.apache.hadoop.hdds.server.events.EventQueue; import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand; +import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode; import org.apache.hadoop.ozone.protocol.commands.SCMCommand; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.PathUtils; @@ -1165,4 +1169,39 @@ public void testScmNodeReportUpdate() throws 
IOException, assertEquals(expectedRemaining, foundRemaining); } } + + @Test + public void testHandlingSCMCommandEvent() { + OzoneConfiguration conf = getConf(); + conf.getTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, + 100, TimeUnit.MILLISECONDS); + + DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails(); + String dnId = datanodeDetails.getUuidString(); + String storagePath = testDir.getAbsolutePath() + "/" + dnId; + List reports = + TestUtils.createStorageReport(100, 10, 90, + storagePath, null, dnId, 1); + + EventQueue eq = new EventQueue(); + try (SCMNodeManager nodemanager = createNodeManager(conf)) { + eq.addHandler(SCMNodeManager.DATANODE_COMMAND, nodemanager); + + nodemanager + .register(datanodeDetails, TestUtils.createNodeReport(reports)); + eq.fireEvent(SCMNodeManager.DATANODE_COMMAND, + new CommandForDatanode(datanodeDetails.getUuid(), + new CloseContainerCommand(1L, ReplicationType.STAND_ALONE))); + + eq.processAll(1000L); + List command = + nodemanager.sendHeartbeat(datanodeDetails, null); + Assert.assertEquals(1, command.size()); + Assert + .assertEquals(command.get(0).getClass(), CloseContainerCommand.class); + } catch (IOException e) { + e.printStackTrace(); + } + } + } From c687a6617d73293019d8d91ac48bbfd2ccca3b40 Mon Sep 17 00:00:00 2001 From: Sean Mackrory Date: Mon, 25 Jun 2018 11:04:34 -0600 Subject: [PATCH 50/70] HADOOP-15423. Merge fileCache and dirCache into ine single cache in LocalMetadataStore. Contributed by Gabor Bota. --- .../fs/s3a/s3guard/LocalMetadataEntry.java | 81 ++++++ .../fs/s3a/s3guard/LocalMetadataStore.java | 247 ++++++++++-------- .../fs/s3a/s3guard/MetadataStoreTestBase.java | 2 +- .../s3a/s3guard/TestLocalMetadataStore.java | 33 +-- 4 files changed, 240 insertions(+), 123 deletions(-) create mode 100644 hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataEntry.java diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataEntry.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataEntry.java new file mode 100644 index 0000000000..6040d672ac --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataEntry.java @@ -0,0 +1,81 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.s3guard; + +import javax.annotation.Nullable; + +/** + * LocalMetadataEntry is used to store entries in the cache of + * LocalMetadataStore. PathMetadata or dirListingMetadata can be null. The + * entry is not immutable. 
+ */ +public final class LocalMetadataEntry { + @Nullable + private PathMetadata pathMetadata; + @Nullable + private DirListingMetadata dirListingMetadata; + + LocalMetadataEntry(PathMetadata pmd){ + pathMetadata = pmd; + dirListingMetadata = null; + } + + LocalMetadataEntry(DirListingMetadata dlm){ + pathMetadata = null; + dirListingMetadata = dlm; + } + + public PathMetadata getFileMeta() { + return pathMetadata; + } + + public DirListingMetadata getDirListingMeta() { + return dirListingMetadata; + } + + + public boolean hasPathMeta() { + return this.pathMetadata != null; + } + + public boolean hasDirMeta() { + return this.dirListingMetadata != null; + } + + public void setPathMetadata(PathMetadata pathMetadata) { + this.pathMetadata = pathMetadata; + } + + public void setDirListingMetadata(DirListingMetadata dirListingMetadata) { + this.dirListingMetadata = dirListingMetadata; + } + + @Override public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("LocalMetadataEntry{"); + if(pathMetadata != null) { + sb.append("pathMetadata=" + pathMetadata.getFileStatus().getPath()); + } + if(dirListingMetadata != null){ + sb.append("; dirListingMetadata=" + dirListingMetadata.getPath()); + } + sb.append("}"); + return sb.toString(); + } +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataStore.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataStore.java index 95689e11fb..49981ed1ee 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataStore.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataStore.java @@ -37,13 +37,12 @@ import java.net.URI; import java.util.Collection; import java.util.HashMap; -import java.util.Iterator; import java.util.LinkedList; import java.util.Map; import java.util.concurrent.TimeUnit; /** - * This is a local, in-memory, implementation of MetadataStore. + * This is a local, in-memory implementation of MetadataStore. * This is not a coherent cache across processes. It is only * locally-coherent. * @@ -56,12 +55,12 @@ * non-recursive removal of non-empty directories. It is assumed the caller * already has to perform these sorts of checks. * - * Contains cache internally with time based eviction. + * Contains one cache internally with time based eviction. */ public class LocalMetadataStore implements MetadataStore { public static final Logger LOG = LoggerFactory.getLogger(MetadataStore.class); - public static final int DEFAULT_MAX_RECORDS = 128; + public static final int DEFAULT_MAX_RECORDS = 256; public static final int DEFAULT_CACHE_ENTRY_TTL_MSEC = 10 * 1000; /** @@ -79,11 +78,8 @@ public class LocalMetadataStore implements MetadataStore { public static final String CONF_CACHE_ENTRY_TTL = "fs.metadatastore.local.ttl"; - /** Contains directories and files. */ - private Cache fileCache; - - /** Contains directory listings. */ - private Cache dirCache; + /** Contains directory and file listings. */ + private Cache localCache; private FileSystem fs; /* Null iff this FS does not have an associated URI host. 
*/ @@ -116,8 +112,7 @@ public void initialize(Configuration conf) throws IOException { builder.expireAfterAccess(ttl, TimeUnit.MILLISECONDS); } - fileCache = builder.build(); - dirCache = builder.build(); + localCache = builder.build(); } @Override @@ -155,8 +150,7 @@ private synchronized void doDelete(Path p, boolean recursive, boolean if (recursive) { // Remove all entries that have this dir as path prefix. - deleteEntryByAncestor(path, dirCache, tombstone); - deleteEntryByAncestor(path, fileCache, tombstone); + deleteEntryByAncestor(path, localCache, tombstone); } } @@ -170,7 +164,7 @@ public PathMetadata get(Path p, boolean wantEmptyDirectoryFlag) throws IOException { Path path = standardize(p); synchronized (this) { - PathMetadata m = fileCache.getIfPresent(path); + PathMetadata m = getFileMeta(path); if (wantEmptyDirectoryFlag && m != null && m.getFileStatus().isDirectory()) { @@ -191,15 +185,15 @@ public PathMetadata get(Path p, boolean wantEmptyDirectoryFlag) * @return TRUE / FALSE if known empty / not-empty, UNKNOWN otherwise. */ private Tristate isEmptyDirectory(Path p) { - DirListingMetadata dirMeta = dirCache.getIfPresent(p); - return dirMeta.withoutTombstones().isEmpty(); + DirListingMetadata dlm = getDirListingMeta(p); + return dlm.withoutTombstones().isEmpty(); } @Override public synchronized DirListingMetadata listChildren(Path p) throws IOException { Path path = standardize(p); - DirListingMetadata listing = dirCache.getIfPresent(path); + DirListingMetadata listing = getDirListingMeta(path); if (LOG.isDebugEnabled()) { LOG.debug("listChildren({}) -> {}", path, listing == null ? "null" : listing.prettyPrint()); @@ -211,6 +205,7 @@ public synchronized DirListingMetadata listChildren(Path p) throws @Override public void move(Collection pathsToDelete, Collection pathsToCreate) throws IOException { + LOG.info("Move {} to {}", pathsToDelete, pathsToCreate); Preconditions.checkNotNull(pathsToDelete, "pathsToDelete is null"); Preconditions.checkNotNull(pathsToCreate, "pathsToCreate is null"); @@ -258,7 +253,12 @@ public void put(PathMetadata meta) throws IOException { if (LOG.isDebugEnabled()) { LOG.debug("put {} -> {}", path, meta.prettyPrint()); } - fileCache.put(path, meta); + LocalMetadataEntry entry = localCache.getIfPresent(path); + if(entry == null){ + entry = new LocalMetadataEntry(meta); + } else { + entry.setPathMetadata(meta); + } /* Directory case: * We also make sure we have an entry in the dirCache, so subsequent @@ -271,27 +271,32 @@ public void put(PathMetadata meta) throws IOException { * saving round trips to underlying store for subsequent listStatus() */ - if (status.isDirectory()) { - DirListingMetadata dir = dirCache.getIfPresent(path); - if (dir == null) { - dirCache.put(path, new DirListingMetadata(path, DirListingMetadata - .EMPTY_DIR, false)); - } + // only create DirListingMetadata if the entry does not have one + if (status.isDirectory() && !entry.hasDirMeta()) { + DirListingMetadata dlm = + new DirListingMetadata(path, DirListingMetadata.EMPTY_DIR, false); + entry.setDirListingMetadata(dlm); } + localCache.put(path, entry); /* Update cached parent dir. */ Path parentPath = path.getParent(); if (parentPath != null) { - DirListingMetadata parent = dirCache.getIfPresent(parentPath); - if (parent == null) { - /* Track this new file's listing in parent. Parent is not - * authoritative, since there may be other items in it we don't know - * about. 
*/ - parent = new DirListingMetadata(parentPath, - DirListingMetadata.EMPTY_DIR, false); - dirCache.put(parentPath, parent); + LocalMetadataEntry parentMeta = localCache.getIfPresent(parentPath); + DirListingMetadata parentDirMeta = + new DirListingMetadata(parentPath, DirListingMetadata.EMPTY_DIR, + false); + parentDirMeta.put(status); + + getDirListingMeta(parentPath); + + if (parentMeta == null){ + localCache.put(parentPath, new LocalMetadataEntry(parentDirMeta)); + } else if (!parentMeta.hasDirMeta()) { + parentMeta.setDirListingMetadata(parentDirMeta); + } else { + parentMeta.getDirListingMeta().put(status); } - parent.put(status); } } } @@ -301,7 +306,13 @@ public synchronized void put(DirListingMetadata meta) throws IOException { if (LOG.isDebugEnabled()) { LOG.debug("put dirMeta {}", meta.prettyPrint()); } - dirCache.put(standardize(meta.getPath()), meta); + LocalMetadataEntry entry = + localCache.getIfPresent(standardize(meta.getPath())); + if(entry == null){ + localCache.put(standardize(meta.getPath()), new LocalMetadataEntry(meta)); + } else { + entry.setDirListingMetadata(meta); + } put(meta.getListing()); } @@ -319,8 +330,8 @@ public void close() throws IOException { @Override public void destroy() throws IOException { - if (dirCache != null) { - dirCache.invalidateAll(); + if (localCache != null) { + localCache.invalidateAll(); } } @@ -330,42 +341,44 @@ public void prune(long modTime) throws IOException{ } @Override - public synchronized void prune(long modTime, String keyPrefix) - throws IOException { - Iterator> files = - fileCache.asMap().entrySet().iterator(); - while (files.hasNext()) { - Map.Entry entry = files.next(); - if (expired(entry.getValue().getFileStatus(), modTime, keyPrefix)) { - files.remove(); - } - } - Iterator> dirs = - dirCache.asMap().entrySet().iterator(); - while (dirs.hasNext()) { - Map.Entry entry = dirs.next(); - Path path = entry.getKey(); - DirListingMetadata metadata = entry.getValue(); - Collection oldChildren = metadata.getListing(); - Collection newChildren = new LinkedList<>(); + public synchronized void prune(long modTime, String keyPrefix) { + // prune files + // filter path_metadata (files), filter expired, remove expired + localCache.asMap().entrySet().stream() + .filter(entry -> entry.getValue().hasPathMeta()) + .filter(entry -> expired( + entry.getValue().getFileMeta().getFileStatus(), modTime, keyPrefix)) + .forEach(entry -> localCache.invalidate(entry.getKey())); - for (PathMetadata child : oldChildren) { - FileStatus status = child.getFileStatus(); - if (!expired(status, modTime, keyPrefix)) { - newChildren.add(child); - } - } - if (newChildren.size() != oldChildren.size()) { - dirCache.put(path, new DirListingMetadata(path, newChildren, false)); - if (!path.isRoot()) { - DirListingMetadata parent = null; - parent = dirCache.getIfPresent(path.getParent()); - if (parent != null) { - parent.setAuthoritative(false); + + // prune dirs + // filter DIR_LISTING_METADATA, remove expired, remove authoritative bit + localCache.asMap().entrySet().stream() + .filter(entry -> entry.getValue().hasDirMeta()) + .forEach(entry -> { + Path path = entry.getKey(); + DirListingMetadata metadata = entry.getValue().getDirListingMeta(); + Collection oldChildren = metadata.getListing(); + Collection newChildren = new LinkedList<>(); + + for (PathMetadata child : oldChildren) { + FileStatus status = child.getFileStatus(); + if (!expired(status, modTime, keyPrefix)) { + newChildren.add(child); + } } - } - } - } + if (newChildren.size() != oldChildren.size()) 
{ + DirListingMetadata dlm = + new DirListingMetadata(path, newChildren, false); + localCache.put(path, new LocalMetadataEntry(dlm)); + if (!path.isRoot()) { + DirListingMetadata parent = getDirListingMeta(path.getParent()); + if (parent != null) { + parent.setAuthoritative(false); + } + } + } + }); } private boolean expired(FileStatus status, long expiry, String keyPrefix) { @@ -390,31 +403,26 @@ private boolean expired(FileStatus status, long expiry, String keyPrefix) { } @VisibleForTesting - static void deleteEntryByAncestor(Path ancestor, Cache cache, - boolean tombstone) { - for (Iterator> it = cache.asMap().entrySet().iterator(); - it.hasNext();) { - Map.Entry entry = it.next(); - Path f = entry.getKey(); - T meta = entry.getValue(); - if (isAncestorOf(ancestor, f)) { - if (tombstone) { - if (meta instanceof PathMetadata) { - cache.put(f, (T) PathMetadata.tombstone(f)); - } else if (meta instanceof DirListingMetadata) { - it.remove(); + static void deleteEntryByAncestor(Path ancestor, + Cache cache, boolean tombstone) { + + cache.asMap().entrySet().stream() + .filter(entry -> isAncestorOf(ancestor, entry.getKey())) + .forEach(entry -> { + LocalMetadataEntry meta = entry.getValue(); + Path path = entry.getKey(); + if(meta.hasDirMeta()){ + cache.invalidate(path); + } else if(tombstone && meta.hasPathMeta()){ + meta.setPathMetadata(PathMetadata.tombstone(path)); } else { - throw new IllegalStateException("Unknown type in cache"); + cache.invalidate(path); } - } else { - it.remove(); - } - } - } + }); } /** - * @return true iff 'ancestor' is ancestor dir in path 'f'. + * @return true if 'ancestor' is ancestor dir in path 'f'. * All paths here are absolute. Dir does not count as its own ancestor. */ private static boolean isAncestorOf(Path ancestor, Path f) { @@ -431,27 +439,41 @@ private static boolean isAncestorOf(Path ancestor, Path f) { * lock held. */ private void deleteCacheEntries(Path path, boolean tombstone) { - - // Remove target file/dir - LOG.debug("delete file entry for {}", path); - if (tombstone) { - fileCache.put(path, PathMetadata.tombstone(path)); - } else { - fileCache.invalidate(path); + LocalMetadataEntry entry = localCache.getIfPresent(path); + // If there's no entry, delete should silently succeed + // (based on MetadataStoreTestBase#testDeleteNonExisting) + if(entry == null){ + LOG.warn("Delete: path {} is missing from cache.", path); + return; } - // Update this and parent dir listing, if any + // Remove target file entry + LOG.debug("delete file entry for {}", path); + if(entry.hasPathMeta()){ + if (tombstone) { + PathMetadata pmd = PathMetadata.tombstone(path); + entry.setPathMetadata(pmd); + } else { + entry.setPathMetadata(null); + } + } - /* If this path is a dir, remove its listing */ - LOG.debug("removing listing of {}", path); + // If this path is a dir, remove its listing + if(entry.hasDirMeta()) { + LOG.debug("removing listing of {}", path); + entry.setDirListingMetadata(null); + } - dirCache.invalidate(path); + // If the entry is empty (contains no dirMeta or pathMeta) remove it from + // the cache. 
+ if(!entry.hasDirMeta() && !entry.hasPathMeta()){ + localCache.invalidate(entry); + } /* Remove this path from parent's dir listing */ Path parent = path.getParent(); if (parent != null) { - DirListingMetadata dir = null; - dir = dirCache.getIfPresent(parent); + DirListingMetadata dir = getDirListingMeta(parent); if (dir != null) { LOG.debug("removing parent's entry for {} ", path); if (tombstone) { @@ -494,4 +516,23 @@ public Map getDiagnostics() throws IOException { public void updateParameters(Map parameters) throws IOException { } + + PathMetadata getFileMeta(Path p){ + LocalMetadataEntry entry = localCache.getIfPresent(p); + if(entry != null && entry.hasPathMeta()){ + return entry.getFileMeta(); + } else { + return null; + } + } + + DirListingMetadata getDirListingMeta(Path p){ + LocalMetadataEntry entry = localCache.getIfPresent(p); + if(entry != null && entry.hasDirMeta()){ + return entry.getDirListingMeta(); + } else { + return null; + } + } + } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/MetadataStoreTestBase.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/MetadataStoreTestBase.java index 56618cb233..5a59400849 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/MetadataStoreTestBase.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/MetadataStoreTestBase.java @@ -836,7 +836,7 @@ private void assertDirectorySize(String pathStr, int size) throws IOException { DirListingMetadata dirMeta = ms.listChildren(strToPath(pathStr)); if (!allowMissing()) { - assertNotNull("Directory " + pathStr + " in cache", dirMeta); + assertNotNull("Directory " + pathStr + " is null in cache", dirMeta); } if (!allowMissing() || dirMeta != null) { dirMeta = dirMeta.withoutTombstones(); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestLocalMetadataStore.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestLocalMetadataStore.java index 074319f582..2ea20b26b0 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestLocalMetadataStore.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestLocalMetadataStore.java @@ -37,7 +37,6 @@ */ public class TestLocalMetadataStore extends MetadataStoreTestBase { - private static final String MAX_ENTRIES_STR = "16"; private final static class LocalMSContract extends AbstractMSContract { @@ -48,7 +47,6 @@ private LocalMSContract() throws IOException { } private LocalMSContract(Configuration config) throws IOException { - config.set(LocalMetadataStore.CONF_MAX_RECORDS, MAX_ENTRIES_STR); fs = FileSystem.getLocal(config); } @@ -76,8 +74,8 @@ public AbstractMSContract createContract(Configuration conf) throws } @Test - public void testClearByAncestor() { - Cache cache = CacheBuilder.newBuilder().build(); + public void testClearByAncestor() throws Exception { + Cache cache = CacheBuilder.newBuilder().build(); // 1. 
Test paths without scheme/host assertClearResult(cache, "", "/", 0); @@ -122,7 +120,7 @@ public void testCacheTimedEvictionAfterWrite() { final long ttl = t1 + 50; // between t1 and t2 - Cache cache = CacheBuilder.newBuilder() + Cache cache = CacheBuilder.newBuilder() .expireAfterWrite(ttl, TimeUnit.NANOSECONDS /* nanos to avoid conversions */) .ticker(testTicker) @@ -143,7 +141,7 @@ public void testCacheTimedEvictionAfterWrite() { assertEquals("Cache should contain 3 records before eviction", 3, cache.size()); - PathMetadata pm1 = cache.getIfPresent(path1); + LocalMetadataEntry pm1 = cache.getIfPresent(path1); assertNotNull("PathMetadata should not be null before eviction", pm1); // set the ticker to a time when timed eviction should occur @@ -159,7 +157,7 @@ public void testCacheTimedEvictionAfterWrite() { assertNull("PathMetadata should be null after eviction", pm1); } - private static void populateMap(Cache cache, + private static void populateMap(Cache cache, String prefix) { populateEntry(cache, new Path(prefix + "/dirA/dirB/")); populateEntry(cache, new Path(prefix + "/dirA/dirB/dirC")); @@ -168,23 +166,20 @@ private static void populateMap(Cache cache, populateEntry(cache, new Path(prefix + "/dirA/file1")); } - private static void populateEntry(Cache cache, + private static void populateEntry(Cache cache, Path path) { - cache.put(path, new PathMetadata(new FileStatus(0, true, 0, 0, 0, path))); + FileStatus fileStatus = new FileStatus(0, true, 0, 0, 0, path); + cache.put(path, new LocalMetadataEntry(new PathMetadata(fileStatus))); } - private static int sizeOfMap(Cache cache) { - int count = 0; - for (PathMetadata meta : cache.asMap().values()) { - if (!meta.isDeleted()) { - count++; - } - } - return count; + private static long sizeOfMap(Cache cache) { + return cache.asMap().values().stream() + .filter(entry -> !entry.getFileMeta().isDeleted()) + .count(); } - private static void assertClearResult(Cache cache, - String prefixStr, String pathStr, int leftoverSize) { + private static void assertClearResult(Cache cache, + String prefixStr, String pathStr, int leftoverSize) throws IOException { populateMap(cache, prefixStr); LocalMetadataStore.deleteEntryByAncestor(new Path(prefixStr + pathStr), cache, true); From 7a3c6e9c3cd9ffdc71946fd12f5c3d59718c4939 Mon Sep 17 00:00:00 2001 From: Todd Lipcon Date: Mon, 25 Jun 2018 15:36:45 -0700 Subject: [PATCH 51/70] HADOOP-15550. 
Avoid static initialization of ObjectMappers --- .../crypto/key/kms/KMSClientProvider.java | 7 ++---- .../web/DelegationTokenAuthenticator.java | 8 ++----- .../hadoop/util/HttpExceptionUtils.java | 12 ++-------- .../apache/hadoop/util/JsonSerialization.java | 24 +++++++++++++++++++ .../crypto/key/kms/server/KMSJSONWriter.java | 6 ++--- .../hadoop/hdfs/web/WebHdfsFileSystem.java | 7 ++---- ...fRefreshTokenBasedAccessTokenProvider.java | 8 +++---- .../CredentialBasedAccessTokenProvider.java | 8 +++---- .../apache/hadoop/mapreduce/JobSubmitter.java | 8 +++---- .../hadoop/fs/azure/security/JsonUtils.java | 4 ++-- 10 files changed, 45 insertions(+), 47 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java index edbf897664..7b4607507b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java @@ -42,6 +42,7 @@ import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier; import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL; import org.apache.hadoop.util.HttpExceptionUtils; +import org.apache.hadoop.util.JsonSerialization; import org.apache.hadoop.util.KMSUtil; import org.apache.http.client.utils.URIBuilder; import org.slf4j.Logger; @@ -79,7 +80,6 @@ import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.CryptoExtension; import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.ObjectWriter; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.base.Strings; @@ -132,9 +132,6 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension, private final ValueQueue encKeyVersionQueue; - private static final ObjectWriter WRITER = - new ObjectMapper().writerWithDefaultPrettyPrinter(); - private final Text dtService; // Allow fallback to default kms server port 9600 for certain tests that do @@ -237,7 +234,7 @@ public KMSEncryptedKeyVersion(String keyName, String keyVersionName, private static void writeJson(Object obj, OutputStream os) throws IOException { Writer writer = new OutputStreamWriter(os, StandardCharsets.UTF_8); - WRITER.writeValue(writer, obj); + JsonSerialization.writer().writeValue(writer, obj); } /** diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java index 617773b34d..0ae2af35bf 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java @@ -17,8 +17,6 @@ */ package org.apache.hadoop.security.token.delegation.web; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.ObjectReader; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.net.NetUtils; @@ -31,6 +29,7 @@ import 
org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier; import org.apache.hadoop.util.HttpExceptionUtils; +import org.apache.hadoop.util.JsonSerialization; import org.apache.hadoop.util.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -56,9 +55,6 @@ public abstract class DelegationTokenAuthenticator implements Authenticator { private static final String CONTENT_TYPE = "Content-Type"; private static final String APPLICATION_JSON_MIME = "application/json"; - private static final ObjectReader READER = - new ObjectMapper().readerFor(Map.class); - private static final String HTTP_GET = "GET"; private static final String HTTP_PUT = "PUT"; @@ -328,7 +324,7 @@ private Map doDelegationTokenOperation(URL url, if (contentType != null && contentType.contains(APPLICATION_JSON_MIME)) { try { - ret = READER.readValue(conn.getInputStream()); + ret = JsonSerialization.mapReader().readValue(conn.getInputStream()); } catch (Exception ex) { throw new AuthenticationException(String.format( "'%s' did not handle the '%s' delegation token operation: %s", diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HttpExceptionUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HttpExceptionUtils.java index cdb8112584..50be1c38d8 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HttpExceptionUtils.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HttpExceptionUtils.java @@ -17,9 +17,6 @@ */ package org.apache.hadoop.util; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.ObjectReader; -import com.fasterxml.jackson.databind.ObjectWriter; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -56,11 +53,6 @@ public class HttpExceptionUtils { private static final String ENTER = System.getProperty("line.separator"); - private static final ObjectReader READER = - new ObjectMapper().readerFor(Map.class); - private static final ObjectWriter WRITER = - new ObjectMapper().writerWithDefaultPrettyPrinter(); - /** * Creates a HTTP servlet response serializing the exception in it as JSON. 
* @@ -82,7 +74,7 @@ public static void createServletExceptionResponse( Map jsonResponse = new LinkedHashMap(); jsonResponse.put(ERROR_JSON, json); Writer writer = response.getWriter(); - WRITER.writeValue(writer, jsonResponse); + JsonSerialization.writer().writeValue(writer, jsonResponse); writer.flush(); } @@ -150,7 +142,7 @@ public static void validateResponse(HttpURLConnection conn, InputStream es = null; try { es = conn.getErrorStream(); - Map json = READER.readValue(es); + Map json = JsonSerialization.mapReader().readValue(es); json = (Map) json.get(ERROR_JSON); String exClass = (String) json.get(ERROR_CLASSNAME_JSON); String exMsg = (String) json.get(ERROR_MESSAGE_JSON); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JsonSerialization.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JsonSerialization.java index 86c4df666e..cbc8560a40 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JsonSerialization.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JsonSerialization.java @@ -25,14 +25,18 @@ import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; +import java.util.Map; import com.fasterxml.jackson.core.JsonParseException; import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.DeserializationFeature; import com.fasterxml.jackson.databind.JsonMappingException; import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.ObjectReader; +import com.fasterxml.jackson.databind.ObjectWriter; import com.fasterxml.jackson.databind.SerializationFeature; import com.google.common.base.Preconditions; + import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -65,6 +69,26 @@ public class JsonSerialization { private final Class classType; private final ObjectMapper mapper; + private static final ObjectWriter WRITER = + new ObjectMapper().writerWithDefaultPrettyPrinter(); + + private static final ObjectReader MAP_READER = + new ObjectMapper().readerFor(Map.class); + + /** + * @return an ObjectWriter which pretty-prints its output + */ + public static ObjectWriter writer() { + return WRITER; + } + + /** + * @return an ObjectReader which returns simple Maps. + */ + public static ObjectReader mapReader() { + return MAP_READER; + } + /** * Create an instance bound to a specific type. 
* @param classType class to marshall diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSJSONWriter.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSJSONWriter.java index f8265729d8..b9b8d9cee6 100644 --- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSJSONWriter.java +++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSJSONWriter.java @@ -17,10 +17,9 @@ */ package org.apache.hadoop.crypto.key.kms.server; -import com.fasterxml.jackson.databind.ObjectMapper; - import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.http.JettyUtils; +import org.apache.hadoop.util.JsonSerialization; import javax.ws.rs.Produces; import javax.ws.rs.WebApplicationException; @@ -67,8 +66,7 @@ public void writeTo(Object obj, Class aClass, Type type, OutputStream outputStream) throws IOException, WebApplicationException { Writer writer = new OutputStreamWriter(outputStream, Charset .forName("UTF-8")); - ObjectMapper jsonMapper = new ObjectMapper(); - jsonMapper.writerWithDefaultPrettyPrinter().writeValue(writer, obj); + JsonSerialization.writer().writeValue(writer, obj); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java index 673acd6fa7..ec60a186c7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java @@ -56,8 +56,6 @@ import javax.ws.rs.core.HttpHeaders; import javax.ws.rs.core.MediaType; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.ObjectReader; import org.apache.commons.io.IOUtils; import org.apache.commons.io.input.BoundedInputStream; import org.apache.hadoop.conf.Configuration; @@ -121,6 +119,7 @@ import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.security.token.TokenSelector; import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSelector; +import org.apache.hadoop.util.JsonSerialization; import org.apache.hadoop.util.KMSUtil; import org.apache.hadoop.util.Progressable; import org.apache.hadoop.util.StringUtils; @@ -172,8 +171,6 @@ public class WebHdfsFileSystem extends FileSystem private boolean disallowFallbackToInsecureCluster; private String restCsrfCustomHeader; private Set restCsrfMethodsToIgnore; - private static final ObjectReader READER = - new ObjectMapper().readerFor(Map.class); private DFSOpsCountStatistics storageStatistics; @@ -476,7 +473,7 @@ private Path makeAbsolute(Path f) { + "\" (parsed=\"" + parsed + "\")"); } } - return READER.readValue(in); + return JsonSerialization.mapReader().readValue(in); } finally { in.close(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/ConfRefreshTokenBasedAccessTokenProvider.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/ConfRefreshTokenBasedAccessTokenProvider.java index c6ebdd67eb..3e3fbfbd91 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/ConfRefreshTokenBasedAccessTokenProvider.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/ConfRefreshTokenBasedAccessTokenProvider.java @@ -18,8 +18,6 @@ */ package org.apache.hadoop.hdfs.web.oauth2; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.ObjectReader; import com.squareup.okhttp.OkHttpClient; import com.squareup.okhttp.Request; import com.squareup.okhttp.RequestBody; @@ -28,6 +26,7 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.web.URLConnectionFactory; +import org.apache.hadoop.util.JsonSerialization; import org.apache.hadoop.util.Timer; import org.apache.http.HttpStatus; @@ -55,8 +54,6 @@ @InterfaceStability.Evolving public class ConfRefreshTokenBasedAccessTokenProvider extends AccessTokenProvider { - private static final ObjectReader READER = - new ObjectMapper().readerFor(Map.class); public static final String OAUTH_REFRESH_TOKEN_KEY = "dfs.webhdfs.oauth2.refresh.token"; @@ -129,7 +126,8 @@ void refresh() throws IOException { + responseBody.code() + ", text = " + responseBody.toString()); } - Map response = READER.readValue(responseBody.body().string()); + Map response = JsonSerialization.mapReader().readValue( + responseBody.body().string()); String newExpiresIn = response.get(EXPIRES_IN).toString(); accessTokenTimer.setExpiresIn(newExpiresIn); diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/CredentialBasedAccessTokenProvider.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/CredentialBasedAccessTokenProvider.java index 5c629e0165..bfd7055990 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/CredentialBasedAccessTokenProvider.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/CredentialBasedAccessTokenProvider.java @@ -18,8 +18,6 @@ */ package org.apache.hadoop.hdfs.web.oauth2; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.ObjectReader; import com.squareup.okhttp.OkHttpClient; import com.squareup.okhttp.Request; import com.squareup.okhttp.RequestBody; @@ -28,6 +26,7 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.web.URLConnectionFactory; +import org.apache.hadoop.util.JsonSerialization; import org.apache.hadoop.util.Timer; import org.apache.http.HttpStatus; @@ -55,8 +54,6 @@ @InterfaceStability.Evolving public abstract class CredentialBasedAccessTokenProvider extends AccessTokenProvider { - private static final ObjectReader READER = - new ObjectMapper().readerFor(Map.class); public static final String OAUTH_CREDENTIAL_KEY = "dfs.webhdfs.oauth2.credential"; @@ -123,7 +120,8 @@ void refresh() throws IOException { + responseBody.code() + ", text = " + responseBody.toString()); } - Map response = READER.readValue(responseBody.body().string()); + Map response = JsonSerialization.mapReader().readValue( + responseBody.body().string()); String newExpiresIn = response.get(EXPIRES_IN).toString(); timer.setExpiresIn(newExpiresIn); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java index 
246986f3be..ebf9341048 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java @@ -34,8 +34,6 @@ import com.fasterxml.jackson.core.JsonParseException; import com.fasterxml.jackson.databind.JsonMappingException; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.ObjectReader; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; @@ -61,6 +59,7 @@ import org.apache.hadoop.security.authorize.AccessControlList; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; +import org.apache.hadoop.util.JsonSerialization; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.yarn.api.records.ReservationId; @@ -71,8 +70,6 @@ class JobSubmitter { protected static final Logger LOG = LoggerFactory.getLogger(JobSubmitter.class); - private static final ObjectReader READER = - new ObjectMapper().readerFor(Map.class); private static final String SHUFFLE_KEYGEN_ALGORITHM = "HmacSHA1"; private static final int SHUFFLE_KEY_LENGTH = 64; private FileSystem jtFs; @@ -406,7 +403,8 @@ private void readTokensFromFiles(Configuration conf, Credentials credentials) try { // read JSON - Map nm = READER.readValue(new File(localFileName)); + Map nm = JsonSerialization.mapReader().readValue( + new File(localFileName)); for(Map.Entry ent: nm.entrySet()) { credentials.addSecretKey(new Text(ent.getKey()), ent.getValue() diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/security/JsonUtils.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/security/JsonUtils.java index 20dd4706b4..9c40325e21 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/security/JsonUtils.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/security/JsonUtils.java @@ -19,6 +19,7 @@ package org.apache.hadoop.fs.azure.security; import com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.hadoop.util.JsonSerialization; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -37,8 +38,7 @@ private JsonUtils() { public static Map parse(final String jsonString) throws IOException { try { - ObjectMapper mapper = new ObjectMapper(); - return mapper.readerFor(Map.class).readValue(jsonString); + return JsonSerialization.mapReader().readValue(jsonString); } catch (Exception e) { LOG.debug("JSON Parsing exception: {} while parsing {}", e.getMessage(), jsonString); From 35ec9401e829bfa10994790659a26b0babacae35 Mon Sep 17 00:00:00 2001 From: Miklos Szegedi Date: Mon, 25 Jun 2018 09:23:11 -0700 Subject: [PATCH 52/70] YARN-8438. TestContainer.testKillOnNew flaky on trunk. Contributed by Szilard Nemeth. 
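The HADOOP-15550 changes above replace per-class static ObjectMapper/ObjectReader/ObjectWriter fields with the shared org.apache.hadoop.util.JsonSerialization#writer() and #mapReader() helpers. As a rough illustration of the pattern only (the SharedJson class below is a hypothetical, simplified stand-in, not the Hadoop utility): build the comparatively expensive ObjectMapper once, then hand out its immutable, thread-safe ObjectReader/ObjectWriter to every caller.

import java.util.Collections;
import java.util.Map;

import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectReader;
import com.fasterxml.jackson.databind.ObjectWriter;

public final class SharedJson {
  // One mapper, created once; the reader and writer derived from it are
  // immutable and safe to share between threads.
  private static final ObjectMapper MAPPER = new ObjectMapper();
  private static final ObjectWriter WRITER =
      MAPPER.writerWithDefaultPrettyPrinter();
  private static final ObjectReader MAP_READER = MAPPER.readerFor(Map.class);

  private SharedJson() {
  }

  /** @return a reusable pretty-printing writer. */
  public static ObjectWriter writer() {
    return WRITER;
  }

  /** @return a reusable reader that deserializes JSON objects into Maps. */
  public static ObjectReader mapReader() {
    return MAP_READER;
  }

  public static void main(String[] args) throws Exception {
    String json = writer().writeValueAsString(
        Collections.singletonMap("key", "value"));
    Map<?, ?> parsed = mapReader().readValue(json);
    System.out.println(parsed);
  }
}

Callers then invoke writer().writeValue(out, obj) and mapReader().readValue(in), which is how the patched KMSClientProvider, HttpExceptionUtils, and WebHdfsFileSystem code uses JsonSerialization.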
--- .../containermanager/container/TestContainer.java | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java index 1a263eea19..edf26d46dd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java @@ -116,7 +116,7 @@ public class TestContainer { final NodeManagerMetrics metrics = NodeManagerMetrics.create(); final Configuration conf = new YarnConfiguration(); final String FAKE_LOCALIZATION_ERROR = "Fake localization error"; - + /** * Verify correct container request events sent to localizer. */ @@ -591,9 +591,8 @@ public void testKillOnNew() throws Exception { Assert.assertEquals(ContainerExitStatus.KILLED_BY_RESOURCEMANAGER, containerMetrics.exitCode.value()); Assert.assertTrue(containerMetrics.startTime.value() > 0); - Assert.assertTrue( - containerMetrics.finishTime.value() > containerMetrics.startTime - .value()); + Assert.assertTrue(containerMetrics.finishTime.value() >= + containerMetrics.startTime.value()); Assert.assertEquals(ContainerEventType.KILL_CONTAINER, wc.initStateToEvent.get(ContainerState.NEW)); Assert.assertEquals(ContainerState.DONE, @@ -1612,4 +1611,5 @@ public SlidingWindowRetryPolicy getRetryPolicy() { return ((ContainerImpl)c).getRetryPolicy(); } } + } From 238fe00ad2692154f6a382f35735169ee5e4af2c Mon Sep 17 00:00:00 2001 From: Bharat Viswanadham Date: Mon, 25 Jun 2018 21:12:05 -0700 Subject: [PATCH 53/70] HDDS-192:Create new SCMCommand to request a replication of a container. 
Contributed by Elek Marton --- .../statemachine/DatanodeStateMachine.java | 3 + .../ReplicateContainerCommandHandler.java | 67 +++++++++++++ .../endpoint/HeartbeatEndpointTask.java | 12 +++ .../commands/ReplicateContainerCommand.java | 94 +++++++++++++++++++ .../StorageContainerDatanodeProtocol.proto | 12 ++- .../scm/server/SCMDatanodeProtocolServer.java | 11 +++ .../TestReplicateContainerHandler.java | 71 ++++++++++++++ 7 files changed, 269 insertions(+), 1 deletion(-) create mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReplicateContainerCommandHandler.java create mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReplicateContainerCommand.java create mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestReplicateContainerHandler.java diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java index dc4e673126..b073d7b81d 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java @@ -32,6 +32,8 @@ .CommandDispatcher; import org.apache.hadoop.ozone.container.common.statemachine.commandhandler .DeleteBlocksCommandHandler; +import org.apache.hadoop.ozone.container.common.statemachine.commandhandler + .ReplicateContainerCommandHandler; import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; import org.apache.hadoop.ozone.protocol.commands.SCMCommand; import org.apache.hadoop.util.Time; @@ -95,6 +97,7 @@ public DatanodeStateMachine(DatanodeDetails datanodeDetails, .addHandler(new CloseContainerCommandHandler()) .addHandler(new DeleteBlocksCommandHandler( container.getContainerManager(), conf)) + .addHandler(new ReplicateContainerCommandHandler()) .setConnectionManager(connectionManager) .setContainer(container) .setContext(context) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReplicateContainerCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReplicateContainerCommandHandler.java new file mode 100644 index 0000000000..b4e83b7d40 --- /dev/null +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReplicateContainerCommandHandler.java @@ -0,0 +1,67 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
    + * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.ozone.container.common.statemachine.commandhandler; + +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMCommandProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type; +import org.apache.hadoop.ozone.container.common.statemachine + .SCMConnectionManager; +import org.apache.hadoop.ozone.container.common.statemachine.StateContext; +import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; +import org.apache.hadoop.ozone.protocol.commands.SCMCommand; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Command handler to copy containers from sources. + */ +public class ReplicateContainerCommandHandler implements CommandHandler { + static final Logger LOG = + LoggerFactory.getLogger(ReplicateContainerCommandHandler.class); + + private int invocationCount; + + private long totalTime; + + @Override + public void handle(SCMCommand command, OzoneContainer container, + StateContext context, SCMConnectionManager connectionManager) { + LOG.warn("Replicate command is not yet handled"); + + } + + @Override + public SCMCommandProto.Type getCommandType() { + return Type.replicateContainerCommand; + } + + @Override + public int getInvocationCount() { + return this.invocationCount; + } + + @Override + public long getAverageRunTime() { + if (invocationCount > 0) { + return totalTime / invocationCount; + } + return 0; + } +} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java index 1ee6375a56..260a245ceb 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java @@ -39,6 +39,8 @@ import org.apache.hadoop.ozone.container.common.statemachine.StateContext; import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand; import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand; +import org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand; + import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -196,6 +198,16 @@ private void processResponse(SCMHeartbeatResponseProto response, } this.context.addCommand(closeContainer); break; + case replicateContainerCommand: + ReplicateContainerCommand replicateContainerCommand = + ReplicateContainerCommand.getFromProtobuf( + commandResponseProto.getReplicateContainerCommandProto()); + if (LOG.isDebugEnabled()) { + LOG.debug("Received SCM container replicate request for container {}", + replicateContainerCommand.getContainerID()); + } + this.context.addCommand(replicateContainerCommand); + break; default: throw new IllegalArgumentException("Unknown response : " + commandResponseProto.getCommandType().name()); diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReplicateContainerCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReplicateContainerCommand.java new file mode 100644 index 0000000000..834318b145 --- /dev/null +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReplicateContainerCommand.java @@ -0,0 +1,94 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.protocol.commands; + +import java.util.List; +import java.util.stream.Collectors; + +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.ReplicateContainerCommandProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.ReplicateContainerCommandProto + .Builder; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMCommandProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type; +import org.apache.hadoop.hdds.scm.container.ContainerID; + +import com.google.common.base.Preconditions; + +/** + * SCM command to request replication of a container. 
+ */ +public class ReplicateContainerCommand + extends SCMCommand { + + private final long containerID; + + private final List sourceDatanodes; + + public ReplicateContainerCommand(long containerID, + List sourceDatanodes) { + this.containerID = containerID; + this.sourceDatanodes = sourceDatanodes; + } + + @Override + public Type getType() { + return SCMCommandProto.Type.replicateContainerCommand; + } + + @Override + public byte[] getProtoBufMessage() { + return getProto().toByteArray(); + } + + public ReplicateContainerCommandProto getProto() { + Builder builder = ReplicateContainerCommandProto.newBuilder() + .setContainerID(containerID); + for (DatanodeDetails dd : sourceDatanodes) { + builder.addSources(dd.getProtoBufMessage()); + } + return builder.build(); + } + + public static ReplicateContainerCommand getFromProtobuf( + ReplicateContainerCommandProto protoMessage) { + Preconditions.checkNotNull(protoMessage); + + List datanodeDetails = + protoMessage.getSourcesList() + .stream() + .map(DatanodeDetails::getFromProtoBuf) + .collect(Collectors.toList()); + + return new ReplicateContainerCommand(protoMessage.getContainerID(), + datanodeDetails); + + } + + public long getContainerID() { + return containerID; + } + + public List getSourceDatanodes() { + return sourceDatanodes; + } +} diff --git a/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto b/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto index f6aba05636..54230c1e9f 100644 --- a/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto +++ b/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto @@ -172,6 +172,7 @@ message SCMCommandProto { deleteBlocksCommand = 2; closeContainerCommand = 3; deleteContainerCommand = 4; + replicateContainerCommand = 5; } // TODO: once we start using protoc 3.x, refactor this message using "oneof" required Type commandType = 1; @@ -179,6 +180,7 @@ message SCMCommandProto { optional DeleteBlocksCommandProto deleteBlocksCommandProto = 3; optional CloseContainerCommandProto closeContainerCommandProto = 4; optional DeleteContainerCommandProto deleteContainerCommandProto = 5; + optional ReplicateContainerCommandProto replicateContainerCommandProto = 6; } /** @@ -227,12 +229,20 @@ message CloseContainerCommandProto { } /** -This command asks the datanode to close a specific container. +This command asks the datanode to delete a specific container. */ message DeleteContainerCommandProto { required int64 containerID = 1; } +/** +This command asks the datanode to replicate a container from specific sources. +*/ +message ReplicateContainerCommandProto { + required int64 containerID = 1; + repeated DatanodeDetailsProto sources = 2; +} + /** * Protocol used from a datanode to StorageContainerManager. 
* diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java index 7d16161e59..eb5ce1a827 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java @@ -28,6 +28,7 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; + import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto; import org.apache.hadoop.hdds.protocol.proto @@ -62,6 +63,9 @@ import static org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.SCMCommandProto .Type.deleteBlocksCommand; +import static org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type + .replicateContainerCommand; import static org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.SCMCommandProto .Type.reregisterCommand; @@ -77,6 +81,7 @@ import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand; import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand; import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand; +import org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand; import org.apache.hadoop.ozone.protocol.commands.SCMCommand; import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolPB; import org.apache.hadoop.ozone.protocolPB @@ -293,6 +298,12 @@ public SCMCommandProto getCommandResponse(SCMCommand cmd) .setCloseContainerCommandProto( ((CloseContainerCommand) cmd).getProto()) .build(); + case replicateContainerCommand: + return builder + .setCommandType(replicateContainerCommand) + .setReplicateContainerCommandProto( + ((ReplicateContainerCommand)cmd).getProto()) + .build(); default: throw new IllegalArgumentException("Not implemented"); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestReplicateContainerHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestReplicateContainerHandler.java new file mode 100644 index 0000000000..a5b101fa70 --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestReplicateContainerHandler.java @@ -0,0 +1,71 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.container.common.statemachine.commandhandler; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.concurrent.TimeoutException; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.ozone.MiniOzoneCluster; +import org.apache.hadoop.ozone.client.rest.OzoneException; +import org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand; +import org.apache.hadoop.test.GenericTestUtils; + +import static org.apache.hadoop.hdds.scm.ScmConfigKeys + .OZONE_SCM_CONTAINER_SIZE_GB; +import org.junit.Test; + +/** + * Tests the behavior of the datanode, when replicate container command is + * received. + */ +public class TestReplicateContainerHandler { + + @Test + public void test() throws IOException, TimeoutException, InterruptedException, + OzoneException { + + GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer + .captureLogs(ReplicateContainerCommandHandler.LOG); + + OzoneConfiguration conf = new OzoneConfiguration(); + conf.set(OZONE_SCM_CONTAINER_SIZE_GB, "1"); + MiniOzoneCluster cluster = + MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1).build(); + cluster.waitForClusterToBeReady(); + + DatanodeDetails datanodeDetails = + cluster.getHddsDatanodes().get(0).getDatanodeDetails(); + //send the order to replicate the container + cluster.getStorageContainerManager().getScmNodeManager() + .addDatanodeCommand(datanodeDetails.getUuid(), + new ReplicateContainerCommand(1L, + new ArrayList<>())); + + //TODO: here we test only the serialization/unserialization as + // the implementation is not yet done + GenericTestUtils + .waitFor(() -> logCapturer.getOutput().contains("not yet handled"), 500, + 5 * 1000); + + } + +} \ No newline at end of file From 3e586330eb2c7db0b884fe328b171fb27ce545fa Mon Sep 17 00:00:00 2001 From: Eric Yang Date: Tue, 26 Jun 2018 14:25:15 -0400 Subject: [PATCH 54/70] YARN-8214. Change default RegistryDNS port. Contributed by Billie Rinaldi --- .../hadoop/registry/client/api/RegistryConstants.java | 2 +- .../src/site/markdown/yarn-service/RegistryDNS.md | 10 +++++----- .../src/site/markdown/yarn-service/ServiceDiscovery.md | 6 +++--- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryConstants.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryConstants.java index cfa2d65e63..bd97a5a85d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryConstants.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryConstants.java @@ -95,7 +95,7 @@ public interface RegistryConstants { /** * Default DNS port number. */ - int DEFAULT_DNS_PORT = 5353; + int DEFAULT_DNS_PORT = 5335; /** * DNSSEC Enabled? 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/RegistryDNS.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/RegistryDNS.md index 2307e5ca61..642d26e531 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/RegistryDNS.md +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/RegistryDNS.md @@ -58,7 +58,7 @@ primary DNS server can be configured to forward a zone to the registry DNS server. 2. The DNS Server exposes a port that can receive both TCP and UDP requests per DNS standards. The default port for DNS protocols is not in the restricted -range (5353). However, existing DNS assets may only allow zone forwarding to +range (5335). However, existing DNS assets may only allow zone forwarding to non-custom ports. To support this, the registry DNS server can be started in privileged mode. @@ -136,7 +136,7 @@ standard DNS requests from users or other DNS servers (for example, DNS servers RegistryDNS service configured as a forwarder). ## Start the DNS Server -By default, the DNS server runs on non-privileged port `5353`. Start the server +By default, the DNS server runs on non-privileged port `5335`. Start the server with: ``` yarn --daemon start registrydns @@ -157,7 +157,7 @@ The Registry DNS server reads its configuration properties from the yarn-site.xm | hadoop.registry.dns.enabled | The DNS functionality is enabled for the cluster. Default is false. | | hadoop.registry.dns.domain-name | The domain name for Hadoop cluster associated records. | | hadoop.registry.dns.bind-address | Address associated with the network interface to which the DNS listener should bind. | -| hadoop.registry.dns.bind-port | The port number for the DNS listener. The default port is 5353. | +| hadoop.registry.dns.bind-port | The port number for the DNS listener. The default port is 5335. | | hadoop.registry.dns.dnssec.enabled | Indicates whether the DNSSEC support is enabled. Default is false. | | hadoop.registry.dns.public-key | The base64 representation of the server’s public key. Leveraged for creating the DNSKEY Record provided for DNSSEC client requests. | | hadoop.registry.dns.private-key-file | The path to the standard DNSSEC private key file. Must only be readable by the DNS launching identity. See [dnssec-keygen](https://ftp.isc.org/isc/bind/cur/9.9/doc/arm/man.dnssec-keygen.html) documentation. | @@ -174,10 +174,10 @@ The Registry DNS server reads its configuration properties from the yarn-site.xm - The port number for the DNS listener. The default port is 5353. + The port number for the DNS listener. The default port is 5335. If the standard privileged port 53 is used, make sure start the DNS with jsvc support. hadoop.registry.dns.bind-port - 5353 + 5335 diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/ServiceDiscovery.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/ServiceDiscovery.md index 7ee16dd6b1..6b93f3dd32 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/ServiceDiscovery.md +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/ServiceDiscovery.md @@ -94,10 +94,10 @@ section of [Registry DNS](RegistryDNS.html). - The port number for the DNS listener. The default port is 5353. + The port number for the DNS listener. The default port is 5335. 
If the standard privileged port 53 is used, make sure start the DNS with jsvc support. hadoop.registry.dns.bind-port - 5353 + 5335 @@ -135,7 +135,7 @@ To configure Registry DNS to serve reverse lookup for `172.17.0.0/24` ``` ## Start Registry DNS Server -By default, the DNS server runs on non-privileged port `5353`. Start the server +By default, the DNS server runs on non-privileged port `5335`. Start the server with: ``` yarn --daemon start registrydns From b69ba0f3307a90500aeb0c5db9e582fcda60b501 Mon Sep 17 00:00:00 2001 From: Eric Yang Date: Tue, 26 Jun 2018 17:34:57 -0400 Subject: [PATCH 55/70] YARN-8108. Added option to disable loading existing filters to prevent security filter from initialize twice. Contributed by Sunil Govindan --- .../org/apache/hadoop/yarn/webapp/WebApps.java | 14 +++++++++++--- .../server/resourcemanager/ResourceManager.java | 2 +- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java index 0d045f36a9..0e9f0a77be 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java @@ -83,6 +83,7 @@ static class ServletStruct { public String name; public String spec; public Map params; + public boolean loadExistingFilters = true; } final String name; @@ -151,12 +152,13 @@ public Builder withServlet(String name, String pathSpec, public Builder withServlet(String name, String pathSpec, Class servlet, - Map params) { + Map params,boolean loadExistingFilters) { ServletStruct struct = new ServletStruct(); struct.clazz = servlet; struct.name = name; struct.spec = pathSpec; struct.params = params; + struct.loadExistingFilters = loadExistingFilters; servlets.add(struct); return this; } @@ -256,9 +258,15 @@ public void setup() { pathList.add("/" + wsName + "/*"); } } + for (ServletStruct s : servlets) { if (!pathList.contains(s.spec)) { - pathList.add(s.spec); + // The servlet told us to not load-existing filters, but we still want + // to add the default authentication filter always, so add it to the + // pathList + if (!s.loadExistingFilters) { + pathList.add(s.spec); + } } } if (conf == null) { @@ -333,7 +341,7 @@ public void setup() { HttpServer2 server = builder.build(); for(ServletStruct struct: servlets) { - if (struct.params != null) { + if (!struct.loadExistingFilters) { server.addInternalServlet(struct.name, struct.spec, struct.clazz, struct.params); } else { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java index c53311127c..0b7e87cc0b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java @@ -1111,7 +1111,7 @@ protected void startWepApp() { "ws") .with(conf) .withServlet("API-Service", 
"/app/*", - ServletContainer.class, params) + ServletContainer.class, params, false) .withHttpSpnegoPrincipalKey( YarnConfiguration.RM_WEBAPP_SPNEGO_USER_NAME_KEY) .withHttpSpnegoKeytabKey( From 62d83ca5360cf803ecf6780caf192462d0092009 Mon Sep 17 00:00:00 2001 From: Miklos Szegedi Date: Tue, 26 Jun 2018 15:21:35 -0700 Subject: [PATCH 56/70] YARN-8461. Support strict memory control on individual container with elastic control memory mechanism. Contributed by Haibo Chen. --- .../CGroupsMemoryResourceHandlerImpl.java | 24 ++++ .../resources/MemoryResourceHandler.java | 10 ++ .../monitor/ContainersMonitorImpl.java | 112 +++++++++++------- .../TestCGroupsMemoryResourceHandlerImpl.java | 43 +++++++ 4 files changed, 144 insertions(+), 45 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsMemoryResourceHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsMemoryResourceHandlerImpl.java index a57adb1391..053b796b5f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsMemoryResourceHandlerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsMemoryResourceHandlerImpl.java @@ -34,6 +34,9 @@ import java.io.File; import java.util.ArrayList; import java.util.List; +import java.util.Optional; + +import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.CGroupsHandler.CGROUP_PARAM_MEMORY_OOM_CONTROL; /** * Handler class to handle the memory controller. 
YARN already ships a @@ -172,4 +175,25 @@ public List teardown() throws ResourceHandlerException { return null; } + @Override + public Optional isUnderOOM(ContainerId containerId) { + try { + String status = cGroupsHandler.getCGroupParam( + CGroupsHandler.CGroupController.MEMORY, + containerId.toString(), + CGROUP_PARAM_MEMORY_OOM_CONTROL); + if (LOG.isDebugEnabled()) { + LOG.debug("cgroups OOM status for " + containerId + ": " + status); + } + if (status.contains(CGroupsHandler.UNDER_OOM)) { + LOG.warn("Container " + containerId + " under OOM based on cgroups."); + return Optional.of(true); + } else { + return Optional.of(false); + } + } catch (ResourceHandlerException e) { + LOG.warn("Could not read cgroups" + containerId, e); + } + return Optional.empty(); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/MemoryResourceHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/MemoryResourceHandler.java index 013a49fbb4..1729fc17a0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/MemoryResourceHandler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/MemoryResourceHandler.java @@ -20,8 +20,18 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.yarn.api.records.ContainerId; + +import java.util.Optional; @InterfaceAudience.Private @InterfaceStability.Unstable public interface MemoryResourceHandler extends ResourceHandler { + /** + * check whether a container is under OOM. 
+ * @param containerId the id of the container + * @return empty if the status is unknown, true is the container is under oom, + * false otherwise + */ + Optional isUnderOOM(ContainerId containerId); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java index bd68dfe23d..d83fe39ffc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java @@ -22,6 +22,7 @@ import com.google.common.base.Preconditions; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.CGroupElasticMemoryController; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.MemoryResourceHandler; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandlerModule; import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics; import org.slf4j.Logger; @@ -51,6 +52,7 @@ import java.util.Arrays; import java.util.Map; import java.util.Map.Entry; +import java.util.Optional; import java.util.concurrent.ConcurrentHashMap; /** @@ -697,55 +699,75 @@ private void checkLimit(ContainerId containerId, String pId, ProcessTreeInfo ptInfo, long currentVmemUsage, long currentPmemUsage) { - if (elasticMemoryEnforcement || strictMemoryEnforcement) { - // We enforce the overall memory usage instead of individual containers - return; - } - boolean isMemoryOverLimit = false; - long vmemLimit = ptInfo.getVmemLimit(); - long pmemLimit = ptInfo.getPmemLimit(); - // as processes begin with an age 1, we want to see if there - // are processes more than 1 iteration old. - long curMemUsageOfAgedProcesses = pTree.getVirtualMemorySize(1); - long curRssMemUsageOfAgedProcesses = pTree.getRssMemorySize(1); + Optional isMemoryOverLimit = Optional.empty(); String msg = ""; int containerExitStatus = ContainerExitStatus.INVALID; - if (isVmemCheckEnabled() - && isProcessTreeOverLimit(containerId.toString(), - currentVmemUsage, curMemUsageOfAgedProcesses, vmemLimit)) { - // The current usage (age=0) is always higher than the aged usage. We - // do not show the aged size in the message, base the delta on the - // current usage - long delta = currentVmemUsage - vmemLimit; - // Container (the root process) is still alive and overflowing - // memory. - // Dump the process-tree and then clean it up. - msg = formatErrorMessage("virtual", - formatUsageString(currentVmemUsage, vmemLimit, - currentPmemUsage, pmemLimit), - pId, containerId, pTree, delta); - isMemoryOverLimit = true; - containerExitStatus = ContainerExitStatus.KILLED_EXCEEDED_VMEM; - } else if (isPmemCheckEnabled() - && isProcessTreeOverLimit(containerId.toString(), - currentPmemUsage, curRssMemUsageOfAgedProcesses, - pmemLimit)) { - // The current usage (age=0) is always higher than the aged usage. 
We - // do not show the aged size in the message, base the delta on the - // current usage - long delta = currentPmemUsage - pmemLimit; - // Container (the root process) is still alive and overflowing - // memory. - // Dump the process-tree and then clean it up. - msg = formatErrorMessage("physical", - formatUsageString(currentVmemUsage, vmemLimit, - currentPmemUsage, pmemLimit), - pId, containerId, pTree, delta); - isMemoryOverLimit = true; - containerExitStatus = ContainerExitStatus.KILLED_EXCEEDED_PMEM; + + if (strictMemoryEnforcement && elasticMemoryEnforcement) { + // Both elastic memory control and strict memory control are enabled + // through cgroups. A container will be frozen by the elastic memory + // control mechanism if it exceeds its request, so we check for this + // here and kill it. Otherwise, the container will not be killed if + // the node never exceeds its limit and the procfs-based + // memory accounting is different from the cgroup-based accounting. + + MemoryResourceHandler handler = + ResourceHandlerModule.getMemoryResourceHandler(); + if (handler != null) { + isMemoryOverLimit = handler.isUnderOOM(containerId); + containerExitStatus = ContainerExitStatus.KILLED_EXCEEDED_PMEM; + msg = containerId + " is under oom because it exceeded its" + + " physical memory limit"; + } + } else if (strictMemoryEnforcement || elasticMemoryEnforcement) { + // if cgroup-based memory control is enabled + isMemoryOverLimit = Optional.of(false); } - if (isMemoryOverLimit) { + if (!isMemoryOverLimit.isPresent()) { + long vmemLimit = ptInfo.getVmemLimit(); + long pmemLimit = ptInfo.getPmemLimit(); + // as processes begin with an age 1, we want to see if there + // are processes more than 1 iteration old. + long curMemUsageOfAgedProcesses = pTree.getVirtualMemorySize(1); + long curRssMemUsageOfAgedProcesses = pTree.getRssMemorySize(1); + if (isVmemCheckEnabled() + && isProcessTreeOverLimit(containerId.toString(), + currentVmemUsage, curMemUsageOfAgedProcesses, vmemLimit)) { + // The current usage (age=0) is always higher than the aged usage. We + // do not show the aged size in the message, base the delta on the + // current usage + long delta = currentVmemUsage - vmemLimit; + // Container (the root process) is still alive and overflowing + // memory. + // Dump the process-tree and then clean it up. + msg = formatErrorMessage("virtual", + formatUsageString(currentVmemUsage, vmemLimit, + currentPmemUsage, pmemLimit), + pId, containerId, pTree, delta); + isMemoryOverLimit = Optional.of(true); + containerExitStatus = ContainerExitStatus.KILLED_EXCEEDED_VMEM; + } else if (isPmemCheckEnabled() + && isProcessTreeOverLimit(containerId.toString(), + currentPmemUsage, curRssMemUsageOfAgedProcesses, + pmemLimit)) { + // The current usage (age=0) is always higher than the aged usage. We + // do not show the aged size in the message, base the delta on the + // current usage + long delta = currentPmemUsage - pmemLimit; + // Container (the root process) is still alive and overflowing + // memory. + // Dump the process-tree and then clean it up. + msg = formatErrorMessage("physical", + formatUsageString(currentVmemUsage, vmemLimit, + currentPmemUsage, pmemLimit), + pId, containerId, pTree, delta); + isMemoryOverLimit = Optional.of(true); + containerExitStatus = ContainerExitStatus.KILLED_EXCEEDED_PMEM; + } + } + + if (isMemoryOverLimit.isPresent() && isMemoryOverLimit.get()) { // Virtual or physical memory over limit. 
Fail the container and // remove // the corresponding process tree diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsMemoryResourceHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsMemoryResourceHandlerImpl.java index 5c7e233381..4d3e7e6e1d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsMemoryResourceHandlerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsMemoryResourceHandlerImpl.java @@ -31,7 +31,9 @@ import org.junit.Assert; import java.util.List; +import java.util.Optional; +import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.CGroupsHandler.CGROUP_PARAM_MEMORY_OOM_CONTROL; import static org.mockito.Mockito.*; /** @@ -242,4 +244,45 @@ public void testOpportunistic() throws Exception { .updateCGroupParam(CGroupsHandler.CGroupController.MEMORY, id, CGroupsHandler.CGROUP_PARAM_MEMORY_HARD_LIMIT_BYTES, "1024M"); } + + @Test + public void testContainerUnderOom() throws Exception { + Configuration conf = new YarnConfiguration(); + conf.setBoolean(YarnConfiguration.NM_PMEM_CHECK_ENABLED, false); + conf.setBoolean(YarnConfiguration.NM_VMEM_CHECK_ENABLED, false); + + cGroupsMemoryResourceHandler.bootstrap(conf); + + ContainerId containerId = mock(ContainerId.class); + when(containerId.toString()).thenReturn("container_01_01"); + + when(mockCGroupsHandler.getCGroupParam( + CGroupsHandler.CGroupController.MEMORY, + containerId.toString(), + CGROUP_PARAM_MEMORY_OOM_CONTROL)).thenReturn(CGroupsHandler.UNDER_OOM); + Optional outOfOom = + cGroupsMemoryResourceHandler.isUnderOOM(containerId); + Assert.assertTrue("The container should be reported to run under oom", + outOfOom.isPresent() && outOfOom.get().equals(true)); + + when(mockCGroupsHandler.getCGroupParam( + CGroupsHandler.CGroupController.MEMORY, + containerId.toString(), + CGROUP_PARAM_MEMORY_OOM_CONTROL)).thenReturn(""); + outOfOom = cGroupsMemoryResourceHandler.isUnderOOM(containerId); + Assert.assertTrue( + "The container should not be reported to run under oom", + outOfOom.isPresent() && outOfOom.get().equals(false)); + + when(mockCGroupsHandler.getCGroupParam( + CGroupsHandler.CGroupController.MEMORY, + containerId.toString(), + CGROUP_PARAM_MEMORY_OOM_CONTROL)). + thenThrow(new ResourceHandlerException()); + outOfOom = cGroupsMemoryResourceHandler.isUnderOOM(containerId); + Assert.assertFalse( + "No report of the oom status should be available.", + outOfOom.isPresent()); + + } } From ada8f63d0b3739d245300461387b0516dc92ccf9 Mon Sep 17 00:00:00 2001 From: Wangda Tan Date: Tue, 26 Jun 2018 19:25:57 -0700 Subject: [PATCH 57/70] YARN-8423. GPU does not get released even though the application gets killed. 
(Sunil G via wangda) Change-Id: I570db7d60f8c6c21762dd618a9207d1107c486a0 --- .../containermanager/container/Container.java | 6 ++ .../container/ContainerImpl.java | 11 +++ .../resources/gpu/GpuResourceAllocator.java | 68 ++++++++++++++++++- .../resources/gpu/GpuResourceHandlerImpl.java | 1 - .../nodemanager/webapp/MockContainer.java | 3 + 5 files changed, 85 insertions(+), 4 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/Container.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/Container.java index 5d48d8486b..4912d02758 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/Container.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/Container.java @@ -113,4 +113,10 @@ public interface Container extends EventHandler { ResourceMappings getResourceMappings(); void sendPauseEvent(String description); + + /** + * Verify container is in final states. + * @return true/false based on container's state + */ + boolean isContainerInFinalStates(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java index 0541544ab5..f76e682339 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java @@ -2223,4 +2223,15 @@ private void storeRetryContext() { SlidingWindowRetryPolicy getRetryPolicy() { return retryPolicy; } + + @Override + public boolean isContainerInFinalStates() { + ContainerState state = getContainerState(); + return state == ContainerState.KILLING || state == ContainerState.DONE + || state == ContainerState.LOCALIZATION_FAILED + || state == ContainerState.CONTAINER_RESOURCES_CLEANINGUP + || state == ContainerState.CONTAINER_CLEANEDUP_AFTER_KILL + || state == ContainerState.EXITED_WITH_FAILURE + || state == ContainerState.EXITED_WITH_SUCCESS; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceAllocator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceAllocator.java index 5bdffc369b..81a965522c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceAllocator.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceAllocator.java @@ -26,6 +26,7 @@ import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.ResourceInformation; import org.apache.hadoop.yarn.exceptions.ResourceNotFoundException; import org.apache.hadoop.yarn.server.nodemanager.Context; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; @@ -36,10 +37,8 @@ import java.io.IOException; import java.io.Serializable; import java.util.ArrayList; -import java.util.Collection; import java.util.Collections; import java.util.HashMap; -import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; @@ -54,6 +53,7 @@ */ public class GpuResourceAllocator { final static Log LOG = LogFactory.getLog(GpuResourceAllocator.class); + private static final int WAIT_MS_PER_LOOP = 1000; private Set allowedGpuDevices = new TreeSet<>(); private Map usedDevices = new TreeMap<>(); @@ -168,13 +168,58 @@ public static int getRequestedGpus(Resource requestedResource) { * @return allocation results. * @throws ResourceHandlerException When failed to assign GPUs. */ - public synchronized GpuAllocation assignGpus(Container container) + public GpuAllocation assignGpus(Container container) + throws ResourceHandlerException { + GpuAllocation allocation = internalAssignGpus(container); + + // Wait for a maximum of 120 seconds if no available GPU are there which + // are yet to be released. + final int timeoutMsecs = 120 * WAIT_MS_PER_LOOP; + int timeWaiting = 0; + while (allocation == null) { + if (timeWaiting >= timeoutMsecs) { + break; + } + + // Sleep for 1 sec to ensure there are some free GPU devices which are + // getting released. + try { + LOG.info("Container : " + container.getContainerId() + + " is waiting for free GPU devices."); + Thread.sleep(WAIT_MS_PER_LOOP); + timeWaiting += WAIT_MS_PER_LOOP; + allocation = internalAssignGpus(container); + } catch (InterruptedException e) { + // On any interrupt, break the loop and continue execution. + break; + } + } + + if(allocation == null) { + String message = "Could not get valid GPU device for container '" + + container.getContainerId() + + "' as some other containers might not releasing GPUs."; + LOG.warn(message); + throw new ResourceHandlerException(message); + } + return allocation; + } + + private synchronized GpuAllocation internalAssignGpus(Container container) throws ResourceHandlerException { Resource requestedResource = container.getResource(); ContainerId containerId = container.getContainerId(); int numRequestedGpuDevices = getRequestedGpus(requestedResource); // Assign Gpus to container if requested some. if (numRequestedGpuDevices > 0) { + if (numRequestedGpuDevices > getAvailableGpus()) { + // If there are some devices which are getting released, wait for few + // seconds to get it. 
+ if (numRequestedGpuDevices <= getReleasingGpus() + getAvailableGpus()) { + return null; + } + } + if (numRequestedGpuDevices > getAvailableGpus()) { throw new ResourceHandlerException( getResourceHandlerExceptionMessage(numRequestedGpuDevices, @@ -211,6 +256,23 @@ public synchronized GpuAllocation assignGpus(Container container) return new GpuAllocation(null, allowedGpuDevices); } + private synchronized long getReleasingGpus() { + long releasingGpus = 0; + Iterator> iter = usedDevices.entrySet() + .iterator(); + while (iter.hasNext()) { + ContainerId containerId = iter.next().getValue(); + Container container; + if ((container = nmContext.getContainers().get(containerId)) != null) { + if (container.isContainerInFinalStates()) { + releasingGpus = releasingGpus + container.getResource() + .getResourceInformation(ResourceInformation.GPU_URI).getValue(); + } + } + } + return releasingGpus; + } + /** * Clean up all Gpus assigned to containerId * @param containerId containerId diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceHandlerImpl.java index 587fcb4983..118438296b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceHandlerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceHandlerImpl.java @@ -18,7 +18,6 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.gpu; -import com.google.common.annotations.VisibleForTesting; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/MockContainer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/MockContainer.java index 325709b07a..67dfef259d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/MockContainer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/MockContainer.java @@ -255,4 +255,7 @@ public ResourceMappings getResourceMappings() { public void sendPauseEvent(String description) { } + @Override public boolean isContainerInFinalStates() { + return false; + } } From bedc4fe0799cf3b161100acc521fc62a97793427 Mon Sep 17 00:00:00 2001 From: Wangda Tan Date: Tue, 26 Jun 2018 19:27:17 -0700 Subject: [PATCH 58/70] YARN-8464. Async scheduling thread could be interrupted when there are no NodeManagers in cluster. 
(Sunil G via wangda) Change-Id: I4f5f856373378685713e77752ba6cf0988a66065 --- .../scheduler/capacity/CapacityScheduler.java | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java index 50ab70d03e..54bbf24ef7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java @@ -521,7 +521,14 @@ static void schedule(CapacityScheduler cs) throws InterruptedException{ // First randomize the start point int current = 0; Collection nodes = cs.nodeTracker.getAllNodes(); - int start = random.nextInt(nodes.size()); + + // If nodes size is 0 (when there are no node managers registered, + // we can return from here itself. + int nodeSize = nodes.size(); + if(nodeSize == 0) { + return; + } + int start = random.nextInt(nodeSize); // To avoid too verbose DEBUG logging, only print debug log once for // every 10 secs. @@ -574,6 +581,7 @@ public AsyncScheduleThread(CapacityScheduler cs) { @Override public void run() { + int debuggingLogCounter = 0; while (!Thread.currentThread().isInterrupted()) { try { if (!runSchedules.get()) { @@ -585,6 +593,14 @@ public void run() { Thread.sleep(1); } else{ schedule(cs); + if(LOG.isDebugEnabled()) { + // Adding a debug log here to ensure that the thread is alive + // and running fine. + if (debuggingLogCounter++ > 10000) { + debuggingLogCounter = 0; + LOG.debug("AsyncScheduleThread[" + getName() + "] is running!"); + } + } } } } catch (InterruptedException ie) { From fbaff369e9b9022723a7b2c6f25e71122a8f8a15 Mon Sep 17 00:00:00 2001 From: Sunil G Date: Wed, 27 Jun 2018 10:35:15 -0700 Subject: [PATCH 59/70] YARN-8401. [UI2] new ui is not accessible with out internet connection. Contributed by Bibin A Chundatt. --- .../hadoop-yarn-ui/src/main/webapp/WEB-INF/web.xml | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/WEB-INF/web.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/WEB-INF/web.xml index ddb8532434..ac74d5ccea 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/WEB-INF/web.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/WEB-INF/web.xml @@ -16,10 +16,6 @@ * limitations under the License. --> - - - + YARN UI From aaf03cc459a34af284f9735453aefd4ddb430d67 Mon Sep 17 00:00:00 2001 From: Xiaoyu Yao Date: Wed, 27 Jun 2018 12:39:15 -0700 Subject: [PATCH 60/70] HDDS-194. Remove NodePoolManager and node pool handling from SCM. 
Contributed by Elek Marton --- .../apache/hadoop/hdds/scm/ScmConfigKeys.java | 26 -- .../org/apache/hadoop/ozone/OzoneConsts.java | 1 - .../src/main/resources/ozone-default.xml | 47 --- .../replication/ReplicationQueue.java | 78 ++++ .../replication/ReplicationReqMsg.java | 107 ++++++ .../container/replication/package-info.java | 23 ++ .../replication/TestReplicationQueue.java | 134 +++++++ .../container/replication/package-info.java | 23 ++ .../hdds/scm/container/ContainerMapping.java | 10 +- .../replication/ContainerSupervisor.java | 340 ------------------ .../container/replication/InProgressPool.java | 255 ------------- .../container/replication/PeriodicPool.java | 119 ------ .../container/replication/package-info.java | 23 -- .../hadoop/hdds/scm/node/NodeManager.java | 6 - .../hadoop/hdds/scm/node/NodePoolManager.java | 71 ---- .../hadoop/hdds/scm/node/SCMNodeManager.java | 23 -- .../hdds/scm/node/SCMNodePoolManager.java | 269 -------------- .../hdds/scm/container/MockNodeManager.java | 6 - .../hdds/scm/node/TestSCMNodePoolManager.java | 160 --------- .../testutils/ReplicationNodeManagerMock.java | 5 - .../ReplicationNodePoolManagerMock.java | 133 ------- .../hadoop/ozone/scm/TestContainerSQLCli.java | 31 -- .../apache/hadoop/ozone/scm/cli/SQLCLI.java | 74 ---- 23 files changed, 368 insertions(+), 1596 deletions(-) create mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationQueue.java create mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationReqMsg.java create mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/package-info.java create mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationQueue.java create mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerSupervisor.java delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/InProgressPool.java delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/PeriodicPool.java delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodePoolManager.java delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodePoolManager.java delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodePoolManager.java delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodePoolManagerMock.java diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java index 85407e65ce..df6fbf0c75 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java @@ -243,32 +243,6 @@ public final class ScmConfigKeys { public static final String OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT_DEFAULT = "60s"; - /** - * Don't start processing a pool if we have not had a 
minimum number of - * seconds from the last processing. - */ - public static final String OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL = - "ozone.scm.container.report.processing.interval"; - public static final String - OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL_DEFAULT = "60s"; - - /** - * This determines the total number of pools to be processed in parallel. - */ - public static final String OZONE_SCM_MAX_NODEPOOL_PROCESSING_THREADS = - "ozone.scm.max.nodepool.processing.threads"; - public static final int OZONE_SCM_MAX_NODEPOOL_PROCESSING_THREADS_DEFAULT = 1; - /** - * These 2 settings control the number of threads in executor pool and time - * outs for thw container reports from all nodes. - */ - public static final String OZONE_SCM_MAX_CONTAINER_REPORT_THREADS = - "ozone.scm.max.container.report.threads"; - public static final int OZONE_SCM_MAX_CONTAINER_REPORT_THREADS_DEFAULT = 100; - public static final String OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT = - "ozone.scm.container.reports.wait.timeout"; - public static final String OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT_DEFAULT = - "5m"; public static final String OZONE_SCM_BLOCK_DELETION_MAX_RETRY = "ozone.scm.block.deletion.max.retry"; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java index c40dc8e4ee..08a5ffdb87 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java @@ -91,7 +91,6 @@ public final class OzoneConsts { public static final String SCM_CONTAINER_DB = "scm-" + CONTAINER_DB_SUFFIX; public static final String DN_CONTAINER_DB = "-dn-"+ CONTAINER_DB_SUFFIX; public static final String BLOCK_DB = "block.db"; - public static final String NODEPOOL_DB = "nodepool.db"; public static final String OPEN_CONTAINERS_DB = "openContainers.db"; public static final String DELETED_BLOCK_DB = "deletedBlock.db"; public static final String KSM_DB_NAME = "ksm.db"; diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index 7a91610c65..25365c8d9d 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -571,25 +571,6 @@ allocation. - - ozone.scm.container.report.processing.interval - 60s - OZONE, PERFORMANCE - Time interval for scm to process container reports - for a node pool. Scm handles node pool reports in a cyclic clock - manner, it fetches pools periodically with this time interval. - - - - ozone.scm.container.reports.wait.timeout - 300s - OZONE, PERFORMANCE, MANAGEMENT - Maximum time to wait in seconds for processing all container - reports from - a node pool. It determines the timeout for a - node pool report. - - ozone.scm.container.size.gb 5 @@ -792,17 +773,6 @@ The keytab file for Kerberos authentication in SCM. - - ozone.scm.max.container.report.threads - 100 - OZONE, PERFORMANCE - - Maximum number of threads to process container reports in scm. - Each container report from a data node is processed by scm in a worker - thread, fetched from a thread pool. This property is used to control the - maximum size of the thread pool. - - ozone.scm.max.hb.count.to.process 5000 @@ -814,14 +784,6 @@ for more info. - - ozone.scm.max.nodepool.processing.threads - 1 - OZONE, MANAGEMENT, PERFORMANCE - - Number of node pools to process in parallel. 
- - ozone.scm.names @@ -843,15 +805,6 @@ see ozone.scm.heartbeat.thread.interval before changing this value. - - ozone.scm.max.nodepool.processing.threads - 1 - OZONE, SCM - - Controls the number of node pools that can be processed in parallel by - Container Supervisor. - - ozone.trace.enabled false diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationQueue.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationQueue.java new file mode 100644 index 0000000000..b83ecf13bc --- /dev/null +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationQueue.java @@ -0,0 +1,78 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.container.replication; + +import java.util.List; +import java.util.PriorityQueue; +import java.util.Queue; + +/** + * Priority queue to handle under-replicated and over replicated containers + * in ozone. ReplicationManager will consume these messages and decide + * accordingly. + */ +public class ReplicationQueue { + + private final Queue queue; + + ReplicationQueue() { + queue = new PriorityQueue<>(); + } + + public synchronized boolean add(ReplicationReqMsg repObj) { + if (this.queue.contains(repObj)) { + // Remove the earlier message and insert this one + this.queue.remove(repObj); + return this.queue.add(repObj); + } else { + return this.queue.add(repObj); + } + } + + public synchronized boolean remove(ReplicationReqMsg repObj) { + return queue.remove(repObj); + } + + /** + * Retrieves, but does not remove, the head of this queue, + * or returns {@code null} if this queue is empty. + * + * @return the head of this queue, or {@code null} if this queue is empty + */ + public synchronized ReplicationReqMsg peek() { + return queue.peek(); + } + + /** + * Retrieves and removes the head of this queue, + * or returns {@code null} if this queue is empty. 
+ * + * @return the head of this queue, or {@code null} if this queue is empty + */ + public synchronized ReplicationReqMsg poll() { + return queue.poll(); + } + + public synchronized boolean removeAll(List repObjs) { + return queue.removeAll(repObjs); + } + + public int size() { + return queue.size(); + } +} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationReqMsg.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationReqMsg.java new file mode 100644 index 0000000000..8d26fc368d --- /dev/null +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationReqMsg.java @@ -0,0 +1,107 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.container.replication; + +import java.io.Serializable; +import org.apache.commons.lang3.builder.EqualsBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; +import org.apache.commons.lang3.math.NumberUtils; + +/** + * Wrapper class for hdds replication queue. Implements its natural + * ordering for priority queue. + */ +public class ReplicationReqMsg implements Comparable, + Serializable { + private final long containerId; + private final short replicationCount; + private final short expecReplicationCount; + private final long timestamp; + + public ReplicationReqMsg(long containerId, short replicationCount, + long timestamp, short expecReplicationCount) { + this.containerId = containerId; + this.replicationCount = replicationCount; + this.timestamp = timestamp; + this.expecReplicationCount = expecReplicationCount; + } + + /** + * Compares this object with the specified object for order. Returns a + * negative integer, zero, or a positive integer as this object is less + * than, equal to, or greater than the specified object. + * @param o the object to be compared. + * @return a negative integer, zero, or a positive integer as this object + * is less than, equal to, or greater than the specified object. + * @throws NullPointerException if the specified object is null + * @throws ClassCastException if the specified object's type prevents it + * from being compared to this object. 
+ */ + @Override + public int compareTo(ReplicationReqMsg o) { + if (this == o) { + return 0; + } + if (o == null) { + return 1; + } + int retVal = NumberUtils + .compare(getReplicationCount() - getExpecReplicationCount(), + o.getReplicationCount() - o.getExpecReplicationCount()); + if (retVal != 0) { + return retVal; + } + return NumberUtils.compare(getTimestamp(), o.getTimestamp()); + } + + @Override + public int hashCode() { + return new HashCodeBuilder(91, 1011) + .append(getContainerId()) + .toHashCode(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + ReplicationReqMsg that = (ReplicationReqMsg) o; + return new EqualsBuilder().append(getContainerId(), that.getContainerId()) + .isEquals(); + } + + public long getContainerId() { + return containerId; + } + + public short getReplicationCount() { + return replicationCount; + } + + public long getTimestamp() { + return timestamp; + } + + public short getExpecReplicationCount() { + return expecReplicationCount; + } +} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/package-info.java new file mode 100644 index 0000000000..7f335e37c1 --- /dev/null +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/package-info.java @@ -0,0 +1,23 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.container.replication; + +/** + * Ozone Container replicaton related classes. + */ \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationQueue.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationQueue.java new file mode 100644 index 0000000000..39c61d32a0 --- /dev/null +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationQueue.java @@ -0,0 +1,134 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.container.replication; + +import java.util.Random; +import java.util.UUID; +import org.apache.hadoop.util.Time; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +/** + * Test class for ReplicationQueue. + */ +public class TestReplicationQueue { + + private ReplicationQueue replicationQueue; + private Random random; + + @Before + public void setUp() { + replicationQueue = new ReplicationQueue(); + random = new Random(); + } + + @Test + public void testDuplicateAddOp() { + long contId = random.nextLong(); + String nodeId = UUID.randomUUID().toString(); + ReplicationReqMsg obj1, obj2, obj3; + long time = Time.monotonicNow(); + obj1 = new ReplicationReqMsg(contId, (short) 2, time, (short) 3); + obj2 = new ReplicationReqMsg(contId, (short) 2, time + 1, (short) 3); + obj3 = new ReplicationReqMsg(contId, (short) 1, time+2, (short) 3); + + replicationQueue.add(obj1); + replicationQueue.add(obj2); + replicationQueue.add(obj3); + Assert.assertEquals("Should add only 1 msg as second one is duplicate", + 1, replicationQueue.size()); + ReplicationReqMsg temp = replicationQueue.poll(); + Assert.assertEquals(temp, obj3); + } + + @Test + public void testPollOp() { + long contId = random.nextLong(); + String nodeId = UUID.randomUUID().toString(); + ReplicationReqMsg msg1, msg2, msg3, msg4, msg5; + msg1 = new ReplicationReqMsg(contId, (short) 1, Time.monotonicNow(), + (short) 3); + long time = Time.monotonicNow(); + msg2 = new ReplicationReqMsg(contId + 1, (short) 4, time, (short) 3); + msg3 = new ReplicationReqMsg(contId + 2, (short) 0, time, (short) 3); + msg4 = new ReplicationReqMsg(contId, (short) 2, time, (short) 3); + // Replication message for same container but different nodeId + msg5 = new ReplicationReqMsg(contId + 1, (short) 2, time, (short) 3); + + replicationQueue.add(msg1); + replicationQueue.add(msg2); + replicationQueue.add(msg3); + replicationQueue.add(msg4); + replicationQueue.add(msg5); + Assert.assertEquals("Should have 3 objects", + 3, replicationQueue.size()); + + // Since Priority queue orders messages according to replication count, + // message with lowest replication should be first + ReplicationReqMsg temp; + temp = replicationQueue.poll(); + Assert.assertEquals("Should have 2 objects", + 2, replicationQueue.size()); + Assert.assertEquals(temp, msg3); + + temp = replicationQueue.poll(); + Assert.assertEquals("Should have 1 objects", + 1, replicationQueue.size()); + Assert.assertEquals(temp, msg5); + + // Message 2 should be ordered before message 5 as both have same replication + // number but message 2 has earlier timestamp. 
+ temp = replicationQueue.poll(); + Assert.assertEquals("Should have 0 objects", + replicationQueue.size(), 0); + Assert.assertEquals(temp, msg4); + } + + @Test + public void testRemoveOp() { + long contId = random.nextLong(); + String nodeId = UUID.randomUUID().toString(); + ReplicationReqMsg obj1, obj2, obj3; + obj1 = new ReplicationReqMsg(contId, (short) 1, Time.monotonicNow(), + (short) 3); + obj2 = new ReplicationReqMsg(contId + 1, (short) 2, Time.monotonicNow(), + (short) 3); + obj3 = new ReplicationReqMsg(contId + 2, (short) 3, Time.monotonicNow(), + (short) 3); + + replicationQueue.add(obj1); + replicationQueue.add(obj2); + replicationQueue.add(obj3); + Assert.assertEquals("Should have 3 objects", + 3, replicationQueue.size()); + + replicationQueue.remove(obj3); + Assert.assertEquals("Should have 2 objects", + 2, replicationQueue.size()); + + replicationQueue.remove(obj2); + Assert.assertEquals("Should have 1 objects", + 1, replicationQueue.size()); + + replicationQueue.remove(obj1); + Assert.assertEquals("Should have 0 objects", + 0, replicationQueue.size()); + } + +} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java new file mode 100644 index 0000000000..5b1fd0f43a --- /dev/null +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java @@ -0,0 +1,23 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +/** + * SCM Testing and Mocking Utils. + */ +package org.apache.hadoop.ozone.container.replication; +// Test classes for Replication functionality. 
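For reference, the behavior encoded by the two new classes above can be summarized as follows: ReplicationReqMsg orders requests by their replication deficit (replicationCount minus expecReplicationCount, smallest value first) and breaks ties in favor of the older timestamp, while equals() and hashCode() are keyed on the container id alone, so re-adding a request for the same container replaces the earlier entry in the queue. The stand-alone sketch below shows one way a consumer might drive the queue; ReplicationQueueExample and its main() driver are hypothetical and not part of this patch, and only ReplicationQueue, ReplicationReqMsg and org.apache.hadoop.util.Time come from the code above.

package org.apache.hadoop.ozone.container.replication;

import org.apache.hadoop.util.Time;

/**
 * Hypothetical driver (not part of this patch) illustrating the ordering,
 * tie-breaking and duplicate-replacement semantics of ReplicationQueue.
 */
public final class ReplicationQueueExample {

  private ReplicationQueueExample() {
  }

  public static void main(String[] args) {
    // ReplicationQueue's constructor is package-private, so this sketch
    // lives in the same package as the classes added by the patch.
    ReplicationQueue queue = new ReplicationQueue();
    long now = Time.monotonicNow();

    // Container 1 has 1 of 3 expected replicas, container 2 has 2 of 3.
    queue.add(new ReplicationReqMsg(1L, (short) 1, now, (short) 3));
    queue.add(new ReplicationReqMsg(2L, (short) 2, now, (short) 3));

    // A newer report for container 1 replaces the earlier entry because
    // equality is based on the container id only.
    queue.add(new ReplicationReqMsg(1L, (short) 2, now + 1, (short) 3));

    // Poll in priority order: the largest replication deficit first,
    // ties broken by the older timestamp (here container 2, then 1).
    ReplicationReqMsg next;
    while ((next = queue.poll()) != null) {
      System.out.println("replicate container " + next.getContainerId());
    }
  }
}

The same replacement and ordering rules are what the assertions in testDuplicateAddOp and testPollOp above rely on.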
\ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java index b563e90e76..9fd30f2ad0 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java @@ -24,7 +24,6 @@ import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.container.closer.ContainerCloser; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; -import org.apache.hadoop.hdds.scm.container.replication.ContainerSupervisor; import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.hdds.scm.node.NodeManager; import org.apache.hadoop.hdds.scm.pipelines.PipelineSelector; @@ -80,7 +79,6 @@ public class ContainerMapping implements Mapping { private final PipelineSelector pipelineSelector; private final ContainerStateManager containerStateManager; private final LeaseManager containerLeaseManager; - private final ContainerSupervisor containerSupervisor; private final float containerCloseThreshold; private final ContainerCloser closer; private final long size; @@ -127,9 +125,7 @@ public ContainerMapping( OZONE_SCM_CONTAINER_SIZE_DEFAULT) * 1024 * 1024 * 1024; this.containerStateManager = new ContainerStateManager(conf, this); - this.containerSupervisor = - new ContainerSupervisor(conf, nodeManager, - nodeManager.getNodePoolManager()); + this.containerCloseThreshold = conf.getFloat( ScmConfigKeys.OZONE_SCM_CONTAINER_CLOSE_THRESHOLD, ScmConfigKeys.OZONE_SCM_CONTAINER_CLOSE_THRESHOLD_DEFAULT); @@ -407,8 +403,8 @@ public void processContainerReports(DatanodeDetails datanodeDetails, throws IOException { List containerInfos = reports.getReportsList(); - containerSupervisor.handleContainerReport(datanodeDetails, reports); - for (StorageContainerDatanodeProtocolProtos.ContainerInfo datanodeState : + + for (StorageContainerDatanodeProtocolProtos.ContainerInfo datanodeState : containerInfos) { byte[] dbKey = Longs.toByteArray(datanodeState.getContainerID()); lock.lock(); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerSupervisor.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerSupervisor.java deleted file mode 100644 index 5bd05746bf..0000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerSupervisor.java +++ /dev/null @@ -1,340 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
    - * http://www.apache.org/licenses/LICENSE-2.0 - *
    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.container.replication; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.util.concurrent.ThreadFactoryBuilder; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.hdds.scm.node.NodePoolManager; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; -import org.apache.hadoop.util.Time; -import org.apache.hadoop.util.concurrent.HadoopExecutors; -import org.apache.hadoop.util.concurrent.HadoopThreadPoolExecutor; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.Closeable; -import java.io.IOException; -import java.util.HashSet; -import java.util.LinkedList; -import java.util.List; -import java.util.PriorityQueue; -import java.util.Set; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.ThreadFactory; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.locks.ReadWriteLock; -import java.util.concurrent.locks.ReentrantReadWriteLock; - -import static com.google.common.util.concurrent.Uninterruptibles - .sleepUninterruptibly; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT_DEFAULT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL_DEFAULT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_MAX_CONTAINER_REPORT_THREADS; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_MAX_CONTAINER_REPORT_THREADS_DEFAULT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_MAX_NODEPOOL_PROCESSING_THREADS; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_MAX_NODEPOOL_PROCESSING_THREADS_DEFAULT; - -/** - * This class takes a set of container reports that belong to a pool and then - * computes the replication levels for each container. 
- */ -public class ContainerSupervisor implements Closeable { - public static final Logger LOG = - LoggerFactory.getLogger(ContainerSupervisor.class); - - private final NodePoolManager poolManager; - private final HashSet poolNames; - private final PriorityQueue poolQueue; - private final NodeManager nodeManager; - private final long containerProcessingLag; - private final AtomicBoolean runnable; - private final ExecutorService executorService; - private final long maxPoolWait; - private long poolProcessCount; - private final List inProgressPoolList; - private final AtomicInteger threadFaultCount; - private final int inProgressPoolMaxCount; - - private final ReadWriteLock inProgressPoolListLock; - - /** - * Returns the number of times we have processed pools. - * @return long - */ - public long getPoolProcessCount() { - return poolProcessCount; - } - - - /** - * Constructs a class that computes Replication Levels. - * - * @param conf - OzoneConfiguration - * @param nodeManager - Node Manager - * @param poolManager - Pool Manager - */ - public ContainerSupervisor(Configuration conf, NodeManager nodeManager, - NodePoolManager poolManager) { - Preconditions.checkNotNull(poolManager); - Preconditions.checkNotNull(nodeManager); - this.containerProcessingLag = - conf.getTimeDuration(OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL, - OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL_DEFAULT, - TimeUnit.SECONDS - ) * 1000; - int maxContainerReportThreads = - conf.getInt(OZONE_SCM_MAX_CONTAINER_REPORT_THREADS, - OZONE_SCM_MAX_CONTAINER_REPORT_THREADS_DEFAULT - ); - this.maxPoolWait = - conf.getTimeDuration(OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT, - OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT_DEFAULT, - TimeUnit.MILLISECONDS); - this.inProgressPoolMaxCount = conf.getInt( - OZONE_SCM_MAX_NODEPOOL_PROCESSING_THREADS, - OZONE_SCM_MAX_NODEPOOL_PROCESSING_THREADS_DEFAULT); - this.poolManager = poolManager; - this.nodeManager = nodeManager; - this.poolNames = new HashSet<>(); - this.poolQueue = new PriorityQueue<>(); - this.runnable = new AtomicBoolean(true); - this.threadFaultCount = new AtomicInteger(0); - this.executorService = newCachedThreadPool( - new ThreadFactoryBuilder().setDaemon(true) - .setNameFormat("Container Reports Processing Thread - %d") - .build(), maxContainerReportThreads); - this.inProgressPoolList = new LinkedList<>(); - this.inProgressPoolListLock = new ReentrantReadWriteLock(); - - initPoolProcessThread(); - } - - private ExecutorService newCachedThreadPool(ThreadFactory threadFactory, - int maxThreads) { - return new HadoopThreadPoolExecutor(0, maxThreads, 60L, TimeUnit.SECONDS, - new LinkedBlockingQueue<>(), threadFactory); - } - - /** - * Returns the number of pools that are under process right now. - * @return int - Number of pools that are in process. - */ - public int getInProgressPoolCount() { - return inProgressPoolList.size(); - } - - /** - * Exits the background thread. - */ - public void setExit() { - this.runnable.set(false); - } - - /** - * Adds or removes pools from names that we need to process. - * - * There are two different cases that we need to process. - * The case where some pools are being added and some times we have to - * handle cases where pools are removed. 
- */ - private void refreshPools() { - List pools = this.poolManager.getNodePools(); - if (pools != null) { - - HashSet removedPools = - computePoolDifference(this.poolNames, new HashSet<>(pools)); - - HashSet addedPools = - computePoolDifference(new HashSet<>(pools), this.poolNames); - // TODO: Support remove pool API in pool manager so that this code - // path can be tested. This never happens in the current code base. - for (String poolName : removedPools) { - for (PeriodicPool periodicPool : poolQueue) { - if (periodicPool.getPoolName().compareTo(poolName) == 0) { - poolQueue.remove(periodicPool); - } - } - } - // Remove the pool names that we have in the list. - this.poolNames.removeAll(removedPools); - - for (String poolName : addedPools) { - poolQueue.add(new PeriodicPool(poolName)); - } - - // Add to the pool names we are tracking. - poolNames.addAll(addedPools); - } - - } - - /** - * Handle the case where pools are added. - * - * @param newPools - New Pools list - * @param oldPool - oldPool List. - */ - private HashSet computePoolDifference(HashSet newPools, - Set oldPool) { - Preconditions.checkNotNull(newPools); - Preconditions.checkNotNull(oldPool); - HashSet newSet = new HashSet<>(newPools); - newSet.removeAll(oldPool); - return newSet; - } - - private void initPoolProcessThread() { - - /* - * Task that runs to check if we need to start a pool processing job. - * if so we create a pool reconciliation job and find out of all the - * expected containers are on the nodes. - */ - Runnable processPools = () -> { - while (runnable.get()) { - // Make sure that we don't have any new pools. - refreshPools(); - while (inProgressPoolList.size() < inProgressPoolMaxCount) { - PeriodicPool pool = poolQueue.poll(); - if (pool != null) { - if (pool.getLastProcessedTime() + this.containerProcessingLag > - Time.monotonicNow()) { - LOG.debug("Not within the time window for processing: {}", - pool.getPoolName()); - // we might over sleep here, not a big deal. - sleepUninterruptibly(this.containerProcessingLag, - TimeUnit.MILLISECONDS); - } - LOG.debug("Adding pool {} to container processing queue", - pool.getPoolName()); - InProgressPool inProgressPool = new InProgressPool(maxPoolWait, - pool, this.nodeManager, this.poolManager, this.executorService); - inProgressPool.startReconciliation(); - inProgressPoolListLock.writeLock().lock(); - try { - inProgressPoolList.add(inProgressPool); - } finally { - inProgressPoolListLock.writeLock().unlock(); - } - poolProcessCount++; - } else { - break; - } - } - sleepUninterruptibly(this.maxPoolWait, TimeUnit.MILLISECONDS); - inProgressPoolListLock.readLock().lock(); - try { - for (InProgressPool inProgressPool : inProgressPoolList) { - inProgressPool.finalizeReconciliation(); - poolQueue.add(inProgressPool.getPool()); - } - } finally { - inProgressPoolListLock.readLock().unlock(); - } - inProgressPoolListLock.writeLock().lock(); - try { - inProgressPoolList.clear(); - } finally { - inProgressPoolListLock.writeLock().unlock(); - } - } - }; - - // We will have only one thread for pool processing. - Thread poolProcessThread = new Thread(processPools); - poolProcessThread.setDaemon(true); - poolProcessThread.setName("Pool replica thread"); - poolProcessThread.setUncaughtExceptionHandler((Thread t, Throwable e) -> { - // Let us just restart this thread after logging a critical error. - // if this thread is not running we cannot handle commands from SCM. - LOG.error("Critical Error : Pool replica thread encountered an " + - "error. 
Thread: {} Error Count : {}", t.toString(), e, - threadFaultCount.incrementAndGet()); - poolProcessThread.start(); - // TODO : Add a config to restrict how many times we will restart this - // thread in a single session. - }); - poolProcessThread.start(); - } - - /** - * Adds a container report to appropriate inProgress Pool. - * @param containerReport -- Container report for a specific container from - * a datanode. - */ - public void handleContainerReport(DatanodeDetails datanodeDetails, - ContainerReportsProto containerReport) { - inProgressPoolListLock.readLock().lock(); - try { - String poolName = poolManager.getNodePool(datanodeDetails); - for (InProgressPool ppool : inProgressPoolList) { - if (ppool.getPoolName().equalsIgnoreCase(poolName)) { - ppool.handleContainerReport(datanodeDetails, containerReport); - return; - } - } - // TODO: Decide if we can do anything else with this report. - LOG.debug("Discarding the container report for pool {}. " + - "That pool is not currently in the pool reconciliation process." + - " Container Name: {}", poolName, datanodeDetails); - } catch (SCMException e) { - LOG.warn("Skipping processing container report from datanode {}, " - + "cause: failed to get the corresponding node pool", - datanodeDetails.toString(), e); - } finally { - inProgressPoolListLock.readLock().unlock(); - } - } - - /** - * Get in process pool list, used for testing. - * @return List of InProgressPool - */ - @VisibleForTesting - public List getInProcessPoolList() { - return inProgressPoolList; - } - - /** - * Shutdown the Container Replication Manager. - * @throws IOException if an I/O error occurs - */ - @Override - public void close() throws IOException { - setExit(); - HadoopExecutors.shutdown(executorService, LOG, 5, TimeUnit.SECONDS); - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/InProgressPool.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/InProgressPool.java deleted file mode 100644 index 4b547311da..0000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/InProgressPool.java +++ /dev/null @@ -1,255 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
    - * http://www.apache.org/licenses/LICENSE-2.0 - *
    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.container.replication; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.hdds.scm.node.NodePoolManager; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerInfo; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; -import org.apache.hadoop.util.Time; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.List; -import java.util.Map; -import java.util.UUID; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.function.Predicate; -import java.util.stream.Collectors; - -/** - * These are pools that are actively checking for replication status of the - * containers. - */ -public final class InProgressPool { - public static final Logger LOG = - LoggerFactory.getLogger(InProgressPool.class); - - private final PeriodicPool pool; - private final NodeManager nodeManager; - private final NodePoolManager poolManager; - private final ExecutorService executorService; - private final Map containerCountMap; - private final Map processedNodeSet; - private final long startTime; - private ProgressStatus status; - private AtomicInteger nodeCount; - private AtomicInteger nodeProcessed; - private AtomicInteger containerProcessedCount; - private long maxWaitTime; - /** - * Constructs an pool that is being processed. - * @param maxWaitTime - Maximum wait time in milliseconds. - * @param pool - Pool that we are working against - * @param nodeManager - Nodemanager - * @param poolManager - pool manager - * @param executorService - Shared Executor service. - */ - InProgressPool(long maxWaitTime, PeriodicPool pool, - NodeManager nodeManager, NodePoolManager poolManager, - ExecutorService executorService) { - Preconditions.checkNotNull(pool); - Preconditions.checkNotNull(nodeManager); - Preconditions.checkNotNull(poolManager); - Preconditions.checkNotNull(executorService); - Preconditions.checkArgument(maxWaitTime > 0); - this.pool = pool; - this.nodeManager = nodeManager; - this.poolManager = poolManager; - this.executorService = executorService; - this.containerCountMap = new ConcurrentHashMap<>(); - this.processedNodeSet = new ConcurrentHashMap<>(); - this.maxWaitTime = maxWaitTime; - startTime = Time.monotonicNow(); - } - - /** - * Returns periodic pool. - * - * @return PeriodicPool - */ - public PeriodicPool getPool() { - return pool; - } - - /** - * We are done if we have got reports from all nodes or we have - * done waiting for the specified time. - * - * @return true if we are done, false otherwise. - */ - public boolean isDone() { - return (nodeCount.get() == nodeProcessed.get()) || - (this.startTime + this.maxWaitTime) > Time.monotonicNow(); - } - - /** - * Gets the number of containers processed. 
- * - * @return int - */ - public int getContainerProcessedCount() { - return containerProcessedCount.get(); - } - - /** - * Returns the start time in milliseconds. - * - * @return - Start Time. - */ - public long getStartTime() { - return startTime; - } - - /** - * Get the number of nodes in this pool. - * - * @return - node count - */ - public int getNodeCount() { - return nodeCount.get(); - } - - /** - * Get the number of nodes that we have already processed container reports - * from. - * - * @return - Processed count. - */ - public int getNodeProcessed() { - return nodeProcessed.get(); - } - - /** - * Returns the current status. - * - * @return Status - */ - public ProgressStatus getStatus() { - return status; - } - - /** - * Starts the reconciliation process for all the nodes in the pool. - */ - public void startReconciliation() { - List datanodeDetailsList = - this.poolManager.getNodes(pool.getPoolName()); - if (datanodeDetailsList.size() == 0) { - LOG.error("Datanode list for {} is Empty. Pool with no nodes ? ", - pool.getPoolName()); - this.status = ProgressStatus.Error; - return; - } - - nodeProcessed = new AtomicInteger(0); - containerProcessedCount = new AtomicInteger(0); - nodeCount = new AtomicInteger(0); - this.status = ProgressStatus.InProgress; - this.getPool().setLastProcessedTime(Time.monotonicNow()); - } - - /** - * Queues a container Report for handling. This is done in a worker thread - * since decoding a container report might be compute intensive . We don't - * want to block since we have asked for bunch of container reports - * from a set of datanodes. - * - * @param containerReport - ContainerReport - */ - public void handleContainerReport(DatanodeDetails datanodeDetails, - ContainerReportsProto containerReport) { - if (status == ProgressStatus.InProgress) { - executorService.submit(processContainerReport(datanodeDetails, - containerReport)); - } else { - LOG.debug("Cannot handle container report when the pool is in {} status.", - status); - } - } - - private Runnable processContainerReport(DatanodeDetails datanodeDetails, - ContainerReportsProto reports) { - return () -> { - if (processedNodeSet.computeIfAbsent(datanodeDetails.getUuid(), - (k) -> true)) { - nodeProcessed.incrementAndGet(); - LOG.debug("Total Nodes processed : {} Node Name: {} ", nodeProcessed, - datanodeDetails.getUuid()); - for (ContainerInfo info : reports.getReportsList()) { - containerProcessedCount.incrementAndGet(); - LOG.debug("Total Containers processed: {} Container Name: {}", - containerProcessedCount.get(), info.getContainerID()); - - // Update the container map with count + 1 if the key exists or - // update the map with 1. Since this is a concurrentMap the - // computation and update is atomic. - containerCountMap.merge(info.getContainerID(), 1, Integer::sum); - } - } - }; - } - - /** - * Filter the containers based on specific rules. - * - * @param predicate -- Predicate to filter by - * @return A list of map entries. - */ - public List> filterContainer( - Predicate> predicate) { - return containerCountMap.entrySet().stream() - .filter(predicate).collect(Collectors.toList()); - } - - /** - * Used only for testing, calling this will abort container report - * processing. This is very dangerous call and should not be made by any users - */ - @VisibleForTesting - public void setDoneProcessing() { - nodeProcessed.set(nodeCount.get()); - } - - /** - * Returns the pool name. - * - * @return Name of the pool. 
- */ - String getPoolName() { - return pool.getPoolName(); - } - - public void finalizeReconciliation() { - status = ProgressStatus.Done; - //TODO: Add finalizing logic. This is where actual reconciliation happens. - } - - /** - * Current status of the computing replication status. - */ - public enum ProgressStatus { - InProgress, Done, Error - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/PeriodicPool.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/PeriodicPool.java deleted file mode 100644 index ef28aa78d0..0000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/PeriodicPool.java +++ /dev/null @@ -1,119 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
    - * http://www.apache.org/licenses/LICENSE-2.0 - *
    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.container.replication; - -import java.util.concurrent.atomic.AtomicLong; - -/** - * Periodic pool is a pool with a time stamp, this allows us to process pools - * based on a cyclic clock. - */ -public class PeriodicPool implements Comparable { - private final String poolName; - private long lastProcessedTime; - private AtomicLong totalProcessedCount; - - /** - * Constructs a periodic pool. - * - * @param poolName - Name of the pool - */ - public PeriodicPool(String poolName) { - this.poolName = poolName; - lastProcessedTime = 0; - totalProcessedCount = new AtomicLong(0); - } - - /** - * Get pool Name. - * @return PoolName - */ - public String getPoolName() { - return poolName; - } - - /** - * Compares this object with the specified object for order. Returns a - * negative integer, zero, or a positive integer as this object is less - * than, equal to, or greater than the specified object. - * - * @param o the object to be compared. - * @return a negative integer, zero, or a positive integer as this object is - * less than, equal to, or greater than the specified object. - * @throws NullPointerException if the specified object is null - * @throws ClassCastException if the specified object's type prevents it - * from being compared to this object. - */ - @Override - public int compareTo(PeriodicPool o) { - return Long.compare(this.lastProcessedTime, o.lastProcessedTime); - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - - PeriodicPool that = (PeriodicPool) o; - - return poolName.equals(that.poolName); - } - - @Override - public int hashCode() { - return poolName.hashCode(); - } - - /** - * Returns the Total Times we have processed this pool. - * - * @return processed count. - */ - public long getTotalProcessedCount() { - return totalProcessedCount.get(); - } - - /** - * Gets the last time we processed this pool. - * @return time in milliseconds - */ - public long getLastProcessedTime() { - return this.lastProcessedTime; - } - - - /** - * Sets the last processed time. - * - * @param lastProcessedTime - Long in milliseconds. - */ - - public void setLastProcessedTime(long lastProcessedTime) { - this.lastProcessedTime = lastProcessedTime; - } - - /* - * Increments the total processed count. - */ - public void incTotalProcessedCount() { - this.totalProcessedCount.incrementAndGet(); - } -} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java deleted file mode 100644 index 7bbe2efe57..0000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. 
The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.container.replication; -/* - This package contains routines that manage replication of a container. This - relies on container reports to understand the replication level of a - container - UnderReplicated, Replicated, OverReplicated -- and manages the - replication level based on that. - */ \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java index 4392633b16..72d7e946cc 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java @@ -123,12 +123,6 @@ public interface NodeManager extends StorageContainerNodeProtocol, */ SCMNodeMetric getNodeStat(DatanodeDetails datanodeDetails); - /** - * Returns the NodePoolManager associated with the NodeManager. - * @return NodePoolManager - */ - NodePoolManager getNodePoolManager(); - /** * Wait for the heartbeat is processed by NodeManager. * @return true if heartbeat has been processed. diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodePoolManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodePoolManager.java deleted file mode 100644 index 46faf9ca4d..0000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodePoolManager.java +++ /dev/null @@ -1,71 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.node; - -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; - -import java.io.Closeable; -import java.io.IOException; -import java.util.List; - -/** - * Interface that defines SCM NodePoolManager. - */ -public interface NodePoolManager extends Closeable { - - /** - * Add a node to a node pool. - * @param pool - name of the node pool. - * @param node - data node. - */ - void addNode(String pool, DatanodeDetails node) throws IOException; - - /** - * Remove a node from a node pool. 
- * @param pool - name of the node pool. - * @param node - data node. - * @throws SCMException - */ - void removeNode(String pool, DatanodeDetails node) - throws SCMException; - - /** - * Get a list of known node pools. - * @return a list of known node pool names or an empty list if not node pool - * is defined. - */ - List getNodePools(); - - /** - * Get all nodes of a node pool given the name of the node pool. - * @param pool - name of the node pool. - * @return a list of datanode ids or an empty list if the node pool was not - * found. - */ - List getNodes(String pool); - - /** - * Get the node pool name if the node has been added to a node pool. - * @param datanodeDetails - datanode ID. - * @return node pool name if it has been assigned. - * null if the node has not been assigned to any node pool yet. - */ - String getNodePool(DatanodeDetails datanodeDetails) throws SCMException; -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java index fc8b0137f3..adca8eae0c 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java @@ -53,7 +53,6 @@ import org.apache.hadoop.util.Time; import org.apache.hadoop.util.concurrent.HadoopExecutors; -import com.google.protobuf.GeneratedMessage; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -159,7 +158,6 @@ public class SCMNodeManager private ObjectName nmInfoBean; // Node pool manager. - private final SCMNodePoolManager nodePoolManager; private final StorageContainerManager scmManager; public static final Event DATANODE_COMMAND = @@ -210,7 +208,6 @@ public SCMNodeManager(OzoneConfiguration conf, String clusterID, registerMXBean(); - this.nodePoolManager = new SCMNodePoolManager(conf); this.scmManager = scmManager; } @@ -682,7 +679,6 @@ private void updateNodeStat(UUID dnId, NodeReportProto nodeReport) { @Override public void close() throws IOException { unregisterMXBean(); - nodePoolManager.close(); executorService.shutdown(); try { if (!executorService.awaitTermination(5, TimeUnit.SECONDS)) { @@ -760,20 +756,6 @@ public RegisteredCommand register( LOG.info("Leaving startup chill mode."); } - // TODO: define node pool policy for non-default node pool. - // For now, all nodes are added to the "DefaultNodePool" upon registration - // if it has not been added to any node pool yet. - try { - if (nodePoolManager.getNodePool(datanodeDetails) == null) { - nodePoolManager.addNode(SCMNodePoolManager.DEFAULT_NODEPOOL, - datanodeDetails); - } - } catch (IOException e) { - // TODO: make sure registration failure is handled correctly. 
- return RegisteredCommand.newBuilder() - .setErrorCode(ErrorCode.errorNodeNotPermitted) - .build(); - } // Updating Node Report, as registration is successful updateNodeStat(datanodeDetails.getUuid(), nodeReport); LOG.info("Data node with ID: {} Registered.", @@ -859,11 +841,6 @@ public SCMNodeMetric getNodeStat(DatanodeDetails datanodeDetails) { return new SCMNodeMetric(nodeStats.get(datanodeDetails.getUuid())); } - @Override - public NodePoolManager getNodePoolManager() { - return nodePoolManager; - } - @Override public Map getNodeCount() { Map nodeCountMap = new HashMap(); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodePoolManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodePoolManager.java deleted file mode 100644 index faf330ea1d..0000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodePoolManager.java +++ /dev/null @@ -1,269 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.node; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.utils.MetadataStore; -import org.apache.hadoop.utils.MetadataStoreBuilder; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Set; -import java.util.concurrent.locks.ReadWriteLock; -import java.util.concurrent.locks.ReentrantReadWriteLock; -import java.util.stream.Collectors; - -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_DB_CACHE_SIZE_DEFAULT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_DB_CACHE_SIZE_MB; -import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes - .FAILED_TO_FIND_NODE_IN_POOL; -import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes - .FAILED_TO_LOAD_NODEPOOL; -import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath; -import static org.apache.hadoop.ozone.OzoneConsts.NODEPOOL_DB; - -/** - * SCM node pool manager that manges node pools. 
- */ -public final class SCMNodePoolManager implements NodePoolManager { - - private static final Logger LOG = - LoggerFactory.getLogger(SCMNodePoolManager.class); - private static final List EMPTY_NODE_LIST = - new ArrayList<>(); - private static final List EMPTY_NODEPOOL_LIST = new ArrayList<>(); - public static final String DEFAULT_NODEPOOL = "DefaultNodePool"; - - // DB that saves the node to node pool mapping. - private MetadataStore nodePoolStore; - - // In-memory node pool to nodes mapping - private HashMap> nodePools; - - // Read-write lock for nodepool operations - private ReadWriteLock lock; - - /** - * Construct SCMNodePoolManager class that manages node to node pool mapping. - * @param conf - configuration. - * @throws IOException - */ - public SCMNodePoolManager(final OzoneConfiguration conf) - throws IOException { - final int cacheSize = conf.getInt(OZONE_SCM_DB_CACHE_SIZE_MB, - OZONE_SCM_DB_CACHE_SIZE_DEFAULT); - File metaDir = getOzoneMetaDirPath(conf); - String scmMetaDataDir = metaDir.getPath(); - File nodePoolDBPath = new File(scmMetaDataDir, NODEPOOL_DB); - nodePoolStore = MetadataStoreBuilder.newBuilder() - .setConf(conf) - .setDbFile(nodePoolDBPath) - .setCacheSize(cacheSize * OzoneConsts.MB) - .build(); - nodePools = new HashMap<>(); - lock = new ReentrantReadWriteLock(); - init(); - } - - /** - * Initialize the in-memory store based on persist store from level db. - * No lock is needed as init() is only invoked by constructor. - * @throws SCMException - */ - private void init() throws SCMException { - try { - nodePoolStore.iterate(null, (key, value) -> { - try { - DatanodeDetails nodeId = DatanodeDetails.getFromProtoBuf( - HddsProtos.DatanodeDetailsProto.PARSER.parseFrom(key)); - String poolName = DFSUtil.bytes2String(value); - - Set nodePool = null; - if (nodePools.containsKey(poolName)) { - nodePool = nodePools.get(poolName); - } else { - nodePool = new HashSet<>(); - nodePools.put(poolName, nodePool); - } - nodePool.add(nodeId); - if (LOG.isDebugEnabled()) { - LOG.debug("Adding node: {} to node pool: {}", - nodeId, poolName); - } - } catch (IOException e) { - LOG.warn("Can't add a datanode to node pool, continue next..."); - } - return true; - }); - } catch (IOException e) { - LOG.error("Loading node pool error " + e); - throw new SCMException("Failed to load node pool", - FAILED_TO_LOAD_NODEPOOL); - } - } - - /** - * Add a datanode to a node pool. - * @param pool - name of the node pool. - * @param node - name of the datanode. - */ - @Override - public void addNode(final String pool, final DatanodeDetails node) - throws IOException { - Preconditions.checkNotNull(pool, "pool name is null"); - Preconditions.checkNotNull(node, "node is null"); - lock.writeLock().lock(); - try { - // add to the persistent store - nodePoolStore.put(node.getProtoBufMessage().toByteArray(), - DFSUtil.string2Bytes(pool)); - - // add to the in-memory store - Set nodePool = null; - if (nodePools.containsKey(pool)) { - nodePool = nodePools.get(pool); - } else { - nodePool = new HashSet(); - nodePools.put(pool, nodePool); - } - nodePool.add(node); - } finally { - lock.writeLock().unlock(); - } - } - - /** - * Remove a datanode from a node pool. - * @param pool - name of the node pool. - * @param node - datanode id. 
- * @throws SCMException - */ - @Override - public void removeNode(final String pool, final DatanodeDetails node) - throws SCMException { - Preconditions.checkNotNull(pool, "pool name is null"); - Preconditions.checkNotNull(node, "node is null"); - lock.writeLock().lock(); - try { - // Remove from the persistent store - byte[] kName = node.getProtoBufMessage().toByteArray(); - byte[] kData = nodePoolStore.get(kName); - if (kData == null) { - throw new SCMException(String.format("Unable to find node %s from" + - " pool %s in DB.", DFSUtil.bytes2String(kName), pool), - FAILED_TO_FIND_NODE_IN_POOL); - } - nodePoolStore.delete(kName); - - // Remove from the in-memory store - if (nodePools.containsKey(pool)) { - Set nodePool = nodePools.get(pool); - nodePool.remove(node); - } else { - throw new SCMException(String.format("Unable to find node %s from" + - " pool %s in MAP.", DFSUtil.bytes2String(kName), pool), - FAILED_TO_FIND_NODE_IN_POOL); - } - } catch (IOException e) { - throw new SCMException("Failed to remove node " + node.toString() - + " from node pool " + pool, e, - SCMException.ResultCodes.IO_EXCEPTION); - } finally { - lock.writeLock().unlock(); - } - } - - /** - * Get all the node pools. - * @return all the node pools. - */ - @Override - public List getNodePools() { - lock.readLock().lock(); - try { - if (!nodePools.isEmpty()) { - return nodePools.keySet().stream().collect(Collectors.toList()); - } else { - return EMPTY_NODEPOOL_LIST; - } - } finally { - lock.readLock().unlock(); - } - } - - /** - * Get all datanodes of a specific node pool. - * @param pool - name of the node pool. - * @return all datanodes of the specified node pool. - */ - @Override - public List getNodes(final String pool) { - Preconditions.checkNotNull(pool, "pool name is null"); - if (nodePools.containsKey(pool)) { - return nodePools.get(pool).stream().collect(Collectors.toList()); - } else { - return EMPTY_NODE_LIST; - } - } - - /** - * Get the node pool name if the node has been added to a node pool. - * @param datanodeDetails - datanode ID. - * @return node pool name if it has been assigned. - * null if the node has not been assigned to any node pool yet. - * TODO: Put this in a in-memory map if performance is an issue. - */ - @Override - public String getNodePool(final DatanodeDetails datanodeDetails) - throws SCMException { - Preconditions.checkNotNull(datanodeDetails, "node is null"); - try { - byte[] result = nodePoolStore.get( - datanodeDetails.getProtoBufMessage().toByteArray()); - return result == null ? null : DFSUtil.bytes2String(result); - } catch (IOException e) { - throw new SCMException("Failed to get node pool for node " - + datanodeDetails.toString(), e, - SCMException.ResultCodes.IO_EXCEPTION); - } - } - - /** - * Close node pool level db store. 
- * @throws IOException - */ - @Override - public void close() throws IOException { - nodePoolStore.close(); - } -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java index 8c59462b40..80b5d6e182 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java @@ -19,7 +19,6 @@ import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat; import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.hdds.scm.node.NodePoolManager; import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; @@ -273,11 +272,6 @@ public SCMNodeMetric getNodeStat(DatanodeDetails datanodeDetails) { return new SCMNodeMetric(nodeMetricMap.get(datanodeDetails.getUuid())); } - @Override - public NodePoolManager getNodePoolManager() { - return Mockito.mock(NodePoolManager.class); - } - /** * Used for testing. * diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodePoolManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodePoolManager.java deleted file mode 100644 index 8f412dedda..0000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodePoolManager.java +++ /dev/null @@ -1,160 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hdds.scm.node; - -import org.apache.commons.collections.ListUtils; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdds.scm.container.placement.algorithms - .ContainerPlacementPolicy; -import org.apache.hadoop.hdds.scm.container.placement.algorithms - .SCMContainerPlacementCapacity; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.test.PathUtils; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.IOException; -import java.util.Collections; -import java.util.List; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; - -/** - * Test for SCM node pool manager. - */ -public class TestSCMNodePoolManager { - private static final Logger LOG = - LoggerFactory.getLogger(TestSCMNodePoolManager.class); - - @Rule - public ExpectedException thrown = ExpectedException.none(); - - private final File testDir = PathUtils.getTestDir( - TestSCMNodePoolManager.class); - - SCMNodePoolManager createNodePoolManager(OzoneConfiguration conf) - throws IOException { - conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, - testDir.getAbsolutePath()); - conf.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY, - SCMContainerPlacementCapacity.class, ContainerPlacementPolicy.class); - return new SCMNodePoolManager(conf); - } - - /** - * Test default node pool. - * - * @throws IOException - */ - @Test - public void testDefaultNodePool() throws IOException { - OzoneConfiguration conf = new OzoneConfiguration(); - try { - final String defaultPool = "DefaultPool"; - NodePoolManager npMgr = createNodePoolManager(conf); - - final int nodeCount = 4; - final List nodes = TestUtils - .getListOfDatanodeDetails(nodeCount); - assertEquals(0, npMgr.getNodePools().size()); - for (DatanodeDetails node: nodes) { - npMgr.addNode(defaultPool, node); - } - List nodesRetrieved = npMgr.getNodes(defaultPool); - assertEquals(nodeCount, nodesRetrieved.size()); - assertTwoDatanodeListsEqual(nodes, nodesRetrieved); - - DatanodeDetails nodeRemoved = nodes.remove(2); - npMgr.removeNode(defaultPool, nodeRemoved); - List nodesAfterRemove = npMgr.getNodes(defaultPool); - assertTwoDatanodeListsEqual(nodes, nodesAfterRemove); - - List nonExistSet = npMgr.getNodes("NonExistSet"); - assertEquals(0, nonExistSet.size()); - } finally { - FileUtil.fullyDelete(testDir); - } - } - - - /** - * Test default node pool reload. 
- * - * @throws IOException - */ - @Test - public void testDefaultNodePoolReload() throws IOException { - OzoneConfiguration conf = new OzoneConfiguration(); - final String defaultPool = "DefaultPool"; - final int nodeCount = 4; - final List nodes = TestUtils - .getListOfDatanodeDetails(nodeCount); - - try { - try { - SCMNodePoolManager npMgr = createNodePoolManager(conf); - assertEquals(0, npMgr.getNodePools().size()); - for (DatanodeDetails node : nodes) { - npMgr.addNode(defaultPool, node); - } - List nodesRetrieved = npMgr.getNodes(defaultPool); - assertEquals(nodeCount, nodesRetrieved.size()); - assertTwoDatanodeListsEqual(nodes, nodesRetrieved); - npMgr.close(); - } finally { - LOG.info("testDefaultNodePoolReload: Finish adding nodes to pool" + - " and close."); - } - - // try reload with a new NodePoolManager instance - try { - SCMNodePoolManager npMgr = createNodePoolManager(conf); - List nodesRetrieved = npMgr.getNodes(defaultPool); - assertEquals(nodeCount, nodesRetrieved.size()); - assertTwoDatanodeListsEqual(nodes, nodesRetrieved); - } finally { - LOG.info("testDefaultNodePoolReload: Finish reloading node pool."); - } - } finally { - FileUtil.fullyDelete(testDir); - } - } - - /** - * Compare and verify that two datanode lists are equal. - * @param list1 - datanode list 1. - * @param list2 - datanode list 2. - */ - private void assertTwoDatanodeListsEqual(List list1, - List list2) { - assertEquals(list1.size(), list2.size()); - Collections.sort(list1); - Collections.sort(list2); - assertTrue(ListUtils.isEqualList(list1, list2)); - } -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java index 072d821247..1a4dcd7ad2 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java @@ -21,7 +21,6 @@ import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat; import org.apache.hadoop.hdds.scm.node.CommandQueue; import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.hdds.scm.node.NodePoolManager; import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; @@ -201,10 +200,6 @@ public SCMNodeMetric getNodeStat(DatanodeDetails dd) { return null; } - @Override - public NodePoolManager getNodePoolManager() { - return Mockito.mock(NodePoolManager.class); - } /** * Wait for the heartbeat is processed by NodeManager. diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodePoolManagerMock.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodePoolManagerMock.java deleted file mode 100644 index ffcd752e84..0000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodePoolManagerMock.java +++ /dev/null @@ -1,133 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. 
The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.container.testutils; - -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.scm.node.NodePoolManager; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; - -/** - * Pool Manager replication mock. - */ -public class ReplicationNodePoolManagerMock implements NodePoolManager { - - private final Map nodeMemberShip; - - /** - * A node pool manager for testing. - */ - public ReplicationNodePoolManagerMock() { - nodeMemberShip = new HashMap<>(); - } - - /** - * Add a node to a node pool. - * - * @param pool - name of the node pool. - * @param node - data node. - */ - @Override - public void addNode(String pool, DatanodeDetails node) { - nodeMemberShip.put(node, pool); - } - - /** - * Remove a node from a node pool. - * - * @param pool - name of the node pool. - * @param node - data node. - * @throws SCMException - */ - @Override - public void removeNode(String pool, DatanodeDetails node) - throws SCMException { - nodeMemberShip.remove(node); - - } - - /** - * Get a list of known node pools. - * - * @return a list of known node pool names or an empty list if not node pool - * is defined. - */ - @Override - public List getNodePools() { - Set poolSet = new HashSet<>(); - for (Map.Entry entry : nodeMemberShip.entrySet()) { - poolSet.add(entry.getValue()); - } - return new ArrayList<>(poolSet); - - } - - /** - * Get all nodes of a node pool given the name of the node pool. - * - * @param pool - name of the node pool. - * @return a list of datanode ids or an empty list if the node pool was not - * found. - */ - @Override - public List getNodes(String pool) { - Set datanodeSet = new HashSet<>(); - for (Map.Entry entry : nodeMemberShip.entrySet()) { - if (entry.getValue().equals(pool)) { - datanodeSet.add(entry.getKey()); - } - } - return new ArrayList<>(datanodeSet); - } - - /** - * Get the node pool name if the node has been added to a node pool. - * - * @param datanodeDetails DatanodeDetails. - * @return node pool name if it has been assigned. null if the node has not - * been assigned to any node pool yet. - */ - @Override - public String getNodePool(DatanodeDetails datanodeDetails) { - return nodeMemberShip.get(datanodeDetails); - } - - /** - * Closes this stream and releases any system resources associated - * with it. If the stream is already closed then invoking this - * method has no effect. - *
- *
    As noted in {@link AutoCloseable#close()}, cases where the - * close may fail require careful attention. It is strongly advised - * to relinquish the underlying resources and to internally - * mark the {@code Closeable} as closed, prior to throwing - * the {@code IOException}. - * - * @throws IOException if an I/O error occurs - */ - @Override - public void close() throws IOException { - - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java index 4d70af84a2..b4ed2b12c2 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java @@ -51,12 +51,9 @@ import java.util.HashMap; import java.util.UUID; -import static org.apache.hadoop.ozone.OzoneConsts.BLOCK_DB; import static org.apache.hadoop.ozone.OzoneConsts.SCM_CONTAINER_DB; import static org.apache.hadoop.ozone.OzoneConsts.KB; -import static org.apache.hadoop.ozone.OzoneConsts.NODEPOOL_DB; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; /** * This class tests the CLI that transforms container into SQLite DB files. @@ -176,34 +173,6 @@ public void shutdown() throws InterruptedException { } } - @Test - public void testConvertNodepoolDB() throws Exception { - String dbOutPath = GenericTestUtils.getTempPath( - UUID.randomUUID() + "/out_sql.db"); - String dbRootPath = conf.get(OzoneConfigKeys.OZONE_METADATA_DIRS); - String dbPath = dbRootPath + "/" + NODEPOOL_DB; - String[] args = {"-p", dbPath, "-o", dbOutPath}; - - cli.run(args); - - // verify the sqlite db - HashMap expectedPool = new HashMap<>(); - for (DatanodeDetails dnid : nodeManager.getAllNodes()) { - expectedPool.put(dnid.getUuidString(), "DefaultNodePool"); - } - Connection conn = connectDB(dbOutPath); - String sql = "SELECT * FROM nodePool"; - ResultSet rs = executeQuery(conn, sql); - while(rs.next()) { - String datanodeUUID = rs.getString("datanodeUUID"); - String poolName = rs.getString("poolName"); - assertTrue(expectedPool.remove(datanodeUUID).equals(poolName)); - } - assertEquals(0, expectedPool.size()); - - Files.delete(Paths.get(dbOutPath)); - } - @Test public void testConvertContainerDB() throws Exception { String dbOutPath = GenericTestUtils.getTempPath( diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java index 2bd43fb93a..edc0d7b597 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java @@ -19,7 +19,6 @@ import com.google.common.base.Preconditions; import com.google.common.primitives.Longs; -import com.google.protobuf.ByteString; import org.apache.commons.cli.BasicParser; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.Option; @@ -60,13 +59,11 @@ import java.util.HashSet; import java.util.Set; -import static org.apache.hadoop.ozone.OzoneConsts.BLOCK_DB; import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_SUFFIX; import static org.apache.hadoop.ozone.OzoneConsts.KSM_DB_NAME; import static org.apache.hadoop.ozone.OzoneConsts.KSM_USER_PREFIX; import static org.apache.hadoop.ozone.OzoneConsts.KSM_BUCKET_PREFIX; import static 
org.apache.hadoop.ozone.OzoneConsts.KSM_VOLUME_PREFIX; -import static org.apache.hadoop.ozone.OzoneConsts.NODEPOOL_DB; import static org.apache.hadoop.ozone.OzoneConsts.OPEN_CONTAINERS_DB; /** @@ -111,15 +108,6 @@ public class SQLCLI extends Configured implements Tool { private static final String INSERT_CONTAINER_MEMBERS = "INSERT INTO containerMembers (containerName, datanodeUUID) " + "VALUES (\"%s\", \"%s\")"; - // for nodepool.db - private static final String CREATE_NODE_POOL = - "CREATE TABLE nodePool (" + - "datanodeUUID TEXT NOT NULL," + - "poolName TEXT NOT NULL," + - "PRIMARY KEY(datanodeUUID, poolName))"; - private static final String INSERT_NODE_POOL = - "INSERT INTO nodePool (datanodeUUID, poolName) " + - "VALUES (\"%s\", \"%s\")"; // and reuse CREATE_DATANODE_INFO and INSERT_DATANODE_INFO // for openContainer.db private static final String CREATE_OPEN_CONTAINER = @@ -285,9 +273,6 @@ public int run(String[] args) throws Exception { if (dbName.toString().endsWith(CONTAINER_DB_SUFFIX)) { LOG.info("Converting container DB"); convertContainerDB(dbPath, outPath); - } else if (dbName.toString().equals(NODEPOOL_DB)) { - LOG.info("Converting node pool DB"); - convertNodePoolDB(dbPath, outPath); } else if (dbName.toString().equals(OPEN_CONTAINERS_DB)) { LOG.info("Converting open container DB"); convertOpenContainerDB(dbPath, outPath); @@ -543,66 +528,7 @@ private void insertContainerDB(Connection conn, long containerID, } LOG.info("Insertion completed."); } - /** - * Converts nodePool.db to sqlite. The schema of sql db: - * two tables, nodePool and datanodeInfo (the same datanode Info as for - * container.db). - * - * nodePool - * --------------------------------------------------------- - * datanodeUUID* | poolName* - * --------------------------------------------------------- - * - * datanodeInfo: - * --------------------------------------------------------- - * hostname | datanodeUUid* | xferPort | ipcPort - * --------------------------------------------------------- - * - * -------------------------------- - * |containerPort - * -------------------------------- - * - * @param dbPath path to container db. - * @param outPath path to output sqlite - * @throws IOException throws exception. 
- */ - private void convertNodePoolDB(Path dbPath, Path outPath) throws Exception { - LOG.info("Create table for sql node pool db."); - File dbFile = dbPath.toFile(); - try (MetadataStore dbStore = MetadataStoreBuilder.newBuilder() - .setConf(conf).setDbFile(dbFile).build(); - Connection conn = connectDB(outPath.toString())) { - executeSQL(conn, CREATE_NODE_POOL); - executeSQL(conn, CREATE_DATANODE_INFO); - dbStore.iterate(null, (key, value) -> { - DatanodeDetails nodeId = DatanodeDetails - .getFromProtoBuf(HddsProtos.DatanodeDetailsProto - .PARSER.parseFrom(key)); - String blockPool = DFSUtil.bytes2String(value); - try { - insertNodePoolDB(conn, blockPool, nodeId); - return true; - } catch (SQLException e) { - throw new IOException(e); - } - }); - } - } - - private void insertNodePoolDB(Connection conn, String blockPool, - DatanodeDetails datanodeDetails) throws SQLException { - String insertNodePool = String.format(INSERT_NODE_POOL, - datanodeDetails.getUuidString(), blockPool); - executeSQL(conn, insertNodePool); - - String insertDatanodeDetails = String - .format(INSERT_DATANODE_INFO, datanodeDetails.getHostName(), - datanodeDetails.getUuidString(), datanodeDetails.getIpAddress(), - datanodeDetails.getPort(DatanodeDetails.Port.Name.STANDALONE) - .getValue()); - executeSQL(conn, insertDatanodeDetails); - } /** * Convert openContainer.db to sqlite db file. This is rather simple db, From 0d6fe5f36be5b19aab89d995866e526c5feec758 Mon Sep 17 00:00:00 2001 From: Xiaoyu Yao Date: Wed, 27 Jun 2018 13:25:45 -0700 Subject: [PATCH 61/70] Revert "HDDS-194. Remove NodePoolManager and node pool handling from SCM. Contributed by Elek Marton" This reverts commit aaf03cc459a34af284f9735453aefd4ddb430d67. --- .../apache/hadoop/hdds/scm/ScmConfigKeys.java | 26 ++ .../org/apache/hadoop/ozone/OzoneConsts.java | 1 + .../src/main/resources/ozone-default.xml | 47 +++ .../replication/ReplicationQueue.java | 78 ---- .../replication/ReplicationReqMsg.java | 107 ------ .../container/replication/package-info.java | 23 -- .../replication/TestReplicationQueue.java | 134 ------- .../container/replication/package-info.java | 23 -- .../hdds/scm/container/ContainerMapping.java | 10 +- .../replication/ContainerSupervisor.java | 340 ++++++++++++++++++ .../container/replication/InProgressPool.java | 255 +++++++++++++ .../container/replication/PeriodicPool.java | 119 ++++++ .../container/replication/package-info.java | 23 ++ .../hadoop/hdds/scm/node/NodeManager.java | 6 + .../hadoop/hdds/scm/node/NodePoolManager.java | 71 ++++ .../hadoop/hdds/scm/node/SCMNodeManager.java | 23 ++ .../hdds/scm/node/SCMNodePoolManager.java | 269 ++++++++++++++ .../hdds/scm/container/MockNodeManager.java | 6 + .../hdds/scm/node/TestSCMNodePoolManager.java | 160 +++++++++ .../testutils/ReplicationNodeManagerMock.java | 5 + .../ReplicationNodePoolManagerMock.java | 133 +++++++ .../hadoop/ozone/scm/TestContainerSQLCli.java | 31 ++ .../apache/hadoop/ozone/scm/cli/SQLCLI.java | 74 ++++ 23 files changed, 1596 insertions(+), 368 deletions(-) delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationQueue.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationReqMsg.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/package-info.java delete mode 100644 
hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationQueue.java delete mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerSupervisor.java create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/InProgressPool.java create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/PeriodicPool.java create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodePoolManager.java create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodePoolManager.java create mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodePoolManager.java create mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodePoolManagerMock.java diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java index df6fbf0c75..85407e65ce 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java @@ -243,6 +243,32 @@ public final class ScmConfigKeys { public static final String OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT_DEFAULT = "60s"; + /** + * Don't start processing a pool if we have not had a minimum number of + * seconds from the last processing. + */ + public static final String OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL = + "ozone.scm.container.report.processing.interval"; + public static final String + OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL_DEFAULT = "60s"; + + /** + * This determines the total number of pools to be processed in parallel. + */ + public static final String OZONE_SCM_MAX_NODEPOOL_PROCESSING_THREADS = + "ozone.scm.max.nodepool.processing.threads"; + public static final int OZONE_SCM_MAX_NODEPOOL_PROCESSING_THREADS_DEFAULT = 1; + /** + * These 2 settings control the number of threads in executor pool and time + * outs for thw container reports from all nodes. 
+ */ + public static final String OZONE_SCM_MAX_CONTAINER_REPORT_THREADS = + "ozone.scm.max.container.report.threads"; + public static final int OZONE_SCM_MAX_CONTAINER_REPORT_THREADS_DEFAULT = 100; + public static final String OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT = + "ozone.scm.container.reports.wait.timeout"; + public static final String OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT_DEFAULT = + "5m"; public static final String OZONE_SCM_BLOCK_DELETION_MAX_RETRY = "ozone.scm.block.deletion.max.retry"; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java index 08a5ffdb87..c40dc8e4ee 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java @@ -91,6 +91,7 @@ public final class OzoneConsts { public static final String SCM_CONTAINER_DB = "scm-" + CONTAINER_DB_SUFFIX; public static final String DN_CONTAINER_DB = "-dn-"+ CONTAINER_DB_SUFFIX; public static final String BLOCK_DB = "block.db"; + public static final String NODEPOOL_DB = "nodepool.db"; public static final String OPEN_CONTAINERS_DB = "openContainers.db"; public static final String DELETED_BLOCK_DB = "deletedBlock.db"; public static final String KSM_DB_NAME = "ksm.db"; diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index 25365c8d9d..7a91610c65 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -571,6 +571,25 @@ allocation. + + ozone.scm.container.report.processing.interval + 60s + OZONE, PERFORMANCE + Time interval for scm to process container reports + for a node pool. Scm handles node pool reports in a cyclic clock + manner, it fetches pools periodically with this time interval. + + + + ozone.scm.container.reports.wait.timeout + 300s + OZONE, PERFORMANCE, MANAGEMENT + Maximum time to wait in seconds for processing all container + reports from + a node pool. It determines the timeout for a + node pool report. + + ozone.scm.container.size.gb 5 @@ -773,6 +792,17 @@ The keytab file for Kerberos authentication in SCM. + + ozone.scm.max.container.report.threads + 100 + OZONE, PERFORMANCE + + Maximum number of threads to process container reports in scm. + Each container report from a data node is processed by scm in a worker + thread, fetched from a thread pool. This property is used to control the + maximum size of the thread pool. + + ozone.scm.max.hb.count.to.process 5000 @@ -784,6 +814,14 @@ for more info. + + ozone.scm.max.nodepool.processing.threads + 1 + OZONE, MANAGEMENT, PERFORMANCE + + Number of node pools to process in parallel. + + ozone.scm.names @@ -805,6 +843,15 @@ see ozone.scm.heartbeat.thread.interval before changing this value. + + ozone.scm.max.nodepool.processing.threads + 1 + OZONE, SCM + + Controls the number of node pools that can be processed in parallel by + Container Supervisor. 
+ + ozone.trace.enabled false diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationQueue.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationQueue.java deleted file mode 100644 index b83ecf13bc..0000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationQueue.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.replication; - -import java.util.List; -import java.util.PriorityQueue; -import java.util.Queue; - -/** - * Priority queue to handle under-replicated and over replicated containers - * in ozone. ReplicationManager will consume these messages and decide - * accordingly. - */ -public class ReplicationQueue { - - private final Queue queue; - - ReplicationQueue() { - queue = new PriorityQueue<>(); - } - - public synchronized boolean add(ReplicationReqMsg repObj) { - if (this.queue.contains(repObj)) { - // Remove the earlier message and insert this one - this.queue.remove(repObj); - return this.queue.add(repObj); - } else { - return this.queue.add(repObj); - } - } - - public synchronized boolean remove(ReplicationReqMsg repObj) { - return queue.remove(repObj); - } - - /** - * Retrieves, but does not remove, the head of this queue, - * or returns {@code null} if this queue is empty. - * - * @return the head of this queue, or {@code null} if this queue is empty - */ - public synchronized ReplicationReqMsg peek() { - return queue.peek(); - } - - /** - * Retrieves and removes the head of this queue, - * or returns {@code null} if this queue is empty. - * - * @return the head of this queue, or {@code null} if this queue is empty - */ - public synchronized ReplicationReqMsg poll() { - return queue.poll(); - } - - public synchronized boolean removeAll(List repObjs) { - return queue.removeAll(repObjs); - } - - public int size() { - return queue.size(); - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationReqMsg.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationReqMsg.java deleted file mode 100644 index 8d26fc368d..0000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationReqMsg.java +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.replication; - -import java.io.Serializable; -import org.apache.commons.lang3.builder.EqualsBuilder; -import org.apache.commons.lang3.builder.HashCodeBuilder; -import org.apache.commons.lang3.math.NumberUtils; - -/** - * Wrapper class for hdds replication queue. Implements its natural - * ordering for priority queue. - */ -public class ReplicationReqMsg implements Comparable, - Serializable { - private final long containerId; - private final short replicationCount; - private final short expecReplicationCount; - private final long timestamp; - - public ReplicationReqMsg(long containerId, short replicationCount, - long timestamp, short expecReplicationCount) { - this.containerId = containerId; - this.replicationCount = replicationCount; - this.timestamp = timestamp; - this.expecReplicationCount = expecReplicationCount; - } - - /** - * Compares this object with the specified object for order. Returns a - * negative integer, zero, or a positive integer as this object is less - * than, equal to, or greater than the specified object. - * @param o the object to be compared. - * @return a negative integer, zero, or a positive integer as this object - * is less than, equal to, or greater than the specified object. - * @throws NullPointerException if the specified object is null - * @throws ClassCastException if the specified object's type prevents it - * from being compared to this object. 
- */ - @Override - public int compareTo(ReplicationReqMsg o) { - if (this == o) { - return 0; - } - if (o == null) { - return 1; - } - int retVal = NumberUtils - .compare(getReplicationCount() - getExpecReplicationCount(), - o.getReplicationCount() - o.getExpecReplicationCount()); - if (retVal != 0) { - return retVal; - } - return NumberUtils.compare(getTimestamp(), o.getTimestamp()); - } - - @Override - public int hashCode() { - return new HashCodeBuilder(91, 1011) - .append(getContainerId()) - .toHashCode(); - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - ReplicationReqMsg that = (ReplicationReqMsg) o; - return new EqualsBuilder().append(getContainerId(), that.getContainerId()) - .isEquals(); - } - - public long getContainerId() { - return containerId; - } - - public short getReplicationCount() { - return replicationCount; - } - - public long getTimestamp() { - return timestamp; - } - - public short getExpecReplicationCount() { - return expecReplicationCount; - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/package-info.java deleted file mode 100644 index 7f335e37c1..0000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.replication; - -/** - * Ozone Container replicaton related classes. - */ \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationQueue.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationQueue.java deleted file mode 100644 index 39c61d32a0..0000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationQueue.java +++ /dev/null @@ -1,134 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.replication; - -import java.util.Random; -import java.util.UUID; -import org.apache.hadoop.util.Time; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -/** - * Test class for ReplicationQueue. - */ -public class TestReplicationQueue { - - private ReplicationQueue replicationQueue; - private Random random; - - @Before - public void setUp() { - replicationQueue = new ReplicationQueue(); - random = new Random(); - } - - @Test - public void testDuplicateAddOp() { - long contId = random.nextLong(); - String nodeId = UUID.randomUUID().toString(); - ReplicationReqMsg obj1, obj2, obj3; - long time = Time.monotonicNow(); - obj1 = new ReplicationReqMsg(contId, (short) 2, time, (short) 3); - obj2 = new ReplicationReqMsg(contId, (short) 2, time + 1, (short) 3); - obj3 = new ReplicationReqMsg(contId, (short) 1, time+2, (short) 3); - - replicationQueue.add(obj1); - replicationQueue.add(obj2); - replicationQueue.add(obj3); - Assert.assertEquals("Should add only 1 msg as second one is duplicate", - 1, replicationQueue.size()); - ReplicationReqMsg temp = replicationQueue.poll(); - Assert.assertEquals(temp, obj3); - } - - @Test - public void testPollOp() { - long contId = random.nextLong(); - String nodeId = UUID.randomUUID().toString(); - ReplicationReqMsg msg1, msg2, msg3, msg4, msg5; - msg1 = new ReplicationReqMsg(contId, (short) 1, Time.monotonicNow(), - (short) 3); - long time = Time.monotonicNow(); - msg2 = new ReplicationReqMsg(contId + 1, (short) 4, time, (short) 3); - msg3 = new ReplicationReqMsg(contId + 2, (short) 0, time, (short) 3); - msg4 = new ReplicationReqMsg(contId, (short) 2, time, (short) 3); - // Replication message for same container but different nodeId - msg5 = new ReplicationReqMsg(contId + 1, (short) 2, time, (short) 3); - - replicationQueue.add(msg1); - replicationQueue.add(msg2); - replicationQueue.add(msg3); - replicationQueue.add(msg4); - replicationQueue.add(msg5); - Assert.assertEquals("Should have 3 objects", - 3, replicationQueue.size()); - - // Since Priority queue orders messages according to replication count, - // message with lowest replication should be first - ReplicationReqMsg temp; - temp = replicationQueue.poll(); - Assert.assertEquals("Should have 2 objects", - 2, replicationQueue.size()); - Assert.assertEquals(temp, msg3); - - temp = replicationQueue.poll(); - Assert.assertEquals("Should have 1 objects", - 1, replicationQueue.size()); - Assert.assertEquals(temp, msg5); - - // Message 2 should be ordered before message 5 as both have same replication - // number but message 2 has earlier timestamp. 
- temp = replicationQueue.poll(); - Assert.assertEquals("Should have 0 objects", - replicationQueue.size(), 0); - Assert.assertEquals(temp, msg4); - } - - @Test - public void testRemoveOp() { - long contId = random.nextLong(); - String nodeId = UUID.randomUUID().toString(); - ReplicationReqMsg obj1, obj2, obj3; - obj1 = new ReplicationReqMsg(contId, (short) 1, Time.monotonicNow(), - (short) 3); - obj2 = new ReplicationReqMsg(contId + 1, (short) 2, Time.monotonicNow(), - (short) 3); - obj3 = new ReplicationReqMsg(contId + 2, (short) 3, Time.monotonicNow(), - (short) 3); - - replicationQueue.add(obj1); - replicationQueue.add(obj2); - replicationQueue.add(obj3); - Assert.assertEquals("Should have 3 objects", - 3, replicationQueue.size()); - - replicationQueue.remove(obj3); - Assert.assertEquals("Should have 2 objects", - 2, replicationQueue.size()); - - replicationQueue.remove(obj2); - Assert.assertEquals("Should have 1 objects", - 1, replicationQueue.size()); - - replicationQueue.remove(obj1); - Assert.assertEquals("Should have 0 objects", - 0, replicationQueue.size()); - } - -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java deleted file mode 100644 index 5b1fd0f43a..0000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - * SCM Testing and Mocking Utils. - */ -package org.apache.hadoop.ozone.container.replication; -// Test classes for Replication functionality. 
\ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java index 9fd30f2ad0..b563e90e76 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java @@ -24,6 +24,7 @@ import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.container.closer.ContainerCloser; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; +import org.apache.hadoop.hdds.scm.container.replication.ContainerSupervisor; import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.hdds.scm.node.NodeManager; import org.apache.hadoop.hdds.scm.pipelines.PipelineSelector; @@ -79,6 +80,7 @@ public class ContainerMapping implements Mapping { private final PipelineSelector pipelineSelector; private final ContainerStateManager containerStateManager; private final LeaseManager containerLeaseManager; + private final ContainerSupervisor containerSupervisor; private final float containerCloseThreshold; private final ContainerCloser closer; private final long size; @@ -125,7 +127,9 @@ public ContainerMapping( OZONE_SCM_CONTAINER_SIZE_DEFAULT) * 1024 * 1024 * 1024; this.containerStateManager = new ContainerStateManager(conf, this); - + this.containerSupervisor = + new ContainerSupervisor(conf, nodeManager, + nodeManager.getNodePoolManager()); this.containerCloseThreshold = conf.getFloat( ScmConfigKeys.OZONE_SCM_CONTAINER_CLOSE_THRESHOLD, ScmConfigKeys.OZONE_SCM_CONTAINER_CLOSE_THRESHOLD_DEFAULT); @@ -403,8 +407,8 @@ public void processContainerReports(DatanodeDetails datanodeDetails, throws IOException { List containerInfos = reports.getReportsList(); - - for (StorageContainerDatanodeProtocolProtos.ContainerInfo datanodeState : + containerSupervisor.handleContainerReport(datanodeDetails, reports); + for (StorageContainerDatanodeProtocolProtos.ContainerInfo datanodeState : containerInfos) { byte[] dbKey = Longs.toByteArray(datanodeState.getContainerID()); lock.lock(); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerSupervisor.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerSupervisor.java new file mode 100644 index 0000000000..5bd05746bf --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerSupervisor.java @@ -0,0 +1,340 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.hdds.scm.container.replication; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.scm.exceptions.SCMException; +import org.apache.hadoop.hdds.scm.node.NodeManager; +import org.apache.hadoop.hdds.scm.node.NodePoolManager; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; +import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopExecutors; +import org.apache.hadoop.util.concurrent.HadoopThreadPoolExecutor; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.Closeable; +import java.io.IOException; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.PriorityQueue; +import java.util.Set; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.locks.ReadWriteLock; +import java.util.concurrent.locks.ReentrantReadWriteLock; + +import static com.google.common.util.concurrent.Uninterruptibles + .sleepUninterruptibly; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys + .OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys + .OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT_DEFAULT; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys + .OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys + .OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL_DEFAULT; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys + .OZONE_SCM_MAX_CONTAINER_REPORT_THREADS; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys + .OZONE_SCM_MAX_CONTAINER_REPORT_THREADS_DEFAULT; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys + .OZONE_SCM_MAX_NODEPOOL_PROCESSING_THREADS; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys + .OZONE_SCM_MAX_NODEPOOL_PROCESSING_THREADS_DEFAULT; + +/** + * This class takes a set of container reports that belong to a pool and then + * computes the replication levels for each container. 
+ */ +public class ContainerSupervisor implements Closeable { + public static final Logger LOG = + LoggerFactory.getLogger(ContainerSupervisor.class); + + private final NodePoolManager poolManager; + private final HashSet poolNames; + private final PriorityQueue poolQueue; + private final NodeManager nodeManager; + private final long containerProcessingLag; + private final AtomicBoolean runnable; + private final ExecutorService executorService; + private final long maxPoolWait; + private long poolProcessCount; + private final List inProgressPoolList; + private final AtomicInteger threadFaultCount; + private final int inProgressPoolMaxCount; + + private final ReadWriteLock inProgressPoolListLock; + + /** + * Returns the number of times we have processed pools. + * @return long + */ + public long getPoolProcessCount() { + return poolProcessCount; + } + + + /** + * Constructs a class that computes Replication Levels. + * + * @param conf - OzoneConfiguration + * @param nodeManager - Node Manager + * @param poolManager - Pool Manager + */ + public ContainerSupervisor(Configuration conf, NodeManager nodeManager, + NodePoolManager poolManager) { + Preconditions.checkNotNull(poolManager); + Preconditions.checkNotNull(nodeManager); + this.containerProcessingLag = + conf.getTimeDuration(OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL, + OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL_DEFAULT, + TimeUnit.SECONDS + ) * 1000; + int maxContainerReportThreads = + conf.getInt(OZONE_SCM_MAX_CONTAINER_REPORT_THREADS, + OZONE_SCM_MAX_CONTAINER_REPORT_THREADS_DEFAULT + ); + this.maxPoolWait = + conf.getTimeDuration(OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT, + OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT_DEFAULT, + TimeUnit.MILLISECONDS); + this.inProgressPoolMaxCount = conf.getInt( + OZONE_SCM_MAX_NODEPOOL_PROCESSING_THREADS, + OZONE_SCM_MAX_NODEPOOL_PROCESSING_THREADS_DEFAULT); + this.poolManager = poolManager; + this.nodeManager = nodeManager; + this.poolNames = new HashSet<>(); + this.poolQueue = new PriorityQueue<>(); + this.runnable = new AtomicBoolean(true); + this.threadFaultCount = new AtomicInteger(0); + this.executorService = newCachedThreadPool( + new ThreadFactoryBuilder().setDaemon(true) + .setNameFormat("Container Reports Processing Thread - %d") + .build(), maxContainerReportThreads); + this.inProgressPoolList = new LinkedList<>(); + this.inProgressPoolListLock = new ReentrantReadWriteLock(); + + initPoolProcessThread(); + } + + private ExecutorService newCachedThreadPool(ThreadFactory threadFactory, + int maxThreads) { + return new HadoopThreadPoolExecutor(0, maxThreads, 60L, TimeUnit.SECONDS, + new LinkedBlockingQueue<>(), threadFactory); + } + + /** + * Returns the number of pools that are under process right now. + * @return int - Number of pools that are in process. + */ + public int getInProgressPoolCount() { + return inProgressPoolList.size(); + } + + /** + * Exits the background thread. + */ + public void setExit() { + this.runnable.set(false); + } + + /** + * Adds or removes pools from names that we need to process. + * + * There are two different cases that we need to process. + * The case where some pools are being added and some times we have to + * handle cases where pools are removed. 
+ */ + private void refreshPools() { + List pools = this.poolManager.getNodePools(); + if (pools != null) { + + HashSet removedPools = + computePoolDifference(this.poolNames, new HashSet<>(pools)); + + HashSet addedPools = + computePoolDifference(new HashSet<>(pools), this.poolNames); + // TODO: Support remove pool API in pool manager so that this code + // path can be tested. This never happens in the current code base. + for (String poolName : removedPools) { + for (PeriodicPool periodicPool : poolQueue) { + if (periodicPool.getPoolName().compareTo(poolName) == 0) { + poolQueue.remove(periodicPool); + } + } + } + // Remove the pool names that we have in the list. + this.poolNames.removeAll(removedPools); + + for (String poolName : addedPools) { + poolQueue.add(new PeriodicPool(poolName)); + } + + // Add to the pool names we are tracking. + poolNames.addAll(addedPools); + } + + } + + /** + * Handle the case where pools are added. + * + * @param newPools - New Pools list + * @param oldPool - oldPool List. + */ + private HashSet computePoolDifference(HashSet newPools, + Set oldPool) { + Preconditions.checkNotNull(newPools); + Preconditions.checkNotNull(oldPool); + HashSet newSet = new HashSet<>(newPools); + newSet.removeAll(oldPool); + return newSet; + } + + private void initPoolProcessThread() { + + /* + * Task that runs to check if we need to start a pool processing job. + * if so we create a pool reconciliation job and find out of all the + * expected containers are on the nodes. + */ + Runnable processPools = () -> { + while (runnable.get()) { + // Make sure that we don't have any new pools. + refreshPools(); + while (inProgressPoolList.size() < inProgressPoolMaxCount) { + PeriodicPool pool = poolQueue.poll(); + if (pool != null) { + if (pool.getLastProcessedTime() + this.containerProcessingLag > + Time.monotonicNow()) { + LOG.debug("Not within the time window for processing: {}", + pool.getPoolName()); + // we might over sleep here, not a big deal. + sleepUninterruptibly(this.containerProcessingLag, + TimeUnit.MILLISECONDS); + } + LOG.debug("Adding pool {} to container processing queue", + pool.getPoolName()); + InProgressPool inProgressPool = new InProgressPool(maxPoolWait, + pool, this.nodeManager, this.poolManager, this.executorService); + inProgressPool.startReconciliation(); + inProgressPoolListLock.writeLock().lock(); + try { + inProgressPoolList.add(inProgressPool); + } finally { + inProgressPoolListLock.writeLock().unlock(); + } + poolProcessCount++; + } else { + break; + } + } + sleepUninterruptibly(this.maxPoolWait, TimeUnit.MILLISECONDS); + inProgressPoolListLock.readLock().lock(); + try { + for (InProgressPool inProgressPool : inProgressPoolList) { + inProgressPool.finalizeReconciliation(); + poolQueue.add(inProgressPool.getPool()); + } + } finally { + inProgressPoolListLock.readLock().unlock(); + } + inProgressPoolListLock.writeLock().lock(); + try { + inProgressPoolList.clear(); + } finally { + inProgressPoolListLock.writeLock().unlock(); + } + } + }; + + // We will have only one thread for pool processing. + Thread poolProcessThread = new Thread(processPools); + poolProcessThread.setDaemon(true); + poolProcessThread.setName("Pool replica thread"); + poolProcessThread.setUncaughtExceptionHandler((Thread t, Throwable e) -> { + // Let us just restart this thread after logging a critical error. + // if this thread is not running we cannot handle commands from SCM. + LOG.error("Critical Error : Pool replica thread encountered an " + + "error. 
Thread: {} Error Count : {}", t.toString(), e, + threadFaultCount.incrementAndGet()); + poolProcessThread.start(); + // TODO : Add a config to restrict how many times we will restart this + // thread in a single session. + }); + poolProcessThread.start(); + } + + /** + * Adds a container report to appropriate inProgress Pool. + * @param containerReport -- Container report for a specific container from + * a datanode. + */ + public void handleContainerReport(DatanodeDetails datanodeDetails, + ContainerReportsProto containerReport) { + inProgressPoolListLock.readLock().lock(); + try { + String poolName = poolManager.getNodePool(datanodeDetails); + for (InProgressPool ppool : inProgressPoolList) { + if (ppool.getPoolName().equalsIgnoreCase(poolName)) { + ppool.handleContainerReport(datanodeDetails, containerReport); + return; + } + } + // TODO: Decide if we can do anything else with this report. + LOG.debug("Discarding the container report for pool {}. " + + "That pool is not currently in the pool reconciliation process." + + " Container Name: {}", poolName, datanodeDetails); + } catch (SCMException e) { + LOG.warn("Skipping processing container report from datanode {}, " + + "cause: failed to get the corresponding node pool", + datanodeDetails.toString(), e); + } finally { + inProgressPoolListLock.readLock().unlock(); + } + } + + /** + * Get in process pool list, used for testing. + * @return List of InProgressPool + */ + @VisibleForTesting + public List getInProcessPoolList() { + return inProgressPoolList; + } + + /** + * Shutdown the Container Replication Manager. + * @throws IOException if an I/O error occurs + */ + @Override + public void close() throws IOException { + setExit(); + HadoopExecutors.shutdown(executorService, LOG, 5, TimeUnit.SECONDS); + } +} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/InProgressPool.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/InProgressPool.java new file mode 100644 index 0000000000..4b547311da --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/InProgressPool.java @@ -0,0 +1,255 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.hdds.scm.container.replication; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import org.apache.hadoop.hdds.scm.node.NodeManager; +import org.apache.hadoop.hdds.scm.node.NodePoolManager; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.ContainerInfo; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; +import org.apache.hadoop.util.Time; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Predicate; +import java.util.stream.Collectors; + +/** + * These are pools that are actively checking for replication status of the + * containers. + */ +public final class InProgressPool { + public static final Logger LOG = + LoggerFactory.getLogger(InProgressPool.class); + + private final PeriodicPool pool; + private final NodeManager nodeManager; + private final NodePoolManager poolManager; + private final ExecutorService executorService; + private final Map containerCountMap; + private final Map processedNodeSet; + private final long startTime; + private ProgressStatus status; + private AtomicInteger nodeCount; + private AtomicInteger nodeProcessed; + private AtomicInteger containerProcessedCount; + private long maxWaitTime; + /** + * Constructs an pool that is being processed. + * @param maxWaitTime - Maximum wait time in milliseconds. + * @param pool - Pool that we are working against + * @param nodeManager - Nodemanager + * @param poolManager - pool manager + * @param executorService - Shared Executor service. + */ + InProgressPool(long maxWaitTime, PeriodicPool pool, + NodeManager nodeManager, NodePoolManager poolManager, + ExecutorService executorService) { + Preconditions.checkNotNull(pool); + Preconditions.checkNotNull(nodeManager); + Preconditions.checkNotNull(poolManager); + Preconditions.checkNotNull(executorService); + Preconditions.checkArgument(maxWaitTime > 0); + this.pool = pool; + this.nodeManager = nodeManager; + this.poolManager = poolManager; + this.executorService = executorService; + this.containerCountMap = new ConcurrentHashMap<>(); + this.processedNodeSet = new ConcurrentHashMap<>(); + this.maxWaitTime = maxWaitTime; + startTime = Time.monotonicNow(); + } + + /** + * Returns periodic pool. + * + * @return PeriodicPool + */ + public PeriodicPool getPool() { + return pool; + } + + /** + * We are done if we have got reports from all nodes or we have + * done waiting for the specified time. + * + * @return true if we are done, false otherwise. + */ + public boolean isDone() { + return (nodeCount.get() == nodeProcessed.get()) || + (this.startTime + this.maxWaitTime) > Time.monotonicNow(); + } + + /** + * Gets the number of containers processed. 
+ * + * @return int + */ + public int getContainerProcessedCount() { + return containerProcessedCount.get(); + } + + /** + * Returns the start time in milliseconds. + * + * @return - Start Time. + */ + public long getStartTime() { + return startTime; + } + + /** + * Get the number of nodes in this pool. + * + * @return - node count + */ + public int getNodeCount() { + return nodeCount.get(); + } + + /** + * Get the number of nodes that we have already processed container reports + * from. + * + * @return - Processed count. + */ + public int getNodeProcessed() { + return nodeProcessed.get(); + } + + /** + * Returns the current status. + * + * @return Status + */ + public ProgressStatus getStatus() { + return status; + } + + /** + * Starts the reconciliation process for all the nodes in the pool. + */ + public void startReconciliation() { + List datanodeDetailsList = + this.poolManager.getNodes(pool.getPoolName()); + if (datanodeDetailsList.size() == 0) { + LOG.error("Datanode list for {} is Empty. Pool with no nodes ? ", + pool.getPoolName()); + this.status = ProgressStatus.Error; + return; + } + + nodeProcessed = new AtomicInteger(0); + containerProcessedCount = new AtomicInteger(0); + nodeCount = new AtomicInteger(0); + this.status = ProgressStatus.InProgress; + this.getPool().setLastProcessedTime(Time.monotonicNow()); + } + + /** + * Queues a container Report for handling. This is done in a worker thread + * since decoding a container report might be compute intensive . We don't + * want to block since we have asked for bunch of container reports + * from a set of datanodes. + * + * @param containerReport - ContainerReport + */ + public void handleContainerReport(DatanodeDetails datanodeDetails, + ContainerReportsProto containerReport) { + if (status == ProgressStatus.InProgress) { + executorService.submit(processContainerReport(datanodeDetails, + containerReport)); + } else { + LOG.debug("Cannot handle container report when the pool is in {} status.", + status); + } + } + + private Runnable processContainerReport(DatanodeDetails datanodeDetails, + ContainerReportsProto reports) { + return () -> { + if (processedNodeSet.computeIfAbsent(datanodeDetails.getUuid(), + (k) -> true)) { + nodeProcessed.incrementAndGet(); + LOG.debug("Total Nodes processed : {} Node Name: {} ", nodeProcessed, + datanodeDetails.getUuid()); + for (ContainerInfo info : reports.getReportsList()) { + containerProcessedCount.incrementAndGet(); + LOG.debug("Total Containers processed: {} Container Name: {}", + containerProcessedCount.get(), info.getContainerID()); + + // Update the container map with count + 1 if the key exists or + // update the map with 1. Since this is a concurrentMap the + // computation and update is atomic. + containerCountMap.merge(info.getContainerID(), 1, Integer::sum); + } + } + }; + } + + /** + * Filter the containers based on specific rules. + * + * @param predicate -- Predicate to filter by + * @return A list of map entries. + */ + public List> filterContainer( + Predicate> predicate) { + return containerCountMap.entrySet().stream() + .filter(predicate).collect(Collectors.toList()); + } + + /** + * Used only for testing, calling this will abort container report + * processing. This is very dangerous call and should not be made by any users + */ + @VisibleForTesting + public void setDoneProcessing() { + nodeProcessed.set(nodeCount.get()); + } + + /** + * Returns the pool name. + * + * @return Name of the pool. 
+ */ + String getPoolName() { + return pool.getPoolName(); + } + + public void finalizeReconciliation() { + status = ProgressStatus.Done; + //TODO: Add finalizing logic. This is where actual reconciliation happens. + } + + /** + * Current status of the computing replication status. + */ + public enum ProgressStatus { + InProgress, Done, Error + } +} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/PeriodicPool.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/PeriodicPool.java new file mode 100644 index 0000000000..ef28aa78d0 --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/PeriodicPool.java @@ -0,0 +1,119 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
    + * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.hdds.scm.container.replication; + +import java.util.concurrent.atomic.AtomicLong; + +/** + * Periodic pool is a pool with a time stamp, this allows us to process pools + * based on a cyclic clock. + */ +public class PeriodicPool implements Comparable { + private final String poolName; + private long lastProcessedTime; + private AtomicLong totalProcessedCount; + + /** + * Constructs a periodic pool. + * + * @param poolName - Name of the pool + */ + public PeriodicPool(String poolName) { + this.poolName = poolName; + lastProcessedTime = 0; + totalProcessedCount = new AtomicLong(0); + } + + /** + * Get pool Name. + * @return PoolName + */ + public String getPoolName() { + return poolName; + } + + /** + * Compares this object with the specified object for order. Returns a + * negative integer, zero, or a positive integer as this object is less + * than, equal to, or greater than the specified object. + * + * @param o the object to be compared. + * @return a negative integer, zero, or a positive integer as this object is + * less than, equal to, or greater than the specified object. + * @throws NullPointerException if the specified object is null + * @throws ClassCastException if the specified object's type prevents it + * from being compared to this object. + */ + @Override + public int compareTo(PeriodicPool o) { + return Long.compare(this.lastProcessedTime, o.lastProcessedTime); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + PeriodicPool that = (PeriodicPool) o; + + return poolName.equals(that.poolName); + } + + @Override + public int hashCode() { + return poolName.hashCode(); + } + + /** + * Returns the Total Times we have processed this pool. + * + * @return processed count. + */ + public long getTotalProcessedCount() { + return totalProcessedCount.get(); + } + + /** + * Gets the last time we processed this pool. + * @return time in milliseconds + */ + public long getLastProcessedTime() { + return this.lastProcessedTime; + } + + + /** + * Sets the last processed time. + * + * @param lastProcessedTime - Long in milliseconds. + */ + + public void setLastProcessedTime(long lastProcessedTime) { + this.lastProcessedTime = lastProcessedTime; + } + + /* + * Increments the total processed count. + */ + public void incTotalProcessedCount() { + this.totalProcessedCount.incrementAndGet(); + } +} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java new file mode 100644 index 0000000000..7bbe2efe57 --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java @@ -0,0 +1,23 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. 
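PeriodicPool above orders pools by lastProcessedTime, which is what lets the caller pull the least recently processed pool from a priority queue. A minimal illustration of that ordering with a stand-in class (names and timestamps below are invented):

import java.util.PriorityQueue;

public class PoolOrderingSketch {
  // Minimal stand-in for PeriodicPool: ordered by last processed time, oldest first.
  static class Pool implements Comparable<Pool> {
    final String name;
    final long lastProcessedTime;

    Pool(String name, long lastProcessedTime) {
      this.name = name;
      this.lastProcessedTime = lastProcessedTime;
    }

    @Override
    public int compareTo(Pool o) {
      return Long.compare(this.lastProcessedTime, o.lastProcessedTime);
    }
  }

  public static void main(String[] args) {
    PriorityQueue<Pool> queue = new PriorityQueue<>();
    queue.add(new Pool("poolA", 2000L));
    queue.add(new Pool("poolB", 1000L));
    queue.add(new Pool("poolC", 3000L));
    // poolB was processed least recently, so it is scheduled first.
    System.out.println(queue.poll().name); // poolB
  }
}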
The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.hdds.scm.container.replication; +/* + This package contains routines that manage replication of a container. This + relies on container reports to understand the replication level of a + container - UnderReplicated, Replicated, OverReplicated -- and manages the + replication level based on that. + */ \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java index 72d7e946cc..4392633b16 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java @@ -123,6 +123,12 @@ public interface NodeManager extends StorageContainerNodeProtocol, */ SCMNodeMetric getNodeStat(DatanodeDetails datanodeDetails); + /** + * Returns the NodePoolManager associated with the NodeManager. + * @return NodePoolManager + */ + NodePoolManager getNodePoolManager(); + /** * Wait for the heartbeat is processed by NodeManager. * @return true if heartbeat has been processed. diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodePoolManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodePoolManager.java new file mode 100644 index 0000000000..46faf9ca4d --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodePoolManager.java @@ -0,0 +1,71 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.scm.node; + +import org.apache.hadoop.hdds.scm.exceptions.SCMException; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; + +import java.io.Closeable; +import java.io.IOException; +import java.util.List; + +/** + * Interface that defines SCM NodePoolManager. + */ +public interface NodePoolManager extends Closeable { + + /** + * Add a node to a node pool. + * @param pool - name of the node pool. + * @param node - data node. + */ + void addNode(String pool, DatanodeDetails node) throws IOException; + + /** + * Remove a node from a node pool. 
+ * @param pool - name of the node pool. + * @param node - data node. + * @throws SCMException + */ + void removeNode(String pool, DatanodeDetails node) + throws SCMException; + + /** + * Get a list of known node pools. + * @return a list of known node pool names or an empty list if not node pool + * is defined. + */ + List getNodePools(); + + /** + * Get all nodes of a node pool given the name of the node pool. + * @param pool - name of the node pool. + * @return a list of datanode ids or an empty list if the node pool was not + * found. + */ + List getNodes(String pool); + + /** + * Get the node pool name if the node has been added to a node pool. + * @param datanodeDetails - datanode ID. + * @return node pool name if it has been assigned. + * null if the node has not been assigned to any node pool yet. + */ + String getNodePool(DatanodeDetails datanodeDetails) throws SCMException; +} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java index adca8eae0c..fc8b0137f3 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java @@ -53,6 +53,7 @@ import org.apache.hadoop.util.Time; import org.apache.hadoop.util.concurrent.HadoopExecutors; +import com.google.protobuf.GeneratedMessage; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -158,6 +159,7 @@ public class SCMNodeManager private ObjectName nmInfoBean; // Node pool manager. + private final SCMNodePoolManager nodePoolManager; private final StorageContainerManager scmManager; public static final Event DATANODE_COMMAND = @@ -208,6 +210,7 @@ public SCMNodeManager(OzoneConfiguration conf, String clusterID, registerMXBean(); + this.nodePoolManager = new SCMNodePoolManager(conf); this.scmManager = scmManager; } @@ -679,6 +682,7 @@ private void updateNodeStat(UUID dnId, NodeReportProto nodeReport) { @Override public void close() throws IOException { unregisterMXBean(); + nodePoolManager.close(); executorService.shutdown(); try { if (!executorService.awaitTermination(5, TimeUnit.SECONDS)) { @@ -756,6 +760,20 @@ public RegisteredCommand register( LOG.info("Leaving startup chill mode."); } + // TODO: define node pool policy for non-default node pool. + // For now, all nodes are added to the "DefaultNodePool" upon registration + // if it has not been added to any node pool yet. + try { + if (nodePoolManager.getNodePool(datanodeDetails) == null) { + nodePoolManager.addNode(SCMNodePoolManager.DEFAULT_NODEPOOL, + datanodeDetails); + } + } catch (IOException e) { + // TODO: make sure registration failure is handled correctly. 
+ return RegisteredCommand.newBuilder() + .setErrorCode(ErrorCode.errorNodeNotPermitted) + .build(); + } // Updating Node Report, as registration is successful updateNodeStat(datanodeDetails.getUuid(), nodeReport); LOG.info("Data node with ID: {} Registered.", @@ -841,6 +859,11 @@ public SCMNodeMetric getNodeStat(DatanodeDetails datanodeDetails) { return new SCMNodeMetric(nodeStats.get(datanodeDetails.getUuid())); } + @Override + public NodePoolManager getNodePoolManager() { + return nodePoolManager; + } + @Override public Map getNodeCount() { Map nodeCountMap = new HashMap(); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodePoolManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodePoolManager.java new file mode 100644 index 0000000000..faf330ea1d --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodePoolManager.java @@ -0,0 +1,269 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.scm.node; + +import com.google.common.base.Preconditions; +import org.apache.hadoop.hdds.scm.exceptions.SCMException; +import org.apache.hadoop.hdfs.DFSUtil; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.utils.MetadataStore; +import org.apache.hadoop.utils.MetadataStoreBuilder; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.locks.ReadWriteLock; +import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.stream.Collectors; + +import static org.apache.hadoop.hdds.scm.ScmConfigKeys + .OZONE_SCM_DB_CACHE_SIZE_DEFAULT; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys + .OZONE_SCM_DB_CACHE_SIZE_MB; +import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes + .FAILED_TO_FIND_NODE_IN_POOL; +import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes + .FAILED_TO_LOAD_NODEPOOL; +import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath; +import static org.apache.hadoop.ozone.OzoneConsts.NODEPOOL_DB; + +/** + * SCM node pool manager that manges node pools. 
+ */ +public final class SCMNodePoolManager implements NodePoolManager { + + private static final Logger LOG = + LoggerFactory.getLogger(SCMNodePoolManager.class); + private static final List EMPTY_NODE_LIST = + new ArrayList<>(); + private static final List EMPTY_NODEPOOL_LIST = new ArrayList<>(); + public static final String DEFAULT_NODEPOOL = "DefaultNodePool"; + + // DB that saves the node to node pool mapping. + private MetadataStore nodePoolStore; + + // In-memory node pool to nodes mapping + private HashMap> nodePools; + + // Read-write lock for nodepool operations + private ReadWriteLock lock; + + /** + * Construct SCMNodePoolManager class that manages node to node pool mapping. + * @param conf - configuration. + * @throws IOException + */ + public SCMNodePoolManager(final OzoneConfiguration conf) + throws IOException { + final int cacheSize = conf.getInt(OZONE_SCM_DB_CACHE_SIZE_MB, + OZONE_SCM_DB_CACHE_SIZE_DEFAULT); + File metaDir = getOzoneMetaDirPath(conf); + String scmMetaDataDir = metaDir.getPath(); + File nodePoolDBPath = new File(scmMetaDataDir, NODEPOOL_DB); + nodePoolStore = MetadataStoreBuilder.newBuilder() + .setConf(conf) + .setDbFile(nodePoolDBPath) + .setCacheSize(cacheSize * OzoneConsts.MB) + .build(); + nodePools = new HashMap<>(); + lock = new ReentrantReadWriteLock(); + init(); + } + + /** + * Initialize the in-memory store based on persist store from level db. + * No lock is needed as init() is only invoked by constructor. + * @throws SCMException + */ + private void init() throws SCMException { + try { + nodePoolStore.iterate(null, (key, value) -> { + try { + DatanodeDetails nodeId = DatanodeDetails.getFromProtoBuf( + HddsProtos.DatanodeDetailsProto.PARSER.parseFrom(key)); + String poolName = DFSUtil.bytes2String(value); + + Set nodePool = null; + if (nodePools.containsKey(poolName)) { + nodePool = nodePools.get(poolName); + } else { + nodePool = new HashSet<>(); + nodePools.put(poolName, nodePool); + } + nodePool.add(nodeId); + if (LOG.isDebugEnabled()) { + LOG.debug("Adding node: {} to node pool: {}", + nodeId, poolName); + } + } catch (IOException e) { + LOG.warn("Can't add a datanode to node pool, continue next..."); + } + return true; + }); + } catch (IOException e) { + LOG.error("Loading node pool error " + e); + throw new SCMException("Failed to load node pool", + FAILED_TO_LOAD_NODEPOOL); + } + } + + /** + * Add a datanode to a node pool. + * @param pool - name of the node pool. + * @param node - name of the datanode. + */ + @Override + public void addNode(final String pool, final DatanodeDetails node) + throws IOException { + Preconditions.checkNotNull(pool, "pool name is null"); + Preconditions.checkNotNull(node, "node is null"); + lock.writeLock().lock(); + try { + // add to the persistent store + nodePoolStore.put(node.getProtoBufMessage().toByteArray(), + DFSUtil.string2Bytes(pool)); + + // add to the in-memory store + Set nodePool = null; + if (nodePools.containsKey(pool)) { + nodePool = nodePools.get(pool); + } else { + nodePool = new HashSet(); + nodePools.put(pool, nodePool); + } + nodePool.add(node); + } finally { + lock.writeLock().unlock(); + } + } + + /** + * Remove a datanode from a node pool. + * @param pool - name of the node pool. + * @param node - datanode id. 
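addNode() above follows a write-through pattern: persist the node-to-pool mapping first, then update the in-memory index, both under a write lock. A simplified sketch of the same pattern, with a plain Map standing in for the LevelDB-backed MetadataStore (all names below are illustrative only):

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class PoolIndexSketch {
  // Stand-in for the persistent node -> pool store (LevelDB in the real class).
  private final Map<String, String> persistentStore = new HashMap<>();
  // In-memory pool -> nodes index, rebuilt from the store on startup.
  private final Map<String, Set<String>> poolsToNodes = new HashMap<>();
  private final ReadWriteLock lock = new ReentrantReadWriteLock();

  public void addNode(String pool, String nodeId) {
    lock.writeLock().lock();
    try {
      persistentStore.put(nodeId, pool);                        // 1. persist first
      poolsToNodes.computeIfAbsent(pool, p -> new HashSet<>())  // 2. then update the index
          .add(nodeId);
    } finally {
      lock.writeLock().unlock();
    }
  }

  public Set<String> getNodes(String pool) {
    lock.readLock().lock();
    try {
      return new HashSet<>(poolsToNodes.getOrDefault(pool, new HashSet<>()));
    } finally {
      lock.readLock().unlock();
    }
  }
}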
+ * @throws SCMException + */ + @Override + public void removeNode(final String pool, final DatanodeDetails node) + throws SCMException { + Preconditions.checkNotNull(pool, "pool name is null"); + Preconditions.checkNotNull(node, "node is null"); + lock.writeLock().lock(); + try { + // Remove from the persistent store + byte[] kName = node.getProtoBufMessage().toByteArray(); + byte[] kData = nodePoolStore.get(kName); + if (kData == null) { + throw new SCMException(String.format("Unable to find node %s from" + + " pool %s in DB.", DFSUtil.bytes2String(kName), pool), + FAILED_TO_FIND_NODE_IN_POOL); + } + nodePoolStore.delete(kName); + + // Remove from the in-memory store + if (nodePools.containsKey(pool)) { + Set nodePool = nodePools.get(pool); + nodePool.remove(node); + } else { + throw new SCMException(String.format("Unable to find node %s from" + + " pool %s in MAP.", DFSUtil.bytes2String(kName), pool), + FAILED_TO_FIND_NODE_IN_POOL); + } + } catch (IOException e) { + throw new SCMException("Failed to remove node " + node.toString() + + " from node pool " + pool, e, + SCMException.ResultCodes.IO_EXCEPTION); + } finally { + lock.writeLock().unlock(); + } + } + + /** + * Get all the node pools. + * @return all the node pools. + */ + @Override + public List getNodePools() { + lock.readLock().lock(); + try { + if (!nodePools.isEmpty()) { + return nodePools.keySet().stream().collect(Collectors.toList()); + } else { + return EMPTY_NODEPOOL_LIST; + } + } finally { + lock.readLock().unlock(); + } + } + + /** + * Get all datanodes of a specific node pool. + * @param pool - name of the node pool. + * @return all datanodes of the specified node pool. + */ + @Override + public List getNodes(final String pool) { + Preconditions.checkNotNull(pool, "pool name is null"); + if (nodePools.containsKey(pool)) { + return nodePools.get(pool).stream().collect(Collectors.toList()); + } else { + return EMPTY_NODE_LIST; + } + } + + /** + * Get the node pool name if the node has been added to a node pool. + * @param datanodeDetails - datanode ID. + * @return node pool name if it has been assigned. + * null if the node has not been assigned to any node pool yet. + * TODO: Put this in a in-memory map if performance is an issue. + */ + @Override + public String getNodePool(final DatanodeDetails datanodeDetails) + throws SCMException { + Preconditions.checkNotNull(datanodeDetails, "node is null"); + try { + byte[] result = nodePoolStore.get( + datanodeDetails.getProtoBufMessage().toByteArray()); + return result == null ? null : DFSUtil.bytes2String(result); + } catch (IOException e) { + throw new SCMException("Failed to get node pool for node " + + datanodeDetails.toString(), e, + SCMException.ResultCodes.IO_EXCEPTION); + } + } + + /** + * Close node pool level db store. 
+ * @throws IOException + */ + @Override + public void close() throws IOException { + nodePoolStore.close(); + } +} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java index 80b5d6e182..8c59462b40 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java @@ -19,6 +19,7 @@ import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat; import org.apache.hadoop.hdds.scm.node.NodeManager; +import org.apache.hadoop.hdds.scm.node.NodePoolManager; import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; @@ -272,6 +273,11 @@ public SCMNodeMetric getNodeStat(DatanodeDetails datanodeDetails) { return new SCMNodeMetric(nodeMetricMap.get(datanodeDetails.getUuid())); } + @Override + public NodePoolManager getNodePoolManager() { + return Mockito.mock(NodePoolManager.class); + } + /** * Used for testing. * diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodePoolManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodePoolManager.java new file mode 100644 index 0000000000..8f412dedda --- /dev/null +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodePoolManager.java @@ -0,0 +1,160 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.scm.node; + +import org.apache.commons.collections.ListUtils; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.container.placement.algorithms + .ContainerPlacementPolicy; +import org.apache.hadoop.hdds.scm.container.placement.algorithms + .SCMContainerPlacementCapacity; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.test.PathUtils; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.IOException; +import java.util.Collections; +import java.util.List; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +/** + * Test for SCM node pool manager. 
+ */ +public class TestSCMNodePoolManager { + private static final Logger LOG = + LoggerFactory.getLogger(TestSCMNodePoolManager.class); + + @Rule + public ExpectedException thrown = ExpectedException.none(); + + private final File testDir = PathUtils.getTestDir( + TestSCMNodePoolManager.class); + + SCMNodePoolManager createNodePoolManager(OzoneConfiguration conf) + throws IOException { + conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, + testDir.getAbsolutePath()); + conf.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY, + SCMContainerPlacementCapacity.class, ContainerPlacementPolicy.class); + return new SCMNodePoolManager(conf); + } + + /** + * Test default node pool. + * + * @throws IOException + */ + @Test + public void testDefaultNodePool() throws IOException { + OzoneConfiguration conf = new OzoneConfiguration(); + try { + final String defaultPool = "DefaultPool"; + NodePoolManager npMgr = createNodePoolManager(conf); + + final int nodeCount = 4; + final List nodes = TestUtils + .getListOfDatanodeDetails(nodeCount); + assertEquals(0, npMgr.getNodePools().size()); + for (DatanodeDetails node: nodes) { + npMgr.addNode(defaultPool, node); + } + List nodesRetrieved = npMgr.getNodes(defaultPool); + assertEquals(nodeCount, nodesRetrieved.size()); + assertTwoDatanodeListsEqual(nodes, nodesRetrieved); + + DatanodeDetails nodeRemoved = nodes.remove(2); + npMgr.removeNode(defaultPool, nodeRemoved); + List nodesAfterRemove = npMgr.getNodes(defaultPool); + assertTwoDatanodeListsEqual(nodes, nodesAfterRemove); + + List nonExistSet = npMgr.getNodes("NonExistSet"); + assertEquals(0, nonExistSet.size()); + } finally { + FileUtil.fullyDelete(testDir); + } + } + + + /** + * Test default node pool reload. + * + * @throws IOException + */ + @Test + public void testDefaultNodePoolReload() throws IOException { + OzoneConfiguration conf = new OzoneConfiguration(); + final String defaultPool = "DefaultPool"; + final int nodeCount = 4; + final List nodes = TestUtils + .getListOfDatanodeDetails(nodeCount); + + try { + try { + SCMNodePoolManager npMgr = createNodePoolManager(conf); + assertEquals(0, npMgr.getNodePools().size()); + for (DatanodeDetails node : nodes) { + npMgr.addNode(defaultPool, node); + } + List nodesRetrieved = npMgr.getNodes(defaultPool); + assertEquals(nodeCount, nodesRetrieved.size()); + assertTwoDatanodeListsEqual(nodes, nodesRetrieved); + npMgr.close(); + } finally { + LOG.info("testDefaultNodePoolReload: Finish adding nodes to pool" + + " and close."); + } + + // try reload with a new NodePoolManager instance + try { + SCMNodePoolManager npMgr = createNodePoolManager(conf); + List nodesRetrieved = npMgr.getNodes(defaultPool); + assertEquals(nodeCount, nodesRetrieved.size()); + assertTwoDatanodeListsEqual(nodes, nodesRetrieved); + } finally { + LOG.info("testDefaultNodePoolReload: Finish reloading node pool."); + } + } finally { + FileUtil.fullyDelete(testDir); + } + } + + /** + * Compare and verify that two datanode lists are equal. + * @param list1 - datanode list 1. + * @param list2 - datanode list 2. 
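The comparison helper documented just above (and defined immediately below) checks order-insensitive equality by sorting both lists before comparing. The same idea in miniature, with strings in place of DatanodeDetails:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class ListCompareSketch {
  static boolean equalIgnoringOrder(List<String> a, List<String> b) {
    List<String> left = new ArrayList<>(a);
    List<String> right = new ArrayList<>(b);
    Collections.sort(left);
    Collections.sort(right);
    return left.equals(right);
  }

  public static void main(String[] args) {
    System.out.println(equalIgnoringOrder(
        Arrays.asList("dn1", "dn2"), Arrays.asList("dn2", "dn1"))); // true
  }
}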
+ */ + private void assertTwoDatanodeListsEqual(List list1, + List list2) { + assertEquals(list1.size(), list2.size()); + Collections.sort(list1); + Collections.sort(list2); + assertTrue(ListUtils.isEqualList(list1, list2)); + } +} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java index 1a4dcd7ad2..072d821247 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java @@ -21,6 +21,7 @@ import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat; import org.apache.hadoop.hdds.scm.node.CommandQueue; import org.apache.hadoop.hdds.scm.node.NodeManager; +import org.apache.hadoop.hdds.scm.node.NodePoolManager; import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; @@ -200,6 +201,10 @@ public SCMNodeMetric getNodeStat(DatanodeDetails dd) { return null; } + @Override + public NodePoolManager getNodePoolManager() { + return Mockito.mock(NodePoolManager.class); + } /** * Wait for the heartbeat is processed by NodeManager. diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodePoolManagerMock.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodePoolManagerMock.java new file mode 100644 index 0000000000..ffcd752e84 --- /dev/null +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodePoolManagerMock.java @@ -0,0 +1,133 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
    + * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.ozone.container.testutils; + +import org.apache.hadoop.hdds.scm.exceptions.SCMException; +import org.apache.hadoop.hdds.scm.node.NodePoolManager; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +/** + * Pool Manager replication mock. + */ +public class ReplicationNodePoolManagerMock implements NodePoolManager { + + private final Map nodeMemberShip; + + /** + * A node pool manager for testing. + */ + public ReplicationNodePoolManagerMock() { + nodeMemberShip = new HashMap<>(); + } + + /** + * Add a node to a node pool. + * + * @param pool - name of the node pool. + * @param node - data node. + */ + @Override + public void addNode(String pool, DatanodeDetails node) { + nodeMemberShip.put(node, pool); + } + + /** + * Remove a node from a node pool. + * + * @param pool - name of the node pool. + * @param node - data node. + * @throws SCMException + */ + @Override + public void removeNode(String pool, DatanodeDetails node) + throws SCMException { + nodeMemberShip.remove(node); + + } + + /** + * Get a list of known node pools. + * + * @return a list of known node pool names or an empty list if not node pool + * is defined. + */ + @Override + public List getNodePools() { + Set poolSet = new HashSet<>(); + for (Map.Entry entry : nodeMemberShip.entrySet()) { + poolSet.add(entry.getValue()); + } + return new ArrayList<>(poolSet); + + } + + /** + * Get all nodes of a node pool given the name of the node pool. + * + * @param pool - name of the node pool. + * @return a list of datanode ids or an empty list if the node pool was not + * found. + */ + @Override + public List getNodes(String pool) { + Set datanodeSet = new HashSet<>(); + for (Map.Entry entry : nodeMemberShip.entrySet()) { + if (entry.getValue().equals(pool)) { + datanodeSet.add(entry.getKey()); + } + } + return new ArrayList<>(datanodeSet); + } + + /** + * Get the node pool name if the node has been added to a node pool. + * + * @param datanodeDetails DatanodeDetails. + * @return node pool name if it has been assigned. null if the node has not + * been assigned to any node pool yet. + */ + @Override + public String getNodePool(DatanodeDetails datanodeDetails) { + return nodeMemberShip.get(datanodeDetails); + } + + /** + * Closes this stream and releases any system resources associated + * with it. If the stream is already closed then invoking this + * method has no effect. + *
    + *
    As noted in {@link AutoCloseable#close()}, cases where the + * close may fail require careful attention. It is strongly advised + * to relinquish the underlying resources and to internally + * mark the {@code Closeable} as closed, prior to throwing + * the {@code IOException}. + * + * @throws IOException if an I/O error occurs + */ + @Override + public void close() throws IOException { + + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java index b4ed2b12c2..4d70af84a2 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java @@ -51,9 +51,12 @@ import java.util.HashMap; import java.util.UUID; +import static org.apache.hadoop.ozone.OzoneConsts.BLOCK_DB; import static org.apache.hadoop.ozone.OzoneConsts.SCM_CONTAINER_DB; import static org.apache.hadoop.ozone.OzoneConsts.KB; +import static org.apache.hadoop.ozone.OzoneConsts.NODEPOOL_DB; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; /** * This class tests the CLI that transforms container into SQLite DB files. @@ -173,6 +176,34 @@ public void shutdown() throws InterruptedException { } } + @Test + public void testConvertNodepoolDB() throws Exception { + String dbOutPath = GenericTestUtils.getTempPath( + UUID.randomUUID() + "/out_sql.db"); + String dbRootPath = conf.get(OzoneConfigKeys.OZONE_METADATA_DIRS); + String dbPath = dbRootPath + "/" + NODEPOOL_DB; + String[] args = {"-p", dbPath, "-o", dbOutPath}; + + cli.run(args); + + // verify the sqlite db + HashMap expectedPool = new HashMap<>(); + for (DatanodeDetails dnid : nodeManager.getAllNodes()) { + expectedPool.put(dnid.getUuidString(), "DefaultNodePool"); + } + Connection conn = connectDB(dbOutPath); + String sql = "SELECT * FROM nodePool"; + ResultSet rs = executeQuery(conn, sql); + while(rs.next()) { + String datanodeUUID = rs.getString("datanodeUUID"); + String poolName = rs.getString("poolName"); + assertTrue(expectedPool.remove(datanodeUUID).equals(poolName)); + } + assertEquals(0, expectedPool.size()); + + Files.delete(Paths.get(dbOutPath)); + } + @Test public void testConvertContainerDB() throws Exception { String dbOutPath = GenericTestUtils.getTempPath( diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java index edc0d7b597..2bd43fb93a 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java @@ -19,6 +19,7 @@ import com.google.common.base.Preconditions; import com.google.common.primitives.Longs; +import com.google.protobuf.ByteString; import org.apache.commons.cli.BasicParser; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.Option; @@ -59,11 +60,13 @@ import java.util.HashSet; import java.util.Set; +import static org.apache.hadoop.ozone.OzoneConsts.BLOCK_DB; import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_SUFFIX; import static org.apache.hadoop.ozone.OzoneConsts.KSM_DB_NAME; import static org.apache.hadoop.ozone.OzoneConsts.KSM_USER_PREFIX; import static org.apache.hadoop.ozone.OzoneConsts.KSM_BUCKET_PREFIX; import static 
org.apache.hadoop.ozone.OzoneConsts.KSM_VOLUME_PREFIX; +import static org.apache.hadoop.ozone.OzoneConsts.NODEPOOL_DB; import static org.apache.hadoop.ozone.OzoneConsts.OPEN_CONTAINERS_DB; /** @@ -108,6 +111,15 @@ public class SQLCLI extends Configured implements Tool { private static final String INSERT_CONTAINER_MEMBERS = "INSERT INTO containerMembers (containerName, datanodeUUID) " + "VALUES (\"%s\", \"%s\")"; + // for nodepool.db + private static final String CREATE_NODE_POOL = + "CREATE TABLE nodePool (" + + "datanodeUUID TEXT NOT NULL," + + "poolName TEXT NOT NULL," + + "PRIMARY KEY(datanodeUUID, poolName))"; + private static final String INSERT_NODE_POOL = + "INSERT INTO nodePool (datanodeUUID, poolName) " + + "VALUES (\"%s\", \"%s\")"; // and reuse CREATE_DATANODE_INFO and INSERT_DATANODE_INFO // for openContainer.db private static final String CREATE_OPEN_CONTAINER = @@ -273,6 +285,9 @@ public int run(String[] args) throws Exception { if (dbName.toString().endsWith(CONTAINER_DB_SUFFIX)) { LOG.info("Converting container DB"); convertContainerDB(dbPath, outPath); + } else if (dbName.toString().equals(NODEPOOL_DB)) { + LOG.info("Converting node pool DB"); + convertNodePoolDB(dbPath, outPath); } else if (dbName.toString().equals(OPEN_CONTAINERS_DB)) { LOG.info("Converting open container DB"); convertOpenContainerDB(dbPath, outPath); @@ -528,7 +543,66 @@ private void insertContainerDB(Connection conn, long containerID, } LOG.info("Insertion completed."); } + /** + * Converts nodePool.db to sqlite. The schema of sql db: + * two tables, nodePool and datanodeInfo (the same datanode Info as for + * container.db). + * + * nodePool + * --------------------------------------------------------- + * datanodeUUID* | poolName* + * --------------------------------------------------------- + * + * datanodeInfo: + * --------------------------------------------------------- + * hostname | datanodeUUid* | xferPort | ipcPort + * --------------------------------------------------------- + * + * -------------------------------- + * |containerPort + * -------------------------------- + * + * @param dbPath path to container db. + * @param outPath path to output sqlite + * @throws IOException throws exception. 
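Once convertNodePoolDB() (below) has produced the SQLite file, it can be inspected with plain JDBC, much as testConvertNodepoolDB does earlier in this patch. A sketch of such a query, assuming an SQLite JDBC driver is on the classpath and using a made-up output path:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class NodePoolDbReader {
  public static void main(String[] args) throws Exception {
    // Hypothetical path to the SQLite file produced by SQLCLI with "-p <nodepool.db> -o <out>"
    String dbOutPath = "/tmp/out_sql.db";
    try (Connection conn =
             DriverManager.getConnection("jdbc:sqlite:" + dbOutPath);
         Statement stmt = conn.createStatement();
         ResultSet rs = stmt.executeQuery(
             "SELECT datanodeUUID, poolName FROM nodePool")) {
      while (rs.next()) {
        System.out.println(rs.getString("datanodeUUID") + " -> "
            + rs.getString("poolName"));
      }
    }
  }
}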
+ */ + private void convertNodePoolDB(Path dbPath, Path outPath) throws Exception { + LOG.info("Create table for sql node pool db."); + File dbFile = dbPath.toFile(); + try (MetadataStore dbStore = MetadataStoreBuilder.newBuilder() + .setConf(conf).setDbFile(dbFile).build(); + Connection conn = connectDB(outPath.toString())) { + executeSQL(conn, CREATE_NODE_POOL); + executeSQL(conn, CREATE_DATANODE_INFO); + dbStore.iterate(null, (key, value) -> { + DatanodeDetails nodeId = DatanodeDetails + .getFromProtoBuf(HddsProtos.DatanodeDetailsProto + .PARSER.parseFrom(key)); + String blockPool = DFSUtil.bytes2String(value); + try { + insertNodePoolDB(conn, blockPool, nodeId); + return true; + } catch (SQLException e) { + throw new IOException(e); + } + }); + } + } + + private void insertNodePoolDB(Connection conn, String blockPool, + DatanodeDetails datanodeDetails) throws SQLException { + String insertNodePool = String.format(INSERT_NODE_POOL, + datanodeDetails.getUuidString(), blockPool); + executeSQL(conn, insertNodePool); + + String insertDatanodeDetails = String + .format(INSERT_DATANODE_INFO, datanodeDetails.getHostName(), + datanodeDetails.getUuidString(), datanodeDetails.getIpAddress(), + datanodeDetails.getPort(DatanodeDetails.Port.Name.STANDALONE) + .getValue()); + executeSQL(conn, insertDatanodeDetails); + } /** * Convert openContainer.db to sqlite db file. This is rather simple db, From 56a4cdb9804daea7164155a5b1b4eba44a11b705 Mon Sep 17 00:00:00 2001 From: Xiaoyu Yao Date: Wed, 27 Jun 2018 13:28:00 -0700 Subject: [PATCH 62/70] HDDS-194. Remove NodePoolManager and node pool handling from SCM. Contributed by Elek Marton. --- .../apache/hadoop/hdds/scm/ScmConfigKeys.java | 26 -- .../org/apache/hadoop/ozone/OzoneConsts.java | 1 - .../src/main/resources/ozone-default.xml | 47 --- .../hdds/scm/container/ContainerMapping.java | 10 +- .../replication/ContainerSupervisor.java | 340 ------------------ .../container/replication/InProgressPool.java | 255 ------------- .../container/replication/PeriodicPool.java | 119 ------ .../container/replication/package-info.java | 23 -- .../hadoop/hdds/scm/node/NodeManager.java | 6 - .../hadoop/hdds/scm/node/NodePoolManager.java | 71 ---- .../hadoop/hdds/scm/node/SCMNodeManager.java | 23 -- .../hdds/scm/node/SCMNodePoolManager.java | 269 -------------- .../hdds/scm/container/MockNodeManager.java | 6 - .../hdds/scm/node/TestSCMNodePoolManager.java | 160 --------- .../testutils/ReplicationNodeManagerMock.java | 5 - .../ReplicationNodePoolManagerMock.java | 133 ------- .../hadoop/ozone/scm/TestContainerSQLCli.java | 31 -- .../apache/hadoop/ozone/scm/cli/SQLCLI.java | 74 ---- 18 files changed, 3 insertions(+), 1596 deletions(-) delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerSupervisor.java delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/InProgressPool.java delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/PeriodicPool.java delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodePoolManager.java delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodePoolManager.java delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodePoolManager.java delete mode 
100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodePoolManagerMock.java diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java index 85407e65ce..df6fbf0c75 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java @@ -243,32 +243,6 @@ public final class ScmConfigKeys { public static final String OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT_DEFAULT = "60s"; - /** - * Don't start processing a pool if we have not had a minimum number of - * seconds from the last processing. - */ - public static final String OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL = - "ozone.scm.container.report.processing.interval"; - public static final String - OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL_DEFAULT = "60s"; - - /** - * This determines the total number of pools to be processed in parallel. - */ - public static final String OZONE_SCM_MAX_NODEPOOL_PROCESSING_THREADS = - "ozone.scm.max.nodepool.processing.threads"; - public static final int OZONE_SCM_MAX_NODEPOOL_PROCESSING_THREADS_DEFAULT = 1; - /** - * These 2 settings control the number of threads in executor pool and time - * outs for thw container reports from all nodes. - */ - public static final String OZONE_SCM_MAX_CONTAINER_REPORT_THREADS = - "ozone.scm.max.container.report.threads"; - public static final int OZONE_SCM_MAX_CONTAINER_REPORT_THREADS_DEFAULT = 100; - public static final String OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT = - "ozone.scm.container.reports.wait.timeout"; - public static final String OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT_DEFAULT = - "5m"; public static final String OZONE_SCM_BLOCK_DELETION_MAX_RETRY = "ozone.scm.block.deletion.max.retry"; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java index c40dc8e4ee..08a5ffdb87 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java @@ -91,7 +91,6 @@ public final class OzoneConsts { public static final String SCM_CONTAINER_DB = "scm-" + CONTAINER_DB_SUFFIX; public static final String DN_CONTAINER_DB = "-dn-"+ CONTAINER_DB_SUFFIX; public static final String BLOCK_DB = "block.db"; - public static final String NODEPOOL_DB = "nodepool.db"; public static final String OPEN_CONTAINERS_DB = "openContainers.db"; public static final String DELETED_BLOCK_DB = "deletedBlock.db"; public static final String KSM_DB_NAME = "ksm.db"; diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index 7a91610c65..25365c8d9d 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -571,25 +571,6 @@ allocation. - - ozone.scm.container.report.processing.interval - 60s - OZONE, PERFORMANCE - Time interval for scm to process container reports - for a node pool. Scm handles node pool reports in a cyclic clock - manner, it fetches pools periodically with this time interval. - - - - ozone.scm.container.reports.wait.timeout - 300s - OZONE, PERFORMANCE, MANAGEMENT - Maximum time to wait in seconds for processing all container - reports from - a node pool. 
It determines the timeout for a - node pool report. - - ozone.scm.container.size.gb 5 @@ -792,17 +773,6 @@ The keytab file for Kerberos authentication in SCM. - - ozone.scm.max.container.report.threads - 100 - OZONE, PERFORMANCE - - Maximum number of threads to process container reports in scm. - Each container report from a data node is processed by scm in a worker - thread, fetched from a thread pool. This property is used to control the - maximum size of the thread pool. - - ozone.scm.max.hb.count.to.process 5000 @@ -814,14 +784,6 @@ for more info. - - ozone.scm.max.nodepool.processing.threads - 1 - OZONE, MANAGEMENT, PERFORMANCE - - Number of node pools to process in parallel. - - ozone.scm.names @@ -843,15 +805,6 @@ see ozone.scm.heartbeat.thread.interval before changing this value. - - ozone.scm.max.nodepool.processing.threads - 1 - OZONE, SCM - - Controls the number of node pools that can be processed in parallel by - Container Supervisor. - - ozone.trace.enabled false diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java index b563e90e76..9fd30f2ad0 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java @@ -24,7 +24,6 @@ import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.container.closer.ContainerCloser; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; -import org.apache.hadoop.hdds.scm.container.replication.ContainerSupervisor; import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.hdds.scm.node.NodeManager; import org.apache.hadoop.hdds.scm.pipelines.PipelineSelector; @@ -80,7 +79,6 @@ public class ContainerMapping implements Mapping { private final PipelineSelector pipelineSelector; private final ContainerStateManager containerStateManager; private final LeaseManager containerLeaseManager; - private final ContainerSupervisor containerSupervisor; private final float containerCloseThreshold; private final ContainerCloser closer; private final long size; @@ -127,9 +125,7 @@ public ContainerMapping( OZONE_SCM_CONTAINER_SIZE_DEFAULT) * 1024 * 1024 * 1024; this.containerStateManager = new ContainerStateManager(conf, this); - this.containerSupervisor = - new ContainerSupervisor(conf, nodeManager, - nodeManager.getNodePoolManager()); + this.containerCloseThreshold = conf.getFloat( ScmConfigKeys.OZONE_SCM_CONTAINER_CLOSE_THRESHOLD, ScmConfigKeys.OZONE_SCM_CONTAINER_CLOSE_THRESHOLD_DEFAULT); @@ -407,8 +403,8 @@ public void processContainerReports(DatanodeDetails datanodeDetails, throws IOException { List containerInfos = reports.getReportsList(); - containerSupervisor.handleContainerReport(datanodeDetails, reports); - for (StorageContainerDatanodeProtocolProtos.ContainerInfo datanodeState : + + for (StorageContainerDatanodeProtocolProtos.ContainerInfo datanodeState : containerInfos) { byte[] dbKey = Longs.toByteArray(datanodeState.getContainerID()); lock.lock(); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerSupervisor.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerSupervisor.java deleted file mode 100644 index 5bd05746bf..0000000000 --- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerSupervisor.java +++ /dev/null @@ -1,340 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
    - * http://www.apache.org/licenses/LICENSE-2.0 - *
    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.container.replication; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.util.concurrent.ThreadFactoryBuilder; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.hdds.scm.node.NodePoolManager; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; -import org.apache.hadoop.util.Time; -import org.apache.hadoop.util.concurrent.HadoopExecutors; -import org.apache.hadoop.util.concurrent.HadoopThreadPoolExecutor; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.Closeable; -import java.io.IOException; -import java.util.HashSet; -import java.util.LinkedList; -import java.util.List; -import java.util.PriorityQueue; -import java.util.Set; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.ThreadFactory; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.locks.ReadWriteLock; -import java.util.concurrent.locks.ReentrantReadWriteLock; - -import static com.google.common.util.concurrent.Uninterruptibles - .sleepUninterruptibly; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT_DEFAULT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL_DEFAULT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_MAX_CONTAINER_REPORT_THREADS; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_MAX_CONTAINER_REPORT_THREADS_DEFAULT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_MAX_NODEPOOL_PROCESSING_THREADS; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_MAX_NODEPOOL_PROCESSING_THREADS_DEFAULT; - -/** - * This class takes a set of container reports that belong to a pool and then - * computes the replication levels for each container. 
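The ContainerSupervisor being deleted here grouped container reports by node pool and derived a replication level per container. Independent of the pool machinery, that classification step amounts to comparing observed replica counts against the expected factor; a sketch follows (the enum and the factor of 3 are illustrative, not SCM API):

import java.util.HashMap;
import java.util.Map;

public class ReplicationLevelSketch {
  enum Level { UNDER_REPLICATED, REPLICATED, OVER_REPLICATED }

  static final int REPLICATION_FACTOR = 3;

  /** Map containerID -> observed replica count into a replication level. */
  static Map<Long, Level> classify(Map<Long, Integer> replicaCounts) {
    Map<Long, Level> result = new HashMap<>();
    replicaCounts.forEach((id, count) -> {
      if (count < REPLICATION_FACTOR) {
        result.put(id, Level.UNDER_REPLICATED);
      } else if (count > REPLICATION_FACTOR) {
        result.put(id, Level.OVER_REPLICATED);
      } else {
        result.put(id, Level.REPLICATED);
      }
    });
    return result;
  }
}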
- */ -public class ContainerSupervisor implements Closeable { - public static final Logger LOG = - LoggerFactory.getLogger(ContainerSupervisor.class); - - private final NodePoolManager poolManager; - private final HashSet poolNames; - private final PriorityQueue poolQueue; - private final NodeManager nodeManager; - private final long containerProcessingLag; - private final AtomicBoolean runnable; - private final ExecutorService executorService; - private final long maxPoolWait; - private long poolProcessCount; - private final List inProgressPoolList; - private final AtomicInteger threadFaultCount; - private final int inProgressPoolMaxCount; - - private final ReadWriteLock inProgressPoolListLock; - - /** - * Returns the number of times we have processed pools. - * @return long - */ - public long getPoolProcessCount() { - return poolProcessCount; - } - - - /** - * Constructs a class that computes Replication Levels. - * - * @param conf - OzoneConfiguration - * @param nodeManager - Node Manager - * @param poolManager - Pool Manager - */ - public ContainerSupervisor(Configuration conf, NodeManager nodeManager, - NodePoolManager poolManager) { - Preconditions.checkNotNull(poolManager); - Preconditions.checkNotNull(nodeManager); - this.containerProcessingLag = - conf.getTimeDuration(OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL, - OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL_DEFAULT, - TimeUnit.SECONDS - ) * 1000; - int maxContainerReportThreads = - conf.getInt(OZONE_SCM_MAX_CONTAINER_REPORT_THREADS, - OZONE_SCM_MAX_CONTAINER_REPORT_THREADS_DEFAULT - ); - this.maxPoolWait = - conf.getTimeDuration(OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT, - OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT_DEFAULT, - TimeUnit.MILLISECONDS); - this.inProgressPoolMaxCount = conf.getInt( - OZONE_SCM_MAX_NODEPOOL_PROCESSING_THREADS, - OZONE_SCM_MAX_NODEPOOL_PROCESSING_THREADS_DEFAULT); - this.poolManager = poolManager; - this.nodeManager = nodeManager; - this.poolNames = new HashSet<>(); - this.poolQueue = new PriorityQueue<>(); - this.runnable = new AtomicBoolean(true); - this.threadFaultCount = new AtomicInteger(0); - this.executorService = newCachedThreadPool( - new ThreadFactoryBuilder().setDaemon(true) - .setNameFormat("Container Reports Processing Thread - %d") - .build(), maxContainerReportThreads); - this.inProgressPoolList = new LinkedList<>(); - this.inProgressPoolListLock = new ReentrantReadWriteLock(); - - initPoolProcessThread(); - } - - private ExecutorService newCachedThreadPool(ThreadFactory threadFactory, - int maxThreads) { - return new HadoopThreadPoolExecutor(0, maxThreads, 60L, TimeUnit.SECONDS, - new LinkedBlockingQueue<>(), threadFactory); - } - - /** - * Returns the number of pools that are under process right now. - * @return int - Number of pools that are in process. - */ - public int getInProgressPoolCount() { - return inProgressPoolList.size(); - } - - /** - * Exits the background thread. - */ - public void setExit() { - this.runnable.set(false); - } - - /** - * Adds or removes pools from names that we need to process. - * - * There are two different cases that we need to process. - * The case where some pools are being added and some times we have to - * handle cases where pools are removed. 
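refreshPools(), whose javadoc appears just above and whose body follows below, reconciles the tracked pool set against the pool manager by computing two set differences: pools that were added and pools that were removed. That difference computation in isolation (pool names below are invented):

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class PoolDiffSketch {
  /** Elements present in newSet but not in oldSet. */
  static Set<String> difference(Set<String> newSet, Set<String> oldSet) {
    Set<String> result = new HashSet<>(newSet);
    result.removeAll(oldSet);
    return result;
  }

  public static void main(String[] args) {
    Set<String> tracked = new HashSet<>(Arrays.asList("pool1", "pool2"));
    Set<String> reported = new HashSet<>(Arrays.asList("pool2", "pool3"));
    System.out.println("added: " + difference(reported, tracked));   // [pool3]
    System.out.println("removed: " + difference(tracked, reported)); // [pool1]
  }
}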
- */ - private void refreshPools() { - List pools = this.poolManager.getNodePools(); - if (pools != null) { - - HashSet removedPools = - computePoolDifference(this.poolNames, new HashSet<>(pools)); - - HashSet addedPools = - computePoolDifference(new HashSet<>(pools), this.poolNames); - // TODO: Support remove pool API in pool manager so that this code - // path can be tested. This never happens in the current code base. - for (String poolName : removedPools) { - for (PeriodicPool periodicPool : poolQueue) { - if (periodicPool.getPoolName().compareTo(poolName) == 0) { - poolQueue.remove(periodicPool); - } - } - } - // Remove the pool names that we have in the list. - this.poolNames.removeAll(removedPools); - - for (String poolName : addedPools) { - poolQueue.add(new PeriodicPool(poolName)); - } - - // Add to the pool names we are tracking. - poolNames.addAll(addedPools); - } - - } - - /** - * Handle the case where pools are added. - * - * @param newPools - New Pools list - * @param oldPool - oldPool List. - */ - private HashSet computePoolDifference(HashSet newPools, - Set oldPool) { - Preconditions.checkNotNull(newPools); - Preconditions.checkNotNull(oldPool); - HashSet newSet = new HashSet<>(newPools); - newSet.removeAll(oldPool); - return newSet; - } - - private void initPoolProcessThread() { - - /* - * Task that runs to check if we need to start a pool processing job. - * if so we create a pool reconciliation job and find out of all the - * expected containers are on the nodes. - */ - Runnable processPools = () -> { - while (runnable.get()) { - // Make sure that we don't have any new pools. - refreshPools(); - while (inProgressPoolList.size() < inProgressPoolMaxCount) { - PeriodicPool pool = poolQueue.poll(); - if (pool != null) { - if (pool.getLastProcessedTime() + this.containerProcessingLag > - Time.monotonicNow()) { - LOG.debug("Not within the time window for processing: {}", - pool.getPoolName()); - // we might over sleep here, not a big deal. - sleepUninterruptibly(this.containerProcessingLag, - TimeUnit.MILLISECONDS); - } - LOG.debug("Adding pool {} to container processing queue", - pool.getPoolName()); - InProgressPool inProgressPool = new InProgressPool(maxPoolWait, - pool, this.nodeManager, this.poolManager, this.executorService); - inProgressPool.startReconciliation(); - inProgressPoolListLock.writeLock().lock(); - try { - inProgressPoolList.add(inProgressPool); - } finally { - inProgressPoolListLock.writeLock().unlock(); - } - poolProcessCount++; - } else { - break; - } - } - sleepUninterruptibly(this.maxPoolWait, TimeUnit.MILLISECONDS); - inProgressPoolListLock.readLock().lock(); - try { - for (InProgressPool inProgressPool : inProgressPoolList) { - inProgressPool.finalizeReconciliation(); - poolQueue.add(inProgressPool.getPool()); - } - } finally { - inProgressPoolListLock.readLock().unlock(); - } - inProgressPoolListLock.writeLock().lock(); - try { - inProgressPoolList.clear(); - } finally { - inProgressPoolListLock.writeLock().unlock(); - } - } - }; - - // We will have only one thread for pool processing. - Thread poolProcessThread = new Thread(processPools); - poolProcessThread.setDaemon(true); - poolProcessThread.setName("Pool replica thread"); - poolProcessThread.setUncaughtExceptionHandler((Thread t, Throwable e) -> { - // Let us just restart this thread after logging a critical error. - // if this thread is not running we cannot handle commands from SCM. - LOG.error("Critical Error : Pool replica thread encountered an " + - "error. 
Thread: {} Error Count : {}", t.toString(), e, - threadFaultCount.incrementAndGet()); - poolProcessThread.start(); - // TODO : Add a config to restrict how many times we will restart this - // thread in a single session. - }); - poolProcessThread.start(); - } - - /** - * Adds a container report to appropriate inProgress Pool. - * @param containerReport -- Container report for a specific container from - * a datanode. - */ - public void handleContainerReport(DatanodeDetails datanodeDetails, - ContainerReportsProto containerReport) { - inProgressPoolListLock.readLock().lock(); - try { - String poolName = poolManager.getNodePool(datanodeDetails); - for (InProgressPool ppool : inProgressPoolList) { - if (ppool.getPoolName().equalsIgnoreCase(poolName)) { - ppool.handleContainerReport(datanodeDetails, containerReport); - return; - } - } - // TODO: Decide if we can do anything else with this report. - LOG.debug("Discarding the container report for pool {}. " + - "That pool is not currently in the pool reconciliation process." + - " Container Name: {}", poolName, datanodeDetails); - } catch (SCMException e) { - LOG.warn("Skipping processing container report from datanode {}, " - + "cause: failed to get the corresponding node pool", - datanodeDetails.toString(), e); - } finally { - inProgressPoolListLock.readLock().unlock(); - } - } - - /** - * Get in process pool list, used for testing. - * @return List of InProgressPool - */ - @VisibleForTesting - public List getInProcessPoolList() { - return inProgressPoolList; - } - - /** - * Shutdown the Container Replication Manager. - * @throws IOException if an I/O error occurs - */ - @Override - public void close() throws IOException { - setExit(); - HadoopExecutors.shutdown(executorService, LOG, 5, TimeUnit.SECONDS); - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/InProgressPool.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/InProgressPool.java deleted file mode 100644 index 4b547311da..0000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/InProgressPool.java +++ /dev/null @@ -1,255 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
    - * http://www.apache.org/licenses/LICENSE-2.0 - *
    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.container.replication; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.hdds.scm.node.NodePoolManager; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerInfo; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; -import org.apache.hadoop.util.Time; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.List; -import java.util.Map; -import java.util.UUID; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.function.Predicate; -import java.util.stream.Collectors; - -/** - * These are pools that are actively checking for replication status of the - * containers. - */ -public final class InProgressPool { - public static final Logger LOG = - LoggerFactory.getLogger(InProgressPool.class); - - private final PeriodicPool pool; - private final NodeManager nodeManager; - private final NodePoolManager poolManager; - private final ExecutorService executorService; - private final Map containerCountMap; - private final Map processedNodeSet; - private final long startTime; - private ProgressStatus status; - private AtomicInteger nodeCount; - private AtomicInteger nodeProcessed; - private AtomicInteger containerProcessedCount; - private long maxWaitTime; - /** - * Constructs an pool that is being processed. - * @param maxWaitTime - Maximum wait time in milliseconds. - * @param pool - Pool that we are working against - * @param nodeManager - Nodemanager - * @param poolManager - pool manager - * @param executorService - Shared Executor service. - */ - InProgressPool(long maxWaitTime, PeriodicPool pool, - NodeManager nodeManager, NodePoolManager poolManager, - ExecutorService executorService) { - Preconditions.checkNotNull(pool); - Preconditions.checkNotNull(nodeManager); - Preconditions.checkNotNull(poolManager); - Preconditions.checkNotNull(executorService); - Preconditions.checkArgument(maxWaitTime > 0); - this.pool = pool; - this.nodeManager = nodeManager; - this.poolManager = poolManager; - this.executorService = executorService; - this.containerCountMap = new ConcurrentHashMap<>(); - this.processedNodeSet = new ConcurrentHashMap<>(); - this.maxWaitTime = maxWaitTime; - startTime = Time.monotonicNow(); - } - - /** - * Returns periodic pool. - * - * @return PeriodicPool - */ - public PeriodicPool getPool() { - return pool; - } - - /** - * We are done if we have got reports from all nodes or we have - * done waiting for the specified time. - * - * @return true if we are done, false otherwise. - */ - public boolean isDone() { - return (nodeCount.get() == nodeProcessed.get()) || - (this.startTime + this.maxWaitTime) > Time.monotonicNow(); - } - - /** - * Gets the number of containers processed. 
- * - * @return int - */ - public int getContainerProcessedCount() { - return containerProcessedCount.get(); - } - - /** - * Returns the start time in milliseconds. - * - * @return - Start Time. - */ - public long getStartTime() { - return startTime; - } - - /** - * Get the number of nodes in this pool. - * - * @return - node count - */ - public int getNodeCount() { - return nodeCount.get(); - } - - /** - * Get the number of nodes that we have already processed container reports - * from. - * - * @return - Processed count. - */ - public int getNodeProcessed() { - return nodeProcessed.get(); - } - - /** - * Returns the current status. - * - * @return Status - */ - public ProgressStatus getStatus() { - return status; - } - - /** - * Starts the reconciliation process for all the nodes in the pool. - */ - public void startReconciliation() { - List datanodeDetailsList = - this.poolManager.getNodes(pool.getPoolName()); - if (datanodeDetailsList.size() == 0) { - LOG.error("Datanode list for {} is Empty. Pool with no nodes ? ", - pool.getPoolName()); - this.status = ProgressStatus.Error; - return; - } - - nodeProcessed = new AtomicInteger(0); - containerProcessedCount = new AtomicInteger(0); - nodeCount = new AtomicInteger(0); - this.status = ProgressStatus.InProgress; - this.getPool().setLastProcessedTime(Time.monotonicNow()); - } - - /** - * Queues a container Report for handling. This is done in a worker thread - * since decoding a container report might be compute intensive . We don't - * want to block since we have asked for bunch of container reports - * from a set of datanodes. - * - * @param containerReport - ContainerReport - */ - public void handleContainerReport(DatanodeDetails datanodeDetails, - ContainerReportsProto containerReport) { - if (status == ProgressStatus.InProgress) { - executorService.submit(processContainerReport(datanodeDetails, - containerReport)); - } else { - LOG.debug("Cannot handle container report when the pool is in {} status.", - status); - } - } - - private Runnable processContainerReport(DatanodeDetails datanodeDetails, - ContainerReportsProto reports) { - return () -> { - if (processedNodeSet.computeIfAbsent(datanodeDetails.getUuid(), - (k) -> true)) { - nodeProcessed.incrementAndGet(); - LOG.debug("Total Nodes processed : {} Node Name: {} ", nodeProcessed, - datanodeDetails.getUuid()); - for (ContainerInfo info : reports.getReportsList()) { - containerProcessedCount.incrementAndGet(); - LOG.debug("Total Containers processed: {} Container Name: {}", - containerProcessedCount.get(), info.getContainerID()); - - // Update the container map with count + 1 if the key exists or - // update the map with 1. Since this is a concurrentMap the - // computation and update is atomic. - containerCountMap.merge(info.getContainerID(), 1, Integer::sum); - } - } - }; - } - - /** - * Filter the containers based on specific rules. - * - * @param predicate -- Predicate to filter by - * @return A list of map entries. - */ - public List> filterContainer( - Predicate> predicate) { - return containerCountMap.entrySet().stream() - .filter(predicate).collect(Collectors.toList()); - } - - /** - * Used only for testing, calling this will abort container report - * processing. This is very dangerous call and should not be made by any users - */ - @VisibleForTesting - public void setDoneProcessing() { - nodeProcessed.set(nodeCount.get()); - } - - /** - * Returns the pool name. - * - * @return Name of the pool. 
- */ - String getPoolName() { - return pool.getPoolName(); - } - - public void finalizeReconciliation() { - status = ProgressStatus.Done; - //TODO: Add finalizing logic. This is where actual reconciliation happens. - } - - /** - * Current status of the computing replication status. - */ - public enum ProgressStatus { - InProgress, Done, Error - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/PeriodicPool.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/PeriodicPool.java deleted file mode 100644 index ef28aa78d0..0000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/PeriodicPool.java +++ /dev/null @@ -1,119 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
    - * http://www.apache.org/licenses/LICENSE-2.0 - *
    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.container.replication; - -import java.util.concurrent.atomic.AtomicLong; - -/** - * Periodic pool is a pool with a time stamp, this allows us to process pools - * based on a cyclic clock. - */ -public class PeriodicPool implements Comparable { - private final String poolName; - private long lastProcessedTime; - private AtomicLong totalProcessedCount; - - /** - * Constructs a periodic pool. - * - * @param poolName - Name of the pool - */ - public PeriodicPool(String poolName) { - this.poolName = poolName; - lastProcessedTime = 0; - totalProcessedCount = new AtomicLong(0); - } - - /** - * Get pool Name. - * @return PoolName - */ - public String getPoolName() { - return poolName; - } - - /** - * Compares this object with the specified object for order. Returns a - * negative integer, zero, or a positive integer as this object is less - * than, equal to, or greater than the specified object. - * - * @param o the object to be compared. - * @return a negative integer, zero, or a positive integer as this object is - * less than, equal to, or greater than the specified object. - * @throws NullPointerException if the specified object is null - * @throws ClassCastException if the specified object's type prevents it - * from being compared to this object. - */ - @Override - public int compareTo(PeriodicPool o) { - return Long.compare(this.lastProcessedTime, o.lastProcessedTime); - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - - PeriodicPool that = (PeriodicPool) o; - - return poolName.equals(that.poolName); - } - - @Override - public int hashCode() { - return poolName.hashCode(); - } - - /** - * Returns the Total Times we have processed this pool. - * - * @return processed count. - */ - public long getTotalProcessedCount() { - return totalProcessedCount.get(); - } - - /** - * Gets the last time we processed this pool. - * @return time in milliseconds - */ - public long getLastProcessedTime() { - return this.lastProcessedTime; - } - - - /** - * Sets the last processed time. - * - * @param lastProcessedTime - Long in milliseconds. - */ - - public void setLastProcessedTime(long lastProcessedTime) { - this.lastProcessedTime = lastProcessedTime; - } - - /* - * Increments the total processed count. - */ - public void incTotalProcessedCount() { - this.totalProcessedCount.incrementAndGet(); - } -} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java deleted file mode 100644 index 7bbe2efe57..0000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. 
The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.container.replication; -/* - This package contains routines that manage replication of a container. This - relies on container reports to understand the replication level of a - container - UnderReplicated, Replicated, OverReplicated -- and manages the - replication level based on that. - */ \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java index 4392633b16..72d7e946cc 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java @@ -123,12 +123,6 @@ public interface NodeManager extends StorageContainerNodeProtocol, */ SCMNodeMetric getNodeStat(DatanodeDetails datanodeDetails); - /** - * Returns the NodePoolManager associated with the NodeManager. - * @return NodePoolManager - */ - NodePoolManager getNodePoolManager(); - /** * Wait for the heartbeat is processed by NodeManager. * @return true if heartbeat has been processed. diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodePoolManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodePoolManager.java deleted file mode 100644 index 46faf9ca4d..0000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodePoolManager.java +++ /dev/null @@ -1,71 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.node; - -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; - -import java.io.Closeable; -import java.io.IOException; -import java.util.List; - -/** - * Interface that defines SCM NodePoolManager. - */ -public interface NodePoolManager extends Closeable { - - /** - * Add a node to a node pool. - * @param pool - name of the node pool. - * @param node - data node. - */ - void addNode(String pool, DatanodeDetails node) throws IOException; - - /** - * Remove a node from a node pool. 
- * @param pool - name of the node pool. - * @param node - data node. - * @throws SCMException - */ - void removeNode(String pool, DatanodeDetails node) - throws SCMException; - - /** - * Get a list of known node pools. - * @return a list of known node pool names or an empty list if not node pool - * is defined. - */ - List getNodePools(); - - /** - * Get all nodes of a node pool given the name of the node pool. - * @param pool - name of the node pool. - * @return a list of datanode ids or an empty list if the node pool was not - * found. - */ - List getNodes(String pool); - - /** - * Get the node pool name if the node has been added to a node pool. - * @param datanodeDetails - datanode ID. - * @return node pool name if it has been assigned. - * null if the node has not been assigned to any node pool yet. - */ - String getNodePool(DatanodeDetails datanodeDetails) throws SCMException; -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java index fc8b0137f3..adca8eae0c 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java @@ -53,7 +53,6 @@ import org.apache.hadoop.util.Time; import org.apache.hadoop.util.concurrent.HadoopExecutors; -import com.google.protobuf.GeneratedMessage; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -159,7 +158,6 @@ public class SCMNodeManager private ObjectName nmInfoBean; // Node pool manager. - private final SCMNodePoolManager nodePoolManager; private final StorageContainerManager scmManager; public static final Event DATANODE_COMMAND = @@ -210,7 +208,6 @@ public SCMNodeManager(OzoneConfiguration conf, String clusterID, registerMXBean(); - this.nodePoolManager = new SCMNodePoolManager(conf); this.scmManager = scmManager; } @@ -682,7 +679,6 @@ private void updateNodeStat(UUID dnId, NodeReportProto nodeReport) { @Override public void close() throws IOException { unregisterMXBean(); - nodePoolManager.close(); executorService.shutdown(); try { if (!executorService.awaitTermination(5, TimeUnit.SECONDS)) { @@ -760,20 +756,6 @@ public RegisteredCommand register( LOG.info("Leaving startup chill mode."); } - // TODO: define node pool policy for non-default node pool. - // For now, all nodes are added to the "DefaultNodePool" upon registration - // if it has not been added to any node pool yet. - try { - if (nodePoolManager.getNodePool(datanodeDetails) == null) { - nodePoolManager.addNode(SCMNodePoolManager.DEFAULT_NODEPOOL, - datanodeDetails); - } - } catch (IOException e) { - // TODO: make sure registration failure is handled correctly. 
- return RegisteredCommand.newBuilder() - .setErrorCode(ErrorCode.errorNodeNotPermitted) - .build(); - } // Updating Node Report, as registration is successful updateNodeStat(datanodeDetails.getUuid(), nodeReport); LOG.info("Data node with ID: {} Registered.", @@ -859,11 +841,6 @@ public SCMNodeMetric getNodeStat(DatanodeDetails datanodeDetails) { return new SCMNodeMetric(nodeStats.get(datanodeDetails.getUuid())); } - @Override - public NodePoolManager getNodePoolManager() { - return nodePoolManager; - } - @Override public Map getNodeCount() { Map nodeCountMap = new HashMap(); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodePoolManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodePoolManager.java deleted file mode 100644 index faf330ea1d..0000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodePoolManager.java +++ /dev/null @@ -1,269 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.node; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.utils.MetadataStore; -import org.apache.hadoop.utils.MetadataStoreBuilder; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Set; -import java.util.concurrent.locks.ReadWriteLock; -import java.util.concurrent.locks.ReentrantReadWriteLock; -import java.util.stream.Collectors; - -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_DB_CACHE_SIZE_DEFAULT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_DB_CACHE_SIZE_MB; -import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes - .FAILED_TO_FIND_NODE_IN_POOL; -import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes - .FAILED_TO_LOAD_NODEPOOL; -import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath; -import static org.apache.hadoop.ozone.OzoneConsts.NODEPOOL_DB; - -/** - * SCM node pool manager that manges node pools. 
- */ -public final class SCMNodePoolManager implements NodePoolManager { - - private static final Logger LOG = - LoggerFactory.getLogger(SCMNodePoolManager.class); - private static final List EMPTY_NODE_LIST = - new ArrayList<>(); - private static final List EMPTY_NODEPOOL_LIST = new ArrayList<>(); - public static final String DEFAULT_NODEPOOL = "DefaultNodePool"; - - // DB that saves the node to node pool mapping. - private MetadataStore nodePoolStore; - - // In-memory node pool to nodes mapping - private HashMap> nodePools; - - // Read-write lock for nodepool operations - private ReadWriteLock lock; - - /** - * Construct SCMNodePoolManager class that manages node to node pool mapping. - * @param conf - configuration. - * @throws IOException - */ - public SCMNodePoolManager(final OzoneConfiguration conf) - throws IOException { - final int cacheSize = conf.getInt(OZONE_SCM_DB_CACHE_SIZE_MB, - OZONE_SCM_DB_CACHE_SIZE_DEFAULT); - File metaDir = getOzoneMetaDirPath(conf); - String scmMetaDataDir = metaDir.getPath(); - File nodePoolDBPath = new File(scmMetaDataDir, NODEPOOL_DB); - nodePoolStore = MetadataStoreBuilder.newBuilder() - .setConf(conf) - .setDbFile(nodePoolDBPath) - .setCacheSize(cacheSize * OzoneConsts.MB) - .build(); - nodePools = new HashMap<>(); - lock = new ReentrantReadWriteLock(); - init(); - } - - /** - * Initialize the in-memory store based on persist store from level db. - * No lock is needed as init() is only invoked by constructor. - * @throws SCMException - */ - private void init() throws SCMException { - try { - nodePoolStore.iterate(null, (key, value) -> { - try { - DatanodeDetails nodeId = DatanodeDetails.getFromProtoBuf( - HddsProtos.DatanodeDetailsProto.PARSER.parseFrom(key)); - String poolName = DFSUtil.bytes2String(value); - - Set nodePool = null; - if (nodePools.containsKey(poolName)) { - nodePool = nodePools.get(poolName); - } else { - nodePool = new HashSet<>(); - nodePools.put(poolName, nodePool); - } - nodePool.add(nodeId); - if (LOG.isDebugEnabled()) { - LOG.debug("Adding node: {} to node pool: {}", - nodeId, poolName); - } - } catch (IOException e) { - LOG.warn("Can't add a datanode to node pool, continue next..."); - } - return true; - }); - } catch (IOException e) { - LOG.error("Loading node pool error " + e); - throw new SCMException("Failed to load node pool", - FAILED_TO_LOAD_NODEPOOL); - } - } - - /** - * Add a datanode to a node pool. - * @param pool - name of the node pool. - * @param node - name of the datanode. - */ - @Override - public void addNode(final String pool, final DatanodeDetails node) - throws IOException { - Preconditions.checkNotNull(pool, "pool name is null"); - Preconditions.checkNotNull(node, "node is null"); - lock.writeLock().lock(); - try { - // add to the persistent store - nodePoolStore.put(node.getProtoBufMessage().toByteArray(), - DFSUtil.string2Bytes(pool)); - - // add to the in-memory store - Set nodePool = null; - if (nodePools.containsKey(pool)) { - nodePool = nodePools.get(pool); - } else { - nodePool = new HashSet(); - nodePools.put(pool, nodePool); - } - nodePool.add(node); - } finally { - lock.writeLock().unlock(); - } - } - - /** - * Remove a datanode from a node pool. - * @param pool - name of the node pool. - * @param node - datanode id. 
- * @throws SCMException - */ - @Override - public void removeNode(final String pool, final DatanodeDetails node) - throws SCMException { - Preconditions.checkNotNull(pool, "pool name is null"); - Preconditions.checkNotNull(node, "node is null"); - lock.writeLock().lock(); - try { - // Remove from the persistent store - byte[] kName = node.getProtoBufMessage().toByteArray(); - byte[] kData = nodePoolStore.get(kName); - if (kData == null) { - throw new SCMException(String.format("Unable to find node %s from" + - " pool %s in DB.", DFSUtil.bytes2String(kName), pool), - FAILED_TO_FIND_NODE_IN_POOL); - } - nodePoolStore.delete(kName); - - // Remove from the in-memory store - if (nodePools.containsKey(pool)) { - Set nodePool = nodePools.get(pool); - nodePool.remove(node); - } else { - throw new SCMException(String.format("Unable to find node %s from" + - " pool %s in MAP.", DFSUtil.bytes2String(kName), pool), - FAILED_TO_FIND_NODE_IN_POOL); - } - } catch (IOException e) { - throw new SCMException("Failed to remove node " + node.toString() - + " from node pool " + pool, e, - SCMException.ResultCodes.IO_EXCEPTION); - } finally { - lock.writeLock().unlock(); - } - } - - /** - * Get all the node pools. - * @return all the node pools. - */ - @Override - public List getNodePools() { - lock.readLock().lock(); - try { - if (!nodePools.isEmpty()) { - return nodePools.keySet().stream().collect(Collectors.toList()); - } else { - return EMPTY_NODEPOOL_LIST; - } - } finally { - lock.readLock().unlock(); - } - } - - /** - * Get all datanodes of a specific node pool. - * @param pool - name of the node pool. - * @return all datanodes of the specified node pool. - */ - @Override - public List getNodes(final String pool) { - Preconditions.checkNotNull(pool, "pool name is null"); - if (nodePools.containsKey(pool)) { - return nodePools.get(pool).stream().collect(Collectors.toList()); - } else { - return EMPTY_NODE_LIST; - } - } - - /** - * Get the node pool name if the node has been added to a node pool. - * @param datanodeDetails - datanode ID. - * @return node pool name if it has been assigned. - * null if the node has not been assigned to any node pool yet. - * TODO: Put this in a in-memory map if performance is an issue. - */ - @Override - public String getNodePool(final DatanodeDetails datanodeDetails) - throws SCMException { - Preconditions.checkNotNull(datanodeDetails, "node is null"); - try { - byte[] result = nodePoolStore.get( - datanodeDetails.getProtoBufMessage().toByteArray()); - return result == null ? null : DFSUtil.bytes2String(result); - } catch (IOException e) { - throw new SCMException("Failed to get node pool for node " - + datanodeDetails.toString(), e, - SCMException.ResultCodes.IO_EXCEPTION); - } - } - - /** - * Close node pool level db store. 
- * @throws IOException - */ - @Override - public void close() throws IOException { - nodePoolStore.close(); - } -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java index 8c59462b40..80b5d6e182 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java @@ -19,7 +19,6 @@ import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat; import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.hdds.scm.node.NodePoolManager; import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; @@ -273,11 +272,6 @@ public SCMNodeMetric getNodeStat(DatanodeDetails datanodeDetails) { return new SCMNodeMetric(nodeMetricMap.get(datanodeDetails.getUuid())); } - @Override - public NodePoolManager getNodePoolManager() { - return Mockito.mock(NodePoolManager.class); - } - /** * Used for testing. * diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodePoolManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodePoolManager.java deleted file mode 100644 index 8f412dedda..0000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodePoolManager.java +++ /dev/null @@ -1,160 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hdds.scm.node; - -import org.apache.commons.collections.ListUtils; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdds.scm.container.placement.algorithms - .ContainerPlacementPolicy; -import org.apache.hadoop.hdds.scm.container.placement.algorithms - .SCMContainerPlacementCapacity; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.test.PathUtils; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.IOException; -import java.util.Collections; -import java.util.List; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; - -/** - * Test for SCM node pool manager. - */ -public class TestSCMNodePoolManager { - private static final Logger LOG = - LoggerFactory.getLogger(TestSCMNodePoolManager.class); - - @Rule - public ExpectedException thrown = ExpectedException.none(); - - private final File testDir = PathUtils.getTestDir( - TestSCMNodePoolManager.class); - - SCMNodePoolManager createNodePoolManager(OzoneConfiguration conf) - throws IOException { - conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, - testDir.getAbsolutePath()); - conf.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY, - SCMContainerPlacementCapacity.class, ContainerPlacementPolicy.class); - return new SCMNodePoolManager(conf); - } - - /** - * Test default node pool. - * - * @throws IOException - */ - @Test - public void testDefaultNodePool() throws IOException { - OzoneConfiguration conf = new OzoneConfiguration(); - try { - final String defaultPool = "DefaultPool"; - NodePoolManager npMgr = createNodePoolManager(conf); - - final int nodeCount = 4; - final List nodes = TestUtils - .getListOfDatanodeDetails(nodeCount); - assertEquals(0, npMgr.getNodePools().size()); - for (DatanodeDetails node: nodes) { - npMgr.addNode(defaultPool, node); - } - List nodesRetrieved = npMgr.getNodes(defaultPool); - assertEquals(nodeCount, nodesRetrieved.size()); - assertTwoDatanodeListsEqual(nodes, nodesRetrieved); - - DatanodeDetails nodeRemoved = nodes.remove(2); - npMgr.removeNode(defaultPool, nodeRemoved); - List nodesAfterRemove = npMgr.getNodes(defaultPool); - assertTwoDatanodeListsEqual(nodes, nodesAfterRemove); - - List nonExistSet = npMgr.getNodes("NonExistSet"); - assertEquals(0, nonExistSet.size()); - } finally { - FileUtil.fullyDelete(testDir); - } - } - - - /** - * Test default node pool reload. 
- * - * @throws IOException - */ - @Test - public void testDefaultNodePoolReload() throws IOException { - OzoneConfiguration conf = new OzoneConfiguration(); - final String defaultPool = "DefaultPool"; - final int nodeCount = 4; - final List nodes = TestUtils - .getListOfDatanodeDetails(nodeCount); - - try { - try { - SCMNodePoolManager npMgr = createNodePoolManager(conf); - assertEquals(0, npMgr.getNodePools().size()); - for (DatanodeDetails node : nodes) { - npMgr.addNode(defaultPool, node); - } - List nodesRetrieved = npMgr.getNodes(defaultPool); - assertEquals(nodeCount, nodesRetrieved.size()); - assertTwoDatanodeListsEqual(nodes, nodesRetrieved); - npMgr.close(); - } finally { - LOG.info("testDefaultNodePoolReload: Finish adding nodes to pool" + - " and close."); - } - - // try reload with a new NodePoolManager instance - try { - SCMNodePoolManager npMgr = createNodePoolManager(conf); - List nodesRetrieved = npMgr.getNodes(defaultPool); - assertEquals(nodeCount, nodesRetrieved.size()); - assertTwoDatanodeListsEqual(nodes, nodesRetrieved); - } finally { - LOG.info("testDefaultNodePoolReload: Finish reloading node pool."); - } - } finally { - FileUtil.fullyDelete(testDir); - } - } - - /** - * Compare and verify that two datanode lists are equal. - * @param list1 - datanode list 1. - * @param list2 - datanode list 2. - */ - private void assertTwoDatanodeListsEqual(List list1, - List list2) { - assertEquals(list1.size(), list2.size()); - Collections.sort(list1); - Collections.sort(list2); - assertTrue(ListUtils.isEqualList(list1, list2)); - } -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java index 072d821247..1a4dcd7ad2 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java @@ -21,7 +21,6 @@ import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat; import org.apache.hadoop.hdds.scm.node.CommandQueue; import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.hdds.scm.node.NodePoolManager; import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; @@ -201,10 +200,6 @@ public SCMNodeMetric getNodeStat(DatanodeDetails dd) { return null; } - @Override - public NodePoolManager getNodePoolManager() { - return Mockito.mock(NodePoolManager.class); - } /** * Wait for the heartbeat is processed by NodeManager. diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodePoolManagerMock.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodePoolManagerMock.java deleted file mode 100644 index ffcd752e84..0000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodePoolManagerMock.java +++ /dev/null @@ -1,133 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. 
The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
    - * http://www.apache.org/licenses/LICENSE-2.0 - *
    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.container.testutils; - -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.scm.node.NodePoolManager; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; - -/** - * Pool Manager replication mock. - */ -public class ReplicationNodePoolManagerMock implements NodePoolManager { - - private final Map nodeMemberShip; - - /** - * A node pool manager for testing. - */ - public ReplicationNodePoolManagerMock() { - nodeMemberShip = new HashMap<>(); - } - - /** - * Add a node to a node pool. - * - * @param pool - name of the node pool. - * @param node - data node. - */ - @Override - public void addNode(String pool, DatanodeDetails node) { - nodeMemberShip.put(node, pool); - } - - /** - * Remove a node from a node pool. - * - * @param pool - name of the node pool. - * @param node - data node. - * @throws SCMException - */ - @Override - public void removeNode(String pool, DatanodeDetails node) - throws SCMException { - nodeMemberShip.remove(node); - - } - - /** - * Get a list of known node pools. - * - * @return a list of known node pool names or an empty list if not node pool - * is defined. - */ - @Override - public List getNodePools() { - Set poolSet = new HashSet<>(); - for (Map.Entry entry : nodeMemberShip.entrySet()) { - poolSet.add(entry.getValue()); - } - return new ArrayList<>(poolSet); - - } - - /** - * Get all nodes of a node pool given the name of the node pool. - * - * @param pool - name of the node pool. - * @return a list of datanode ids or an empty list if the node pool was not - * found. - */ - @Override - public List getNodes(String pool) { - Set datanodeSet = new HashSet<>(); - for (Map.Entry entry : nodeMemberShip.entrySet()) { - if (entry.getValue().equals(pool)) { - datanodeSet.add(entry.getKey()); - } - } - return new ArrayList<>(datanodeSet); - } - - /** - * Get the node pool name if the node has been added to a node pool. - * - * @param datanodeDetails DatanodeDetails. - * @return node pool name if it has been assigned. null if the node has not - * been assigned to any node pool yet. - */ - @Override - public String getNodePool(DatanodeDetails datanodeDetails) { - return nodeMemberShip.get(datanodeDetails); - } - - /** - * Closes this stream and releases any system resources associated - * with it. If the stream is already closed then invoking this - * method has no effect. - *
    - *
    As noted in {@link AutoCloseable#close()}, cases where the - * close may fail require careful attention. It is strongly advised - * to relinquish the underlying resources and to internally - * mark the {@code Closeable} as closed, prior to throwing - * the {@code IOException}. - * - * @throws IOException if an I/O error occurs - */ - @Override - public void close() throws IOException { - - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java index 4d70af84a2..b4ed2b12c2 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java @@ -51,12 +51,9 @@ import java.util.HashMap; import java.util.UUID; -import static org.apache.hadoop.ozone.OzoneConsts.BLOCK_DB; import static org.apache.hadoop.ozone.OzoneConsts.SCM_CONTAINER_DB; import static org.apache.hadoop.ozone.OzoneConsts.KB; -import static org.apache.hadoop.ozone.OzoneConsts.NODEPOOL_DB; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; /** * This class tests the CLI that transforms container into SQLite DB files. @@ -176,34 +173,6 @@ public void shutdown() throws InterruptedException { } } - @Test - public void testConvertNodepoolDB() throws Exception { - String dbOutPath = GenericTestUtils.getTempPath( - UUID.randomUUID() + "/out_sql.db"); - String dbRootPath = conf.get(OzoneConfigKeys.OZONE_METADATA_DIRS); - String dbPath = dbRootPath + "/" + NODEPOOL_DB; - String[] args = {"-p", dbPath, "-o", dbOutPath}; - - cli.run(args); - - // verify the sqlite db - HashMap expectedPool = new HashMap<>(); - for (DatanodeDetails dnid : nodeManager.getAllNodes()) { - expectedPool.put(dnid.getUuidString(), "DefaultNodePool"); - } - Connection conn = connectDB(dbOutPath); - String sql = "SELECT * FROM nodePool"; - ResultSet rs = executeQuery(conn, sql); - while(rs.next()) { - String datanodeUUID = rs.getString("datanodeUUID"); - String poolName = rs.getString("poolName"); - assertTrue(expectedPool.remove(datanodeUUID).equals(poolName)); - } - assertEquals(0, expectedPool.size()); - - Files.delete(Paths.get(dbOutPath)); - } - @Test public void testConvertContainerDB() throws Exception { String dbOutPath = GenericTestUtils.getTempPath( diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java index 2bd43fb93a..edc0d7b597 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java @@ -19,7 +19,6 @@ import com.google.common.base.Preconditions; import com.google.common.primitives.Longs; -import com.google.protobuf.ByteString; import org.apache.commons.cli.BasicParser; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.Option; @@ -60,13 +59,11 @@ import java.util.HashSet; import java.util.Set; -import static org.apache.hadoop.ozone.OzoneConsts.BLOCK_DB; import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_SUFFIX; import static org.apache.hadoop.ozone.OzoneConsts.KSM_DB_NAME; import static org.apache.hadoop.ozone.OzoneConsts.KSM_USER_PREFIX; import static org.apache.hadoop.ozone.OzoneConsts.KSM_BUCKET_PREFIX; import static 
org.apache.hadoop.ozone.OzoneConsts.KSM_VOLUME_PREFIX; -import static org.apache.hadoop.ozone.OzoneConsts.NODEPOOL_DB; import static org.apache.hadoop.ozone.OzoneConsts.OPEN_CONTAINERS_DB; /** @@ -111,15 +108,6 @@ public class SQLCLI extends Configured implements Tool { private static final String INSERT_CONTAINER_MEMBERS = "INSERT INTO containerMembers (containerName, datanodeUUID) " + "VALUES (\"%s\", \"%s\")"; - // for nodepool.db - private static final String CREATE_NODE_POOL = - "CREATE TABLE nodePool (" + - "datanodeUUID TEXT NOT NULL," + - "poolName TEXT NOT NULL," + - "PRIMARY KEY(datanodeUUID, poolName))"; - private static final String INSERT_NODE_POOL = - "INSERT INTO nodePool (datanodeUUID, poolName) " + - "VALUES (\"%s\", \"%s\")"; // and reuse CREATE_DATANODE_INFO and INSERT_DATANODE_INFO // for openContainer.db private static final String CREATE_OPEN_CONTAINER = @@ -285,9 +273,6 @@ public int run(String[] args) throws Exception { if (dbName.toString().endsWith(CONTAINER_DB_SUFFIX)) { LOG.info("Converting container DB"); convertContainerDB(dbPath, outPath); - } else if (dbName.toString().equals(NODEPOOL_DB)) { - LOG.info("Converting node pool DB"); - convertNodePoolDB(dbPath, outPath); } else if (dbName.toString().equals(OPEN_CONTAINERS_DB)) { LOG.info("Converting open container DB"); convertOpenContainerDB(dbPath, outPath); @@ -543,66 +528,7 @@ private void insertContainerDB(Connection conn, long containerID, } LOG.info("Insertion completed."); } - /** - * Converts nodePool.db to sqlite. The schema of sql db: - * two tables, nodePool and datanodeInfo (the same datanode Info as for - * container.db). - * - * nodePool - * --------------------------------------------------------- - * datanodeUUID* | poolName* - * --------------------------------------------------------- - * - * datanodeInfo: - * --------------------------------------------------------- - * hostname | datanodeUUid* | xferPort | ipcPort - * --------------------------------------------------------- - * - * -------------------------------- - * |containerPort - * -------------------------------- - * - * @param dbPath path to container db. - * @param outPath path to output sqlite - * @throws IOException throws exception. 
- */ - private void convertNodePoolDB(Path dbPath, Path outPath) throws Exception { - LOG.info("Create table for sql node pool db."); - File dbFile = dbPath.toFile(); - try (MetadataStore dbStore = MetadataStoreBuilder.newBuilder() - .setConf(conf).setDbFile(dbFile).build(); - Connection conn = connectDB(outPath.toString())) { - executeSQL(conn, CREATE_NODE_POOL); - executeSQL(conn, CREATE_DATANODE_INFO); - dbStore.iterate(null, (key, value) -> { - DatanodeDetails nodeId = DatanodeDetails - .getFromProtoBuf(HddsProtos.DatanodeDetailsProto - .PARSER.parseFrom(key)); - String blockPool = DFSUtil.bytes2String(value); - try { - insertNodePoolDB(conn, blockPool, nodeId); - return true; - } catch (SQLException e) { - throw new IOException(e); - } - }); - } - } - - private void insertNodePoolDB(Connection conn, String blockPool, - DatanodeDetails datanodeDetails) throws SQLException { - String insertNodePool = String.format(INSERT_NODE_POOL, - datanodeDetails.getUuidString(), blockPool); - executeSQL(conn, insertNodePool); - - String insertDatanodeDetails = String - .format(INSERT_DATANODE_INFO, datanodeDetails.getHostName(), - datanodeDetails.getUuidString(), datanodeDetails.getIpAddress(), - datanodeDetails.getPort(DatanodeDetails.Port.Name.STANDALONE) - .getValue()); - executeSQL(conn, insertDatanodeDetails); - } /** * Convert openContainer.db to sqlite db file. This is rather simple db, From e9ec3d78f520a8543dc77d763d4b358aa608bae8 Mon Sep 17 00:00:00 2001 From: Xiaoyu Yao Date: Wed, 27 Jun 2018 13:35:30 -0700 Subject: [PATCH 63/70] HDDS-186. Create under replicated queue. Contributed by Ajay Kumar. --- .../replication/ReplicationQueue.java | 76 ++++++++++ .../replication/ReplicationRequest.java | 106 ++++++++++++++ .../container/replication/package-info.java | 23 +++ .../replication/TestReplicationQueue.java | 134 ++++++++++++++++++ .../container/replication/package-info.java | 23 +++ 5 files changed, 362 insertions(+) create mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationQueue.java create mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationRequest.java create mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/package-info.java create mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationQueue.java create mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationQueue.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationQueue.java new file mode 100644 index 0000000000..e0a235122e --- /dev/null +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationQueue.java @@ -0,0 +1,76 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.container.replication; + +import java.util.List; +import java.util.PriorityQueue; +import java.util.Queue; + +/** + * Priority queue to handle under-replicated and over replicated containers + * in ozone. ReplicationManager will consume these messages and decide + * accordingly. + */ +public class ReplicationQueue { + + private final Queue queue; + + ReplicationQueue() { + queue = new PriorityQueue<>(); + } + + public synchronized boolean add(ReplicationRequest repObj) { + if (this.queue.contains(repObj)) { + // Remove the earlier message and insert this one + this.queue.remove(repObj); + } + return this.queue.add(repObj); + } + + public synchronized boolean remove(ReplicationRequest repObj) { + return queue.remove(repObj); + } + + /** + * Retrieves, but does not remove, the head of this queue, + * or returns {@code null} if this queue is empty. + * + * @return the head of this queue, or {@code null} if this queue is empty + */ + public synchronized ReplicationRequest peek() { + return queue.peek(); + } + + /** + * Retrieves and removes the head of this queue, + * or returns {@code null} if this queue is empty. + * + * @return the head of this queue, or {@code null} if this queue is empty + */ + public synchronized ReplicationRequest poll() { + return queue.poll(); + } + + public synchronized boolean removeAll(List repObjs) { + return queue.removeAll(repObjs); + } + + public int size() { + return queue.size(); + } +} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationRequest.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationRequest.java new file mode 100644 index 0000000000..a6ccce13e0 --- /dev/null +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationRequest.java @@ -0,0 +1,106 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.container.replication; + +import java.io.Serializable; +import org.apache.commons.lang3.builder.EqualsBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; + +/** + * Wrapper class for hdds replication queue. Implements its natural + * ordering for priority queue. 
+ */ +public class ReplicationRequest implements Comparable, + Serializable { + private final long containerId; + private final short replicationCount; + private final short expecReplicationCount; + private final long timestamp; + + public ReplicationRequest(long containerId, short replicationCount, + long timestamp, short expecReplicationCount) { + this.containerId = containerId; + this.replicationCount = replicationCount; + this.timestamp = timestamp; + this.expecReplicationCount = expecReplicationCount; + } + + /** + * Compares this object with the specified object for order. Returns a + * negative integer, zero, or a positive integer as this object is less + * than, equal to, or greater than the specified object. + * @param o the object to be compared. + * @return a negative integer, zero, or a positive integer as this object + * is less than, equal to, or greater than the specified object. + * @throws NullPointerException if the specified object is null + * @throws ClassCastException if the specified object's type prevents it + * from being compared to this object. + */ + @Override + public int compareTo(ReplicationRequest o) { + if (o == null) { + return 1; + } + if (this == o) { + return 0; + } + int retVal = Integer + .compare(getReplicationCount() - getExpecReplicationCount(), + o.getReplicationCount() - o.getExpecReplicationCount()); + if (retVal != 0) { + return retVal; + } + return Long.compare(getTimestamp(), o.getTimestamp()); + } + + @Override + public int hashCode() { + return new HashCodeBuilder(91, 1011) + .append(getContainerId()) + .toHashCode(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + ReplicationRequest that = (ReplicationRequest) o; + return new EqualsBuilder().append(getContainerId(), that.getContainerId()) + .isEquals(); + } + + public long getContainerId() { + return containerId; + } + + public short getReplicationCount() { + return replicationCount; + } + + public long getTimestamp() { + return timestamp; + } + + public short getExpecReplicationCount() { + return expecReplicationCount; + } +} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/package-info.java new file mode 100644 index 0000000000..7f335e37c1 --- /dev/null +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/package-info.java @@ -0,0 +1,23 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.container.replication; + +/** + * Ozone Container replicaton related classes. 
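A minimal usage sketch of the two classes added above. This sketch is not part of the HDDS-186 patch; the class name ReplicationQueueOrderingSketch and the sample container ids are invented for illustration, and it has to sit in the org.apache.hadoop.ozone.container.replication package because the ReplicationQueue constructor added above is package-private. It shows the two behaviours the added code implements: requests are ordered by how far a container falls below its expected replication count, and a new request for a container already in the queue replaces the earlier one, since ReplicationRequest equality is based on the container id alone.

package org.apache.hadoop.ozone.container.replication;

// Illustrative sketch only -- not part of HDDS-186.
public final class ReplicationQueueOrderingSketch {
  public static void main(String[] args) {
    ReplicationQueue queue = new ReplicationQueue();

    // Container 1 has 2 of 3 expected replicas (deficit of 1).
    queue.add(new ReplicationRequest(1L, (short) 2, 100L, (short) 3));
    // Container 2 has 0 of 3 expected replicas; it is the most urgent.
    queue.add(new ReplicationRequest(2L, (short) 0, 200L, (short) 3));
    // A newer request for container 1 replaces the earlier one, because
    // equals()/hashCode() use only the container id.
    queue.add(new ReplicationRequest(1L, (short) 1, 300L, (short) 3));

    System.out.println(queue.size());                  // prints 2
    System.out.println(queue.poll().getContainerId()); // prints 2 (largest deficit first)
    System.out.println(queue.poll().getContainerId()); // prints 1
  }
}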
+ */ \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationQueue.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationQueue.java new file mode 100644 index 0000000000..6d74c683ee --- /dev/null +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationQueue.java @@ -0,0 +1,134 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.container.replication; + +import java.util.Random; +import java.util.UUID; +import org.apache.hadoop.util.Time; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +/** + * Test class for ReplicationQueue. + */ +public class TestReplicationQueue { + + private ReplicationQueue replicationQueue; + private Random random; + + @Before + public void setUp() { + replicationQueue = new ReplicationQueue(); + random = new Random(); + } + + @Test + public void testDuplicateAddOp() { + long contId = random.nextLong(); + String nodeId = UUID.randomUUID().toString(); + ReplicationRequest obj1, obj2, obj3; + long time = Time.monotonicNow(); + obj1 = new ReplicationRequest(contId, (short) 2, time, (short) 3); + obj2 = new ReplicationRequest(contId, (short) 2, time + 1, (short) 3); + obj3 = new ReplicationRequest(contId, (short) 1, time+2, (short) 3); + + replicationQueue.add(obj1); + replicationQueue.add(obj2); + replicationQueue.add(obj3); + Assert.assertEquals("Should add only 1 msg as second one is duplicate", + 1, replicationQueue.size()); + ReplicationRequest temp = replicationQueue.poll(); + Assert.assertEquals(temp, obj3); + } + + @Test + public void testPollOp() { + long contId = random.nextLong(); + String nodeId = UUID.randomUUID().toString(); + ReplicationRequest msg1, msg2, msg3, msg4, msg5; + msg1 = new ReplicationRequest(contId, (short) 1, Time.monotonicNow(), + (short) 3); + long time = Time.monotonicNow(); + msg2 = new ReplicationRequest(contId + 1, (short) 4, time, (short) 3); + msg3 = new ReplicationRequest(contId + 2, (short) 0, time, (short) 3); + msg4 = new ReplicationRequest(contId, (short) 2, time, (short) 3); + // Replication message for same container but different nodeId + msg5 = new ReplicationRequest(contId + 1, (short) 2, time, (short) 3); + + replicationQueue.add(msg1); + replicationQueue.add(msg2); + replicationQueue.add(msg3); + replicationQueue.add(msg4); + replicationQueue.add(msg5); + Assert.assertEquals("Should have 3 objects", + 3, replicationQueue.size()); + + // Since Priority queue orders messages according to replication count, + // message with lowest replication should be first + ReplicationRequest temp; + temp = 
replicationQueue.poll(); + Assert.assertEquals("Should have 2 objects", + 2, replicationQueue.size()); + Assert.assertEquals(temp, msg3); + + temp = replicationQueue.poll(); + Assert.assertEquals("Should have 1 objects", + 1, replicationQueue.size()); + Assert.assertEquals(temp, msg5); + + // Message 2 should be ordered before message 5 as both have same replication + // number but message 2 has earlier timestamp. + temp = replicationQueue.poll(); + Assert.assertEquals("Should have 0 objects", + replicationQueue.size(), 0); + Assert.assertEquals(temp, msg4); + } + + @Test + public void testRemoveOp() { + long contId = random.nextLong(); + String nodeId = UUID.randomUUID().toString(); + ReplicationRequest obj1, obj2, obj3; + obj1 = new ReplicationRequest(contId, (short) 1, Time.monotonicNow(), + (short) 3); + obj2 = new ReplicationRequest(contId + 1, (short) 2, Time.monotonicNow(), + (short) 3); + obj3 = new ReplicationRequest(contId + 2, (short) 3, Time.monotonicNow(), + (short) 3); + + replicationQueue.add(obj1); + replicationQueue.add(obj2); + replicationQueue.add(obj3); + Assert.assertEquals("Should have 3 objects", + 3, replicationQueue.size()); + + replicationQueue.remove(obj3); + Assert.assertEquals("Should have 2 objects", + 2, replicationQueue.size()); + + replicationQueue.remove(obj2); + Assert.assertEquals("Should have 1 objects", + 1, replicationQueue.size()); + + replicationQueue.remove(obj1); + Assert.assertEquals("Should have 0 objects", + 0, replicationQueue.size()); + } + +} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java new file mode 100644 index 0000000000..5b1fd0f43a --- /dev/null +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java @@ -0,0 +1,23 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +/** + * SCM Testing and Mocking Utils. + */ +package org.apache.hadoop.ozone.container.replication; +// Test classes for Replication functionality. \ No newline at end of file From 1e30547642c7c6c014745862dd06f90f091f90b6 Mon Sep 17 00:00:00 2001 From: Xiaoyu Yao Date: Wed, 27 Jun 2018 13:56:45 -0700 Subject: [PATCH 64/70] HDDS-170. Fix TestBlockDeletingService#testBlockDeletionTimeout. Contributed by Lokesh Jain. 
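
In essence, BackgroundService now resolves the task timeout with the TimeUnit supplied at construction (taskResultFuture.get(serviceTimeout, unit)) instead of always assuming milliseconds, and BlockDeletingService passes that unit through. A minimal sketch of the resulting usage, mirroring the test change in this patch and assuming containerManager and conf are prepared as in TestBlockDeletingService:

    // Construct the service with an explicit unit so a 1 ns timeout is
    // honoured rather than being read as 1 ms (hedged sketch; setup of
    // containerManager and conf is assumed from the test).
    long serviceInterval = TimeUnit.MILLISECONDS.toNanos(1000);
    long serviceTimeout = 1; // interpreted in the unit passed below
    BlockDeletingService svc = new BlockDeletingService(containerManager,
        serviceInterval, serviceTimeout, TimeUnit.NANOSECONDS, conf);
    svc.start();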
--- .../org/apache/hadoop/utils/BackgroundService.java | 2 +- .../statemachine/background/BlockDeletingService.java | 8 ++++---- .../ozone/container/ozoneimpl/OzoneContainer.java | 2 +- .../testutils/BlockDeletingServiceTestImpl.java | 4 ++-- .../container/common/TestBlockDeletingService.java | 11 +++++++---- 5 files changed, 15 insertions(+), 12 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/BackgroundService.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/BackgroundService.java index 431da64094..5718008b41 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/BackgroundService.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/BackgroundService.java @@ -126,7 +126,7 @@ public synchronized void run() { try { // Collect task results BackgroundTaskResult result = serviceTimeout > 0 - ? taskResultFuture.get(serviceTimeout, TimeUnit.MILLISECONDS) + ? taskResultFuture.get(serviceTimeout, unit) : taskResultFuture.get(); if (LOG.isDebugEnabled()) { LOG.debug("task execution result size {}", result.getSize()); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/background/BlockDeletingService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/background/BlockDeletingService.java index 63f57b4845..bff591367c 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/background/BlockDeletingService.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/background/BlockDeletingService.java @@ -83,10 +83,10 @@ public class BlockDeletingService extends BackgroundService{ private final static int BLOCK_DELETING_SERVICE_CORE_POOL_SIZE = 10; public BlockDeletingService(ContainerManager containerManager, - long serviceInterval, long serviceTimeout, Configuration conf) { - super("BlockDeletingService", serviceInterval, - TimeUnit.MILLISECONDS, BLOCK_DELETING_SERVICE_CORE_POOL_SIZE, - serviceTimeout); + long serviceInterval, long serviceTimeout, TimeUnit unit, + Configuration conf) { + super("BlockDeletingService", serviceInterval, unit, + BLOCK_DELETING_SERVICE_CORE_POOL_SIZE, serviceTimeout); this.containerManager = containerManager; this.conf = conf; this.blockLimitPerTask = conf.getInt( diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java index 4156f5a4e0..7931f6f7dd 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java @@ -122,7 +122,7 @@ public OzoneContainer( OZONE_BLOCK_DELETING_SERVICE_TIMEOUT, OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT, TimeUnit.MILLISECONDS); this.blockDeletingService = new BlockDeletingService(manager, - svcInterval, serviceTimeout, ozoneConfig); + svcInterval, serviceTimeout, TimeUnit.MILLISECONDS, ozoneConfig); this.dispatcher = new Dispatcher(manager, this.ozoneConfig); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java 
index 86888aa790..7c129457fd 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java @@ -44,8 +44,8 @@ public class BlockDeletingServiceTestImpl public BlockDeletingServiceTestImpl(ContainerManager containerManager, int serviceInterval, Configuration conf) { - super(containerManager, serviceInterval, - SERVICE_TIMEOUT_IN_MILLISECONDS, conf); + super(containerManager, serviceInterval, SERVICE_TIMEOUT_IN_MILLISECONDS, + TimeUnit.MILLISECONDS, conf); } @VisibleForTesting diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java index 0686e4e5d3..8d01c806a3 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java @@ -280,10 +280,11 @@ public void testBlockDeletionTimeout() throws Exception { ContainerManager containerManager = createContainerManager(conf); createToDeleteBlocks(containerManager, conf, 1, 3, 1, chunksDir); - // set timeout value as 1ms to trigger timeout behavior + // set timeout value as 1ns to trigger timeout behavior long timeout = 1; - BlockDeletingService svc = - new BlockDeletingService(containerManager, 1000, timeout, conf); + BlockDeletingService svc = new BlockDeletingService(containerManager, + TimeUnit.MILLISECONDS.toNanos(1000), timeout, TimeUnit.NANOSECONDS, + conf); svc.start(); LogCapturer log = LogCapturer.captureLogs(BackgroundService.LOG); @@ -303,7 +304,9 @@ public void testBlockDeletionTimeout() throws Exception { // test for normal case that doesn't have timeout limitation timeout = 0; createToDeleteBlocks(containerManager, conf, 1, 3, 1, chunksDir); - svc = new BlockDeletingService(containerManager, 1000, timeout, conf); + svc = new BlockDeletingService(containerManager, + TimeUnit.MILLISECONDS.toNanos(1000), timeout, TimeUnit.NANOSECONDS, + conf); svc.start(); // get container meta data From 18932717c42382ed8842de7719ec6d20c1765366 Mon Sep 17 00:00:00 2001 From: Anu Engineer Date: Tue, 26 Jun 2018 18:28:47 -0700 Subject: [PATCH 65/70] HDDS-94. Change ozone datanode command to start the standalone datanode plugin. Contributed by Sandeep Nemuri. 
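
With this change the "ozone datanode" subcommand launches HddsDatanodeService directly instead of an HDFS DataNode carrying the HDDS plugin, which is why the namenode service and the dfs.datanode.plugins/hdds.datanode.plugins settings disappear from the compose files below. A condensed sketch of the standalone launch path added to HddsDatanodeService.main(); the help-argument check and the surrounding try/catch are elided:

    // Parse generic options into an OzoneConfiguration and start the
    // datanode service on its own, without the DataNode plugin mechanism.
    Configuration conf = new OzoneConfiguration();
    GenericOptionsParser hParser = new GenericOptionsParser(conf, args);
    if (!hParser.isParseSuccessful()) {
      GenericOptionsParser.printGenericCommandUsage(System.err);
      System.exit(1);
    }
    HddsDatanodeService datanode = createHddsDatanodeService(conf);
    datanode.start(null);
    datanode.join();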
--- .../src/main/compose/ozone/docker-compose.yaml | 12 ------------ hadoop-dist/src/main/compose/ozone/docker-config | 5 ----- .../src/main/compose/ozoneperf/docker-compose.yaml | 13 ------------- .../src/main/compose/ozoneperf/docker-config | 5 ----- .../apache/hadoop/ozone/HddsDatanodeService.java | 13 ++++++++++++- .../src/test/acceptance/basic/docker-compose.yaml | 12 ------------ .../src/test/acceptance/basic/docker-config | 5 ----- .../src/test/acceptance/commonlib.robot | 1 - .../src/test/acceptance/ozonefs/docker-compose.yaml | 12 ------------ .../src/test/acceptance/ozonefs/docker-config | 5 ----- hadoop-ozone/common/src/main/bin/ozone | 9 +++------ 11 files changed, 15 insertions(+), 77 deletions(-) diff --git a/hadoop-dist/src/main/compose/ozone/docker-compose.yaml b/hadoop-dist/src/main/compose/ozone/docker-compose.yaml index faf420c7f5..512c649e21 100644 --- a/hadoop-dist/src/main/compose/ozone/docker-compose.yaml +++ b/hadoop-dist/src/main/compose/ozone/docker-compose.yaml @@ -16,18 +16,6 @@ version: "3" services: - namenode: - image: apache/hadoop-runner - hostname: namenode - volumes: - - ../../ozone:/opt/hadoop - ports: - - 9870:9870 - environment: - ENSURE_NAMENODE_DIR: /data/namenode - env_file: - - ./docker-config - command: ["/opt/hadoop/bin/hdfs","namenode"] datanode: image: apache/hadoop-runner volumes: diff --git a/hadoop-dist/src/main/compose/ozone/docker-config b/hadoop-dist/src/main/compose/ozone/docker-config index c693db0428..632f8701d2 100644 --- a/hadoop-dist/src/main/compose/ozone/docker-config +++ b/hadoop-dist/src/main/compose/ozone/docker-config @@ -14,7 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -CORE-SITE.XML_fs.defaultFS=hdfs://namenode:9000 OZONE-SITE.XML_ozone.ksm.address=ksm OZONE-SITE.XML_ozone.scm.names=scm OZONE-SITE.XML_ozone.enabled=True @@ -23,12 +22,8 @@ OZONE-SITE.XML_ozone.scm.block.client.address=scm OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata OZONE-SITE.XML_ozone.handler.type=distributed OZONE-SITE.XML_ozone.scm.client.address=scm -OZONE-SITE.XML_hdds.datanode.plugins=org.apache.hadoop.ozone.web.OzoneHddsDatanodeService -HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000 -HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode HDFS-SITE.XML_rpc.metrics.quantile.enable=true HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300 -HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.HddsDatanodeService LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout diff --git a/hadoop-dist/src/main/compose/ozoneperf/docker-compose.yaml b/hadoop-dist/src/main/compose/ozoneperf/docker-compose.yaml index fb7873bf88..3233c11641 100644 --- a/hadoop-dist/src/main/compose/ozoneperf/docker-compose.yaml +++ b/hadoop-dist/src/main/compose/ozoneperf/docker-compose.yaml @@ -16,19 +16,6 @@ version: "3" services: - namenode: - image: apache/hadoop-runner - hostname: namenode - volumes: - - ../../ozone:/opt/hadoop - - ./jmxpromo.jar:/opt/jmxpromo.jar - ports: - - 9870:9870 - environment: - ENSURE_NAMENODE_DIR: /data/namenode - env_file: - - ./docker-config - command: ["/opt/hadoop/bin/hdfs","namenode"] datanode: image: apache/hadoop-runner volumes: diff --git a/hadoop-dist/src/main/compose/ozoneperf/docker-config b/hadoop-dist/src/main/compose/ozoneperf/docker-config index e4f5485ac5..2be22a7792 100644 --- 
a/hadoop-dist/src/main/compose/ozoneperf/docker-config +++ b/hadoop-dist/src/main/compose/ozoneperf/docker-config @@ -14,7 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -CORE-SITE.XML_fs.defaultFS=hdfs://namenode:9000 OZONE-SITE.XML_ozone.ksm.address=ksm OZONE-SITE.XML_ozone.scm.names=scm OZONE-SITE.XML_ozone.enabled=True @@ -23,12 +22,8 @@ OZONE-SITE.XML_ozone.scm.block.client.address=scm OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata OZONE-SITE.XML_ozone.handler.type=distributed OZONE-SITE.XML_ozone.scm.client.address=scm -OZONE-SITE.XML_hdds.datanode.plugins=org.apache.hadoop.ozone.web.OzoneHddsDatanodeService -HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000 -HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode HDFS-SITE.XML_rpc.metrics.quantile.enable=true HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300 -HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.HddsDatanodeService LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java index fa4187a254..ddeec873bc 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java @@ -25,9 +25,11 @@ import org.apache.hadoop.hdds.HddsUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; import org.apache.hadoop.ozone.container.common.statemachine .DatanodeStateMachine; +import org.apache.hadoop.util.GenericOptionsParser; import org.apache.hadoop.util.ServicePlugin; import org.apache.hadoop.util.StringUtils; import org.slf4j.Logger; @@ -229,9 +231,18 @@ public static HddsDatanodeService createHddsDatanodeService( public static void main(String[] args) { try { + if (DFSUtil.parseHelpArgument(args, "Starts HDDS Datanode", System.out, false)) { + System.exit(0); + } + Configuration conf = new OzoneConfiguration(); + GenericOptionsParser hParser = new GenericOptionsParser(conf, args); + if (!hParser.isParseSuccessful()) { + GenericOptionsParser.printGenericCommandUsage(System.err); + System.exit(1); + } StringUtils.startupShutdownMessage(HddsDatanodeService.class, args, LOG); HddsDatanodeService hddsDatanodeService = - createHddsDatanodeService(new OzoneConfiguration()); + createHddsDatanodeService(conf); hddsDatanodeService.start(null); hddsDatanodeService.join(); } catch (Throwable e) { diff --git a/hadoop-ozone/acceptance-test/src/test/acceptance/basic/docker-compose.yaml b/hadoop-ozone/acceptance-test/src/test/acceptance/basic/docker-compose.yaml index 44bd4a0aae..b50f42d3e9 100644 --- a/hadoop-ozone/acceptance-test/src/test/acceptance/basic/docker-compose.yaml +++ b/hadoop-ozone/acceptance-test/src/test/acceptance/basic/docker-compose.yaml @@ -16,18 +16,6 @@ version: "3" services: - namenode: - image: apache/hadoop-runner - hostname: namenode - volumes: - - ${OZONEDIR}:/opt/hadoop - ports: - - 9870 - environment: - ENSURE_NAMENODE_DIR: /data/namenode - env_file: - - ./docker-config - command: 
["/opt/hadoop/bin/hdfs","namenode"] datanode: image: apache/hadoop-runner volumes: diff --git a/hadoop-ozone/acceptance-test/src/test/acceptance/basic/docker-config b/hadoop-ozone/acceptance-test/src/test/acceptance/basic/docker-config index 180dc8ef49..c3ec2ef71b 100644 --- a/hadoop-ozone/acceptance-test/src/test/acceptance/basic/docker-config +++ b/hadoop-ozone/acceptance-test/src/test/acceptance/basic/docker-config @@ -14,7 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -CORE-SITE.XML_fs.defaultFS=hdfs://namenode:9000 OZONE-SITE.XML_ozone.ksm.address=ksm OZONE-SITE.XML_ozone.ksm.http-address=ksm:9874 OZONE-SITE.XML_ozone.scm.names=scm @@ -24,13 +23,9 @@ OZONE-SITE.XML_ozone.scm.block.client.address=scm OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata OZONE-SITE.XML_ozone.handler.type=distributed OZONE-SITE.XML_ozone.scm.client.address=scm -OZONE-SITE.XML_hdds.datanode.plugins=org.apache.hadoop.ozone.web.OzoneHddsDatanodeService OZONE-SITE.XML_ozone.scm.heartbeat.interval=3s -HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000 -HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode HDFS-SITE.XML_rpc.metrics.quantile.enable=true HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300 -HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.HddsDatanodeService LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender diff --git a/hadoop-ozone/acceptance-test/src/test/acceptance/commonlib.robot b/hadoop-ozone/acceptance-test/src/test/acceptance/commonlib.robot index 01ed302e25..a5ea30af34 100644 --- a/hadoop-ozone/acceptance-test/src/test/acceptance/commonlib.robot +++ b/hadoop-ozone/acceptance-test/src/test/acceptance/commonlib.robot @@ -28,7 +28,6 @@ Startup Ozone cluster with size Daemons are running without error Is daemon running without error ksm Is daemon running without error scm - Is daemon running without error namenode Is daemon running without error datanode Check if datanode is connected to the scm diff --git a/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-compose.yaml b/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-compose.yaml index 3323557511..12022dfe61 100644 --- a/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-compose.yaml +++ b/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-compose.yaml @@ -16,18 +16,6 @@ version: "3" services: - namenode: - image: apache/hadoop-runner - hostname: namenode - volumes: - - ${OZONEDIR}:/opt/hadoop - ports: - - 9870 - environment: - ENSURE_NAMENODE_DIR: /data/namenode - env_file: - - ./docker-config - command: ["/opt/hadoop/bin/hdfs","namenode"] datanode: image: apache/hadoop-runner volumes: diff --git a/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-config b/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-config index dec863e94e..e06d434bb4 100644 --- a/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-config +++ b/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-config @@ -14,7 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-CORE-SITE.XML_fs.defaultFS=hdfs://namenode:9000 CORE-SITE.XML_fs.o3.impl=org.apache.hadoop.fs.ozone.OzoneFileSystem OZONE-SITE.XML_ozone.ksm.address=ksm OZONE-SITE.XML_ozone.ksm.http-address=ksm:9874 @@ -25,13 +24,9 @@ OZONE-SITE.XML_ozone.scm.block.client.address=scm OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata OZONE-SITE.XML_ozone.handler.type=distributed OZONE-SITE.XML_ozone.scm.client.address=scm -OZONE-SITE.XML_hdds.datanode.plugins=org.apache.hadoop.ozone.web.OzoneHddsDatanodeService OZONE-SITE.XML_ozone.scm.heartbeat.interval=3s -HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000 -HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode HDFS-SITE.XML_rpc.metrics.quantile.enable=true HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300 -HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.HddsDatanodeService LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender diff --git a/hadoop-ozone/common/src/main/bin/ozone b/hadoop-ozone/common/src/main/bin/ozone index 6843bddca5..390f0895b7 100755 --- a/hadoop-ozone/common/src/main/bin/ozone +++ b/hadoop-ozone/common/src/main/bin/ozone @@ -34,7 +34,7 @@ function hadoop_usage hadoop_add_subcommand "classpath" client "prints the class path needed to get the hadoop jar and the required libraries" - hadoop_add_subcommand "datanode" daemon "run a DFS datanode" + hadoop_add_subcommand "datanode" daemon "run a HDDS datanode" hadoop_add_subcommand "envvars" client "display computed Hadoop environment variables" hadoop_add_subcommand "freon" client "runs an ozone data generator" hadoop_add_subcommand "genesis" client "runs a collection of ozone benchmarks to help with tuning." @@ -45,7 +45,7 @@ function hadoop_usage hadoop_add_subcommand "o3" client "command line interface for ozone" hadoop_add_subcommand "noz" client "ozone debug tool, convert ozone metadata into relational data" hadoop_add_subcommand "scm" daemon "run the Storage Container Manager service" - hadoop_add_subcommand "scmcli" client "run the CLI of the Storage Container Manager " + hadoop_add_subcommand "scmcli" client "run the CLI of the Storage Container Manager" hadoop_add_subcommand "version" client "print the version" hadoop_add_subcommand "genconf" client "generate minimally required ozone configs and output to ozone-site.xml in specified path" @@ -68,10 +68,7 @@ function ozonecmd_case ;; datanode) HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true" - HADOOP_SECURE_CLASSNAME="org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter" - HADOOP_CLASSNAME='org.apache.hadoop.hdfs.server.datanode.DataNode' - hadoop_deprecate_envvar HADOOP_SECURE_DN_PID_DIR HADOOP_SECURE_PID_DIR - hadoop_deprecate_envvar HADOOP_SECURE_DN_LOG_DIR HADOOP_SECURE_LOG_DIR + HADOOP_CLASSNAME=org.apache.hadoop.ozone.HddsDatanodeService ;; envvars) echo "JAVA_HOME='${JAVA_HOME}'" From 8752a48564028cb5892c19e29d4e5b984d70c076 Mon Sep 17 00:00:00 2001 From: Anu Engineer Date: Wed, 27 Jun 2018 14:18:25 -0700 Subject: [PATCH 66/70] HDDS-193. Make Datanode heartbeat dispatcher in SCM event based. Contributed by Elek, Marton. 
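
The per-report handler classes and their factory are removed; the dispatcher now only translates a heartbeat into typed events (NODE_REPORT, CONTAINER_REPORT) on the SCM event queue, and interested components subscribe to those events. A hedged sketch of how a subscriber could be wired up; EventQueue and its addHandler method are assumed from the hdds server events framework rather than shown in this patch, and the handler body is purely illustrative:

    // Subscribe to container reports fired by the new dispatcher
    // (assumed EventQueue API; handler body is illustrative).
    EventQueue eventQueue = new EventQueue();
    eventQueue.addHandler(SCMDatanodeHeartbeatDispatcher.CONTAINER_REPORT,
        (containerReportFromDatanode, publisher) -> {
          // act on containerReportFromDatanode.getReport() coming from
          // containerReportFromDatanode.getDatanodeDetails()
        });
    SCMDatanodeHeartbeatDispatcher dispatcher =
        new SCMDatanodeHeartbeatDispatcher(eventQueue);
    dispatcher.dispatch(heartbeat); // fires NODE_REPORT / CONTAINER_REPORT

This is also why SCMDatanodeProtocolServer now receives an EventPublisher and StorageContainerManager passes its event queue when constructing the server.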
--- .../SCMDatanodeHeartbeatDispatcher.java | 126 +++++++++ .../scm/server/SCMDatanodeProtocolServer.java | 14 +- .../scm/server/StorageContainerManager.java | 4 +- .../SCMDatanodeContainerReportHandler.java | 76 ------ .../SCMDatanodeHeartbeatDispatcher.java | 189 ------------- .../report/SCMDatanodeNodeReportHandler.java | 43 --- .../report/SCMDatanodeReportHandler.java | 83 ------ .../SCMDatanodeReportHandlerFactory.java | 82 ------ .../hdds/scm/server/report/package-info.java | 57 ---- .../TestSCMDatanodeHeartbeatDispatcher.java | 119 ++++++++ ...TestSCMDatanodeContainerReportHandler.java | 34 --- .../TestSCMDatanodeHeartbeatDispatcher.java | 138 ---------- .../TestSCMDatanodeNodeReportHandler.java | 36 --- .../TestSCMDatanodeReportHandlerFactory.java | 51 ---- .../hdds/scm/server/report/package-info.java | 21 -- .../hadoop/ozone/scm/TestSCMMetrics.java | 253 ------------------ 16 files changed, 254 insertions(+), 1072 deletions(-) create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeContainerReportHandler.java delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeHeartbeatDispatcher.java delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeNodeReportHandler.java delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeReportHandler.java delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeReportHandlerFactory.java delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/package-info.java create mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMDatanodeHeartbeatDispatcher.java delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/TestSCMDatanodeContainerReportHandler.java delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/TestSCMDatanodeHeartbeatDispatcher.java delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/TestSCMDatanodeNodeReportHandler.java delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/TestSCMDatanodeReportHandlerFactory.java delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/package-info.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMetrics.java diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java new file mode 100644 index 0000000000..36f10a93dc --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java @@ -0,0 +1,126 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. 
The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.hdds.scm.server; + +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.NodeReportProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto; +import org.apache.hadoop.hdds.server.events.EventPublisher; +import org.apache.hadoop.hdds.server.events.TypedEvent; + +import com.google.protobuf.GeneratedMessage; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * This class is responsible for dispatching heartbeat from datanode to + * appropriate EventHandler at SCM. + */ +public final class SCMDatanodeHeartbeatDispatcher { + + private static final Logger LOG = + LoggerFactory.getLogger(SCMDatanodeHeartbeatDispatcher.class); + + private EventPublisher eventPublisher; + + public static final TypedEvent NODE_REPORT = + new TypedEvent<>(NodeReportFromDatanode.class); + + public static final TypedEvent CONTAINER_REPORT = + new TypedEvent(ContainerReportFromDatanode.class); + + public SCMDatanodeHeartbeatDispatcher(EventPublisher eventPublisher) { + this.eventPublisher = eventPublisher; + } + + + /** + * Dispatches heartbeat to registered event handlers. + * + * @param heartbeat heartbeat to be dispatched. + */ + public void dispatch(SCMHeartbeatRequestProto heartbeat) { + DatanodeDetails datanodeDetails = + DatanodeDetails.getFromProtoBuf(heartbeat.getDatanodeDetails()); + + if (heartbeat.hasNodeReport()) { + eventPublisher.fireEvent(NODE_REPORT, + new NodeReportFromDatanode(datanodeDetails, + heartbeat.getNodeReport())); + } + + if (heartbeat.hasContainerReport()) { + eventPublisher.fireEvent(CONTAINER_REPORT, + new ContainerReportFromDatanode(datanodeDetails, + heartbeat.getContainerReport())); + + } + } + + /** + * Wrapper class for events with the datanode origin. + */ + public static class ReportFromDatanode { + + private final DatanodeDetails datanodeDetails; + + private final T report; + + public ReportFromDatanode(DatanodeDetails datanodeDetails, T report) { + this.datanodeDetails = datanodeDetails; + this.report = report; + } + + public DatanodeDetails getDatanodeDetails() { + return datanodeDetails; + } + + public T getReport() { + return report; + } + } + + /** + * Node report event payload with origin. + */ + public static class NodeReportFromDatanode + extends ReportFromDatanode { + + public NodeReportFromDatanode(DatanodeDetails datanodeDetails, + NodeReportProto report) { + super(datanodeDetails, report); + } + } + + /** + * Container report event payload with origin. 
+ */ + public static class ContainerReportFromDatanode + extends ReportFromDatanode { + + public ContainerReportFromDatanode(DatanodeDetails datanodeDetails, + ContainerReportsProto report) { + super(datanodeDetails, report); + } + } + +} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java index eb5ce1a827..56b07190a5 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java @@ -73,7 +73,7 @@ import org.apache.hadoop.hdds.scm.HddsServerUtil; -import org.apache.hadoop.hdds.scm.server.report.SCMDatanodeHeartbeatDispatcher; +import org.apache.hadoop.hdds.server.events.EventPublisher; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ipc.RPC; @@ -122,14 +122,19 @@ public class SCMDatanodeProtocolServer implements private final SCMDatanodeHeartbeatDispatcher heartbeatDispatcher; public SCMDatanodeProtocolServer(final OzoneConfiguration conf, - StorageContainerManager scm) throws IOException { + StorageContainerManager scm, EventPublisher eventPublisher) + throws IOException { Preconditions.checkNotNull(scm, "SCM cannot be null"); + Preconditions.checkNotNull(eventPublisher, "EventPublisher cannot be null"); + this.scm = scm; final int handlerCount = conf.getInt(OZONE_SCM_HANDLER_COUNT_KEY, OZONE_SCM_HANDLER_COUNT_DEFAULT); + heartbeatDispatcher = new SCMDatanodeHeartbeatDispatcher(eventPublisher); + RPC.setProtocolEngine(conf, StorageContainerDatanodeProtocolPB.class, ProtobufRpcEngine.class); BlockingService dnProtoPbService = @@ -155,10 +160,6 @@ public SCMDatanodeProtocolServer(final OzoneConfiguration conf, conf, OZONE_SCM_DATANODE_ADDRESS_KEY, datanodeRpcAddr, datanodeRpcServer); - heartbeatDispatcher = SCMDatanodeHeartbeatDispatcher.newBuilder(conf, scm) - .addHandlerFor(NodeReportProto.class) - .addHandlerFor(ContainerReportsProto.class) - .build(); } public void start() { @@ -319,7 +320,6 @@ public void stop() { try { LOG.info("Stopping the RPC server for DataNodes"); datanodeRpcServer.stop(); - heartbeatDispatcher.shutdown(); } catch (Exception ex) { LOG.error(" datanodeRpcServer stop failed.", ex); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java index 5725d236ae..568a86ab4f 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java @@ -52,7 +52,6 @@ import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.common.Storage.StorageState; import org.apache.hadoop.ozone.common.StorageInfo; -import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.GenericOptionsParser; import org.apache.hadoop.util.StringUtils; @@ -182,7 +181,8 @@ private StorageContainerManager(OzoneConfiguration conf) throws IOException { scmAdminUsernames.add(scmUsername); } - datanodeProtocolServer = new SCMDatanodeProtocolServer(conf, this); + datanodeProtocolServer = new 
SCMDatanodeProtocolServer(conf, this, + eventQueue); blockProtocolServer = new SCMBlockProtocolServer(conf, this); clientProtocolServer = new SCMClientProtocolServer(conf, this); httpServer = new StorageContainerManagerHttpServer(conf); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeContainerReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeContainerReportHandler.java deleted file mode 100644 index 00ce94d7f5..0000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeContainerReportHandler.java +++ /dev/null @@ -1,76 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

    - * http://www.apache.org/licenses/LICENSE-2.0 - *

    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.server.report; - -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; -import org.apache.hadoop.hdds.scm.container.placement.metrics.ContainerStat; -import org.apache.hadoop.hdds.scm.server.StorageContainerManager; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; - -/** - * Handler for Datanode Container Report. - */ -public class SCMDatanodeContainerReportHandler extends - SCMDatanodeReportHandler { - - private static final Logger LOG = LoggerFactory.getLogger( - SCMDatanodeContainerReportHandler.class); - - @Override - public void processReport(DatanodeDetails datanodeDetails, - ContainerReportsProto report) throws IOException { - LOG.trace("Processing container report from {}.", datanodeDetails); - updateContainerReportMetrics(datanodeDetails, report); - getSCM().getScmContainerManager() - .processContainerReports(datanodeDetails, report); - } - - /** - * Updates container report metrics in SCM. - * - * @param datanodeDetails Datanode Information - * @param reports Container Reports - */ - private void updateContainerReportMetrics(DatanodeDetails datanodeDetails, - ContainerReportsProto reports) { - ContainerStat newStat = new ContainerStat(); - for (StorageContainerDatanodeProtocolProtos.ContainerInfo info : reports - .getReportsList()) { - newStat.add(new ContainerStat(info.getSize(), info.getUsed(), - info.getKeyCount(), info.getReadBytes(), info.getWriteBytes(), - info.getReadCount(), info.getWriteCount())); - } - // update container metrics - StorageContainerManager.getMetrics().setLastContainerStat(newStat); - - // Update container stat entry, this will trigger a removal operation if it - // exists in cache. - String datanodeUuid = datanodeDetails.getUuidString(); - getSCM().getContainerReportCache().put(datanodeUuid, newStat); - // update global view container metrics - StorageContainerManager.getMetrics().incrContainerStat(newStat); - } - -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeHeartbeatDispatcher.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeHeartbeatDispatcher.java deleted file mode 100644 index d50edff7c5..0000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeHeartbeatDispatcher.java +++ /dev/null @@ -1,189 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

    - * http://www.apache.org/licenses/LICENSE-2.0 - *

    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.server.report; - -import com.google.common.util.concurrent.ThreadFactoryBuilder; -import com.google.protobuf.GeneratedMessage; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto; -import org.apache.hadoop.hdds.scm.server.StorageContainerManager; -import org.apache.hadoop.util.concurrent.HadoopExecutors; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.HashMap; -import java.util.Map; -import java.util.concurrent.ExecutorService; - -/** - * This class is responsible for dispatching heartbeat from datanode to - * appropriate ReportHandlers at SCM. - * Only one handler per report is supported now, it's very easy to support - * multiple handlers for a report. - */ -public final class SCMDatanodeHeartbeatDispatcher { - - private static final Logger LOG = LoggerFactory.getLogger( - SCMDatanodeHeartbeatDispatcher.class); - - /** - * This stores Report to Handler mapping. - */ - private final Map, - SCMDatanodeReportHandler> handlers; - - /** - * Executor service which will be used for processing reports. - */ - private final ExecutorService executorService; - - /** - * Constructs SCMDatanodeHeartbeatDispatcher instance with the given - * handlers. - * - * @param handlers report to report handler mapping - */ - private SCMDatanodeHeartbeatDispatcher(Map, - SCMDatanodeReportHandler> handlers) { - this.handlers = handlers; - this.executorService = HadoopExecutors.newCachedThreadPool( - new ThreadFactoryBuilder().setDaemon(true) - .setNameFormat("SCMDatanode Heartbeat Dispatcher Thread - %d") - .build()); - } - - /** - * Dispatches heartbeat to registered handlers. - * - * @param heartbeat heartbeat to be dispatched. - */ - public void dispatch(SCMHeartbeatRequestProto heartbeat) { - DatanodeDetails datanodeDetails = DatanodeDetails - .getFromProtoBuf(heartbeat.getDatanodeDetails()); - if (heartbeat.hasNodeReport()) { - processReport(datanodeDetails, heartbeat.getNodeReport()); - } - if (heartbeat.hasContainerReport()) { - processReport(datanodeDetails, heartbeat.getContainerReport()); - } - } - - /** - * Invokes appropriate ReportHandler and submits the task to executor - * service for processing. - * - * @param datanodeDetails Datanode Information - * @param report Report to be processed - */ - @SuppressWarnings("unchecked") - private void processReport(DatanodeDetails datanodeDetails, - GeneratedMessage report) { - executorService.submit(() -> { - try { - SCMDatanodeReportHandler handler = handlers.get(report.getClass()); - handler.processReport(datanodeDetails, report); - } catch (IOException ex) { - LOG.error("Exception wile processing report {}, from {}", - report.getClass(), datanodeDetails, ex); - } - }); - } - - /** - * Shuts down SCMDatanodeHeartbeatDispatcher. - */ - public void shutdown() { - executorService.shutdown(); - } - - /** - * Returns a new Builder to construct {@link SCMDatanodeHeartbeatDispatcher}. 
- * - * @param conf Configuration to be used by SCMDatanodeHeartbeatDispatcher - * @param scm {@link StorageContainerManager} instance to be used by report - * handlers - * - * @return {@link SCMDatanodeHeartbeatDispatcher.Builder} instance - */ - public static Builder newBuilder(Configuration conf, - StorageContainerManager scm) { - return new Builder(conf, scm); - } - - /** - * Builder for SCMDatanodeHeartbeatDispatcher. - */ - public static class Builder { - - private final SCMDatanodeReportHandlerFactory reportHandlerFactory; - private final Map, - SCMDatanodeReportHandler> report2handler; - - /** - * Constructs SCMDatanodeHeartbeatDispatcher.Builder instance. - * - * @param conf Configuration object to be used. - * @param scm StorageContainerManager instance to be used for report - * handler initialization. - */ - private Builder(Configuration conf, StorageContainerManager scm) { - this.report2handler = new HashMap<>(); - this.reportHandlerFactory = - new SCMDatanodeReportHandlerFactory(conf, scm); - } - - /** - * Adds new report handler for the given report. - * - * @param report Report for which handler has to be added - * - * @return Builder - */ - public Builder addHandlerFor(Class report) { - report2handler.put(report, reportHandlerFactory.getHandlerFor(report)); - return this; - } - - /** - * Associates the given report handler for the given report. - * - * @param report Report to be associated with - * @param handler Handler to be used for the report - * - * @return Builder - */ - public Builder addHandler(Class report, - SCMDatanodeReportHandler handler) { - report2handler.put(report, handler); - return this; - } - - /** - * Builds and returns {@link SCMDatanodeHeartbeatDispatcher} instance. - * - * @return SCMDatanodeHeartbeatDispatcher - */ - public SCMDatanodeHeartbeatDispatcher build() { - return new SCMDatanodeHeartbeatDispatcher(report2handler); - } - } - -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeNodeReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeNodeReportHandler.java deleted file mode 100644 index fb89b02215..0000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeNodeReportHandler.java +++ /dev/null @@ -1,43 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

    - * http://www.apache.org/licenses/LICENSE-2.0 - *

    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.server.report; - -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.NodeReportProto; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; - -/** - * Handles Datanode Node Report. - */ -public class SCMDatanodeNodeReportHandler extends - SCMDatanodeReportHandler { - - private static final Logger LOG = LoggerFactory.getLogger( - SCMDatanodeNodeReportHandler.class); - - @Override - public void processReport(DatanodeDetails datanodeDetails, - NodeReportProto report) throws IOException { - LOG.debug("Processing node report from {}.", datanodeDetails); - //TODO: add logic to process node report. - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeReportHandler.java deleted file mode 100644 index d3386493c1..0000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeReportHandler.java +++ /dev/null @@ -1,83 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

    - * http://www.apache.org/licenses/LICENSE-2.0 - *

    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.server.report; - -import com.google.protobuf.GeneratedMessage; -import org.apache.hadoop.conf.Configurable; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.scm.server.StorageContainerManager; - -import java.io.IOException; - -/** - * Datanode Report handlers should implement this interface in order to get - * call back whenever the report is received from datanode. - * - * @param Type of report the handler is interested in. - */ -public abstract class SCMDatanodeReportHandler - implements Configurable { - - private Configuration config; - private StorageContainerManager scm; - - /** - * Initializes SCMDatanodeReportHandler and associates it with the given - * StorageContainerManager instance. - * - * @param storageContainerManager StorageContainerManager instance to be - * associated with. - */ - public void init(StorageContainerManager storageContainerManager) { - this.scm = storageContainerManager; - } - - /** - * Returns the associated StorageContainerManager instance. This will be - * used by the ReportHandler implementations. - * - * @return {@link StorageContainerManager} - */ - protected StorageContainerManager getSCM() { - return scm; - } - - @Override - public void setConf(Configuration conf) { - this.config = conf; - } - - @Override - public Configuration getConf() { - return config; - } - - /** - * Processes the report received from datanode. Each ReportHandler - * implementation is responsible for providing the logic to process the - * report it's interested in. - * - * @param datanodeDetails Datanode Information - * @param report Report to be processed - * - * @throws IOException In case of any exception - */ - abstract void processReport(DatanodeDetails datanodeDetails, T report) - throws IOException; -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeReportHandlerFactory.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeReportHandlerFactory.java deleted file mode 100644 index e88495fc23..0000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeReportHandlerFactory.java +++ /dev/null @@ -1,82 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

    - * http://www.apache.org/licenses/LICENSE-2.0 - *

    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.server.report; - -import com.google.protobuf.GeneratedMessage; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.NodeReportProto; -import org.apache.hadoop.hdds.scm.server.StorageContainerManager; -import org.apache.hadoop.util.ReflectionUtils; - -import java.util.HashMap; -import java.util.Map; - - -/** - * Factory class to construct {@link SCMDatanodeReportHandler} given a report. - */ -public class SCMDatanodeReportHandlerFactory { - - private final Configuration conf; - private final StorageContainerManager scm; - private final Map, - Class>> - report2handler; - - /** - * Constructs {@link SCMDatanodeReportHandler} instance. - * - * @param conf Configuration to be passed to the - * {@link SCMDatanodeReportHandler} - */ - public SCMDatanodeReportHandlerFactory(Configuration conf, - StorageContainerManager scm) { - this.conf = conf; - this.scm = scm; - this.report2handler = new HashMap<>(); - - report2handler.put(NodeReportProto.class, - SCMDatanodeNodeReportHandler.class); - report2handler.put(ContainerReportsProto.class, - SCMDatanodeContainerReportHandler.class); - } - - /** - * Returns the SCMDatanodeReportHandler for the corresponding report. - * - * @param report report - * - * @return report handler - */ - public SCMDatanodeReportHandler getHandlerFor( - Class report) { - Class> - handlerClass = report2handler.get(report); - if (handlerClass == null) { - throw new RuntimeException("No handler found for report " + report); - } - SCMDatanodeReportHandler instance = - ReflectionUtils.newInstance(handlerClass, conf); - instance.init(scm); - return instance; - } - -} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/package-info.java deleted file mode 100644 index fda3993096..0000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/package-info.java +++ /dev/null @@ -1,57 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

    - * http://www.apache.org/licenses/LICENSE-2.0 - *

    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.server.report; -/** - * Handling of all the datanode reports in SCM which are received through - * heartbeat is done here. - * - * SCM Datanode Report Processing State Diagram: - * - * SCMDatanode SCMDatanodeHeartbeat SCMDatanodeReport - * ProtocolServer Dispatcher Handler - * | | | - * | | | - * | construct | | - * |----------------------->| | - * | | | - * | | register | - * | |<-----------------------| - * | | | - * +------------+------------------------+------------------------+--------+ - * | loop | | | | - * | | | | | - * | | | | | - * | heartbeat | | | | - * - +----------->| | | | - * | from | heartbeat | | | - * | Datanode |----------------------->| | | - * | | | report | | - * | | |----------------------->| | - * | | | | | - * | DN | | | | - * <-+------------| | | | - * | commands | | | | - * | | | | | - * +------------+------------------------+------------------------+--------+ - * | | | - * | | | - * | shutdown | | - * |----------------------->| | - * | | | - * | | | - * - - - - */ \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMDatanodeHeartbeatDispatcher.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMDatanodeHeartbeatDispatcher.java new file mode 100644 index 0000000000..326a34b792 --- /dev/null +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMDatanodeHeartbeatDispatcher.java @@ -0,0 +1,119 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
<p>
    + * http://www.apache.org/licenses/LICENSE-2.0 + *
<p>
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.hdds.scm.server; + +import java.io.IOException; +import java.util.concurrent.atomic.AtomicInteger; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.NodeReportProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto; +import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher + .ContainerReportFromDatanode; +import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher + .NodeReportFromDatanode; +import org.apache.hadoop.hdds.server.events.Event; +import org.apache.hadoop.hdds.server.events.EventPublisher; + +import org.junit.Assert; +import org.junit.Test; + +/** + * This class tests the behavior of SCMDatanodeHeartbeatDispatcher. + */ +public class TestSCMDatanodeHeartbeatDispatcher { + + + @Test + public void testNodeReportDispatcher() throws IOException { + + Configuration conf = new OzoneConfiguration(); + + AtomicInteger eventReceived = new AtomicInteger(); + + NodeReportProto nodeReport = NodeReportProto.getDefaultInstance(); + + SCMDatanodeHeartbeatDispatcher dispatcher = + new SCMDatanodeHeartbeatDispatcher(new EventPublisher() { + @Override + public > void fireEvent( + EVENT_TYPE event, PAYLOAD payload) { + Assert.assertEquals(event, + SCMDatanodeHeartbeatDispatcher.NODE_REPORT); + eventReceived.incrementAndGet(); + Assert.assertEquals(nodeReport, ((NodeReportFromDatanode)payload).getReport()); + + } + }); + + DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails(); + + SCMHeartbeatRequestProto heartbeat = + SCMHeartbeatRequestProto.newBuilder() + .setDatanodeDetails(datanodeDetails.getProtoBufMessage()) + .setNodeReport(nodeReport) + .build(); + dispatcher.dispatch(heartbeat); + Assert.assertEquals(1, eventReceived.get()); + + + } + + @Test + public void testContainerReportDispatcher() throws IOException { + + Configuration conf = new OzoneConfiguration(); + + AtomicInteger eventReceived = new AtomicInteger(); + + ContainerReportsProto containerReport = + ContainerReportsProto.getDefaultInstance(); + + SCMDatanodeHeartbeatDispatcher dispatcher = + new SCMDatanodeHeartbeatDispatcher(new EventPublisher() { + @Override + public > void fireEvent( + EVENT_TYPE event, PAYLOAD payload) { + Assert.assertEquals(event, + SCMDatanodeHeartbeatDispatcher.CONTAINER_REPORT); + Assert.assertEquals(containerReport, ((ContainerReportFromDatanode)payload).getReport()); + eventReceived.incrementAndGet(); + } + }); + + DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails(); + + SCMHeartbeatRequestProto heartbeat = + SCMHeartbeatRequestProto.newBuilder() + .setDatanodeDetails(datanodeDetails.getProtoBufMessage()) + .setContainerReport(containerReport) + .build(); + dispatcher.dispatch(heartbeat); + Assert.assertEquals(1, eventReceived.get()); + + + } + +} diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/TestSCMDatanodeContainerReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/TestSCMDatanodeContainerReportHandler.java deleted file mode 100644 index 776ae88754..0000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/TestSCMDatanodeContainerReportHandler.java +++ /dev/null @@ -1,34 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
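The new TestSCMDatanodeHeartbeatDispatcher above reflects the replacement design: instead of resolving a handler per report, the dispatcher fires typed events (NODE_REPORT, CONTAINER_REPORT) at an EventPublisher, which the test stubs with an anonymous class that counts deliveries. As a rough sketch of that shape, using illustrative names rather than the org.apache.hadoop.hdds.server.events API:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
 * Illustrative sketch only: a tiny typed event bus in the spirit of the
 * EventPublisher/Event pair the new dispatcher test stubs out.
 */
public final class TinyEventBus {

  /** Typed event key; the type parameter only ties events to payload types. */
  public interface Event<PAYLOAD> {
  }

  public interface EventHandler<PAYLOAD> {
    void onEvent(PAYLOAD payload);
  }

  private final Map<Event<?>, List<EventHandler<?>>> handlers = new HashMap<>();

  public <P> void subscribe(Event<P> event, EventHandler<P> handler) {
    handlers.computeIfAbsent(event, e -> new ArrayList<>()).add(handler);
  }

  /** Counterpart of the fireEvent call the anonymous EventPublisher receives. */
  @SuppressWarnings("unchecked")
  public <P> void fireEvent(Event<P> event, P payload) {
    List<EventHandler<?>> registered = handlers.get(event);
    if (registered == null) {
      return;
    }
    for (EventHandler<?> handler : registered) {
      ((EventHandler<P>) handler).onEvent(payload);
    }
  }
}

A dispatcher built on something of this shape would fire NODE_REPORT with a NodeReportFromDatanode payload and CONTAINER_REPORT with a ContainerReportFromDatanode payload, which is exactly what the two new tests assert by counting fireEvent deliveries.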
<p>
    - * http://www.apache.org/licenses/LICENSE-2.0 - *
<p>
    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.server.report; - -import org.junit.Assert; -import org.junit.Test; - -/** - * Test cases to verify SCMDatanodeContainerReportHandler's behavior. - */ -public class TestSCMDatanodeContainerReportHandler { - - //TODO: add test cases to verify SCMDatanodeContainerReportHandler. - - @Test - public void dummyTest() { - Assert.assertTrue(true); - } -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/TestSCMDatanodeHeartbeatDispatcher.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/TestSCMDatanodeHeartbeatDispatcher.java deleted file mode 100644 index 5d086471c1..0000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/TestSCMDatanodeHeartbeatDispatcher.java +++ /dev/null @@ -1,138 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
<p>
    - * http://www.apache.org/licenses/LICENSE-2.0 - *
<p>
    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.server.report; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.NodeReportProto; -import org.apache.hadoop.hdds.scm.TestUtils; -import org.junit.Assert; -import org.junit.Test; -import org.mockito.Mockito; - -import java.io.IOException; - -import static org.mockito.Matchers.any; -import static org.mockito.Matchers.eq; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; - -/** - * This class tests the behavior of SCMDatanodeHeartbeatDispatcher. - */ -public class TestSCMDatanodeHeartbeatDispatcher { - - @Test - public void testSCMDatanodeHeartbeatDispatcherBuilder() { - Configuration conf = new OzoneConfiguration(); - SCMDatanodeHeartbeatDispatcher dispatcher = - SCMDatanodeHeartbeatDispatcher.newBuilder(conf, null) - .addHandlerFor(NodeReportProto.class) - .addHandlerFor(ContainerReportsProto.class) - .build(); - Assert.assertNotNull(dispatcher); - } - - @Test - public void testNodeReportDispatcher() throws IOException { - Configuration conf = new OzoneConfiguration(); - SCMDatanodeNodeReportHandler nodeReportHandler = - Mockito.mock(SCMDatanodeNodeReportHandler.class); - SCMDatanodeHeartbeatDispatcher dispatcher = - SCMDatanodeHeartbeatDispatcher.newBuilder(conf, null) - .addHandler(NodeReportProto.class, nodeReportHandler) - .build(); - - DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails(); - NodeReportProto nodeReport = NodeReportProto.getDefaultInstance(); - SCMHeartbeatRequestProto heartbeat = - SCMHeartbeatRequestProto.newBuilder() - .setDatanodeDetails(datanodeDetails.getProtoBufMessage()) - .setNodeReport(nodeReport) - .build(); - dispatcher.dispatch(heartbeat); - verify(nodeReportHandler, - times(1)) - .processReport(any(DatanodeDetails.class), eq(nodeReport)); - } - - @Test - public void testContainerReportDispatcher() throws IOException { - Configuration conf = new OzoneConfiguration(); - SCMDatanodeContainerReportHandler containerReportHandler = - Mockito.mock(SCMDatanodeContainerReportHandler.class); - SCMDatanodeHeartbeatDispatcher dispatcher = - SCMDatanodeHeartbeatDispatcher.newBuilder(conf, null) - .addHandler(ContainerReportsProto.class, containerReportHandler) - .build(); - - DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails(); - ContainerReportsProto containerReport = - ContainerReportsProto.getDefaultInstance(); - SCMHeartbeatRequestProto heartbeat = - SCMHeartbeatRequestProto.newBuilder() - .setDatanodeDetails(datanodeDetails.getProtoBufMessage()) - .setContainerReport(containerReport) - .build(); - dispatcher.dispatch(heartbeat); - verify(containerReportHandler, - times(1)) - .processReport(any(DatanodeDetails.class), - any(ContainerReportsProto.class)); - } - - @Test - public void testNodeAndContainerReportDispatcher() throws 
IOException { - Configuration conf = new OzoneConfiguration(); - SCMDatanodeNodeReportHandler nodeReportHandler = - Mockito.mock(SCMDatanodeNodeReportHandler.class); - SCMDatanodeContainerReportHandler containerReportHandler = - Mockito.mock(SCMDatanodeContainerReportHandler.class); - SCMDatanodeHeartbeatDispatcher dispatcher = - SCMDatanodeHeartbeatDispatcher.newBuilder(conf, null) - .addHandler(NodeReportProto.class, nodeReportHandler) - .addHandler(ContainerReportsProto.class, containerReportHandler) - .build(); - - DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails(); - NodeReportProto nodeReport = NodeReportProto.getDefaultInstance(); - ContainerReportsProto containerReport = - ContainerReportsProto.getDefaultInstance(); - SCMHeartbeatRequestProto heartbeat = - SCMHeartbeatRequestProto.newBuilder() - .setDatanodeDetails(datanodeDetails.getProtoBufMessage()) - .setNodeReport(nodeReport) - .setContainerReport(containerReport) - .build(); - dispatcher.dispatch(heartbeat); - verify(nodeReportHandler, - times(1)) - .processReport(any(DatanodeDetails.class), any(NodeReportProto.class)); - verify(containerReportHandler, - times(1)) - .processReport(any(DatanodeDetails.class), - any(ContainerReportsProto.class)); - } - -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/TestSCMDatanodeNodeReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/TestSCMDatanodeNodeReportHandler.java deleted file mode 100644 index 30a753c024..0000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/TestSCMDatanodeNodeReportHandler.java +++ /dev/null @@ -1,36 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
<p>
    - * http://www.apache.org/licenses/LICENSE-2.0 - *
<p>
    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.server.report; - - -import org.junit.Assert; -import org.junit.Test; - -/** - * Test cases to verify TestSCMDatanodeNodeReportHandler's behavior. - */ -public class TestSCMDatanodeNodeReportHandler { - - - //TODO: add test cases to verify SCMDatanodeNodeReportHandler. - - @Test - public void dummyTest() { - Assert.assertTrue(true); - } -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/TestSCMDatanodeReportHandlerFactory.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/TestSCMDatanodeReportHandlerFactory.java deleted file mode 100644 index 4b918f76c7..0000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/TestSCMDatanodeReportHandlerFactory.java +++ /dev/null @@ -1,51 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
<p>
    - * http://www.apache.org/licenses/LICENSE-2.0 - *
<p>
    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.server.report; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.NodeReportProto; -import org.junit.Assert; -import org.junit.Test; - -/** - * Test cases to verify the functionality of SCMDatanodeReportHandlerFactory. - */ -public class TestSCMDatanodeReportHandlerFactory { - - @Test - public void testNodeReportHandlerConstruction() { - Configuration conf = new OzoneConfiguration(); - SCMDatanodeReportHandlerFactory factory = - new SCMDatanodeReportHandlerFactory(conf, null); - Assert.assertTrue(factory.getHandlerFor(NodeReportProto.class) - instanceof SCMDatanodeNodeReportHandler); - } - - @Test - public void testContainerReporttHandlerConstruction() { - Configuration conf = new OzoneConfiguration(); - SCMDatanodeReportHandlerFactory factory = - new SCMDatanodeReportHandlerFactory(conf, null); - Assert.assertTrue(factory.getHandlerFor(ContainerReportsProto.class) - instanceof SCMDatanodeContainerReportHandler); - } -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/package-info.java deleted file mode 100644 index 4a3f59f016..0000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
<p>
    - * http://www.apache.org/licenses/LICENSE-2.0 - *
<p>
    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.server.report; -/** - * Contains test-cases to test Datanode report handlers in SCM. - */ \ No newline at end of file diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMetrics.java deleted file mode 100644 index ecddf8eaca..0000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMetrics.java +++ /dev/null @@ -1,253 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.scm; - -import static org.apache.hadoop.test.MetricsAsserts.getLongCounter; -import static org.apache.hadoop.test.MetricsAsserts.getLongGauge; -import static org.apache.hadoop.test.MetricsAsserts.getMetrics; -import static org.junit.Assert.assertEquals; - -import org.apache.commons.codec.digest.DigestUtils; -import org.apache.commons.lang3.RandomUtils; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.server.StorageContainerManager; -import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.scm.server.report - .SCMDatanodeContainerReportHandler; -import org.apache.hadoop.metrics2.MetricsRecordBuilder; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.container.common.helpers.ContainerReport; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; -import org.apache.hadoop.hdds.scm.container.placement.metrics.ContainerStat; -import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMMetrics; -import org.apache.hadoop.hdds.scm.node.SCMNodeManager; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.Timeout; - -/** - * This class tests the metrics of Storage Container Manager. - */ -public class TestSCMMetrics { - /** - * Set the timeout for each test. 
- */ - @Rule - public Timeout testTimeout = new Timeout(90000); - - private static MiniOzoneCluster cluster = null; - - @Test - public void testContainerMetrics() throws Exception { - int nodeCount = 2; - int numReport = 2; - long size = OzoneConsts.GB * 5; - long used = OzoneConsts.GB * 2; - long readBytes = OzoneConsts.GB * 1; - long writeBytes = OzoneConsts.GB * 2; - int keyCount = 1000; - int readCount = 100; - int writeCount = 50; - OzoneConfiguration conf = new OzoneConfiguration(); - - try { - cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(nodeCount).build(); - cluster.waitForClusterToBeReady(); - - ContainerStat stat = new ContainerStat(size, used, keyCount, readBytes, - writeBytes, readCount, writeCount); - StorageContainerManager scmManager = cluster.getStorageContainerManager(); - DatanodeDetails fstDatanodeDetails = TestUtils.getDatanodeDetails(); - ContainerReportsProto request = createContainerReport(numReport, stat); - String fstDatanodeUuid = fstDatanodeDetails.getUuidString(); - SCMDatanodeContainerReportHandler containerReportHandler = - new SCMDatanodeContainerReportHandler(); - containerReportHandler.setConf(conf); - containerReportHandler.init(scmManager); - containerReportHandler.processReport( - fstDatanodeDetails, request); - - // verify container stat metrics - MetricsRecordBuilder scmMetrics = getMetrics(SCMMetrics.SOURCE_NAME); - assertEquals(size * numReport, - getLongGauge("LastContainerReportSize", scmMetrics)); - assertEquals(used * numReport, - getLongGauge("LastContainerReportUsed", scmMetrics)); - assertEquals(readBytes * numReport, - getLongGauge("LastContainerReportReadBytes", scmMetrics)); - assertEquals(writeBytes * numReport, - getLongGauge("LastContainerReportWriteBytes", scmMetrics)); - - assertEquals(keyCount * numReport, - getLongGauge("LastContainerReportKeyCount", scmMetrics)); - assertEquals(readCount * numReport, - getLongGauge("LastContainerReportReadCount", scmMetrics)); - assertEquals(writeCount * numReport, - getLongGauge("LastContainerReportWriteCount", scmMetrics)); - - // add one new report - DatanodeDetails sndDatanodeDetails = TestUtils.getDatanodeDetails(); - request = createContainerReport(1, stat); - String sndDatanodeUuid = sndDatanodeDetails.getUuidString(); - containerReportHandler.processReport( - sndDatanodeDetails, request); - - scmMetrics = getMetrics(SCMMetrics.SOURCE_NAME); - assertEquals(size * (numReport + 1), - getLongCounter("ContainerReportSize", scmMetrics)); - assertEquals(used * (numReport + 1), - getLongCounter("ContainerReportUsed", scmMetrics)); - assertEquals(readBytes * (numReport + 1), - getLongCounter("ContainerReportReadBytes", scmMetrics)); - assertEquals(writeBytes * (numReport + 1), - getLongCounter("ContainerReportWriteBytes", scmMetrics)); - - assertEquals(keyCount * (numReport + 1), - getLongCounter("ContainerReportKeyCount", scmMetrics)); - assertEquals(readCount * (numReport + 1), - getLongCounter("ContainerReportReadCount", scmMetrics)); - assertEquals(writeCount * (numReport + 1), - getLongCounter("ContainerReportWriteCount", scmMetrics)); - - // Re-send reports but with different value for validating - // the aggregation. 
- stat = new ContainerStat(100, 50, 3, 50, 60, 5, 6); - containerReportHandler.processReport( - fstDatanodeDetails, createContainerReport(1, stat)); - - stat = new ContainerStat(1, 1, 1, 1, 1, 1, 1); - containerReportHandler.processReport( - sndDatanodeDetails, createContainerReport(1, stat)); - - // the global container metrics value should be updated - scmMetrics = getMetrics(SCMMetrics.SOURCE_NAME); - assertEquals(101, getLongCounter("ContainerReportSize", scmMetrics)); - assertEquals(51, getLongCounter("ContainerReportUsed", scmMetrics)); - assertEquals(51, getLongCounter("ContainerReportReadBytes", scmMetrics)); - assertEquals(61, getLongCounter("ContainerReportWriteBytes", scmMetrics)); - - assertEquals(4, getLongCounter("ContainerReportKeyCount", scmMetrics)); - assertEquals(6, getLongCounter("ContainerReportReadCount", scmMetrics)); - assertEquals(7, getLongCounter("ContainerReportWriteCount", scmMetrics)); - } finally { - if (cluster != null) { - cluster.shutdown(); - } - } - } - - @Test - public void testStaleNodeContainerReport() throws Exception { - int nodeCount = 2; - int numReport = 2; - long size = OzoneConsts.GB * 5; - long used = OzoneConsts.GB * 2; - long readBytes = OzoneConsts.GB * 1; - long writeBytes = OzoneConsts.GB * 2; - int keyCount = 1000; - int readCount = 100; - int writeCount = 50; - OzoneConfiguration conf = new OzoneConfiguration(); - - try { - cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(nodeCount).build(); - cluster.waitForClusterToBeReady(); - - ContainerStat stat = new ContainerStat(size, used, keyCount, readBytes, - writeBytes, readCount, writeCount); - StorageContainerManager scmManager = cluster.getStorageContainerManager(); - - DatanodeDetails datanodeDetails = cluster.getHddsDatanodes().get(0) - .getDatanodeDetails(); - SCMDatanodeContainerReportHandler containerReportHandler = - new SCMDatanodeContainerReportHandler(); - containerReportHandler.setConf(conf); - containerReportHandler.init(scmManager); - ContainerReportsProto request = createContainerReport(numReport, stat); - containerReportHandler.processReport( - datanodeDetails, request); - - MetricsRecordBuilder scmMetrics = getMetrics(SCMMetrics.SOURCE_NAME); - assertEquals(size * numReport, - getLongCounter("ContainerReportSize", scmMetrics)); - assertEquals(used * numReport, - getLongCounter("ContainerReportUsed", scmMetrics)); - assertEquals(readBytes * numReport, - getLongCounter("ContainerReportReadBytes", scmMetrics)); - assertEquals(writeBytes * numReport, - getLongCounter("ContainerReportWriteBytes", scmMetrics)); - - assertEquals(keyCount * numReport, - getLongCounter("ContainerReportKeyCount", scmMetrics)); - assertEquals(readCount * numReport, - getLongCounter("ContainerReportReadCount", scmMetrics)); - assertEquals(writeCount * numReport, - getLongCounter("ContainerReportWriteCount", scmMetrics)); - - // reset stale interval time to move node from healthy to stale - SCMNodeManager nodeManager = (SCMNodeManager) cluster - .getStorageContainerManager().getScmNodeManager(); - nodeManager.setStaleNodeIntervalMs(100); - - // verify the metrics when node becomes stale - GenericTestUtils.waitFor(() -> { - MetricsRecordBuilder metrics = getMetrics(SCMMetrics.SOURCE_NAME); - return 0 == getLongCounter("ContainerReportSize", metrics) - && 0 == getLongCounter("ContainerReportUsed", metrics) - && 0 == getLongCounter("ContainerReportReadBytes", metrics) - && 0 == getLongCounter("ContainerReportWriteBytes", metrics) - && 0 == getLongCounter("ContainerReportKeyCount", metrics) - 
&& 0 == getLongCounter("ContainerReportReadCount", metrics) - && 0 == getLongCounter("ContainerReportWriteCount", metrics); - }, 1000, 60000); - } finally { - if (cluster != null) { - cluster.shutdown(); - } - } - } - - private ContainerReportsProto createContainerReport(int numReport, - ContainerStat stat) { - StorageContainerDatanodeProtocolProtos.ContainerReportsProto.Builder - reportsBuilder = StorageContainerDatanodeProtocolProtos - .ContainerReportsProto.newBuilder(); - - for (int i = 0; i < numReport; i++) { - ContainerReport report = new ContainerReport( - RandomUtils.nextLong(), DigestUtils.sha256Hex("Simulated")); - report.setSize(stat.getSize().get()); - report.setBytesUsed(stat.getUsed().get()); - report.setReadCount(stat.getReadCount().get()); - report.setReadBytes(stat.getReadBytes().get()); - report.setKeyCount(stat.getKeyCount().get()); - report.setWriteCount(stat.getWriteCount().get()); - report.setWriteBytes(stat.getWriteBytes().get()); - reportsBuilder.addReports(report.getProtoBufMessage()); - } - return reportsBuilder.build(); - } -} From 2b2399d623539ab68e71a38fa9fbfc9a405bddb8 Mon Sep 17 00:00:00 2001 From: Akira Ajisaka Date: Thu, 28 Jun 2018 14:29:40 +0900 Subject: [PATCH 67/70] HADOOP-15495. Upgrade commons-lang version to 3.7 in hadoop-common-project and hadoop-tools. Contributed by Takanobu Asanuma. --- .../hadoop-client-minicluster/pom.xml | 8 --- hadoop-common-project/hadoop-common/pom.xml | 5 -- .../hadoop/conf/ReconfigurationServlet.java | 42 ++++++------ .../apache/hadoop/crypto/key/KeyProvider.java | 4 +- .../apache/hadoop/crypto/key/KeyShell.java | 2 +- .../java/org/apache/hadoop/fs/FsShell.java | 3 +- .../main/java/org/apache/hadoop/fs/Path.java | 2 +- .../org/apache/hadoop/fs/shell/Count.java | 2 +- .../hadoop/io/ElasticByteBufferPool.java | 2 +- .../hadoop/io/erasurecode/ECSchema.java | 4 +- .../nativeio/SharedFileDescriptorFactory.java | 2 +- .../org/apache/hadoop/ipc/CallerContext.java | 4 +- .../apache/hadoop/ipc/DecayRpcScheduler.java | 4 +- .../org/apache/hadoop/ipc/FairCallQueue.java | 4 +- .../hadoop/metrics2/MetricsJsonBuilder.java | 2 +- .../hadoop/metrics2/lib/MethodMetric.java | 2 +- .../metrics2/lib/MutableMetricsFactory.java | 2 +- .../hadoop/metrics2/lib/MutableQuantiles.java | 2 +- .../metrics2/lib/MutableRollingAverages.java | 2 +- .../hadoop/metrics2/lib/MutableStat.java | 2 +- .../metrics2/sink/RollingFileSystemSink.java | 2 +- .../org/apache/hadoop/net/TableMapping.java | 2 +- .../apache/hadoop/net/unix/DomainSocket.java | 2 +- .../hadoop/net/unix/DomainSocketWatcher.java | 2 +- .../security/ShellBasedUnixGroupsMapping.java | 2 +- .../security/alias/CredentialShell.java | 2 +- .../security/http/CrossOriginFilter.java | 2 +- .../security/token/DtFileOperations.java | 2 +- .../org/apache/hadoop/tools/TableListing.java | 6 +- .../org/apache/hadoop/util/StringUtils.java | 64 ++++++++++++++++++- .../apache/hadoop/conf/TestConfiguration.java | 2 +- .../conf/TestConfigurationFieldsBase.java | 2 +- .../crypto/random/TestOsSecureRandom.java | 2 +- .../org/apache/hadoop/fs/FSTestWrapper.java | 2 +- .../hadoop/fs/FileContextTestHelper.java | 1 - .../hadoop/fs/TestDFCachingGetSpaceUsed.java | 2 +- .../fs/TestFileSystemStorageStatistics.java | 18 +++--- .../hadoop/fs/shell/TestCopyFromLocal.java | 10 +-- .../TestSharedFileDescriptorFactory.java | 2 +- .../apache/hadoop/ipc/TestProtoBufRpc.java | 2 +- .../org/apache/hadoop/ipc/TestSaslRPC.java | 2 +- .../org/apache/hadoop/net/TestNetUtils.java | 2 +- .../hadoop/net/unix/TestDomainSocket.java | 2 
+- ...onTokenAuthenticationHandlerWithMocks.java | 2 +- .../AbstractServiceLauncherTestBase.java | 2 +- .../apache/hadoop/test/GenericTestUtils.java | 4 +- .../hadoop/util/TestShutdownHookManager.java | 2 +- .../apache/hadoop/util/TestSignalLogger.java | 2 +- .../server/namenode/FSNamesystemLock.java | 2 +- hadoop-project/pom.xml | 5 -- .../aliyun/oss/AliyunCredentialsProvider.java | 2 +- .../fs/aliyun/oss/AliyunOSSFileSystem.java | 2 +- .../aliyun/oss/AliyunOSSFileSystemStore.java | 2 +- .../hadoop/fs/aliyun/oss/AliyunOSSUtils.java | 2 +- .../fs/aliyun/oss/AliyunOSSTestUtils.java | 2 +- .../fs/s3a/AWSCredentialProviderList.java | 2 +- .../fs/s3a/BasicAWSCredentialsProvider.java | 2 +- .../apache/hadoop/fs/s3a/S3AFileSystem.java | 4 +- .../apache/hadoop/fs/s3a/S3AInputStream.java | 2 +- .../org/apache/hadoop/fs/s3a/S3AUtils.java | 2 +- .../fs/s3a/SimpleAWSCredentialsProvider.java | 2 +- .../s3a/TemporaryAWSCredentialsProvider.java | 2 +- .../auth/AssumedRoleCredentialProvider.java | 2 +- .../s3a/commit/files/SinglePendingCommit.java | 2 +- .../fs/s3a/commit/files/SuccessData.java | 2 +- .../s3a/commit/magic/MagicCommitTracker.java | 2 +- .../hadoop/fs/s3a/commit/staging/Paths.java | 2 +- .../fs/s3a/s3guard/DynamoDBClientFactory.java | 2 +- .../fs/s3a/s3guard/DynamoDBMetadataStore.java | 2 +- .../fs/s3a/s3guard/LocalMetadataStore.java | 2 +- .../hadoop/fs/s3a/s3guard/S3GuardTool.java | 2 +- .../hadoop/fs/s3native/S3xLoginHelper.java | 4 +- .../hadoop/fs/s3a/ITestS3AConfiguration.java | 4 +- ...TestS3AEncryptionSSEKMSUserDefinedKey.java | 2 +- ...SSEKMSUserDefinedKeyBlockOutputStream.java | 2 +- .../apache/hadoop/fs/s3a/S3ATestUtils.java | 2 +- .../fs/s3a/commit/AbstractCommitITest.java | 2 +- .../apache/hadoop/fs/adl/AdlFileSystem.java | 2 +- .../fs/adl/TestAzureADTokenProvider.java | 2 +- .../fs/azure/AzureNativeFileSystemStore.java | 2 +- .../fs/azure/BlockBlobAppendStream.java | 2 +- .../fs/azure/ClientThrottlingAnalyzer.java | 2 +- .../fs/azure/NativeAzureFileSystem.java | 2 +- .../hadoop/fs/azure/PageBlobOutputStream.java | 2 +- .../fs/azure/RemoteWasbAuthorizerImpl.java | 2 +- .../fs/azure/SecureWasbRemoteCallHelper.java | 2 +- .../fs/azure/ITestBlobDataValidation.java | 2 +- .../ITestNativeAzureFileSystemAppend.java | 2 +- .../hadoop/fs/azure/MockStorageInterface.java | 12 ++-- ...tNativeAzureFileSystemBlockCompaction.java | 2 +- .../fs/azure/integration/AzureTestUtils.java | 2 +- .../apache/hadoop/tools/DistCpOptions.java | 2 +- .../apache/hadoop/tools/OptionsParser.java | 2 +- .../hadoop/tools/mapred/CopyMapper.java | 2 +- .../mapred/gridmix/ClusterSummarizer.java | 2 +- .../mapred/gridmix/ExecutionSummarizer.java | 4 +- .../hadoop/mapred/gridmix/JobFactory.java | 2 +- .../gridmix/RandomTextDataGenerator.java | 2 +- .../translator/impl/BaseLogParser.java | 1 - .../WordListAnonymizerUtility.java | 2 +- .../util/MapReduceJobPropertiesParser.java | 2 +- .../hadoop/streaming/TestUnconsumedInput.java | 2 +- 102 files changed, 207 insertions(+), 168 deletions(-) diff --git a/hadoop-client-modules/hadoop-client-minicluster/pom.xml b/hadoop-client-modules/hadoop-client-minicluster/pom.xml index b9363de569..6fa24b49e5 100644 --- a/hadoop-client-modules/hadoop-client-minicluster/pom.xml +++ b/hadoop-client-modules/hadoop-client-minicluster/pom.xml @@ -166,10 +166,6 @@ commons-io commons-io - - commons-lang - commons-lang - commons-logging commons-logging @@ -495,10 +491,6 @@ commons-codec commons-codec - - commons-lang - commons-lang - commons-logging commons-logging diff --git 
a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml index 1a16dc48fb..67a5a54839 100644 --- a/hadoop-common-project/hadoop-common/pom.xml +++ b/hadoop-common-project/hadoop-common/pom.xml @@ -156,11 +156,6 @@ junit test - - commons-lang - commons-lang - compile - commons-beanutils commons-beanutils diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java index 5a616f72b9..c5bdf4e021 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java @@ -18,7 +18,7 @@ package org.apache.hadoop.conf; -import org.apache.commons.lang.StringEscapeUtils; +import org.apache.commons.lang3.StringEscapeUtils; import java.util.Collection; import java.util.Enumeration; @@ -72,10 +72,10 @@ private Reconfigurable getReconfigurable(HttpServletRequest req) { private void printHeader(PrintWriter out, String nodeName) { out.print(""); out.printf("%s Reconfiguration Utility%n", - StringEscapeUtils.escapeHtml(nodeName)); + StringEscapeUtils.escapeHtml4(nodeName)); out.print("\n"); out.printf("
<h1>
    %s Reconfiguration Utility
</h1>
    %n", - StringEscapeUtils.escapeHtml(nodeName)); + StringEscapeUtils.escapeHtml4(nodeName)); } private void printFooter(PrintWriter out) { @@ -103,20 +103,20 @@ private void printConf(PrintWriter out, Reconfigurable reconf) { out.print(""); if (!reconf.isPropertyReconfigurable(c.prop)) { out.print("" + - StringEscapeUtils.escapeHtml(c.prop) + ""); + StringEscapeUtils.escapeHtml4(c.prop) + ""); changeOK = false; } else { - out.print(StringEscapeUtils.escapeHtml(c.prop)); + out.print(StringEscapeUtils.escapeHtml4(c.prop)); out.print(""); + StringEscapeUtils.escapeHtml4(c.prop) + "\" value=\"" + + StringEscapeUtils.escapeHtml4(c.newVal) + "\"/>"); } out.print("" + (c.oldVal == null ? "default" : - StringEscapeUtils.escapeHtml(c.oldVal)) + + StringEscapeUtils.escapeHtml4(c.oldVal)) + "" + (c.newVal == null ? "default" : - StringEscapeUtils.escapeHtml(c.newVal)) + + StringEscapeUtils.escapeHtml4(c.newVal)) + ""); out.print("\n"); } @@ -147,9 +147,9 @@ private void applyChanges(PrintWriter out, Reconfigurable reconf, synchronized(oldConf) { while (params.hasMoreElements()) { String rawParam = params.nextElement(); - String param = StringEscapeUtils.unescapeHtml(rawParam); + String param = StringEscapeUtils.unescapeHtml4(rawParam); String value = - StringEscapeUtils.unescapeHtml(req.getParameter(rawParam)); + StringEscapeUtils.unescapeHtml4(req.getParameter(rawParam)); if (value != null) { if (value.equals(newConf.getRaw(param)) || value.equals("default") || value.equals("null") || value.isEmpty()) { @@ -157,8 +157,8 @@ private void applyChanges(PrintWriter out, Reconfigurable reconf, value.isEmpty()) && oldConf.getRaw(param) != null) { out.println("
<p>
    Changed \"" + - StringEscapeUtils.escapeHtml(param) + "\" from \"" + - StringEscapeUtils.escapeHtml(oldConf.getRaw(param)) + + StringEscapeUtils.escapeHtml4(param) + "\" from \"" + + StringEscapeUtils.escapeHtml4(oldConf.getRaw(param)) + "\" to default
</p>
    "); reconf.reconfigureProperty(param, null); } else if (!value.equals("default") && !value.equals("null") && @@ -168,16 +168,16 @@ private void applyChanges(PrintWriter out, Reconfigurable reconf, // change from default or value to different value if (oldConf.getRaw(param) == null) { out.println("
<p>
    Changed \"" + - StringEscapeUtils.escapeHtml(param) + + StringEscapeUtils.escapeHtml4(param) + "\" from default to \"" + - StringEscapeUtils.escapeHtml(value) + "\"
</p>
    "); + StringEscapeUtils.escapeHtml4(value) + "\"
</p>
    "); } else { out.println("
<p>
    Changed \"" + - StringEscapeUtils.escapeHtml(param) + "\" from \"" + - StringEscapeUtils.escapeHtml(oldConf. + StringEscapeUtils.escapeHtml4(param) + "\" from \"" + + StringEscapeUtils.escapeHtml4(oldConf. getRaw(param)) + "\" to \"" + - StringEscapeUtils.escapeHtml(value) + "\"
</p>
    "); + StringEscapeUtils.escapeHtml4(value) + "\"
</p>
    "); } reconf.reconfigureProperty(param, value); } else { @@ -185,10 +185,10 @@ private void applyChanges(PrintWriter out, Reconfigurable reconf, } } else { // parameter value != newConf value - out.println("
<p>
    \"" + StringEscapeUtils.escapeHtml(param) + + out.println("
<p>
    \"" + StringEscapeUtils.escapeHtml4(param) + "\" not changed because value has changed from \"" + - StringEscapeUtils.escapeHtml(value) + "\" to \"" + - StringEscapeUtils.escapeHtml(newConf.getRaw(param)) + + StringEscapeUtils.escapeHtml4(value) + "\" to \"" + + StringEscapeUtils.escapeHtml4(newConf.getRaw(param)) + "\" since approval
</p>
    "); } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java index 050540b4cb..286312ce5e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java @@ -33,8 +33,8 @@ import com.google.gson.stream.JsonReader; import com.google.gson.stream.JsonWriter; -import org.apache.commons.lang.builder.EqualsBuilder; -import org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.commons.lang3.builder.EqualsBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java index 9fdf242fd5..fa84c47d26 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java @@ -27,7 +27,7 @@ import com.google.common.annotations.VisibleForTesting; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.crypto.key.KeyProvider.Metadata; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java index 94d3389408..5be6e5f829 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java @@ -23,7 +23,6 @@ import java.util.Arrays; import java.util.LinkedList; -import org.apache.commons.lang.WordUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; @@ -275,7 +274,7 @@ private void printInstanceHelp(PrintStream out, Command instance) { listing = null; } - for (String descLine : WordUtils.wrap( + for (String descLine : StringUtils.wrap( line, MAX_LINE_WIDTH, "\n", true).split("\n")) { out.println(prefix + descLine); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java index 252b3cca79..b6244d6a36 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java @@ -27,7 +27,7 @@ import java.util.regex.Pattern; import org.apache.avro.reflect.Stringable; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Count.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Count.java index 8f6fc4d570..011e489df2 
100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Count.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Count.java @@ -23,7 +23,7 @@ import java.util.LinkedList; import java.util.List; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ElasticByteBufferPool.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ElasticByteBufferPool.java index 9dd7771fd5..bbedf2a2dc 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ElasticByteBufferPool.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ElasticByteBufferPool.java @@ -18,7 +18,7 @@ package org.apache.hadoop.io; import com.google.common.collect.ComparisonChain; -import org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; import java.nio.ByteBuffer; import java.util.Map; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java index f008e24d2f..0f95058afc 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java @@ -22,8 +22,8 @@ import java.util.HashMap; import java.util.Map; -import org.apache.commons.lang.builder.EqualsBuilder; -import org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.commons.lang3.builder.EqualsBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/SharedFileDescriptorFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/SharedFileDescriptorFactory.java index 412634462a..4d820c271a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/SharedFileDescriptorFactory.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/SharedFileDescriptorFactory.java @@ -21,7 +21,7 @@ import java.io.IOException; import java.io.FileDescriptor; -import org.apache.commons.lang.SystemUtils; +import org.apache.commons.lang3.SystemUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.slf4j.Logger; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java index bdfa471f53..b156d1fe64 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.ipc; -import org.apache.commons.lang.builder.EqualsBuilder; -import org.apache.commons.lang.builder.HashCodeBuilder; +import 
org.apache.commons.lang3.builder.EqualsBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java index 0a00ca73d9..f12ecb6462 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java @@ -39,7 +39,7 @@ import com.fasterxml.jackson.databind.ObjectWriter; import com.google.common.base.Preconditions; import com.google.common.util.concurrent.AtomicDoubleArray; -import org.apache.commons.lang.exception.ExceptionUtils; +import org.apache.commons.lang3.exception.ExceptionUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.metrics2.MetricsCollector; @@ -429,7 +429,7 @@ private void decayCurrentCounts() { updateAverageResponseTime(true); } catch (Exception ex) { LOG.error("decayCurrentCounts exception: " + - ExceptionUtils.getFullStackTrace(ex)); + ExceptionUtils.getStackTrace(ex)); throw ex; } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java index 6d9ea3e72e..3a8c83dea7 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java @@ -32,7 +32,7 @@ import java.util.concurrent.atomic.AtomicLong; import com.google.common.annotations.VisibleForTesting; -import org.apache.commons.lang.NotImplementedException; +import org.apache.commons.lang3.NotImplementedException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ipc.CallQueueManager.CallQueueOverflowException; import org.apache.hadoop.metrics2.util.MBeans; @@ -286,7 +286,7 @@ public int size() { */ @Override public Iterator iterator() { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } /** diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsJsonBuilder.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsJsonBuilder.java index ce6fbe1d82..1d62c0a29f 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsJsonBuilder.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsJsonBuilder.java @@ -18,7 +18,7 @@ package org.apache.hadoop.metrics2; -import org.apache.commons.lang.exception.ExceptionUtils; +import org.apache.commons.lang3.exception.ExceptionUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.codehaus.jackson.map.ObjectMapper; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MethodMetric.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MethodMetric.java index 3d7a90e7ee..9b54adcb43 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MethodMetric.java +++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MethodMetric.java @@ -21,7 +21,7 @@ import java.lang.reflect.Method; import static com.google.common.base.Preconditions.*; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.metrics2.MetricsException; import org.apache.hadoop.metrics2.MetricsRecordBuilder; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableMetricsFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableMetricsFactory.java index b2042e7a12..a3ca98d040 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableMetricsFactory.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableMetricsFactory.java @@ -21,7 +21,7 @@ import java.lang.reflect.Field; import java.lang.reflect.Method; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.metrics2.MetricsException; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableQuantiles.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableQuantiles.java index cc32975513..6b30618475 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableQuantiles.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableQuantiles.java @@ -26,7 +26,7 @@ import java.util.concurrent.ScheduledFuture; import java.util.concurrent.TimeUnit; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.metrics2.MetricsInfo; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRollingAverages.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRollingAverages.java index 053cb5535c..22c288a3b1 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRollingAverages.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRollingAverages.java @@ -32,7 +32,7 @@ import java.util.function.Function; import com.google.common.annotations.VisibleForTesting; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.metrics2.MetricsInfo; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableStat.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableStat.java index 92fe3d1496..5ef31785a6 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableStat.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableStat.java @@ -18,7 +18,7 @@ package org.apache.hadoop.metrics2.lib; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import 
org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.metrics2.MetricsInfo; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/RollingFileSystemSink.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/RollingFileSystemSink.java index 0f6e9a9172..92ac9529be 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/RollingFileSystemSink.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/RollingFileSystemSink.java @@ -37,7 +37,7 @@ import java.util.regex.Pattern; import org.apache.commons.configuration2.SubsetConfiguration; -import org.apache.commons.lang.time.FastDateFormat; +import org.apache.commons.lang3.time.FastDateFormat; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java index ead9a7430b..45759df6ad 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java @@ -28,7 +28,7 @@ import java.util.List; import java.util.Map; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocket.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocket.java index ac118c0517..9693220438 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocket.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocket.java @@ -28,7 +28,7 @@ import java.nio.channels.ReadableByteChannel; import java.nio.ByteBuffer; -import org.apache.commons.lang.SystemUtils; +import org.apache.commons.lang3.SystemUtils; import org.apache.hadoop.util.NativeCodeLoader; import org.apache.hadoop.util.CloseableReferenceCount; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java index c7af97f60a..e36399ff96 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java @@ -32,7 +32,7 @@ import java.util.concurrent.locks.Condition; import java.util.concurrent.locks.ReentrantLock; -import org.apache.commons.lang.SystemUtils; +import org.apache.commons.lang3.SystemUtils; import org.apache.hadoop.util.NativeCodeLoader; import com.google.common.annotations.VisibleForTesting; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java index 976ddba84d..31f4398055 
100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java @@ -25,7 +25,7 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Joiner; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java index 608512155b..0a00d79104 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java @@ -27,7 +27,7 @@ import com.google.common.annotations.VisibleForTesting; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.tools.CommandShell; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/http/CrossOriginFilter.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/http/CrossOriginFilter.java index 34d9fe2b70..02c168f7b6 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/http/CrossOriginFilter.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/http/CrossOriginFilter.java @@ -34,7 +34,7 @@ import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import com.google.common.annotations.VisibleForTesting; import java.util.stream.Collectors; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DtFileOperations.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DtFileOperations.java index d36ad9bf67..f154f2d816 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DtFileOperations.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DtFileOperations.java @@ -26,7 +26,7 @@ import java.util.Date; import java.util.ServiceLoader; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/TableListing.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/TableListing.java index bc2e2d49d7..85015fbe30 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/TableListing.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/TableListing.java @@ -20,8 +20,7 @@ import java.util.ArrayList; import java.util.LinkedList; -import org.apache.commons.lang.StringUtils; -import org.apache.commons.lang.WordUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; /** @@ 
-103,7 +102,8 @@ String[] getRow(int idx) { // Line-wrap if it's too long String[] lines = new String[] {raw}; if (wrap) { - lines = WordUtils.wrap(lines[0], wrapWidth, "\n", true).split("\n"); + lines = org.apache.hadoop.util.StringUtils.wrap(lines[0], wrapWidth, + "\n", true).split("\n"); } for (int i=0; inull uses the system property line separator + * @param wrapLongWords true if long words (such as URLs) should be wrapped + * @return a line with newlines inserted, null if null input + */ + public static String wrap(String str, int wrapLength, String newLineStr, + boolean wrapLongWords) { + if(str == null) { + return null; + } else { + if(newLineStr == null) { + newLineStr = System.lineSeparator(); + } + + if(wrapLength < 1) { + wrapLength = 1; + } + + int inputLineLength = str.length(); + int offset = 0; + StringBuffer wrappedLine = new StringBuffer(inputLineLength + 32); + + while(inputLineLength - offset > wrapLength) { + if(str.charAt(offset) == 32) { + ++offset; + } else { + int spaceToWrapAt = str.lastIndexOf(32, wrapLength + offset); + if(spaceToWrapAt >= offset) { + wrappedLine.append(str.substring(offset, spaceToWrapAt)); + wrappedLine.append(newLineStr); + offset = spaceToWrapAt + 1; + } else if(wrapLongWords) { + wrappedLine.append(str.substring(offset, wrapLength + offset)); + wrappedLine.append(newLineStr); + offset += wrapLength; + } else { + spaceToWrapAt = str.indexOf(32, wrapLength + offset); + if(spaceToWrapAt >= 0) { + wrappedLine.append(str.substring(offset, spaceToWrapAt)); + wrappedLine.append(newLineStr); + offset = spaceToWrapAt + 1; + } else { + wrappedLine.append(str.substring(offset)); + offset = inputLineLength; + } + } + } + } + + wrappedLine.append(str.substring(offset)); + return wrappedLine.toString(); + } + } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java index e865bf1d93..2361626c3f 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java @@ -62,7 +62,7 @@ import static org.hamcrest.core.Is.is; import static org.junit.Assert.*; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration.IntegerRanges; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java index 7f27d7d51e..152159b3f3 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java @@ -18,7 +18,7 @@ package org.apache.hadoop.conf; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.junit.Before; import org.junit.Ignore; import org.junit.Test; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/random/TestOsSecureRandom.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/random/TestOsSecureRandom.java index 6c2e5b88bc..2ea45231a1 100644 --- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/random/TestOsSecureRandom.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/random/TestOsSecureRandom.java @@ -20,7 +20,7 @@ import java.io.IOException; import java.util.Arrays; -import org.apache.commons.lang.SystemUtils; +import org.apache.commons.lang3.SystemUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.util.Shell.ShellCommandExecutor; import org.junit.Assume; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSTestWrapper.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSTestWrapper.java index da071050eb..8cbca8e815 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSTestWrapper.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSTestWrapper.java @@ -19,7 +19,7 @@ import java.io.IOException; -import org.apache.commons.lang.RandomStringUtils; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.fs.Options.CreateOpts; import org.apache.hadoop.test.GenericTestUtils; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextTestHelper.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextTestHelper.java index 1f37f74e71..b5307a4e27 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextTestHelper.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextTestHelper.java @@ -22,7 +22,6 @@ import java.io.FileNotFoundException; import java.util.EnumSet; -import org.apache.commons.lang.RandomStringUtils; import org.apache.hadoop.fs.Options.CreateOpts; import org.apache.hadoop.fs.Options.CreateOpts.BlockSize; import org.apache.hadoop.io.IOUtils; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDFCachingGetSpaceUsed.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDFCachingGetSpaceUsed.java index 3def5d5388..6b9a34c3b3 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDFCachingGetSpaceUsed.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDFCachingGetSpaceUsed.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.fs; -import org.apache.commons.lang.RandomStringUtils; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.test.GenericTestUtils; import org.junit.After; import org.junit.Before; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemStorageStatistics.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemStorageStatistics.java index 597eb93b58..fa682649a0 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemStorageStatistics.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemStorageStatistics.java @@ -18,7 +18,7 @@ package org.apache.hadoop.fs; -import org.apache.commons.lang.math.RandomUtils; +import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.fs.StorageStatistics.LongStatistic; import org.junit.Before; @@ -67,15 +67,15 @@ public class TestFileSystemStorageStatistics { @Before public void setup() { - statistics.incrementBytesRead(RandomUtils.nextInt(100)); - 
statistics.incrementBytesWritten(RandomUtils.nextInt(100)); - statistics.incrementLargeReadOps(RandomUtils.nextInt(100)); - statistics.incrementWriteOps(RandomUtils.nextInt(100)); + statistics.incrementBytesRead(RandomUtils.nextInt(0, 100)); + statistics.incrementBytesWritten(RandomUtils.nextInt(0, 100)); + statistics.incrementLargeReadOps(RandomUtils.nextInt(0, 100)); + statistics.incrementWriteOps(RandomUtils.nextInt(0, 100)); - statistics.incrementBytesReadByDistance(0, RandomUtils.nextInt(100)); - statistics.incrementBytesReadByDistance(1, RandomUtils.nextInt(100)); - statistics.incrementBytesReadByDistance(3, RandomUtils.nextInt(100)); - statistics.incrementBytesReadErasureCoded(RandomUtils.nextInt(100)); + statistics.incrementBytesReadByDistance(0, RandomUtils.nextInt(0, 100)); + statistics.incrementBytesReadByDistance(1, RandomUtils.nextInt(0, 100)); + statistics.incrementBytesReadByDistance(3, RandomUtils.nextInt(0, 100)); + statistics.incrementBytesReadErasureCoded(RandomUtils.nextInt(0, 100)); } @Test diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopyFromLocal.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopyFromLocal.java index 8e60540126..e7f36fc850 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopyFromLocal.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopyFromLocal.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.fs.shell; -import org.apache.commons.lang.RandomStringUtils; -import org.apache.commons.lang.math.RandomUtils; +import org.apache.commons.lang3.RandomStringUtils; +import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.FileSystem; @@ -56,11 +56,11 @@ public static int initialize(Path dir) throws Exception { fs.mkdirs(toDirPath); int numTotalFiles = 0; - int numDirs = RandomUtils.nextInt(5); + int numDirs = RandomUtils.nextInt(0, 5); for (int dirCount = 0; dirCount < numDirs; ++dirCount) { Path subDirPath = new Path(fromDirPath, "subdir" + dirCount); fs.mkdirs(subDirPath); - int numFiles = RandomUtils.nextInt(10); + int numFiles = RandomUtils.nextInt(0, 10); for (int fileCount = 0; fileCount < numFiles; ++fileCount) { numTotalFiles++; Path subFile = new Path(subDirPath, "file" + fileCount); @@ -115,7 +115,7 @@ public void testCopyFromLocalWithThreads() throws Exception { Path dir = new Path("dir" + RandomStringUtils.randomNumeric(4)); int numFiles = TestCopyFromLocal.initialize(dir); int maxThreads = Runtime.getRuntime().availableProcessors() * 2; - int randThreads = RandomUtils.nextInt(maxThreads - 1) + 1; + int randThreads = RandomUtils.nextInt(0, maxThreads - 1) + 1; String numThreads = Integer.toString(randThreads); run(new TestMultiThreadedCopy(randThreads, randThreads == 1 ? 
0 : numFiles), "-t", numThreads, diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestSharedFileDescriptorFactory.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestSharedFileDescriptorFactory.java index fbe3fb8118..17be5874c5 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestSharedFileDescriptorFactory.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestSharedFileDescriptorFactory.java @@ -26,7 +26,7 @@ import org.junit.Assume; import org.junit.Before; import org.junit.Test; -import org.apache.commons.lang.SystemUtils; +import org.apache.commons.lang3.SystemUtils; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; import org.apache.hadoop.test.GenericTestUtils; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpc.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpc.java index 41ae910cba..5fbd957312 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpc.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpc.java @@ -20,7 +20,7 @@ import com.google.protobuf.BlockingService; import com.google.protobuf.RpcController; import com.google.protobuf.ServiceException; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.ipc.metrics.RpcMetrics; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java index 0b463a5130..520042017d 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java @@ -19,7 +19,7 @@ package org.apache.hadoop.ipc; import com.google.protobuf.ServiceException; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java index 30176f202c..62bd1b142e 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java @@ -38,7 +38,7 @@ import java.util.List; import java.util.concurrent.TimeUnit; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.security.KerberosAuthException; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocket.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocket.java index 28b3cbe3fa..c0d204f86a 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocket.java +++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocket.java @@ -43,7 +43,7 @@ import org.junit.Before; import org.junit.BeforeClass; import org.junit.Test; -import org.apache.commons.lang.exception.ExceptionUtils; +import org.apache.commons.lang3.exception.ExceptionUtils; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.net.unix.DomainSocket.DomainChannel; import org.apache.hadoop.test.GenericTestUtils; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestDelegationTokenAuthenticationHandlerWithMocks.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestDelegationTokenAuthenticationHandlerWithMocks.java index 9357f48df3..0f8f1e45c9 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestDelegationTokenAuthenticationHandlerWithMocks.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestDelegationTokenAuthenticationHandlerWithMocks.java @@ -21,7 +21,7 @@ import com.fasterxml.jackson.databind.ObjectMapper; -import org.apache.commons.lang.mutable.MutableBoolean; +import org.apache.commons.lang3.mutable.MutableBoolean; import org.apache.hadoop.io.Text; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authentication.client.AuthenticationException; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/launcher/AbstractServiceLauncherTestBase.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/launcher/AbstractServiceLauncherTestBase.java index 127b0b3827..d7c86316ef 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/launcher/AbstractServiceLauncherTestBase.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/launcher/AbstractServiceLauncherTestBase.java @@ -18,7 +18,7 @@ package org.apache.hadoop.service.launcher; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.service.Service; import org.apache.hadoop.service.ServiceOperations; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java index 53eb2be3bb..3e9da1b45f 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java @@ -40,7 +40,7 @@ import java.util.regex.Pattern; import org.apache.commons.io.IOUtils; -import org.apache.commons.lang.RandomStringUtils; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.fs.FileUtil; @@ -344,7 +344,7 @@ public static void assertExceptionContains(String expectedText, throw new AssertionError(E_NULL_THROWABLE_STRING, t); } if (expectedText != null && !msg.contains(expectedText)) { - String prefix = org.apache.commons.lang.StringUtils.isEmpty(message) + String prefix = org.apache.commons.lang3.StringUtils.isEmpty(message) ? 
"" : (message + ": "); throw new AssertionError( String.format("%s Expected to find '%s' %s: %s", diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestShutdownHookManager.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestShutdownHookManager.java index 2aa5e95b04..d53982363d 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestShutdownHookManager.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestShutdownHookManager.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.util; -import org.apache.commons.lang.exception.ExceptionUtils; +import org.apache.commons.lang3.exception.ExceptionUtils; import org.slf4j.LoggerFactory; import org.junit.Assert; import org.junit.Test; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSignalLogger.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSignalLogger.java index a9fa4c64e9..b61cebc0a6 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSignalLogger.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSignalLogger.java @@ -18,7 +18,7 @@ package org.apache.hadoop.util; -import org.apache.commons.lang.SystemUtils; +import org.apache.commons.lang3.SystemUtils; import org.junit.Assert; import org.junit.Assume; import org.junit.Test; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java index f8e69e288e..5992e54124 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java @@ -332,7 +332,7 @@ private void addMetric(String operationName, long value, boolean isWrite) { private static String getMetricName(String operationName, boolean isWrite) { return (isWrite ? 
WRITE_LOCK_METRIC_PREFIX : READ_LOCK_METRIC_PREFIX) + - org.apache.commons.lang.StringUtils.capitalize(operationName) + + org.apache.commons.lang3.StringUtils.capitalize(operationName) + LOCK_METRIC_SUFFIX; } } diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index dfd1eac293..8e28afec4e 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml
@@ -1043,11 +1043,6 @@
         <artifactId>junit</artifactId>
         <version>4.11</version>
       </dependency>
-      <dependency>
-        <groupId>commons-lang</groupId>
-        <artifactId>commons-lang</artifactId>
-        <version>2.6</version>
-      </dependency>
       <dependency>
         <groupId>commons-collections</groupId>
         <artifactId>commons-collections</artifactId>
diff --git a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunCredentialsProvider.java b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunCredentialsProvider.java index 58c14a943b..32367aff17 100644 --- a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunCredentialsProvider.java +++ b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunCredentialsProvider.java @@ -22,7 +22,7 @@ import com.aliyun.oss.common.auth.CredentialsProvider; import com.aliyun.oss.common.auth.DefaultCredentials; import com.aliyun.oss.common.auth.InvalidCredentialsException; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import java.io.IOException; diff --git a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java index 93e31d57e8..4fbb6fb8b1 100644 --- a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java +++ b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java @@ -30,7 +30,7 @@ import com.google.common.util.concurrent.ListeningExecutorService; import com.google.common.util.concurrent.MoreExecutors; import org.apache.commons.collections.CollectionUtils; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.CreateFlag; diff --git a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java index cc050c876e..5e2175926a 100644 --- a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java +++ b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java @@ -45,7 +45,7 @@ import com.aliyun.oss.model.UploadPartRequest; import com.aliyun.oss.model.UploadPartResult; import org.apache.commons.collections.CollectionUtils; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.FileStatus; diff --git a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSUtils.java b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSUtils.java index 2fe06c1b05..a7536d6d7a 100644 --- a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSUtils.java +++ b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSUtils.java @@ -23,7 +23,7 @@ import com.aliyun.oss.common.auth.CredentialsProvider;
import com.google.common.base.Preconditions; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.LocalDirAllocator; import org.apache.hadoop.security.ProviderUtils; diff --git a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSTestUtils.java b/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSTestUtils.java index 901cb2bd08..79e0de3492 100644 --- a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSTestUtils.java +++ b/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSTestUtils.java @@ -18,7 +18,7 @@ package org.apache.hadoop.fs.aliyun.oss; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.junit.internal.AssumptionViolatedException; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AWSCredentialProviderList.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AWSCredentialProviderList.java index e0bee0fdf6..10201f00d3 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AWSCredentialProviderList.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AWSCredentialProviderList.java @@ -23,7 +23,7 @@ import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.auth.AnonymousAWSCredentials; import com.google.common.annotations.VisibleForTesting; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.io.IOUtils; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BasicAWSCredentialsProvider.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BasicAWSCredentialsProvider.java index b1899e2293..01bcc6a05e 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BasicAWSCredentialsProvider.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BasicAWSCredentialsProvider.java @@ -21,7 +21,7 @@ import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.auth.BasicAWSCredentials; import com.amazonaws.auth.AWSCredentials; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java index 4b0c208805..737d7da95c 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java @@ -121,8 +121,8 @@ import static org.apache.hadoop.fs.s3a.Invoker.*; import static org.apache.hadoop.fs.s3a.S3AUtils.*; import static org.apache.hadoop.fs.s3a.Statistic.*; -import static org.apache.commons.lang.StringUtils.isNotBlank; -import static org.apache.commons.lang.StringUtils.isNotEmpty; +import static org.apache.commons.lang3.StringUtils.isNotBlank; +import static org.apache.commons.lang3.StringUtils.isNotEmpty; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java index c54d3e2621..440739d9d1 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java @@ -24,7 +24,7 @@ import com.amazonaws.services.s3.model.S3ObjectInputStream; import com.amazonaws.services.s3.model.SSECustomerKey; import com.google.common.base.Preconditions; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.CanSetReadahead; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java index 6a01a80308..a5f7d75449 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java @@ -35,7 +35,7 @@ import com.google.common.base.Preconditions; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/SimpleAWSCredentialsProvider.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/SimpleAWSCredentialsProvider.java index 9939bb2571..7f9e57e2e2 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/SimpleAWSCredentialsProvider.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/SimpleAWSCredentialsProvider.java @@ -21,7 +21,7 @@ import com.amazonaws.auth.AWSCredentials; import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.auth.BasicAWSCredentials; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/TemporaryAWSCredentialsProvider.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/TemporaryAWSCredentialsProvider.java index e959908a8d..3b89bde198 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/TemporaryAWSCredentialsProvider.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/TemporaryAWSCredentialsProvider.java @@ -21,7 +21,7 @@ import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.auth.BasicSessionCredentials; import com.amazonaws.auth.AWSCredentials; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import java.io.IOException; import java.net.URI; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/AssumedRoleCredentialProvider.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/AssumedRoleCredentialProvider.java index 4b6a77e0dc..fdaf9bd544 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/AssumedRoleCredentialProvider.java +++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/AssumedRoleCredentialProvider.java @@ -32,7 +32,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/files/SinglePendingCommit.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/files/SinglePendingCommit.java index 85cc38a846..596dd95685 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/files/SinglePendingCommit.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/files/SinglePendingCommit.java @@ -33,7 +33,7 @@ import com.amazonaws.services.s3.model.PartETag; import com.google.common.base.Preconditions; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.FileSystem; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/files/SuccessData.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/files/SuccessData.java index 6cf1f1e63a..cf84cb32eb 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/files/SuccessData.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/files/SuccessData.java @@ -28,7 +28,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.FileSystem; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/magic/MagicCommitTracker.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/magic/MagicCommitTracker.java index cf365c260d..a619fc7b7d 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/magic/MagicCommitTracker.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/magic/MagicCommitTracker.java @@ -28,7 +28,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.s3a.WriteOperationHelper; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/Paths.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/Paths.java index a4d39d75d6..d5d256aefb 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/Paths.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/Paths.java @@ -30,7 +30,7 @@ import com.google.common.collect.Sets; import com.google.common.util.concurrent.UncheckedExecutionException; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import 
org.apache.hadoop.fs.FileSystem; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBClientFactory.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBClientFactory.java index 66ada497aa..91e64cddf6 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBClientFactory.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBClientFactory.java @@ -26,7 +26,7 @@ import com.amazonaws.services.dynamodbv2.AmazonDynamoDB; import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClientBuilder; import com.google.common.base.Preconditions; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java index 0ab86962a9..116827dd4f 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java @@ -60,7 +60,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataStore.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataStore.java index 49981ed1ee..f0ffb44623 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataStore.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataStore.java @@ -23,7 +23,7 @@ import com.google.common.cache.Cache; import com.google.common.cache.CacheBuilder; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java index ac10e0876a..527697f00f 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java @@ -39,7 +39,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.FileStatus; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3native/S3xLoginHelper.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3native/S3xLoginHelper.java index bfac9750e7..9e2f34def3 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3native/S3xLoginHelper.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3native/S3xLoginHelper.java @@ -18,7 +18,7 @@ package 
org.apache.hadoop.fs.s3native; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -31,7 +31,7 @@ import java.net.URLDecoder; import java.util.Objects; -import static org.apache.commons.lang.StringUtils.equalsIgnoreCase; +import static org.apache.commons.lang3.StringUtils.equalsIgnoreCase; /** * Class to aid logging in to S3 endpoints. diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AConfiguration.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AConfiguration.java index f1799ac856..aa6b5d8659 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AConfiguration.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AConfiguration.java @@ -22,8 +22,8 @@ import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.S3ClientOptions; -import org.apache.commons.lang.StringUtils; -import org.apache.commons.lang.reflect.FieldUtils; +import org.apache.commons.lang3.StringUtils; +import org.apache.commons.lang3.reflect.FieldUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Path; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionSSEKMSUserDefinedKey.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionSSEKMSUserDefinedKey.java index 50c9fb554e..a8a78f6282 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionSSEKMSUserDefinedKey.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionSSEKMSUserDefinedKey.java @@ -20,7 +20,7 @@ import static org.apache.hadoop.fs.contract.ContractTestUtils.skip; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; /** diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionSSEKMSUserDefinedKeyBlockOutputStream.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionSSEKMSUserDefinedKeyBlockOutputStream.java index 4c953bd289..c1708305ec 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionSSEKMSUserDefinedKeyBlockOutputStream.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionSSEKMSUserDefinedKeyBlockOutputStream.java @@ -20,7 +20,7 @@ import static org.apache.hadoop.fs.contract.ContractTestUtils.skip; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; /** diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java index d259bf1d1e..869997b44e 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java @@ -18,7 +18,7 @@ package org.apache.hadoop.fs.s3a; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractCommitITest.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractCommitITest.java index b8610d64cd..90e88945b3 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractCommitITest.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractCommitITest.java @@ -28,7 +28,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; diff --git a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java index 3e149a6906..79e8a698da 100644 --- a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java +++ b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java @@ -42,7 +42,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestAzureADTokenProvider.java b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestAzureADTokenProvider.java index 12c2e3ffc7..a68e6ac2bb 100644 --- a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestAzureADTokenProvider.java +++ b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestAzureADTokenProvider.java @@ -25,7 +25,7 @@ import com.microsoft.azure.datalake.store.oauth2.DeviceCodeTokenProvider; import com.microsoft.azure.datalake.store.oauth2.MsiTokenProvider; -import org.apache.commons.lang.builder.EqualsBuilder; +import org.apache.commons.lang3.builder.EqualsBuilder; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.adl.common.CustomMockTokenProvider; import org.apache.hadoop.fs.adl.oauth2.AzureADTokenProvider; diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java index 9396a51fb2..197ab22be2 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java @@ -41,7 +41,7 @@ import java.util.Map; import java.util.Set; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobAppendStream.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobAppendStream.java index 9a8530826e..5f051effef 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobAppendStream.java +++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobAppendStream.java @@ -44,7 +44,7 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.fs.FSExceptionMessages; import org.apache.commons.codec.binary.Base64; diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/ClientThrottlingAnalyzer.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/ClientThrottlingAnalyzer.java index aa7ac2e1d7..850e552758 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/ClientThrottlingAnalyzer.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/ClientThrottlingAnalyzer.java @@ -20,7 +20,7 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java index dfc881ae5c..52027621ef 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java @@ -48,7 +48,7 @@ import com.fasterxml.jackson.databind.JsonMappingException; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectReader; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobOutputStream.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobOutputStream.java index 68ddcdf16e..6e98755e77 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobOutputStream.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobOutputStream.java @@ -36,7 +36,7 @@ import org.apache.hadoop.fs.Syncable; import org.apache.hadoop.fs.azure.StorageInterface.CloudPageBlobWrapper; -import org.apache.commons.lang.exception.ExceptionUtils; +import org.apache.commons.lang3.exception.ExceptionUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java index ea77510164..76ced3b96d 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java @@ -23,7 +23,7 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.ObjectReader; import com.google.common.annotations.VisibleForTesting; -import org.apache.commons.lang.StringUtils; +import 
org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.azure.security.Constants; import org.apache.hadoop.io.retry.RetryPolicy; diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SecureWasbRemoteCallHelper.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SecureWasbRemoteCallHelper.java index a0204bef47..f4ec1721ec 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SecureWasbRemoteCallHelper.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SecureWasbRemoteCallHelper.java @@ -18,7 +18,7 @@ package org.apache.hadoop.fs.azure; -import org.apache.commons.lang.Validate; +import org.apache.commons.lang3.Validate; import org.apache.hadoop.fs.azure.security.Constants; import org.apache.hadoop.fs.azure.security.SpnegoToken; import org.apache.hadoop.fs.azure.security.WasbDelegationTokenIdentifier; diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestBlobDataValidation.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestBlobDataValidation.java index 0aa93935fb..f54a2e1787 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestBlobDataValidation.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestBlobDataValidation.java @@ -29,7 +29,7 @@ import java.net.HttpURLConnection; import java.util.Arrays; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.TestHookOperationContext; diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemAppend.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemAppend.java index 29611bf24d..4e88b4551d 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemAppend.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemAppend.java @@ -22,7 +22,7 @@ import java.net.URI; import java.util.Arrays; -import org.apache.commons.lang.RandomStringUtils; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockStorageInterface.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockStorageInterface.java index d5f6437d96..1739cff76d 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockStorageInterface.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockStorageInterface.java @@ -36,7 +36,7 @@ import java.util.List; import org.apache.commons.codec.DecoderException; import org.apache.commons.codec.net.URLCodec; -import org.apache.commons.lang.NotImplementedException; +import org.apache.commons.lang3.NotImplementedException; import org.apache.http.client.utils.URIBuilder; import com.microsoft.azure.storage.AccessCondition; @@ -339,7 +339,7 @@ public Iterable listBlobs(String prefix, @Override public StorageUri getStorageUri() { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } } @@ -590,20 
+590,20 @@ public MockCloudPageBlobWrapper(URI uri, HashMap metadata, @Override public void create(long length, BlobRequestOptions options, OperationContext opContext) throws StorageException { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public void uploadPages(InputStream sourceStream, long offset, long length, BlobRequestOptions options, OperationContext opContext) throws StorageException, IOException { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public ArrayList downloadPageRanges(BlobRequestOptions options, OperationContext opContext) throws StorageException { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override @@ -622,7 +622,7 @@ public void setWriteBlockSizeInBytes(int writeBlockSizeInBytes) { @Override public StorageUri getStorageUri() { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemBlockCompaction.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemBlockCompaction.java index 820ce4f240..b8cf5ba8bf 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemBlockCompaction.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemBlockCompaction.java @@ -19,7 +19,7 @@ package org.apache.hadoop.fs.azure; import com.microsoft.azure.storage.blob.BlockEntry; -import org.apache.commons.lang.RandomStringUtils; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/AzureTestUtils.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/AzureTestUtils.java index 2fbbcd1758..8d2a104eb4 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/AzureTestUtils.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/AzureTestUtils.java @@ -28,7 +28,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.FileSystem; diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java index ea99016b2c..9db0eb549c 100644 --- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java +++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java @@ -20,7 +20,7 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/OptionsParser.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/OptionsParser.java index 668b594be6..e49feb5f69 
100644 --- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/OptionsParser.java +++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/OptionsParser.java @@ -28,7 +28,7 @@ import org.apache.commons.cli.HelpFormatter; import org.apache.commons.cli.Options; import org.apache.commons.cli.ParseException; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.Path; diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyMapper.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyMapper.java index faa4aa275a..c486bdbc23 100644 --- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyMapper.java +++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyMapper.java @@ -22,7 +22,7 @@ import java.io.IOException; import java.util.EnumSet; -import org.apache.commons.lang.exception.ExceptionUtils; +import org.apache.commons.lang3.exception.ExceptionUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/ClusterSummarizer.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/ClusterSummarizer.java index 9568171219..cf6da25311 100644 --- a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/ClusterSummarizer.java +++ b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/ClusterSummarizer.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.mapred.gridmix; -import org.apache.commons.lang.time.FastDateFormat; +import org.apache.commons.lang3.time.FastDateFormat; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/ExecutionSummarizer.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/ExecutionSummarizer.java index 8f9d434eb9..973838acb9 100644 --- a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/ExecutionSummarizer.java +++ b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/ExecutionSummarizer.java @@ -19,7 +19,7 @@ import java.io.IOException; -import org.apache.commons.lang.time.FastDateFormat; +import org.apache.commons.lang3.time.FastDateFormat; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -76,7 +76,7 @@ class ExecutionSummarizer implements StatListener { startTime = System.currentTimeMillis(); // flatten the args string and store it commandLineArgs = - org.apache.commons.lang.StringUtils.join(args, ' '); + org.apache.commons.lang3.StringUtils.join(args, ' '); } /** diff --git a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/JobFactory.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/JobFactory.java index 427174295c..73662bf8aa 100644 --- a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/JobFactory.java +++ b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/JobFactory.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.mapred.gridmix; 
-import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/RandomTextDataGenerator.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/RandomTextDataGenerator.java index 877d434e5a..494b9a11c9 100644 --- a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/RandomTextDataGenerator.java +++ b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/RandomTextDataGenerator.java @@ -21,7 +21,7 @@ import java.util.List; import java.util.Random; -import org.apache.commons.lang.RandomStringUtils; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/translator/impl/BaseLogParser.java b/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/translator/impl/BaseLogParser.java index afafd55a69..2accbac784 100644 --- a/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/translator/impl/BaseLogParser.java +++ b/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/translator/impl/BaseLogParser.java @@ -30,7 +30,6 @@ import java.util.List; import java.util.Map; -import org.apache.commons.lang.CharSet; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.resourceestimator.common.api.RecurrenceId; import org.apache.hadoop.resourceestimator.common.api.ResourceSkyline; diff --git a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/anonymization/WordListAnonymizerUtility.java b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/anonymization/WordListAnonymizerUtility.java index 5856626818..e6d09dcb3c 100644 --- a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/anonymization/WordListAnonymizerUtility.java +++ b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/anonymization/WordListAnonymizerUtility.java @@ -18,7 +18,7 @@ package org.apache.hadoop.tools.rumen.anonymization; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; /** * Utility class to handle commonly performed tasks in a diff --git a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/util/MapReduceJobPropertiesParser.java b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/util/MapReduceJobPropertiesParser.java index 1c92caf987..02fd48a071 100644 --- a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/util/MapReduceJobPropertiesParser.java +++ b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/util/MapReduceJobPropertiesParser.java @@ -25,7 +25,7 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapreduce.MRJobConfig; diff --git a/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestUnconsumedInput.java 
b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestUnconsumedInput.java index bd50ae0542..e1f6da5276 100644 --- a/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestUnconsumedInput.java +++ b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestUnconsumedInput.java @@ -25,7 +25,7 @@ import java.io.FileOutputStream; import java.io.IOException; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; From ddbff7c8d3f1851e5c5fa9bc33637e859d7d8ccf Mon Sep 17 00:00:00 2001 From: Akira Ajisaka Date: Thu, 28 Jun 2018 14:58:40 +0900 Subject: [PATCH 68/70] HADOOP-14313. Replace/improve Hadoop's byte[] comparator. Contributed by Vikas Vishwakarma. --- NOTICE.txt | 8 ++++ .../apache/hadoop/io/FastByteComparisons.java | 44 +++++++------------ 2 files changed, 25 insertions(+), 27 deletions(-) diff --git a/NOTICE.txt b/NOTICE.txt index 95a670d9ee..a53f13c700 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -196,6 +196,14 @@ by Google Inc, which can be obtained at: * HOMEPAGE: * http://code.google.com/p/snappy/ +This product contains a modified portion of UnsignedBytes LexicographicalComparator +from Guava v21 project by Google Inc, which can be obtained at: + + * LICENSE: + * license/COPYING (Apache License 2.0) + * HOMEPAGE: + * https://github.com/google/guava + This product optionally depends on 'JBoss Marshalling', an alternative Java serialization API, which can be obtained at: diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FastByteComparisons.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FastByteComparisons.java index a2903f89b9..5af6602b87 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FastByteComparisons.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FastByteComparisons.java @@ -26,7 +26,6 @@ import org.slf4j.LoggerFactory; import sun.misc.Unsafe; -import com.google.common.primitives.Longs; import com.google.common.primitives.UnsignedBytes; /** @@ -195,52 +194,43 @@ public int compareTo(byte[] buffer1, int offset1, int length1, length1 == length2) { return 0; } + final int stride = 8; int minLength = Math.min(length1, length2); - int minWords = minLength / Longs.BYTES; + int strideLimit = minLength & ~(stride - 1); int offset1Adj = offset1 + BYTE_ARRAY_BASE_OFFSET; int offset2Adj = offset2 + BYTE_ARRAY_BASE_OFFSET; + int i; /* * Compare 8 bytes at a time. Benchmarking shows comparing 8 bytes at a * time is no slower than comparing 4 bytes at a time even on 32-bit. * On the other hand, it is substantially faster on 64-bit. */ - for (int i = 0; i < minWords * Longs.BYTES; i += Longs.BYTES) { + for (i = 0; i < strideLimit; i += stride) { long lw = theUnsafe.getLong(buffer1, offset1Adj + (long) i); long rw = theUnsafe.getLong(buffer2, offset2Adj + (long) i); - long diff = lw ^ rw; - if (diff != 0) { + if (lw != rw) { if (!littleEndian) { return lessThanUnsigned(lw, rw) ? 
-1 : 1; } - // Use binary search - int n = 0; - int y; - int x = (int) diff; - if (x == 0) { - x = (int) (diff >>> 32); - n = 32; - } - - y = x << 16; - if (y == 0) { - n += 16; - } else { - x = y; - } - - y = x << 8; - if (y == 0) { - n += 8; - } - return (int) (((lw >>> n) & 0xFFL) - ((rw >>> n) & 0xFFL)); + /* + * We want to compare only the first index where left[index] != + * right[index]. This corresponds to the least significant nonzero + * byte in lw ^ rw, since lw and rw are little-endian. + * Long.numberOfTrailingZeros(diff) tells us the least significant + * nonzero bit, and zeroing out the first three bits of L.nTZ gives + * us the shift to get that least significant nonzero byte. This + * comparison logic is based on UnsignedBytes from Guava v21 + */ + int n = Long.numberOfTrailingZeros(lw ^ rw) & ~0x7; + return ((int) ((lw >>> n) & 0xFF)) - ((int) ((rw >>> n) & 0xFF)); } } // The epilogue to cover the last (minLength % 8) elements. - for (int i = minWords * Longs.BYTES; i < minLength; i++) { + for (; i < minLength; i++) { int result = UnsignedBytes.compare( buffer1[offset1 + i], buffer2[offset2 + i]); From 85627e2cba91a61d675d20cdb35e188c6c81e3f2 Mon Sep 17 00:00:00 2001 From: Anu Engineer Date: Wed, 27 Jun 2018 23:31:32 -0700 Subject: [PATCH 69/70] HDDS-195. Create generic CommandWatcher utility. Contributed by Elek, Marton. --- .../hdds/server/events/EventWatcher.java | 157 +++++++++++++ .../events/IdentifiableEventPayload.java | 30 +++ .../hdds/server/events/EventHandlerStub.java | 38 +++ .../hdds/server/events/TestEventWatcher.java | 220 ++++++++++++++++++ 4 files changed, 445 insertions(+) create mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java create mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/IdentifiableEventPayload.java create mode 100644 hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/EventHandlerStub.java create mode 100644 hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventWatcher.java diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java new file mode 100644 index 0000000000..19fddde9b4 --- /dev/null +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java @@ -0,0 +1,157 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.server.events; + +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.UUID; +import java.util.concurrent.ConcurrentHashMap; +import java.util.function.Predicate; +import java.util.stream.Collectors; + +import org.apache.hadoop.ozone.lease.Lease; +import org.apache.hadoop.ozone.lease.LeaseAlreadyExistException; +import org.apache.hadoop.ozone.lease.LeaseExpiredException; +import org.apache.hadoop.ozone.lease.LeaseManager; +import org.apache.hadoop.ozone.lease.LeaseNotFoundException; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Event watcher that (re)sends a message after timeout. + *

+ * Event watcher will send the tracked payload/event after a timeout period + * unless a confirmation from the original event (completion event) arrives. + * + * @param <TIMEOUT_PAYLOAD> The type of the events which are tracked. + * @param <COMPLETION_PAYLOAD> The type of event which could cancel the + * tracking. + */ +@SuppressWarnings("CheckStyle") +public abstract class EventWatcher<TIMEOUT_PAYLOAD extends IdentifiableEventPayload, + COMPLETION_PAYLOAD extends IdentifiableEventPayload> { + + private static final Logger LOG = LoggerFactory.getLogger(EventWatcher.class); + + private final Event<TIMEOUT_PAYLOAD> startEvent; + + private final Event<COMPLETION_PAYLOAD> completionEvent; + + private final LeaseManager<UUID> leaseManager; + + protected final Map<UUID, TIMEOUT_PAYLOAD> trackedEventsByUUID = + new ConcurrentHashMap<>(); + + protected final Set<TIMEOUT_PAYLOAD> trackedEvents = new HashSet<>(); + + public EventWatcher(Event<TIMEOUT_PAYLOAD> startEvent, + Event<COMPLETION_PAYLOAD> completionEvent, + LeaseManager<UUID> leaseManager) { + this.startEvent = startEvent; + this.completionEvent = completionEvent; + this.leaseManager = leaseManager; + + } + + public void start(EventQueue queue) { + + queue.addHandler(startEvent, this::handleStartEvent); + + queue.addHandler(completionEvent, (completionPayload, publisher) -> { + UUID uuid = completionPayload.getUUID(); + try { + handleCompletion(uuid, publisher); + } catch (LeaseNotFoundException e) { + //It's already done. Too late, we already retried it. + //Not a real problem. + LOG.warn("Completion event without active lease. UUID={}", uuid); + } + }); + + } + + private synchronized void handleStartEvent(TIMEOUT_PAYLOAD payload, + EventPublisher publisher) { + UUID identifier = payload.getUUID(); + trackedEventsByUUID.put(identifier, payload); + trackedEvents.add(payload); + try { + Lease<UUID> lease = leaseManager.acquire(identifier); + try { + lease.registerCallBack(() -> { + handleTimeout(publisher, identifier); + return null; + }); + + } catch (LeaseExpiredException e) { + handleTimeout(publisher, identifier); + } + } catch (LeaseAlreadyExistException e) { + //No problem at all. But timer is not reset. + } + } + + private synchronized void handleCompletion(UUID uuid, + EventPublisher publisher) throws LeaseNotFoundException { + leaseManager.release(uuid); + TIMEOUT_PAYLOAD payload = trackedEventsByUUID.remove(uuid); + trackedEvents.remove(payload); + onFinished(publisher, payload); + } + + private synchronized void handleTimeout(EventPublisher publisher, + UUID identifier) { + TIMEOUT_PAYLOAD payload = trackedEventsByUUID.remove(identifier); + trackedEvents.remove(payload); + onTimeout(publisher, payload); + } + + + /** + * Check if a specific payload is in-progress. + */ + public synchronized boolean contains(TIMEOUT_PAYLOAD payload) { + return trackedEvents.contains(payload); + } + + public synchronized boolean remove(TIMEOUT_PAYLOAD payload) { + try { + leaseManager.release(payload.getUUID()); + } catch (LeaseNotFoundException e) { + LOG.warn("Completion event without active lease. UUID={}", + payload.getUUID()); + } + trackedEventsByUUID.remove(payload.getUUID()); + return trackedEvents.remove(payload); + + } + + abstract void onTimeout(EventPublisher publisher, TIMEOUT_PAYLOAD payload); + + abstract void onFinished(EventPublisher publisher, TIMEOUT_PAYLOAD payload); + + public List<TIMEOUT_PAYLOAD> getTimeoutEvents( + Predicate<? super TIMEOUT_PAYLOAD> predicate) { + return trackedEventsByUUID.values().stream().filter(predicate) + .collect(Collectors.toList()); + } +} diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/IdentifiableEventPayload.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/IdentifiableEventPayload.java new file mode 100644 index 0000000000..e73e30fcde --- /dev/null +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/IdentifiableEventPayload.java @@ -0,0 +1,30 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.server.events; + +import java.util.UUID; + +/** + * Event with an additional unique identifier. + * + */ +public interface IdentifiableEventPayload { + + UUID getUUID(); + +} diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/EventHandlerStub.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/EventHandlerStub.java new file mode 100644 index 0000000000..3f34a70e6e --- /dev/null +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/EventHandlerStub.java @@ -0,0 +1,38 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.server.events; + +import java.util.ArrayList; +import java.util.List; + +/** + * Dummy class for testing to collect all the received events. + */ +public class EventHandlerStub<PAYLOAD> implements EventHandler<PAYLOAD> { + + private List<PAYLOAD> receivedEvents = new ArrayList<>(); + + @Override + public void onMessage(PAYLOAD payload, EventPublisher publisher) { + receivedEvents.add(payload); + } + + public List<PAYLOAD> getReceivedEvents() { + return receivedEvents; + } +} diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventWatcher.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventWatcher.java new file mode 100644 index 0000000000..1731350cfe --- /dev/null +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventWatcher.java @@ -0,0 +1,220 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.server.events; + +import java.util.List; +import java.util.Objects; +import java.util.UUID; + +import org.apache.hadoop.ozone.lease.LeaseManager; + +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +/** + * Test the basic functionality of event watcher. + */ +public class TestEventWatcher { + + private static final TypedEvent<UnderreplicatedEvent> WATCH_UNDER_REPLICATED = + new TypedEvent<>(UnderreplicatedEvent.class); + + private static final TypedEvent<UnderreplicatedEvent> UNDER_REPLICATED = + new TypedEvent<>(UnderreplicatedEvent.class); + + private static final TypedEvent<ReplicationCompletedEvent> + REPLICATION_COMPLETED = new TypedEvent<>(ReplicationCompletedEvent.class); + + LeaseManager<UUID> leaseManager; + + @Before + public void startLeaseManager() { + leaseManager = new LeaseManager<>(2000l); + leaseManager.start(); + } + + @After + public void stopLeaseManager() { + leaseManager.shutdown(); + } + + + @Test + public void testEventHandling() throws InterruptedException { + + EventQueue queue = new EventQueue(); + + EventWatcher<UnderreplicatedEvent, ReplicationCompletedEvent> + replicationWatcher = createEventWatcher(); + + EventHandlerStub<UnderreplicatedEvent> underReplicatedEvents = + new EventHandlerStub<>(); + + queue.addHandler(UNDER_REPLICATED, underReplicatedEvents); + + replicationWatcher.start(queue); + + UUID uuid1 = UUID.randomUUID(); + UUID uuid2 = UUID.randomUUID(); + + queue.fireEvent(WATCH_UNDER_REPLICATED, + new UnderreplicatedEvent(uuid1, "C1")); + + queue.fireEvent(WATCH_UNDER_REPLICATED, + new UnderreplicatedEvent(uuid2, "C2")); + + Assert.assertEquals(0, underReplicatedEvents.getReceivedEvents().size()); + + Thread.sleep(1000); + + queue.fireEvent(REPLICATION_COMPLETED, + new ReplicationCompletedEvent(uuid1, "C2", "D1")); + + Assert.assertEquals(0, underReplicatedEvents.getReceivedEvents().size()); + + Thread.sleep(1500); + + queue.processAll(1000L); + + Assert.assertEquals(1, underReplicatedEvents.getReceivedEvents().size()); + Assert.assertEquals(uuid2, + underReplicatedEvents.getReceivedEvents().get(0).UUID); + + } + + @Test + public void testInprogressFilter() throws InterruptedException { + + EventQueue queue = new EventQueue(); + + EventWatcher<UnderreplicatedEvent, ReplicationCompletedEvent> + replicationWatcher = createEventWatcher(); + + EventHandlerStub<UnderreplicatedEvent> underReplicatedEvents = + new EventHandlerStub<>(); + + queue.addHandler(UNDER_REPLICATED, underReplicatedEvents); + + replicationWatcher.start(queue); + + UnderreplicatedEvent event1 = + new UnderreplicatedEvent(UUID.randomUUID(), "C1"); + + queue.fireEvent(WATCH_UNDER_REPLICATED, event1); + + queue.fireEvent(WATCH_UNDER_REPLICATED, + new UnderreplicatedEvent(UUID.randomUUID(), "C2")); + + queue.fireEvent(WATCH_UNDER_REPLICATED, + new UnderreplicatedEvent(UUID.randomUUID(), "C1")); + + queue.processAll(1000L); + Thread.sleep(1000L); + List<UnderreplicatedEvent> c1todo = replicationWatcher + .getTimeoutEvents(e -> e.containerId.equalsIgnoreCase("C1")); + + Assert.assertEquals(2, c1todo.size()); + Assert.assertTrue(replicationWatcher.contains(event1)); + Thread.sleep(1500L); + + c1todo = replicationWatcher + .getTimeoutEvents(e -> e.containerId.equalsIgnoreCase("C1")); + Assert.assertEquals(0, c1todo.size()); + Assert.assertFalse(replicationWatcher.contains(event1)); + + + } + + private EventWatcher<UnderreplicatedEvent, ReplicationCompletedEvent> +
createEventWatcher() { + return new EventWatcher<UnderreplicatedEvent, ReplicationCompletedEvent>( + WATCH_UNDER_REPLICATED, REPLICATION_COMPLETED, leaseManager) { + + @Override + void onTimeout(EventPublisher publisher, UnderreplicatedEvent payload) { + publisher.fireEvent(UNDER_REPLICATED, payload); + } + + @Override + void onFinished(EventPublisher publisher, UnderreplicatedEvent payload) { + //Good job. We did it. + } + }; + } + + private static class ReplicationCompletedEvent + implements IdentifiableEventPayload { + + private final UUID UUID; + + private final String containerId; + + private final String datanodeId; + + public ReplicationCompletedEvent(UUID UUID, String containerId, + String datanodeId) { + this.UUID = UUID; + this.containerId = containerId; + this.datanodeId = datanodeId; + } + + public UUID getUUID() { + return UUID; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + ReplicationCompletedEvent that = (ReplicationCompletedEvent) o; + return Objects.equals(containerId, that.containerId) && Objects + .equals(datanodeId, that.datanodeId); + } + + @Override + public int hashCode() { + + return Objects.hash(containerId, datanodeId); + } + } + + private static class UnderreplicatedEvent + + implements IdentifiableEventPayload { + + private final UUID UUID; + + private final String containerId; + + public UnderreplicatedEvent(UUID UUID, String containerId) { + this.containerId = containerId; + this.UUID = UUID; + } + + public UUID getUUID() { + return UUID; + } + } + +} \ No newline at end of file From d3fa83a44b01c85f39bfb4deaf2972912ac61ca3 Mon Sep 17 00:00:00 2001 From: Bharat Viswanadham Date: Thu, 28 Jun 2018 09:21:56 -0700 Subject: [PATCH 70/70] HDFS-13705:The native ISA-L library loading failure should be made warning rather than an error message. Contributed by Shashikant Banerjee. --- .../org/apache/hadoop/io/erasurecode/ErasureCodeNative.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeNative.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeNative.java index 3d6867aec4..ec317eee4d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeNative.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeNative.java @@ -46,7 +46,7 @@ public final class ErasureCodeNative { loadLibrary(); } catch (Throwable t) { problem = "Loading ISA-L failed: " + t.getMessage(); - LOG.error("Loading ISA-L failed", t); + LOG.warn(problem); } LOADING_FAILURE_REASON = problem; }
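
Illustrative note on the commons-lang to commons-lang3 changes in the hadoop-tools patches above: for the call sites touched there, only the package prefix moves from org.apache.commons.lang to org.apache.commons.lang3; the StringUtils.join, RandomStringUtils, and FastDateFormat methods used by those files keep their signatures. The class name Lang3MigrationSketch below is a hypothetical stand-in, not part of any patch.

import org.apache.commons.lang3.RandomStringUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.FastDateFormat;

public final class Lang3MigrationSketch {
  public static void main(String[] args) {
    // Same kinds of calls the patched gridmix/distcp/rumen classes make,
    // now resolved against the lang3 packages; no code changes beyond the
    // import statements are needed for these methods.
    String commandLine = StringUtils.join(new String[] {"-p", "/tmp/out"}, ' ');
    String randomWord = RandomStringUtils.randomAlphanumeric(10);
    String timestamp = FastDateFormat.getInstance("yyyy-MM-dd HH:mm:ss")
        .format(System.currentTimeMillis());
    System.out.println(commandLine + " " + randomWord + " " + timestamp);
  }
}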
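
The HADOOP-14313 comparator change relies on the least-significant-nonzero-byte trick described in its inline comment. The sketch below is not the patched Hadoop class: it reads the words with ByteBuffer instead of sun.misc.Unsafe, assumes little-endian loads (the patched code falls back to lessThanUnsigned on big-endian platforms), and only shows why Long.numberOfTrailingZeros(lw ^ rw) & ~0x7 isolates the first differing byte.

import java.nio.ByteBuffer;
import java.nio.ByteOrder;

public final class LexicographicCompareSketch {

  // Compare the first 8 bytes of two arrays lexicographically (unsigned),
  // using the same trick as the patched loop: the least significant
  // non-zero byte of (lw ^ rw) marks the first index where they differ.
  static int compareFirst8(byte[] left, byte[] right) {
    long lw = ByteBuffer.wrap(left).order(ByteOrder.LITTLE_ENDIAN).getLong();
    long rw = ByteBuffer.wrap(right).order(ByteOrder.LITTLE_ENDIAN).getLong();
    if (lw == rw) {
      return 0;
    }
    // numberOfTrailingZeros finds the first differing bit; clearing its low
    // three bits rounds down to the start of that bit's byte, so shifting
    // by n and masking with 0xFF extracts the first differing byte of each
    // word as an unsigned value.
    int n = Long.numberOfTrailingZeros(lw ^ rw) & ~0x7;
    return ((int) ((lw >>> n) & 0xFF)) - ((int) ((rw >>> n) & 0xFF));
  }

  public static void main(String[] args) {
    byte[] a = {1, 2, 3, 4, 5, 6, 7, 8};
    byte[] b = {1, 2, 3, 9, 5, 6, 7, 8};   // first difference at index 3
    System.out.println(compareFirst8(a, b)); // prints -5 (4 - 9), i.e. a < b
  }
}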
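
A condensed sketch of the HDDS-195 watcher wiring that TestEventWatcher exercises, assuming the EventQueue, TypedEvent, EventPublisher, and LeaseManager APIs referenced by the patch; the payload classes and event constants are the ones defined in the test, and the surrounding code must live in the same package for the package-private overrides to compile.

// Start a lease manager whose timeout bounds how long a tracked event waits
// for its completion event (2 seconds here, as in the test).
EventQueue queue = new EventQueue();
LeaseManager<UUID> leases = new LeaseManager<>(2000L);
leases.start();

EventWatcher<UnderreplicatedEvent, ReplicationCompletedEvent> watcher =
    new EventWatcher<UnderreplicatedEvent, ReplicationCompletedEvent>(
        WATCH_UNDER_REPLICATED, REPLICATION_COMPLETED, leases) {
      @Override
      void onTimeout(EventPublisher publisher, UnderreplicatedEvent payload) {
        // No completion event with the same UUID arrived before the lease
        // expired: re-publish the payload so the work is retried.
        publisher.fireEvent(UNDER_REPLICATED, payload);
      }

      @Override
      void onFinished(EventPublisher publisher, UnderreplicatedEvent payload) {
        // Completion arrived in time; the payload is no longer tracked.
      }
    };
watcher.start(queue);

// Firing the start event begins tracking; a completion event carrying the
// same UUID within the lease period cancels the retry.
UUID id = UUID.randomUUID();
queue.fireEvent(WATCH_UNDER_REPLICATED, new UnderreplicatedEvent(id, "C1"));
queue.fireEvent(REPLICATION_COMPLETED,
    new ReplicationCompletedEvent(id, "C1", "D1"));
queue.processAll(1000L); // deliver queued events, as the test does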