HADOOP-4687 split the contrib dirs

git-svn-id: https://svn.apache.org/repos/asf/hadoop/core/branches/HADOOP-4687/core@776180 13f79535-47bb-0310-9956-ffa450edef68
Owen O'Malley 2009-05-19 04:45:07 +00:00
parent abe7be9134
commit cab0a4bf54
243 changed files with 29719 additions and 0 deletions


@ -0,0 +1,11 @@
Bash tab completion support for the hadoop script.
On Debian-like distributions, the script can be placed in
/etc/bash_completion.d/, and it will be sourced automatically by Bash. On
other distributions, you may source the file manually (`. hadoop.sh') or
source it from your bashrc (or equivalent) file.
The script allows tab completion of all the command names, subcommands for the
'dfs', 'fs', 'dfsadmin', 'job', 'namenode' and 'pipes' commands, arguments of the
'jar' command, and most arguments to the 'fs' subcommands (completing local and
dfs paths as appropriate).
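
For example, to try the completion in the current shell session (the paths below
are illustrative; adjust them to your installation):

  # Source the completion function, or install it under /etc/bash_completion.d/.
  . /path/to/hadoop.sh
  # 'bin/hadoop <TAB><TAB>' now lists the available hadoop commands, and
  # 'bin/hadoop fs -copyFr<TAB>' completes to '-copyFromLocal'.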


@ -0,0 +1,121 @@
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Provides tab completion for the main hadoop script.
#
# On Debian-based systems, place in /etc/bash_completion.d/ and either restart
# Bash or source the script manually (. /etc/bash_completion.d/hadoop.sh).
_hadoop() {
local script cur prev temp
COMPREPLY=()
cur=${COMP_WORDS[COMP_CWORD]}
prev=${COMP_WORDS[COMP_CWORD-1]}
script=${COMP_WORDS[0]}
# Bash lets you tab complete things even if the script doesn't
# exist (or isn't executable). Check to make sure it is, as we
# need to execute it to get options/info
if [ -f "$script" -a -x "$script" ]; then
case $COMP_CWORD in
1)
# Completing the first argument (the command).
temp=`$script | grep -n "^\s*or"`;
temp=`$script | head -n $((${temp%%:*} - 1)) | awk '/^ / {print $1}' | sort | uniq`;
COMPREPLY=(`compgen -W "${temp}" -- ${cur}`);
return 0;;
2)
# Completing the second arg (first arg to the command)
# The output of commands isn't hugely consistent, so certain
# names are hardcoded and parsed differently. Some aren't
# handled at all (mostly ones without args).
case ${COMP_WORDS[1]} in
dfs | dfsadmin | fs | job | pipes)
# One option per line, enclosed in square brackets
temp=`$script ${COMP_WORDS[1]} 2>&1 | awk '/^[ \t]*\[/ {gsub("[[\\]]", ""); print $1}'`;
COMPREPLY=(`compgen -W "${temp}" -- ${cur}`);
return 0;;
jar)
# Any (jar) file
COMPREPLY=(`compgen -A file -- ${cur}`);
return 0;;
namenode)
# All options specified in one line,
# enclosed in [] and separated with |
temp=`$script ${COMP_WORDS[1]} -help 2>&1 | grep Usage: | cut -d '[' -f 2- | awk '{gsub("] \\| \\[|]", " "); print $0}'`;
COMPREPLY=(`compgen -W "${temp}" -- ${cur}`);
return 0;;
*)
# Other commands - no idea
return 1;;
esac;;
*)
# Additional args
case ${COMP_WORDS[1]} in
dfs | fs)
# DFS/FS subcommand completion
# Pull the list of options, grep for the one the user is trying to use,
# and then select the description of the relevant argument
temp=$((${COMP_CWORD} - 1));
temp=`$script ${COMP_WORDS[1]} 2>&1 | grep -- "${COMP_WORDS[2]} " | awk '{gsub("[[ \\]]", ""); print $0}' | cut -d '<' -f ${temp}`;
if [ ${#temp} -lt 1 ]; then
# No match
return 1;
fi;
temp=${temp:0:$((${#temp} - 1))};
# Now do completion based on the argument
case $temp in
path | src | dst)
# DFS path completion
temp=`$script ${COMP_WORDS[1]} -ls "${cur}*" 2>&1 | grep -vE '^Found ' | cut -f 1 | awk '{gsub("^.* ", ""); print $0;}'`
COMPREPLY=(`compgen -W "${temp}" -- ${cur}`);
return 0;;
localsrc | localdst)
# Local path completion
COMPREPLY=(`compgen -A file -- ${cur}`);
return 0;;
*)
# Other arguments - no idea
return 1;;
esac;;
*)
# Other subcommands - no idea
return 1;;
esac;
esac;
fi;
}
complete -F _hadoop hadoop


@ -0,0 +1,304 @@
<?xml version="1.0"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<!-- Imported by contrib/*/build.xml files to share generic targets. -->
<project name="hadoopbuildcontrib" xmlns:ivy="antlib:org.apache.ivy.ant">
<property name="name" value="${ant.project.name}"/>
<property name="root" value="${basedir}"/>
<!-- Load all the default properties, and any the user wants -->
<!-- to contribute (without having to type -D or edit this file) -->
<property file="${user.home}/${name}.build.properties" />
<property file="${root}/build.properties" />
<property name="hadoop.root" location="${root}/../../../"/>
<property name="src.dir" location="${root}/src/java"/>
<property name="src.test" location="${root}/src/test"/>
<property name="src.examples" location="${root}/src/examples"/>
<available file="${src.examples}" type="dir" property="examples.available"/>
<available file="${src.test}" type="dir" property="test.available"/>
<property name="conf.dir" location="${hadoop.root}/conf"/>
<property name="test.junit.output.format" value="plain"/>
<property name="test.output" value="no"/>
<property name="test.timeout" value="900000"/>
<property name="build.dir" location="${hadoop.root}/build/contrib/${name}"/>
<property name="build.classes" location="${build.dir}/classes"/>
<property name="build.test" location="${build.dir}/test"/>
<property name="build.examples" location="${build.dir}/examples"/>
<property name="hadoop.log.dir" location="${build.dir}/test/logs"/>
<!-- all jars together -->
<property name="javac.deprecation" value="off"/>
<property name="javac.debug" value="on"/>
<property name="build.ivy.lib.dir" value="${hadoop.root}/build/ivy/lib"/>
<property name="javadoc.link"
value="http://java.sun.com/j2se/1.4/docs/api/"/>
<property name="build.encoding" value="ISO-8859-1"/>
<fileset id="lib.jars" dir="${root}" includes="lib/*.jar"/>
<!-- IVY properties set here -->
<property name="ivy.dir" location="ivy" />
<property name="ivysettings.xml" location="${hadoop.root}/ivy/ivysettings.xml"/>
<loadproperties srcfile="${ivy.dir}/libraries.properties"/>
<loadproperties srcfile="${hadoop.root}/ivy/libraries.properties"/>
<property name="ivy.jar" location="${hadoop.root}/ivy/ivy-${ivy.version}.jar"/>
<property name="ivy_repo_url"
value="http://repo2.maven.org/maven2/org/apache/ivy/ivy/${ivy.version}/ivy-${ivy.version}.jar" />
<property name="build.dir" location="build" />
<property name="build.ivy.dir" location="${build.dir}/ivy" />
<property name="build.ivy.lib.dir" location="${build.ivy.dir}/lib" />
<property name="build.ivy.report.dir" location="${build.ivy.dir}/report" />
<property name="common.ivy.lib.dir" location="${build.ivy.lib.dir}/${ant.project.name}/common"/>
<!--this is the naming policy for artifacts we want pulled down-->
<property name="ivy.artifact.retrieve.pattern"
value="${ant.project.name}/[conf]/[artifact]-[revision].[ext]"/>
<!-- the normal classpath -->
<path id="contrib-classpath">
<pathelement location="${build.classes}"/>
<fileset refid="lib.jars"/>
<pathelement location="${hadoop.root}/build/classes"/>
<fileset dir="${hadoop.root}/lib">
<include name="**/*.jar" />
</fileset>
<path refid="${ant.project.name}.common-classpath"/>
<pathelement path="${clover.jar}"/>
</path>
<!-- the unit test classpath -->
<path id="test.classpath">
<pathelement location="${build.test}" />
<pathelement location="${hadoop.root}/build/test/classes"/>
<pathelement location="${hadoop.root}/build/test/core/classes"/>
<pathelement location="${hadoop.root}/build/test/hdfs/classes"/>
<pathelement location="${hadoop.root}/build/test/mapred/classes"/>
<pathelement location="${hadoop.root}/src/contrib/test"/>
<pathelement location="${conf.dir}"/>
<pathelement location="${hadoop.root}/build"/>
<pathelement location="${build.examples}"/>
<path refid="contrib-classpath"/>
</path>
<!-- to be overridden by sub-projects -->
<target name="check-contrib"/>
<target name="init-contrib"/>
<!-- ====================================================== -->
<!-- Stuff needed by all targets -->
<!-- ====================================================== -->
<target name="init" depends="check-contrib" unless="skip.contrib">
<echo message="contrib: ${name}"/>
<mkdir dir="${build.dir}"/>
<mkdir dir="${build.classes}"/>
<mkdir dir="${build.test}"/>
<mkdir dir="${build.examples}"/>
<mkdir dir="${hadoop.log.dir}"/>
<antcall target="init-contrib"/>
</target>
<!-- ====================================================== -->
<!-- Compile a Hadoop contrib's files -->
<!-- ====================================================== -->
<target name="compile" depends="init, ivy-retrieve-common" unless="skip.contrib">
<echo message="contrib: ${name}"/>
<javac
encoding="${build.encoding}"
srcdir="${src.dir}"
includes="**/*.java"
destdir="${build.classes}"
debug="${javac.debug}"
deprecation="${javac.deprecation}">
<classpath refid="contrib-classpath"/>
</javac>
</target>
<!-- ======================================================= -->
<!-- Compile a Hadoop contrib's example files (if available) -->
<!-- ======================================================= -->
<target name="compile-examples" depends="compile" if="examples.available">
<echo message="contrib: ${name}"/>
<javac
encoding="${build.encoding}"
srcdir="${src.examples}"
includes="**/*.java"
destdir="${build.examples}"
debug="${javac.debug}">
<classpath refid="contrib-classpath"/>
</javac>
</target>
<!-- ================================================================== -->
<!-- Compile test code -->
<!-- ================================================================== -->
<target name="compile-test" depends="compile-examples" if="test.available">
<echo message="contrib: ${name}"/>
<javac
encoding="${build.encoding}"
srcdir="${src.test}"
includes="**/*.java"
destdir="${build.test}"
debug="${javac.debug}">
<classpath refid="test.classpath"/>
</javac>
</target>
<!-- ====================================================== -->
<!-- Make a Hadoop contrib's jar -->
<!-- ====================================================== -->
<target name="jar" depends="compile" unless="skip.contrib">
<echo message="contrib: ${name}"/>
<jar
jarfile="${build.dir}/hadoop-${version}-${name}.jar"
basedir="${build.classes}"
/>
</target>
<!-- ====================================================== -->
<!-- Make a Hadoop contrib's examples jar -->
<!-- ====================================================== -->
<target name="jar-examples" depends="compile-examples"
if="examples.available" unless="skip.contrib">
<echo message="contrib: ${name}"/>
<jar jarfile="${build.dir}/hadoop-${version}-${name}-examples.jar">
<fileset dir="${build.classes}">
</fileset>
<fileset dir="${build.examples}">
</fileset>
</jar>
</target>
<!-- ====================================================== -->
<!-- Package a Hadoop contrib -->
<!-- ====================================================== -->
<target name="package" depends="jar, jar-examples" unless="skip.contrib">
<mkdir dir="${dist.dir}/contrib/${name}"/>
<copy todir="${dist.dir}/contrib/${name}" includeEmptyDirs="false" flatten="true">
<fileset dir="${build.dir}">
<include name="hadoop-${version}-${name}.jar" />
</fileset>
</copy>
</target>
<!-- ================================================================== -->
<!-- Run unit tests -->
<!-- ================================================================== -->
<target name="test" depends="compile-test, compile" if="test.available">
<echo message="contrib: ${name}"/>
<delete dir="${hadoop.log.dir}"/>
<mkdir dir="${hadoop.log.dir}"/>
<junit
printsummary="yes" showoutput="${test.output}"
haltonfailure="no" fork="yes" maxmemory="256m"
errorProperty="tests.failed" failureProperty="tests.failed"
timeout="${test.timeout}">
<sysproperty key="test.build.data" value="${build.test}/data"/>
<sysproperty key="build.test" value="${build.test}"/>
<sysproperty key="contrib.name" value="${name}"/>
<!-- requires fork=yes for:
relative File paths to use the specified user.dir
classpath to use build/contrib/*.jar
-->
<sysproperty key="user.dir" value="${build.test}/data"/>
<sysproperty key="fs.default.name" value="${fs.default.name}"/>
<sysproperty key="hadoop.test.localoutputfile" value="${hadoop.test.localoutputfile}"/>
<sysproperty key="hadoop.log.dir" value="${hadoop.log.dir}"/>
<sysproperty key="taskcontroller-path" value="${taskcontroller-path}"/>
<sysproperty key="taskcontroller-user" value="${taskcontroller-user}"/>
<classpath refid="test.classpath"/>
<formatter type="${test.junit.output.format}" />
<batchtest todir="${build.test}" unless="testcase">
<fileset dir="${src.test}"
includes="**/Test*.java" excludes="**/${test.exclude}.java" />
</batchtest>
<batchtest todir="${build.test}" if="testcase">
<fileset dir="${src.test}" includes="**/${testcase}.java"/>
</batchtest>
</junit>
<fail if="tests.failed">Tests failed!</fail>
</target>
<!-- ================================================================== -->
<!-- Clean. Delete the build files, and their directories -->
<!-- ================================================================== -->
<target name="clean">
<echo message="contrib: ${name}"/>
<delete dir="${build.dir}"/>
</target>
<target name="ivy-probe-antlib" >
<condition property="ivy.found">
<typefound uri="antlib:org.apache.ivy.ant" name="cleancache"/>
</condition>
</target>
<target name="ivy-download" description="To download ivy " unless="offline">
<get src="${ivy_repo_url}" dest="${ivy.jar}" usetimestamp="true"/>
</target>
<target name="ivy-init-antlib" depends="ivy-download,ivy-probe-antlib" unless="ivy.found">
<typedef uri="antlib:org.apache.ivy.ant" onerror="fail"
loaderRef="ivyLoader">
<classpath>
<pathelement location="${ivy.jar}"/>
</classpath>
</typedef>
<fail >
<condition >
<not>
<typefound uri="antlib:org.apache.ivy.ant" name="cleancache"/>
</not>
</condition>
You need Apache Ivy 2.0 or later from http://ant.apache.org/
It could not be loaded from ${ivy_repo_url}
</fail>
</target>
<target name="ivy-init" depends="ivy-init-antlib">
<ivy:configure settingsid="${ant.project.name}.ivy.settings" file="${ivysettings.xml}"/>
</target>
<target name="ivy-resolve-common" depends="ivy-init">
<ivy:resolve settingsRef="${ant.project.name}.ivy.settings" conf="common" />
</target>
<target name="ivy-retrieve-common" depends="ivy-resolve-common"
description="Retrieve Ivy-managed artifacts for the compile/test configurations">
<ivy:retrieve settingsRef="${ant.project.name}.ivy.settings"
pattern="${build.ivy.lib.dir}/${ivy.artifact.retrieve.pattern}" sync="true" />
<ivy:cachepath pathid="${ant.project.name}.common-classpath" conf="common" />
</target>
</project>
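
As a rough sketch of how a contrib is typically built against these shared
targets from a top-level checkout (the contrib name 'streaming' and the test
class name are placeholders, and the Hadoop core is assumed to have been built
already):

  # Compile and jar a single contrib using the shared targets above.
  ant -f src/contrib/streaming/build.xml compile
  ant -f src/contrib/streaming/build.xml jar
  # Run one test class via the 'testcase' property consumed by <batchtest>.
  ant -f src/contrib/streaming/build.xml test -Dtestcase=TestSomething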

src/contrib/build.xml

@ -0,0 +1,67 @@
<?xml version="1.0"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<project name="hadoopcontrib" default="compile" basedir=".">
<!-- If one of the contrib subdirectories fails the build or test targets -->
<!-- and you cannot fix it, exclude it from the filesets below, e.g. -->
<!-- excludes="badcontrib/build.xml" -->
<!-- ====================================================== -->
<!-- Compile contribs. -->
<!-- ====================================================== -->
<target name="compile">
<subant target="compile">
<fileset dir="." includes="*/build.xml"/>
</subant>
</target>
<!-- ====================================================== -->
<!-- Package contrib jars. -->
<!-- ====================================================== -->
<target name="package">
<subant target="package">
<fileset dir="." includes="*/build.xml"/>
</subant>
</target>
<!-- ====================================================== -->
<!-- Test all the contribs. -->
<!-- ====================================================== -->
<target name="test">
<subant target="test">
<fileset dir="." includes="hdfsproxy/build.xml"/>
<fileset dir="." includes="streaming/build.xml"/>
<fileset dir="." includes="fairscheduler/build.xml"/>
<fileset dir="." includes="capacity-scheduler/build.xml"/>
<fileset dir="." includes="mrunit/build.xml"/>
</subant>
</target>
<!-- ====================================================== -->
<!-- Clean all the contribs. -->
<!-- ====================================================== -->
<target name="clean">
<subant target="clean">
<fileset dir="." includes="*/build.xml"/>
</subant>
</target>
</project>


@ -0,0 +1,13 @@
Hadoop EC2
This collection of scripts allows you to run Hadoop clusters on Amazon.com's Elastic Compute Cloud (EC2) service described at:
http://aws.amazon.com/ec2
To get help, type the following in a shell:
bin/hadoop-ec2
For full instructions, please visit the Hadoop wiki at:
http://wiki.apache.org/hadoop/AmazonEC2#AutomatedScripts
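
For example, a typical session might look like the following (the cluster name
and slave count are placeholders; hadoop-ec2-env.sh must be filled in first):

  # Launch a master and two slaves, then log in to the master.
  bin/hadoop-ec2 launch-cluster my-hadoop-cluster 2
  bin/hadoop-ec2 login my-hadoop-cluster
  # When finished, terminate the instances and remove the security groups.
  bin/hadoop-ec2 terminate-cluster my-hadoop-cluster
  bin/hadoop-ec2 delete-cluster my-hadoop-cluster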


@ -0,0 +1,69 @@
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Run commands on the master or a specified node of a running Hadoop EC2 cluster.
# if no args specified, show usage
if [ $# = 0 ]; then
echo "Command required!"
exit 1
fi
# get arguments
COMMAND="$1"
shift
# get group
CLUSTER="$1"
shift
if [ -z $CLUSTER ]; then
echo "Cluster name or instance id required!"
exit -1
fi
# Import variables
bin=`dirname "$0"`
bin=`cd "$bin"; pwd`
. "$bin"/hadoop-ec2-env.sh
if [[ $CLUSTER == i-* ]]; then
HOST=`ec2-describe-instances $CLUSTER | grep running | awk '{print $4}'`
[ -z $HOST ] && echo "Instance still pending or no longer running: $CLUSTER" && exit -1
else
[ ! -f $MASTER_IP_PATH ] && echo "Wrong group name, or cluster not launched! $CLUSTER" && exit -1
HOST=`cat $MASTER_IP_PATH`
fi
if [ "$COMMAND" = "login" ] ; then
echo "Logging in to host $HOST."
ssh $SSH_OPTS "root@$HOST"
elif [ "$COMMAND" = "proxy" ] ; then
echo "Proxying to host $HOST via local port 6666"
echo "Gangia: http://$HOST/ganglia"
echo "JobTracker: http://$HOST:50030/"
echo "NameNode: http://$HOST:50070/"
ssh $SSH_OPTS -D 6666 -N "root@$HOST"
elif [ "$COMMAND" = "push" ] ; then
echo "Pushing $1 to host $HOST."
scp $SSH_OPTS -r $1 "root@$HOST:"
elif [ "$COMMAND" = "screen" ] ; then
echo "Logging in and attaching screen on host $HOST."
ssh $SSH_OPTS -t "root@$HOST" 'screen -D -R'
else
echo "Executing command on host $HOST."
ssh $SSH_OPTS -t "root@$HOST" "$COMMAND"
fi
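
These commands are normally reached through the bin/hadoop-ec2 wrapper rather
than by calling this script directly; for instance (the cluster name and file
are placeholders):

  bin/hadoop-ec2 screen my-hadoop-cluster          # attach a screen on the master
  bin/hadoop-ec2 proxy my-hadoop-cluster           # SOCKS proxy on localhost:6666
  bin/hadoop-ec2 push my-hadoop-cluster myjob.jar  # scp a file to the master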


@ -0,0 +1,78 @@
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Create a Hadoop AMI.
# Inspired by Jonathan Siegel's EC2 script (http://blogsiegel.blogspot.com/2006/08/sandboxing-amazon-ec2.html)
# Import variables
bin=`dirname "$0"`
bin=`cd "$bin"; pwd`
. "$bin"/hadoop-ec2-env.sh
AMI_IMAGE=`ec2-describe-images -a | grep $S3_BUCKET | grep $HADOOP_VERSION | grep $ARCH | grep available | awk '{print $2}'`
[ ! -z $AMI_IMAGE ] && echo "AMI already registered, use: ec2-deregister $AMI_IMAGE" && exit -1
echo "Starting a AMI with ID $BASE_AMI_IMAGE."
OUTPUT=`ec2-run-instances $BASE_AMI_IMAGE -k $KEY_NAME -t $INSTANCE_TYPE`
BOOTING_INSTANCE=`echo $OUTPUT | awk '{print $6}'`
echo "Instance is $BOOTING_INSTANCE."
echo "Polling server status (ec2-describe-instances $BOOTING_INSTANCE)"
while true; do
printf "."
HOSTNAME=`ec2-describe-instances $BOOTING_INSTANCE | grep running | awk '{print $4}'`
if [ ! -z $HOSTNAME ]; then
break;
fi
sleep 1
done
echo "The server is available at $HOSTNAME."
while true; do
REPLY=`ssh $SSH_OPTS "root@$HOSTNAME" 'echo "hello"'`
if [ ! -z $REPLY ]; then
break;
fi
sleep 5
done
read -p "Login first? [yes or no]: " answer
if [ "$answer" == "yes" ]; then
ssh $SSH_OPTS "root@$HOSTNAME"
fi
echo "Copying scripts."
# Copy setup scripts
scp $SSH_OPTS "$bin"/hadoop-ec2-env.sh "root@$HOSTNAME:/mnt"
scp $SSH_OPTS "$bin"/image/create-hadoop-image-remote "root@$HOSTNAME:/mnt"
scp $SSH_OPTS "$bin"/image/ec2-run-user-data "root@$HOSTNAME:/etc/init.d"
# Copy private key and certificate (for bundling image)
scp $SSH_OPTS $EC2_KEYDIR/pk*.pem "root@$HOSTNAME:/mnt"
scp $SSH_OPTS $EC2_KEYDIR/cert*.pem "root@$HOSTNAME:/mnt"
# Connect to it
ssh $SSH_OPTS "root@$HOSTNAME" '/mnt/create-hadoop-image-remote'
# Register image
ec2-register $S3_BUCKET/hadoop-$HADOOP_VERSION-$ARCH.manifest.xml
echo "Terminate with: ec2-terminate-instances $BOOTING_INSTANCE"


@ -0,0 +1,58 @@
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Delete the groups and local files associated with a cluster.
if [ -z $1 ]; then
echo "Cluster name required!"
exit -1
fi
CLUSTER=$1
# Finding Hadoop clusters
CLUSTERS=`ec2-describe-instances | \
awk '"RESERVATION" == $1 && $4 ~ /-master$/, "INSTANCE" == $1' | tr '\n' '\t' | \
grep "$CLUSTER" | grep running | cut -f4 | rev | cut -d'-' -f2- | rev`
if [ -n "$CLUSTERS" ]; then
echo "Cluster $CLUSTER has running instances. Please terminate them first."
exit 0
fi
# Import variables
bin=`dirname "$0"`
bin=`cd "$bin"; pwd`
. "$bin"/hadoop-ec2-env.sh
rm -f $MASTER_IP_PATH
rm -f $MASTER_PRIVATE_IP_PATH
ec2-describe-group | egrep "[[:space:]]$CLUSTER_MASTER[[:space:]]" > /dev/null
if [ $? -eq 0 ]; then
echo "Deleting group $CLUSTER_MASTER"
ec2-revoke $CLUSTER_MASTER -o $CLUSTER -u $AWS_ACCOUNT_ID
fi
ec2-describe-group | egrep "[[:space:]]$CLUSTER[[:space:]]" > /dev/null
if [ $? -eq 0 ]; then
echo "Deleting group $CLUSTER"
ec2-revoke $CLUSTER -o $CLUSTER_MASTER -u $AWS_ACCOUNT_ID
fi
ec2-delete-group $CLUSTER_MASTER
ec2-delete-group $CLUSTER


@ -0,0 +1,61 @@
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
bin=`dirname "$0"`
bin=`cd "$bin"; pwd`
# if no args specified, show usage
if [ $# = 0 ]; then
echo "Usage: hadoop-ec2 COMMAND"
echo "where COMMAND is one of:"
echo " list list all running Hadoop EC2 clusters"
echo " launch-cluster <group> <num slaves> launch a cluster of Hadoop EC2 instances - launch-master then launch-slaves"
echo " launch-master <group> launch or find a cluster master"
echo " launch-slaves <group> <num slaves> launch the cluster slaves"
echo " terminate-cluster <group> terminate all Hadoop EC2 instances"
echo " delete-cluster <group> delete the group information for a terminated cluster"
echo " login <group|instance id> login to the master node of the Hadoop EC2 cluster"
echo " screen <group|instance id> start or attach 'screen' on the master node of the Hadoop EC2 cluster"
echo " proxy <group|instance id> start a socks proxy on localhost:6666 (use w/foxyproxy)"
echo " push <group> <file> scp a file to the master node of the Hadoop EC2 cluster"
echo " <shell cmd> <group|instance id> execute any command remotely on the master"
echo " create-image create a Hadoop AMI"
exit 1
fi
# get arguments
COMMAND="$1"
shift
if [ "$COMMAND" = "create-image" ] ; then
. "$bin"/create-hadoop-image $*
elif [ "$COMMAND" = "launch-cluster" ] ; then
. "$bin"/launch-hadoop-cluster $*
elif [ "$COMMAND" = "launch-master" ] ; then
. "$bin"/launch-hadoop-master $*
elif [ "$COMMAND" = "launch-slaves" ] ; then
. "$bin"/launch-hadoop-slaves $*
elif [ "$COMMAND" = "delete-cluster" ] ; then
. "$bin"/delete-hadoop-cluster $*
elif [ "$COMMAND" = "terminate-cluster" ] ; then
. "$bin"/terminate-hadoop-cluster $*
elif [ "$COMMAND" = "list" ] ; then
. "$bin"/list-hadoop-clusters
else
. "$bin"/cmd-hadoop-cluster "$COMMAND" $*
fi


@ -0,0 +1,93 @@
# Set environment variables for running Hadoop on Amazon EC2 here. All are required.
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Your Amazon Account Number.
AWS_ACCOUNT_ID=
# Your Amazon AWS access key.
AWS_ACCESS_KEY_ID=
# Your Amazon AWS secret access key.
AWS_SECRET_ACCESS_KEY=
# Location of EC2 keys.
# The default setting is probably OK if you set up EC2 following the Amazon Getting Started guide.
EC2_KEYDIR=`dirname "$EC2_PRIVATE_KEY"`
# The EC2 key name used to launch instances.
# The default is the value used in the Amazon Getting Started guide.
KEY_NAME=gsg-keypair
# Where your EC2 private key is stored (created when following the Amazon Getting Started guide).
# You need to change this if you don't store this with your other EC2 keys.
PRIVATE_KEY_PATH=`echo "$EC2_KEYDIR"/"id_rsa-$KEY_NAME"`
# SSH options used when connecting to EC2 instances.
SSH_OPTS=`echo -i "$PRIVATE_KEY_PATH" -o StrictHostKeyChecking=no -o ServerAliveInterval=30`
# The version of Hadoop to use.
HADOOP_VERSION=0.19.0
# The Amazon S3 bucket where the Hadoop AMI is stored.
# The default value is for public images, so it can be left as-is if you are running a public image.
# Change this value only if you are creating your own (private) AMI
# so you can store it in a bucket you own.
S3_BUCKET=hadoop-images
# Enable public access to JobTracker and TaskTracker web interfaces
ENABLE_WEB_PORTS=true
# The script to run on instance boot.
USER_DATA_FILE=hadoop-ec2-init-remote.sh
# The EC2 instance type: m1.small, m1.large, m1.xlarge
INSTANCE_TYPE="m1.small"
#INSTANCE_TYPE="m1.large"
#INSTANCE_TYPE="m1.xlarge"
#INSTANCE_TYPE="c1.medium"
#INSTANCE_TYPE="c1.xlarge"
# The EC2 group master name. CLUSTER is set by calling scripts
CLUSTER_MASTER=$CLUSTER-master
# Cached values for a given cluster
MASTER_PRIVATE_IP_PATH=~/.hadoop-private-$CLUSTER_MASTER
MASTER_IP_PATH=~/.hadoop-$CLUSTER_MASTER
MASTER_ZONE_PATH=~/.hadoop-zone-$CLUSTER_MASTER
#
# The following variables are only used when creating an AMI.
#
# The version number of the installed JDK.
JAVA_VERSION=1.6.0_07
# SUPPORTED_ARCHITECTURES = ['i386', 'x86_64']
# The download URL for the Sun JDK. Visit http://java.sun.com/javase/downloads/index.jsp and get the URL for the "Linux self-extracting file".
if [ "$INSTANCE_TYPE" == "m1.small" -o "$INSTANCE_TYPE" == "c1.medium" ]; then
ARCH='i386'
BASE_AMI_IMAGE="ami-2b5fba42" # ec2-public-images/fedora-8-i386-base-v1.07.manifest.xml
JAVA_BINARY_URL=''
else
ARCH='x86_64'
BASE_AMI_IMAGE="ami-2a5fba43" # ec2-public-images/fedora-8-x86_64-base-v1.07.manifest.xml
JAVA_BINARY_URL=''
fi
if [ "$AMI_KERNEL" != "" ]; then
KERNEL_ARG="--kernel ${AMI_KERNEL}"
fi
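
As an illustrative sketch only (the values below are placeholders, not working
credentials), the variables left blank above are the ones that must be filled
in before any of the cluster scripts will run:

  AWS_ACCOUNT_ID=1234-5678-9012             # placeholder account number
  AWS_ACCESS_KEY_ID=YOUR_ACCESS_KEY_ID      # placeholder access key
  AWS_SECRET_ACCESS_KEY=YOUR_SECRET_KEY     # placeholder secret key
  KEY_NAME=gsg-keypair                      # must match an existing EC2 keypair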


@ -0,0 +1,150 @@
#!/usr/bin/env bash
################################################################################
# Script that is run on each EC2 instance on boot. It is passed in the EC2 user
# data, so should not exceed 16K in size.
################################################################################
################################################################################
# Initialize variables
################################################################################
# Slaves are started after the master, and are told its address by sending a
# modified copy of this file which sets the MASTER_HOST variable.
# A node knows if it is the master or not by inspecting the security group
# name. If it is the master then it retrieves its address using instance data.
MASTER_HOST=%MASTER_HOST% # Interpolated before being sent to EC2 node
SECURITY_GROUPS=`wget -q -O - http://169.254.169.254/latest/meta-data/security-groups`
IS_MASTER=`echo $SECURITY_GROUPS | awk '{ a = match ($0, "-master$"); if (a) print "true"; else print "false"; }'`
if [ "$IS_MASTER" == "true" ]; then
MASTER_HOST=`wget -q -O - http://169.254.169.254/latest/meta-data/local-hostname`
fi
HADOOP_HOME=`ls -d /usr/local/hadoop-*`
################################################################################
# Hadoop configuration
# Modify this section to customize your Hadoop cluster.
################################################################################
cat > $HADOOP_HOME/conf/hadoop-site.xml <<EOF
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<property>
<name>hadoop.tmp.dir</name>
<value>/mnt/hadoop</value>
</property>
<property>
<name>fs.default.name</name>
<value>hdfs://$MASTER_HOST:50001</value>
</property>
<property>
<name>mapred.job.tracker</name>
<value>hdfs://$MASTER_HOST:50002</value>
</property>
<property>
<name>tasktracker.http.threads</name>
<value>80</value>
</property>
<property>
<name>mapred.tasktracker.map.tasks.maximum</name>
<value>3</value>
</property>
<property>
<name>mapred.tasktracker.reduce.tasks.maximum</name>
<value>3</value>
</property>
<property>
<name>mapred.output.compress</name>
<value>true</value>
</property>
<property>
<name>mapred.output.compression.type</name>
<value>BLOCK</value>
</property>
<property>
<name>dfs.client.block.write.retries</name>
<value>3</value>
</property>
</configuration>
EOF
# Configure Hadoop for Ganglia
# overwrite hadoop-metrics.properties
cat > $HADOOP_HOME/conf/hadoop-metrics.properties <<EOF
# Ganglia
# we push to the master gmond so hostnames show up properly
dfs.class=org.apache.hadoop.metrics.ganglia.GangliaContext
dfs.period=10
dfs.servers=$MASTER_HOST:8649
mapred.class=org.apache.hadoop.metrics.ganglia.GangliaContext
mapred.period=10
mapred.servers=$MASTER_HOST:8649
jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
jvm.period=10
jvm.servers=$MASTER_HOST:8649
EOF
################################################################################
# Start services
################################################################################
[ ! -f /etc/hosts ] && echo "127.0.0.1 localhost" > /etc/hosts
mkdir -p /mnt/hadoop/logs
# not set on boot
export USER="root"
if [ "$IS_MASTER" == "true" ]; then
# MASTER
# Prep Ganglia
sed -i -e "s|\( *mcast_join *=.*\)|#\1|" \
-e "s|\( *bind *=.*\)|#\1|" \
-e "s|\( *mute *=.*\)| mute = yes|" \
-e "s|\( *location *=.*\)| location = \"master-node\"|" \
/etc/gmond.conf
mkdir -p /mnt/ganglia/rrds
chown -R ganglia:ganglia /mnt/ganglia/rrds
rm -rf /var/lib/ganglia; cd /var/lib; ln -s /mnt/ganglia ganglia; cd
service gmond start
service gmetad start
apachectl start
# Hadoop
# only format on first boot
[ ! -e /mnt/hadoop/dfs ] && "$HADOOP_HOME"/bin/hadoop namenode -format
"$HADOOP_HOME"/bin/hadoop-daemon.sh start namenode
"$HADOOP_HOME"/bin/hadoop-daemon.sh start jobtracker
else
# SLAVE
# Prep Ganglia
sed -i -e "s|\( *mcast_join *=.*\)|#\1|" \
-e "s|\( *bind *=.*\)|#\1|" \
-e "s|\(udp_send_channel {\)|\1\n host=$MASTER_HOST|" \
/etc/gmond.conf
service gmond start
# Hadoop
"$HADOOP_HOME"/bin/hadoop-daemon.sh start datanode
"$HADOOP_HOME"/bin/hadoop-daemon.sh start tasktracker
fi
# Run this script on next boot
rm -f /var/ec2/ec2-run-user-data.*


@ -0,0 +1,80 @@
#!/bin/sh
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Create a Hadoop AMI. Runs on the EC2 instance.
# Import variables
bin=`dirname "$0"`
bin=`cd "$bin"; pwd`
. "$bin"/hadoop-ec2-env.sh
# Remove environment script since it contains sensitive information
rm -f "$bin"/hadoop-ec2-env.sh
# Install Java
echo "Downloading and installing java binary."
cd /usr/local
wget -nv -O java.bin $JAVA_BINARY_URL
sh java.bin
rm -f java.bin
# Install tools
echo "Installing rpms."
yum -y install rsync lynx screen ganglia-gmetad ganglia-gmond ganglia-web httpd php
yum -y clean all
# Install Hadoop
echo "Installing Hadoop $HADOOP_VERSION."
cd /usr/local
wget -nv http://archive.apache.org/dist/hadoop/core/hadoop-$HADOOP_VERSION/hadoop-$HADOOP_VERSION.tar.gz
[ ! -f hadoop-$HADOOP_VERSION.tar.gz ] && wget -nv http://www.apache.org/dist/hadoop/core/hadoop-$HADOOP_VERSION/hadoop-$HADOOP_VERSION.tar.gz
tar xzf hadoop-$HADOOP_VERSION.tar.gz
rm -f hadoop-$HADOOP_VERSION.tar.gz
# Configure Hadoop
sed -i -e "s|# export JAVA_HOME=.*|export JAVA_HOME=/usr/local/jdk${JAVA_VERSION}|" \
-e 's|# export HADOOP_LOG_DIR=.*|export HADOOP_LOG_DIR=/mnt/hadoop/logs|' \
-e 's|# export HADOOP_SLAVE_SLEEP=.*|export HADOOP_SLAVE_SLEEP=1|' \
-e 's|# export HADOOP_OPTS=.*|export HADOOP_OPTS=-server|' \
/usr/local/hadoop-$HADOOP_VERSION/conf/hadoop-env.sh
# Run user data as script on instance startup
chmod +x /etc/init.d/ec2-run-user-data
echo "/etc/init.d/ec2-run-user-data" >> /etc/rc.d/rc.local
# Setup root user bash environment
echo "export JAVA_HOME=/usr/local/jdk${JAVA_VERSION}" >> /root/.bash_profile
echo "export HADOOP_HOME=/usr/local/hadoop-${HADOOP_VERSION}" >> /root/.bash_profile
echo 'export PATH=$JAVA_HOME/bin:$HADOOP_HOME/bin:$PATH' >> /root/.bash_profile
# Configure networking.
# Delete SSH authorized_keys since it includes the key it was launched with. (Note that it is re-populated when an instance starts.)
rm -f /root/.ssh/authorized_keys
# Ensure logging in to new hosts is seamless.
echo ' StrictHostKeyChecking no' >> /etc/ssh/ssh_config
# Bundle and upload image
cd ~root
# Don't need to delete .bash_history since it isn't written until exit.
df -h
ec2-bundle-vol -d /mnt -k /mnt/pk*.pem -c /mnt/cert*.pem -u $AWS_ACCOUNT_ID -s 3072 -p hadoop-$HADOOP_VERSION-$ARCH -r $ARCH
ec2-upload-bundle -b $S3_BUCKET -m /mnt/hadoop-$HADOOP_VERSION-$ARCH.manifest.xml -a $AWS_ACCESS_KEY_ID -s $AWS_SECRET_ACCESS_KEY
# End
echo Done


@ -0,0 +1,51 @@
#!/bin/bash
#
# ec2-run-user-data - Run instance user-data if it looks like a script.
#
# Only retrieves and runs the user-data script once per instance. If
# you want the user-data script to run again (e.g., on the next boot)
# then add this command in the user-data script:
# rm -f /var/ec2/ec2-run-user-data.*
#
# History:
# 2008-05-16 Eric Hammond <ehammond@thinksome.com>
# - Initial version including code from Kim Scheibel, Jorge Oliveira
# 2008-08-06 Tom White
# - Updated to use mktemp on fedora
#
prog=$(basename $0)
logger="logger -t $prog"
curl="curl --retry 3 --silent --show-error --fail"
instance_data_url=http://169.254.169.254/2008-02-01
# Wait until networking is up on the EC2 instance.
perl -MIO::Socket::INET -e '
until(new IO::Socket::INET("169.254.169.254:80")){print"Waiting for network...\n";sleep 1}
' | $logger
# Exit if we have already run on this instance (e.g., previous boot).
ami_id=$($curl $instance_data_url/meta-data/ami-id)
been_run_file=/var/ec2/$prog.$ami_id
mkdir -p $(dirname $been_run_file)
if [ -f $been_run_file ]; then
$logger < $been_run_file
exit
fi
# Retrieve the instance user-data and run it if it looks like a script
user_data_file=`mktemp -t ec2-user-data.XXXXXXXXXX`
chmod 700 $user_data_file
$logger "Retrieving user-data"
$curl -o $user_data_file $instance_data_url/user-data 2>&1 | $logger
if [ ! -s $user_data_file ]; then
$logger "No user-data available"
elif head -1 $user_data_file | egrep -v '^#!'; then
$logger "Skipping user-data as it does not begin with #!"
else
$logger "Running user-data"
echo "user-data has already been run on this instance" > $been_run_file
$user_data_file 2>&1 | logger -t "user-data"
$logger "user-data exit code: $?"
fi
rm -f $user_data_file


@ -0,0 +1,40 @@
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Launch an EC2 cluster of Hadoop instances.
# Import variables
bin=`dirname "$0"`
bin=`cd "$bin"; pwd`
if [ -z $1 ]; then
echo "Cluster name required!"
exit -1
fi
if [ -z $2 ]; then
echo "Must specify the number of slaves to start."
exit -1
fi
if ! "$bin"/launch-hadoop-master $1 ; then
exit $?
fi
if ! "$bin"/launch-hadoop-slaves $*; then
exit $?
fi


@ -0,0 +1,120 @@
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Launch an EC2 Hadoop master.
if [ -z $1 ]; then
echo "Cluster name required!"
exit -1
fi
CLUSTER=$1
# Import variables
bin=`dirname "$0"`
bin=`cd "$bin"; pwd`
. "$bin"/hadoop-ec2-env.sh
if [ -z $AWS_ACCOUNT_ID ]; then
echo "Please set AWS_ACCOUNT_ID in $bin/hadoop-ec2-env.sh."
exit -1
fi
echo "Testing for existing master in group: $CLUSTER"
MASTER_EC2_HOST=`ec2-describe-instances | awk '"RESERVATION" == $1 && "'$CLUSTER_MASTER'" == $4, "RESERVATION" == $1 && "'$CLUSTER_MASTER'" != $4'`
MASTER_EC2_HOST=`echo "$MASTER_EC2_HOST" | awk '"INSTANCE" == $1 && "running" == $6 {print $4}'`
if [ ! -z "$MASTER_EC2_HOST" ]; then
echo "Master already running on: $MASTER_EC2_HOST"
MASTER_HOST=`ec2-describe-instances $INSTANCE | grep INSTANCE | grep running | grep $MASTER_EC2_HOST | awk '{print $5}'`
echo $MASTER_HOST > $MASTER_PRIVATE_IP_PATH
echo $MASTER_EC2_HOST > $MASTER_IP_PATH
exit 0
fi
ec2-describe-group | egrep "[[:space:]]$CLUSTER_MASTER[[:space:]]" > /dev/null
if [ ! $? -eq 0 ]; then
echo "Creating group $CLUSTER_MASTER"
ec2-add-group $CLUSTER_MASTER -d "Group for Hadoop Master."
ec2-authorize $CLUSTER_MASTER -o $CLUSTER_MASTER -u $AWS_ACCOUNT_ID
ec2-authorize $CLUSTER_MASTER -p 22 # ssh
if [ $ENABLE_WEB_PORTS == "true" ]; then
ec2-authorize $CLUSTER_MASTER -p 50030 # JobTracker web interface
ec2-authorize $CLUSTER_MASTER -p 50060 # TaskTracker web interface
ec2-authorize $CLUSTER_MASTER -p 50070 # NameNode web interface
ec2-authorize $CLUSTER_MASTER -p 50075 # DataNode web interface
fi
fi
ec2-describe-group | egrep "[[:space:]]$CLUSTER[[:space:]]" > /dev/null
if [ ! $? -eq 0 ]; then
echo "Creating group $CLUSTER"
ec2-add-group $CLUSTER -d "Group for Hadoop Slaves."
ec2-authorize $CLUSTER -o $CLUSTER -u $AWS_ACCOUNT_ID
ec2-authorize $CLUSTER -p 22 # ssh
if [ $ENABLE_WEB_PORTS == "true" ]; then
ec2-authorize $CLUSTER -p 50030 # JobTracker web interface
ec2-authorize $CLUSTER -p 50060 # TaskTracker web interface
ec2-authorize $CLUSTER -p 50070 # NameNode web interface
ec2-authorize $CLUSTER -p 50075 # DataNode web interface
fi
ec2-authorize $CLUSTER_MASTER -o $CLUSTER -u $AWS_ACCOUNT_ID
ec2-authorize $CLUSTER -o $CLUSTER_MASTER -u $AWS_ACCOUNT_ID
fi
# Finding Hadoop image
AMI_IMAGE=`ec2-describe-images -a | grep $S3_BUCKET | grep $HADOOP_VERSION | grep $ARCH | grep available | awk '{print $2}'`
# Start a master
echo "Starting master with AMI $AMI_IMAGE"
USER_DATA="MASTER_HOST=master,MAX_MAP_TASKS=$MAX_MAP_TASKS,MAX_REDUCE_TASKS=$MAX_REDUCE_TASKS,COMPRESS=$COMPRESS"
INSTANCE=`ec2-run-instances $AMI_IMAGE -n 1 -g $CLUSTER_MASTER -k $KEY_NAME -f "$bin"/$USER_DATA_FILE -t $INSTANCE_TYPE $KERNEL_ARG | grep INSTANCE | awk '{print $2}'`
echo "Waiting for instance $INSTANCE to start"
while true; do
printf "."
# get private dns
MASTER_HOST=`ec2-describe-instances $INSTANCE | grep running | awk '{print $5}'`
if [ ! -z $MASTER_HOST ]; then
echo "Started as $MASTER_HOST"
break;
fi
sleep 1
done
MASTER_EC2_HOST=`ec2-describe-instances $INSTANCE | grep INSTANCE | grep running | grep $MASTER_HOST | awk '{print $4}'`
echo $MASTER_HOST > $MASTER_PRIVATE_IP_PATH
echo $MASTER_EC2_HOST > $MASTER_IP_PATH
MASTER_EC2_ZONE=`ec2-describe-instances $INSTANCE | grep INSTANCE | grep running | grep $MASTER_HOST | awk '{print $11}'`
echo $MASTER_EC2_ZONE > $MASTER_ZONE_PATH
while true; do
REPLY=`ssh $SSH_OPTS "root@$MASTER_EC2_HOST" 'echo "hello"'`
if [ ! -z $REPLY ]; then
break;
fi
sleep 5
done
echo "Copying private key to master"
scp $SSH_OPTS $PRIVATE_KEY_PATH "root@$MASTER_EC2_HOST:/root/.ssh/id_rsa"
ssh $SSH_OPTS "root@$MASTER_EC2_HOST" "chmod 600 /root/.ssh/id_rsa"
MASTER_IP=`dig +short $MASTER_EC2_HOST`
echo "Master is $MASTER_EC2_HOST, ip is $MASTER_IP, zone is $MASTER_EC2_ZONE."


@ -0,0 +1,55 @@
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Launch EC2 Hadoop slaves.
if [ -z $1 ]; then
echo "Cluster name required!"
exit -1
fi
if [ -z $2 ]; then
echo "Must specify the number of slaves to start."
exit -1
fi
CLUSTER=$1
NO_INSTANCES=$2
# Import variables
bin=`dirname "$0"`
bin=`cd "$bin"; pwd`
. "$bin"/hadoop-ec2-env.sh
if [ ! -f $MASTER_IP_PATH ]; then
echo "Must start Cluster Master first!"
exit -1
fi
# Finding Hadoop image
AMI_IMAGE=`ec2-describe-images -a | grep $S3_BUCKET | grep $HADOOP_VERSION | grep $ARCH |grep available | awk '{print $2}'`
MASTER_HOST=`cat $MASTER_PRIVATE_IP_PATH`
MASTER_ZONE=`cat $MASTER_ZONE_PATH`
# Substituting master hostname
sed -e "s|%MASTER_HOST%|$MASTER_HOST|" "$bin"/$USER_DATA_FILE > "$bin"/$USER_DATA_FILE.slave
# Start slaves
echo "Adding $1 node(s) to cluster group $CLUSTER with AMI $AMI_IMAGE"
ec2-run-instances $AMI_IMAGE -n "$NO_INSTANCES" -g "$CLUSTER" -k "$KEY_NAME" -f "$bin"/$USER_DATA_FILE.slave -t "$INSTANCE_TYPE" -z "$MASTER_ZONE" $KERNEL_ARG | grep INSTANCE | awk '{print $2}'
rm "$bin"/$USER_DATA_FILE.slave


@ -0,0 +1,31 @@
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# List running clusters.
# Import variables
bin=`dirname "$0"`
bin=`cd "$bin"; pwd`
. "$bin"/hadoop-ec2-env.sh
# Finding Hadoop clusters
CLUSTERS=`ec2-describe-instances | awk '"RESERVATION" == $1 && $4 ~ /-master$/, "INSTANCE" == $1' | tr '\n' '\t' | grep running | cut -f4 | rev | cut -d'-' -f2- | rev`
[ -z "$CLUSTERS" ] && echo "No running clusters." && exit 0
echo "Running Hadoop clusters:"
echo "$CLUSTERS"


@ -0,0 +1,46 @@
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Terminate a cluster.
if [ -z $1 ]; then
echo "Cluster name required!"
exit -1
fi
CLUSTER=$1
# Import variables
bin=`dirname "$0"`
bin=`cd "$bin"; pwd`
. "$bin"/hadoop-ec2-env.sh
# Finding Hadoop instances
HADOOP_INSTANCES=`ec2-describe-instances | awk '"RESERVATION" == $1 && ("'$CLUSTER'" == $4 || "'$CLUSTER_MASTER'" == $4), "RESERVATION" == $1 && ("'$CLUSTER'" != $4 && "'$CLUSTER_MASTER'" != $4)'`
HADOOP_INSTANCES=`echo "$HADOOP_INSTANCES" | grep INSTANCE | grep running`
[ -z "$HADOOP_INSTANCES" ] && echo "No running instances in cluster $CLUSTER." && exit 0
echo "Running Hadoop instances:"
echo "$HADOOP_INSTANCES"
read -p "Terminate all instances? [yes or no]: " answer
if [ "$answer" != "yes" ]; then
exit 1
fi
ec2-terminate-instances `echo "$HADOOP_INSTANCES" | awk '{print $2}'`


@ -0,0 +1,10 @@
<?xml version="1.0" encoding="UTF-8"?>
<classpath>
<classpathentry excluding="org/apache/hadoop/eclipse/server/CopyOfHadoopServer.java" kind="src" path="src/java"/>
<classpathentry exported="true" kind="lib" path="classes" sourcepath="classes"/>
<classpathentry exported="true" kind="lib" path="lib/commons-cli-2.0-SNAPSHOT.jar"/>
<classpathentry kind="lib" path="lib/hadoop-core.jar" sourcepath="/hadoop-socks/src/java"/>
<classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER"/>
<classpathentry kind="con" path="org.eclipse.pde.core.requiredPlugins"/>
<classpathentry kind="output" path="classes"/>
</classpath>


@ -0,0 +1,28 @@
<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
<name>MapReduceTools</name>
<comment></comment>
<projects>
</projects>
<buildSpec>
<buildCommand>
<name>org.eclipse.jdt.core.javabuilder</name>
<arguments>
</arguments>
</buildCommand>
<buildCommand>
<name>org.eclipse.pde.ManifestBuilder</name>
<arguments>
</arguments>
</buildCommand>
<buildCommand>
<name>org.eclipse.pde.SchemaBuilder</name>
<arguments>
</arguments>
</buildCommand>
</buildSpec>
<natures>
<nature>org.eclipse.pde.PluginNature</nature>
<nature>org.eclipse.jdt.core.javanature</nature>
</natures>
</projectDescription>


@ -0,0 +1,262 @@
#Sat Oct 13 13:37:43 CEST 2007
eclipse.preferences.version=1
instance/org.eclipse.core.net/org.eclipse.core.net.hasMigrated=true
org.eclipse.jdt.core.compiler.codegen.inlineJsrBytecode=enabled
org.eclipse.jdt.core.compiler.codegen.unusedLocal=preserve
org.eclipse.jdt.core.compiler.debug.lineNumber=generate
org.eclipse.jdt.core.compiler.debug.localVariable=generate
org.eclipse.jdt.core.compiler.debug.sourceFile=generate
org.eclipse.jdt.core.formatter.align_type_members_on_columns=false
org.eclipse.jdt.core.formatter.alignment_for_arguments_in_allocation_expression=16
org.eclipse.jdt.core.formatter.alignment_for_arguments_in_enum_constant=16
org.eclipse.jdt.core.formatter.alignment_for_arguments_in_explicit_constructor_call=16
org.eclipse.jdt.core.formatter.alignment_for_arguments_in_method_invocation=16
org.eclipse.jdt.core.formatter.alignment_for_arguments_in_qualified_allocation_expression=16
org.eclipse.jdt.core.formatter.alignment_for_assignment=16
org.eclipse.jdt.core.formatter.alignment_for_binary_expression=16
org.eclipse.jdt.core.formatter.alignment_for_compact_if=16
org.eclipse.jdt.core.formatter.alignment_for_conditional_expression=80
org.eclipse.jdt.core.formatter.alignment_for_enum_constants=0
org.eclipse.jdt.core.formatter.alignment_for_expressions_in_array_initializer=16
org.eclipse.jdt.core.formatter.alignment_for_multiple_fields=16
org.eclipse.jdt.core.formatter.alignment_for_parameters_in_constructor_declaration=16
org.eclipse.jdt.core.formatter.alignment_for_parameters_in_method_declaration=16
org.eclipse.jdt.core.formatter.alignment_for_selector_in_method_invocation=16
org.eclipse.jdt.core.formatter.alignment_for_superclass_in_type_declaration=16
org.eclipse.jdt.core.formatter.alignment_for_superinterfaces_in_enum_declaration=16
org.eclipse.jdt.core.formatter.alignment_for_superinterfaces_in_type_declaration=16
org.eclipse.jdt.core.formatter.alignment_for_throws_clause_in_constructor_declaration=16
org.eclipse.jdt.core.formatter.alignment_for_throws_clause_in_method_declaration=16
org.eclipse.jdt.core.formatter.blank_lines_after_imports=1
org.eclipse.jdt.core.formatter.blank_lines_after_package=1
org.eclipse.jdt.core.formatter.blank_lines_before_field=1
org.eclipse.jdt.core.formatter.blank_lines_before_first_class_body_declaration=0
org.eclipse.jdt.core.formatter.blank_lines_before_imports=1
org.eclipse.jdt.core.formatter.blank_lines_before_member_type=1
org.eclipse.jdt.core.formatter.blank_lines_before_method=1
org.eclipse.jdt.core.formatter.blank_lines_before_new_chunk=1
org.eclipse.jdt.core.formatter.blank_lines_before_package=0
org.eclipse.jdt.core.formatter.blank_lines_between_import_groups=1
org.eclipse.jdt.core.formatter.blank_lines_between_type_declarations=1
org.eclipse.jdt.core.formatter.brace_position_for_annotation_type_declaration=end_of_line
org.eclipse.jdt.core.formatter.brace_position_for_anonymous_type_declaration=end_of_line
org.eclipse.jdt.core.formatter.brace_position_for_array_initializer=end_of_line
org.eclipse.jdt.core.formatter.brace_position_for_block=end_of_line
org.eclipse.jdt.core.formatter.brace_position_for_block_in_case=end_of_line
org.eclipse.jdt.core.formatter.brace_position_for_constructor_declaration=end_of_line
org.eclipse.jdt.core.formatter.brace_position_for_enum_constant=end_of_line
org.eclipse.jdt.core.formatter.brace_position_for_enum_declaration=end_of_line
org.eclipse.jdt.core.formatter.brace_position_for_method_declaration=end_of_line
org.eclipse.jdt.core.formatter.brace_position_for_switch=end_of_line
org.eclipse.jdt.core.formatter.brace_position_for_type_declaration=end_of_line
org.eclipse.jdt.core.formatter.comment.clear_blank_lines_in_block_comment=false
org.eclipse.jdt.core.formatter.comment.clear_blank_lines_in_javadoc_comment=false
org.eclipse.jdt.core.formatter.comment.format_block_comments=true
org.eclipse.jdt.core.formatter.comment.format_header=false
org.eclipse.jdt.core.formatter.comment.format_html=true
org.eclipse.jdt.core.formatter.comment.format_javadoc_comments=true
org.eclipse.jdt.core.formatter.comment.format_line_comments=true
org.eclipse.jdt.core.formatter.comment.format_source_code=true
org.eclipse.jdt.core.formatter.comment.indent_parameter_description=false
org.eclipse.jdt.core.formatter.comment.indent_root_tags=true
org.eclipse.jdt.core.formatter.comment.insert_new_line_before_root_tags=insert
org.eclipse.jdt.core.formatter.comment.insert_new_line_for_parameter=do not insert
org.eclipse.jdt.core.formatter.comment.line_length=77
org.eclipse.jdt.core.formatter.compact_else_if=true
org.eclipse.jdt.core.formatter.continuation_indentation=2
org.eclipse.jdt.core.formatter.continuation_indentation_for_array_initializer=2
org.eclipse.jdt.core.formatter.format_guardian_clause_on_one_line=false
org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_annotation_declaration_header=true
org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_enum_constant_header=true
org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_enum_declaration_header=true
org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_type_header=true
org.eclipse.jdt.core.formatter.indent_breaks_compare_to_cases=true
org.eclipse.jdt.core.formatter.indent_empty_lines=false
org.eclipse.jdt.core.formatter.indent_statements_compare_to_block=true
org.eclipse.jdt.core.formatter.indent_statements_compare_to_body=true
org.eclipse.jdt.core.formatter.indent_switchstatements_compare_to_cases=true
org.eclipse.jdt.core.formatter.indent_switchstatements_compare_to_switch=true
org.eclipse.jdt.core.formatter.indentation.size=4
org.eclipse.jdt.core.formatter.insert_new_line_after_annotation=insert
org.eclipse.jdt.core.formatter.insert_new_line_after_opening_brace_in_array_initializer=do not insert
org.eclipse.jdt.core.formatter.insert_new_line_at_end_of_file_if_missing=insert
org.eclipse.jdt.core.formatter.insert_new_line_before_catch_in_try_statement=do not insert
org.eclipse.jdt.core.formatter.insert_new_line_before_closing_brace_in_array_initializer=do not insert
org.eclipse.jdt.core.formatter.insert_new_line_before_else_in_if_statement=do not insert
org.eclipse.jdt.core.formatter.insert_new_line_before_finally_in_try_statement=do not insert
org.eclipse.jdt.core.formatter.insert_new_line_before_while_in_do_statement=do not insert
org.eclipse.jdt.core.formatter.insert_new_line_in_empty_annotation_declaration=insert
org.eclipse.jdt.core.formatter.insert_new_line_in_empty_anonymous_type_declaration=insert
org.eclipse.jdt.core.formatter.insert_new_line_in_empty_block=insert
org.eclipse.jdt.core.formatter.insert_new_line_in_empty_enum_constant=insert
org.eclipse.jdt.core.formatter.insert_new_line_in_empty_enum_declaration=insert
org.eclipse.jdt.core.formatter.insert_new_line_in_empty_method_body=insert
org.eclipse.jdt.core.formatter.insert_new_line_in_empty_type_declaration=insert
org.eclipse.jdt.core.formatter.insert_space_after_and_in_type_parameter=insert
org.eclipse.jdt.core.formatter.insert_space_after_assignment_operator=insert
org.eclipse.jdt.core.formatter.insert_space_after_at_in_annotation=do not insert
org.eclipse.jdt.core.formatter.insert_space_after_at_in_annotation_type_declaration=do not insert
org.eclipse.jdt.core.formatter.insert_space_after_binary_operator=insert
org.eclipse.jdt.core.formatter.insert_space_after_closing_angle_bracket_in_type_arguments=insert
org.eclipse.jdt.core.formatter.insert_space_after_closing_angle_bracket_in_type_parameters=insert
org.eclipse.jdt.core.formatter.insert_space_after_closing_brace_in_block=insert
org.eclipse.jdt.core.formatter.insert_space_after_closing_paren_in_cast=insert
org.eclipse.jdt.core.formatter.insert_space_after_colon_in_assert=insert
org.eclipse.jdt.core.formatter.insert_space_after_colon_in_case=insert
org.eclipse.jdt.core.formatter.insert_space_after_colon_in_conditional=insert
org.eclipse.jdt.core.formatter.insert_space_after_colon_in_for=insert
org.eclipse.jdt.core.formatter.insert_space_after_colon_in_labeled_statement=insert
org.eclipse.jdt.core.formatter.insert_space_after_comma_in_allocation_expression=insert
org.eclipse.jdt.core.formatter.insert_space_after_comma_in_annotation=insert
org.eclipse.jdt.core.formatter.insert_space_after_comma_in_array_initializer=insert
org.eclipse.jdt.core.formatter.insert_space_after_comma_in_constructor_declaration_parameters=insert
org.eclipse.jdt.core.formatter.insert_space_after_comma_in_constructor_declaration_throws=insert
org.eclipse.jdt.core.formatter.insert_space_after_comma_in_enum_constant_arguments=insert
org.eclipse.jdt.core.formatter.insert_space_after_comma_in_enum_declarations=insert
org.eclipse.jdt.core.formatter.insert_space_after_comma_in_explicitconstructorcall_arguments=insert
org.eclipse.jdt.core.formatter.insert_space_after_comma_in_for_increments=insert
org.eclipse.jdt.core.formatter.insert_space_after_comma_in_for_inits=insert
org.eclipse.jdt.core.formatter.insert_space_after_comma_in_method_declaration_parameters=insert
org.eclipse.jdt.core.formatter.insert_space_after_comma_in_method_declaration_throws=insert
org.eclipse.jdt.core.formatter.insert_space_after_comma_in_method_invocation_arguments=insert
org.eclipse.jdt.core.formatter.insert_space_after_comma_in_multiple_field_declarations=insert
org.eclipse.jdt.core.formatter.insert_space_after_comma_in_multiple_local_declarations=insert
org.eclipse.jdt.core.formatter.insert_space_after_comma_in_parameterized_type_reference=insert
org.eclipse.jdt.core.formatter.insert_space_after_comma_in_superinterfaces=insert
org.eclipse.jdt.core.formatter.insert_space_after_comma_in_type_arguments=insert
org.eclipse.jdt.core.formatter.insert_space_after_comma_in_type_parameters=insert
org.eclipse.jdt.core.formatter.insert_space_after_ellipsis=insert
org.eclipse.jdt.core.formatter.insert_space_after_opening_angle_bracket_in_parameterized_type_reference=do not insert
org.eclipse.jdt.core.formatter.insert_space_after_opening_angle_bracket_in_type_arguments=do not insert
org.eclipse.jdt.core.formatter.insert_space_after_opening_angle_bracket_in_type_parameters=do not insert
org.eclipse.jdt.core.formatter.insert_space_after_opening_brace_in_array_initializer=insert
org.eclipse.jdt.core.formatter.insert_space_after_opening_bracket_in_array_allocation_expression=do not insert
org.eclipse.jdt.core.formatter.insert_space_after_opening_bracket_in_array_reference=do not insert
org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_annotation=do not insert
org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_cast=do not insert
org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_catch=do not insert
org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_constructor_declaration=do not insert
org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_enum_constant=do not insert
org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_for=do not insert
org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_if=do not insert
org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_method_declaration=do not insert
org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_method_invocation=do not insert
org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_parenthesized_expression=do not insert
org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_switch=do not insert
org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_synchronized=do not insert
org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_while=do not insert
org.eclipse.jdt.core.formatter.insert_space_after_postfix_operator=do not insert
org.eclipse.jdt.core.formatter.insert_space_after_prefix_operator=do not insert
org.eclipse.jdt.core.formatter.insert_space_after_question_in_conditional=insert
org.eclipse.jdt.core.formatter.insert_space_after_question_in_wildcard=do not insert
org.eclipse.jdt.core.formatter.insert_space_after_semicolon_in_for=insert
org.eclipse.jdt.core.formatter.insert_space_after_unary_operator=do not insert
org.eclipse.jdt.core.formatter.insert_space_before_and_in_type_parameter=insert
org.eclipse.jdt.core.formatter.insert_space_before_assignment_operator=insert
org.eclipse.jdt.core.formatter.insert_space_before_at_in_annotation_type_declaration=insert
org.eclipse.jdt.core.formatter.insert_space_before_binary_operator=insert
org.eclipse.jdt.core.formatter.insert_space_before_closing_angle_bracket_in_parameterized_type_reference=do not insert
org.eclipse.jdt.core.formatter.insert_space_before_closing_angle_bracket_in_type_arguments=do not insert
org.eclipse.jdt.core.formatter.insert_space_before_closing_angle_bracket_in_type_parameters=do not insert
org.eclipse.jdt.core.formatter.insert_space_before_closing_brace_in_array_initializer=insert
org.eclipse.jdt.core.formatter.insert_space_before_closing_bracket_in_array_allocation_expression=do not insert
org.eclipse.jdt.core.formatter.insert_space_before_closing_bracket_in_array_reference=do not insert
org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_annotation=do not insert
org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_cast=do not insert
org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_catch=do not insert
org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_constructor_declaration=do not insert
org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_enum_constant=do not insert
org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_for=do not insert
org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_if=do not insert
org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_method_declaration=do not insert
org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_method_invocation=do not insert
org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_parenthesized_expression=do not insert
org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_switch=do not insert
org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_synchronized=do not insert
org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_while=do not insert
org.eclipse.jdt.core.formatter.insert_space_before_colon_in_assert=insert
org.eclipse.jdt.core.formatter.insert_space_before_colon_in_case=do not insert
org.eclipse.jdt.core.formatter.insert_space_before_colon_in_conditional=insert
org.eclipse.jdt.core.formatter.insert_space_before_colon_in_default=do not insert
org.eclipse.jdt.core.formatter.insert_space_before_colon_in_for=insert
org.eclipse.jdt.core.formatter.insert_space_before_colon_in_labeled_statement=do not insert
org.eclipse.jdt.core.formatter.insert_space_before_comma_in_allocation_expression=do not insert
org.eclipse.jdt.core.formatter.insert_space_before_comma_in_annotation=do not insert
org.eclipse.jdt.core.formatter.insert_space_before_comma_in_array_initializer=do not insert
org.eclipse.jdt.core.formatter.insert_space_before_comma_in_constructor_declaration_parameters=do not insert
org.eclipse.jdt.core.formatter.insert_space_before_comma_in_constructor_declaration_throws=do not insert
org.eclipse.jdt.core.formatter.insert_space_before_comma_in_enum_constant_arguments=do not insert
org.eclipse.jdt.core.formatter.insert_space_before_comma_in_enum_declarations=do not insert
org.eclipse.jdt.core.formatter.insert_space_before_comma_in_explicitconstructorcall_arguments=do not insert
org.eclipse.jdt.core.formatter.insert_space_before_comma_in_for_increments=do not insert
org.eclipse.jdt.core.formatter.insert_space_before_comma_in_for_inits=do not insert
org.eclipse.jdt.core.formatter.insert_space_before_comma_in_method_declaration_parameters=do not insert
org.eclipse.jdt.core.formatter.insert_space_before_comma_in_method_declaration_throws=do not insert
org.eclipse.jdt.core.formatter.insert_space_before_comma_in_method_invocation_arguments=do not insert
org.eclipse.jdt.core.formatter.insert_space_before_comma_in_multiple_field_declarations=do not insert
org.eclipse.jdt.core.formatter.insert_space_before_comma_in_multiple_local_declarations=do not insert
org.eclipse.jdt.core.formatter.insert_space_before_comma_in_parameterized_type_reference=do not insert
org.eclipse.jdt.core.formatter.insert_space_before_comma_in_superinterfaces=do not insert
org.eclipse.jdt.core.formatter.insert_space_before_comma_in_type_arguments=do not insert
org.eclipse.jdt.core.formatter.insert_space_before_comma_in_type_parameters=do not insert
org.eclipse.jdt.core.formatter.insert_space_before_ellipsis=do not insert
org.eclipse.jdt.core.formatter.insert_space_before_opening_angle_bracket_in_parameterized_type_reference=do not insert
org.eclipse.jdt.core.formatter.insert_space_before_opening_angle_bracket_in_type_arguments=do not insert
org.eclipse.jdt.core.formatter.insert_space_before_opening_angle_bracket_in_type_parameters=do not insert
org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_annotation_type_declaration=insert
org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_anonymous_type_declaration=insert
org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_array_initializer=insert
org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_block=insert
org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_constructor_declaration=insert
org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_enum_constant=insert
org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_enum_declaration=insert
org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_method_declaration=insert
org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_switch=insert
org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_type_declaration=insert
org.eclipse.jdt.core.formatter.insert_space_before_opening_bracket_in_array_allocation_expression=do not insert
org.eclipse.jdt.core.formatter.insert_space_before_opening_bracket_in_array_reference=do not insert
org.eclipse.jdt.core.formatter.insert_space_before_opening_bracket_in_array_type_reference=do not insert
org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_annotation=do not insert
org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_annotation_type_member_declaration=do not insert
org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_catch=insert
org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_constructor_declaration=do not insert
org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_enum_constant=do not insert
org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_for=insert
org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_if=insert
org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_method_declaration=do not insert
org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_method_invocation=do not insert
org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_parenthesized_expression=do not insert
org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_switch=insert
org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_synchronized=insert
org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_while=insert
org.eclipse.jdt.core.formatter.insert_space_before_parenthesized_expression_in_return=insert
org.eclipse.jdt.core.formatter.insert_space_before_parenthesized_expression_in_throw=insert
org.eclipse.jdt.core.formatter.insert_space_before_postfix_operator=do not insert
org.eclipse.jdt.core.formatter.insert_space_before_prefix_operator=do not insert
org.eclipse.jdt.core.formatter.insert_space_before_question_in_conditional=insert
org.eclipse.jdt.core.formatter.insert_space_before_question_in_wildcard=do not insert
org.eclipse.jdt.core.formatter.insert_space_before_semicolon=do not insert
org.eclipse.jdt.core.formatter.insert_space_before_semicolon_in_for=do not insert
org.eclipse.jdt.core.formatter.insert_space_before_unary_operator=do not insert
org.eclipse.jdt.core.formatter.insert_space_between_brackets_in_array_type_reference=do not insert
org.eclipse.jdt.core.formatter.insert_space_between_empty_braces_in_array_initializer=do not insert
org.eclipse.jdt.core.formatter.insert_space_between_empty_brackets_in_array_allocation_expression=do not insert
org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_annotation_type_member_declaration=do not insert
org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_constructor_declaration=do not insert
org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_enum_constant=do not insert
org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_method_declaration=do not insert
org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_method_invocation=do not insert
org.eclipse.jdt.core.formatter.keep_else_statement_on_same_line=false
org.eclipse.jdt.core.formatter.keep_empty_array_initializer_on_one_line=false
org.eclipse.jdt.core.formatter.keep_imple_if_on_one_line=false
org.eclipse.jdt.core.formatter.keep_then_statement_on_same_line=false
org.eclipse.jdt.core.formatter.lineSplit=77
org.eclipse.jdt.core.formatter.never_indent_block_comments_on_first_column=false
org.eclipse.jdt.core.formatter.never_indent_line_comments_on_first_column=false
org.eclipse.jdt.core.formatter.number_of_blank_lines_at_beginning_of_method_body=0
org.eclipse.jdt.core.formatter.number_of_empty_lines_to_preserve=1
org.eclipse.jdt.core.formatter.put_empty_statement_on_new_line=true
org.eclipse.jdt.core.formatter.tabulation.char=space
org.eclipse.jdt.core.formatter.tabulation.size=2
org.eclipse.jdt.core.formatter.use_tabs_only_for_leading_indentations=false
org.eclipse.jdt.core.formatter.wrap_before_binary_operator=true

View File

@ -0,0 +1,6 @@
#Tue Aug 14 19:41:15 PDT 2007
eclipse.preferences.version=1
formatter_profile=_Lucene
formatter_settings_version=11
instance/org.eclipse.core.net/org.eclipse.core.net.hasMigrated=true
org.eclipse.jdt.ui.text.custom_code_templates=<?xml version\="1.0" encoding\="UTF-8" standalone\="no"?><templates/>

View File

@ -0,0 +1,6 @@
#Tue Aug 14 19:41:15 PDT 2007
DELEGATES_PREFERENCE=delegateValidatorListorg.eclipse.wst.xsd.core.internal.validation.eclipse.XSDDelegatingValidator\=org.eclipse.wst.xsd.core.internal.validation.eclipse.Validator;org.eclipse.wst.wsdl.validation.internal.eclipse.WSDLDelegatingValidator\=org.eclipse.wst.wsdl.validation.internal.eclipse.Validator;
USER_BUILD_PREFERENCE=enabledBuildValidatorListorg.eclipse.wst.xsd.core.internal.validation.eclipse.XSDDelegatingValidator;org.eclipse.jst.jsp.core.internal.validation.JSPContentValidator;org.eclipse.wst.html.internal.validation.HTMLValidator;org.eclipse.wst.xml.core.internal.validation.eclipse.Validator;org.eclipse.jst.jsf.validation.internal.appconfig.AppConfigValidator;org.eclipse.jst.jsp.core.internal.validation.JSPBatchValidator;org.eclipse.wst.dtd.core.internal.validation.eclipse.Validator;org.eclipse.wst.wsi.ui.internal.WSIMessageValidator;org.eclipse.wst.wsdl.validation.internal.eclipse.WSDLDelegatingValidator;org.eclipse.jst.jsf.validation.internal.JSPSemanticsValidator;
USER_MANUAL_PREFERENCE=enabledManualValidatorListorg.eclipse.wst.xsd.core.internal.validation.eclipse.XSDDelegatingValidator;org.eclipse.jst.jsp.core.internal.validation.JSPContentValidator;org.eclipse.wst.html.internal.validation.HTMLValidator;org.eclipse.wst.xml.core.internal.validation.eclipse.Validator;org.eclipse.jst.jsf.validation.internal.appconfig.AppConfigValidator;org.eclipse.jst.jsp.core.internal.validation.JSPBatchValidator;org.eclipse.wst.dtd.core.internal.validation.eclipse.Validator;org.eclipse.wst.wsi.ui.internal.WSIMessageValidator;org.eclipse.wst.wsdl.validation.internal.eclipse.WSDLDelegatingValidator;org.eclipse.jst.jsf.validation.internal.JSPSemanticsValidator;
USER_PREFERENCE=overrideGlobalPreferencesfalse
eclipse.preferences.version=1

View File

@ -0,0 +1,29 @@
Manifest-Version: 1.0
Bundle-ManifestVersion: 2
Bundle-Name: MapReduce Tools for Eclipse
Bundle-SymbolicName: org.apache.hadoop.eclipse;singleton:=true
Bundle-Version: 0.18
Bundle-Activator: org.apache.hadoop.eclipse.Activator
Bundle-Localization: plugin
Require-Bundle: org.eclipse.ui,
org.eclipse.core.runtime,
org.eclipse.jdt.launching,
org.eclipse.debug.core,
org.eclipse.jdt,
org.eclipse.jdt.core,
org.eclipse.core.resources,
org.eclipse.ui.ide,
org.eclipse.jdt.ui,
org.eclipse.debug.ui,
org.eclipse.jdt.debug.ui,
org.eclipse.core.expressions,
org.eclipse.ui.cheatsheets,
org.eclipse.ui.console,
org.eclipse.ui.navigator,
org.eclipse.core.filesystem,
org.apache.commons.logging
Eclipse-LazyStart: true
Bundle-ClassPath: classes/,
lib/commons-cli-2.0-SNAPSHOT.jar,
lib/hadoop-core.jar
Bundle-Vendor: Apache Hadoop

View File

@ -0,0 +1,7 @@
output.. = bin/
bin.includes = META-INF/,\
plugin.xml,\
resources/,\
classes/,\
classes/,\
lib/

View File

@ -0,0 +1,79 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<project default="jar" name="eclipse-plugin">
<import file="../build-contrib.xml"/>
<path id="eclipse-sdk-jars">
<fileset dir="${eclipse.home}/plugins/">
<include name="org.eclipse.ui*.jar"/>
<include name="org.eclipse.jdt*.jar"/>
<include name="org.eclipse.core*.jar"/>
<include name="org.eclipse.equinox*.jar"/>
<include name="org.eclipse.debug*.jar"/>
<include name="org.eclipse.osgi*.jar"/>
<include name="org.eclipse.swt*.jar"/>
<include name="org.eclipse.jface*.jar"/>
<include name="org.eclipse.team.cvs.ssh2*.jar"/>
<include name="com.jcraft.jsch*.jar"/>
</fileset>
</path>
<!-- Override classpath to include Eclipse SDK jars -->
<path id="classpath">
<pathelement location="${build.classes}"/>
<pathelement location="${hadoop.root}/build/classes"/>
<path refid="eclipse-sdk-jars"/>
</path>
<!-- Skip building if eclipse.home is unset. -->
<target name="check-contrib" unless="eclipse.home">
<property name="skip.contrib" value="yes"/>
<echo message="eclipse.home unset: skipping eclipse plugin"/>
</target>
<target name="compile" depends="init, ivy-retrieve-common" unless="skip.contrib">
<echo message="contrib: ${name}"/>
<javac
encoding="${build.encoding}"
srcdir="${src.dir}"
includes="**/*.java"
destdir="${build.classes}"
debug="${javac.debug}"
deprecation="${javac.deprecation}">
<classpath refid="classpath"/>
</javac>
</target>
<!-- Override jar target to specify manifest -->
<target name="jar" depends="compile" unless="skip.contrib">
<mkdir dir="${build.dir}/lib"/>
<copy file="${hadoop.root}/build/hadoop-${version}-core.jar" tofile="${build.dir}/lib/hadoop-core.jar" verbose="true"/>
<copy file="${hadoop.root}/lib/commons-cli-2.0-SNAPSHOT.jar" todir="${build.dir}/lib" verbose="true"/>
<jar
jarfile="${build.dir}/hadoop-${version}-${name}.jar"
manifest="${root}/META-INF/MANIFEST.MF">
<fileset dir="${build.dir}" includes="classes/ lib/"/>
<fileset dir="${root}" includes="resources/ plugin.xml"/>
</jar>
</target>
</project>

View File

@ -0,0 +1,36 @@
<?xml version="1.0" ?>
<ivy-module version="1.0">
<info organisation="org.apache.hadoop" module="${ant.project.name}">
<license name="Apache 2.0"/>
<ivyauthor name="Apache Hadoop Team" url="http://hadoop.apache.org"/>
<description>
Apache Hadoop
</description>
</info>
<configurations defaultconfmapping="default">
<!--these match the Maven configurations-->
<conf name="default" extends="master,runtime"/>
<conf name="master" description="contains the artifact but no dependencies"/>
<conf name="runtime" description="runtime but not the artifact" />
<conf name="common" visibility="private"
extends="runtime"
description="artifacts needed for compile/test the application"/>
<conf name="test" visibility="private" extends="runtime"/>
</configurations>
<publications>
<!--get the artifact from our module name-->
<artifact conf="master"/>
</publications>
<dependencies>
<dependency org="commons-logging"
name="commons-logging"
rev="${commons-logging.version}"
conf="common->default"/>
<dependency org="log4j"
name="log4j"
rev="${log4j.version}"
conf="common->master"/>
</dependencies>
</ivy-module>

View File

@ -0,0 +1,5 @@
#This properties file lists the versions of the various artifacts used by the eclipse plugin.
#It drives ivy and the generation of a maven POM
#Please list the dependency names with versions if they differ from the ones
#listed in the global libraries.properties file (in alphabetical order)

View File

@ -0,0 +1,287 @@
<?xml version="1.0" encoding="UTF-8"?>
<?eclipse version="3.2"?>
<plugin>
<!-- extension
point="org.eclipse.debug.core.launchConfigurationTypes">
<launchConfigurationType
delegate="org.apache.hadoop.eclipse.launch.HadoopLaunchDelegate"
id="org.apache.hadoop.eclipse.launch.StartServer"
modes="run,debug"
name="Start Hadoop Server"
public="true"/>
</extension -->
<extension
name="MapReduce Nature"
id="org.apache.hadoop.eclipse.Nature"
point="org.eclipse.core.resources.natures">
<runtime>
<run class="org.apache.hadoop.eclipse.MapReduceNature"/>
</runtime>
</extension>
<extension
point="org.eclipse.ui.ide.projectNatureImages">
</extension>
<!-- Wizards: new Mapper, Reducer, Driver -->
<extension
point="org.eclipse.ui.newWizards">
<primaryWizard id="org.apache.hadoop.eclipse.NewProjectWizard"/>
<wizard
category="org.apache.hadoop.eclipse.category"
class="org.apache.hadoop.eclipse.NewMapReduceProjectWizard"
finalPerspective="org.apache.hadoop.eclipse.Perspective"
hasPages="true"
icon="resources/Elephant16x16.gif"
id="org.apache.hadoop.eclipse.NewProjectWizard"
name="Map/Reduce Project"
preferredPerspectives="org.apache.hadoop.eclipse.Perspective"
project="true"/>
<wizard
category="org.apache.hadoop.eclipse.category"
class="org.apache.hadoop.eclipse.NewMapperWizard"
icon="resources/mapper16.png"
id="org.apache.hadoop.eclipse.NewMapperWizard"
name="Mapper"
project="false"/>
<wizard
category="org.apache.hadoop.eclipse.category"
class="org.apache.hadoop.eclipse.NewReducerWizard"
icon="resources/reducer16.png"
id="org.apache.hadoop.eclipse.NewReducerWizard"
name="Reducer"
project="false"/>
<wizard
category="org.apache.hadoop.eclipse.category"
class="org.apache.hadoop.eclipse.NewDriverWizard"
icon="resources/driver.png"
id="org.apache.hadoop.eclipse.NewDriverWizard"
name="MapReduce Driver"
project="false"/>
<category
id="org.apache.hadoop.eclipse.category"
name="Map/Reduce"/>
</extension>
<extension
point="org.eclipse.debug.ui.launchConfigurationTypeImages">
<launchConfigurationTypeImage
configTypeID="org.apache.hadoop.eclipse.launch.Local"
icon="resources/elephantblue16x16.gif"
id="Hadouken.launchConfigurationTypeImage1"/>
</extension>
<extension
point="org.eclipse.debug.ui.launchConfigurationTabGroups">
<launchConfigurationTabGroup
class="org.apache.hadoop.eclipse.launch.StartHadoopLaunchTabGroup"
id="org.apache.hadoop.eclipse.launch.StartHadoopLaunchTabGroup"
type="org.apache.hadoop.eclipse.launch.StartServer"/>
</extension>
<!-- Perspective: Map/Reduce -->
<extension
point="org.eclipse.ui.perspectives">
<perspective
class="org.apache.hadoop.eclipse.HadoopPerspectiveFactory"
icon="resources/elephantblue16x16.gif"
id="org.apache.hadoop.eclipse.Perspective"
name="Map/Reduce"/>
</extension>
<!-- Needed: allows DFS Browsing in Navigator! [TODO] -->
<extension
point="org.eclipse.core.expressions.propertyTesters">
<propertyTester
class="org.apache.hadoop.eclipse.PropertyTester"
id="mapreduce.deployable"
namespace="mapreduce"
properties="deployable"
type="org.eclipse.core.resources.IResource"/>
<propertyTester
class="org.apache.hadoop.eclipse.PropertyTester"
id="mapreduce.server"
namespace="mapreduce"
properties="server"
type="org.eclipse.wst.server.core.IServer"/>
</extension>
<!-- Run on Hadoop action -->
<extension
point="org.eclipse.debug.ui.launchShortcuts">
<shortcut
class="org.apache.hadoop.eclipse.launch.HadoopApplicationLaunchShortcut"
icon="resources/elephantblue16x16.gif"
id="org.apache.hadoop.eclipse.launch.shortcut"
label="Run on Hadoop"
modes="run">
<contextualLaunch>
<enablement>
<with variable="selection">
<count value="1"/>
<iterate>
<or>
<test property="org.eclipse.jdt.launching.hasMain"/>
<and>
<test property="org.eclipse.jdt.launching.isContainer"/>
<test property="org.eclipse.jdt.launching.hasProjectNature" args="org.eclipse.jdt.core.javanature"/>
<test property="org.eclipse.jdt.launching.hasProjectNature" args="org.apache.hadoop.eclipse.Nature"/>
</and>
</or>
</iterate>
</with>
</enablement>
</contextualLaunch>
<perspective id="org.apache.hadoop.eclipse.Perspective"/>
</shortcut>
</extension>
<!-- Hadoop locations view -->
<extension
point="org.eclipse.ui.views">
<category
id="org.apache.hadoop.eclipse.view.servers"
name="MapReduce Tools"/>
<view
allowMultiple="false"
category="org.apache.hadoop.eclipse.view.servers"
class="org.apache.hadoop.eclipse.view.servers.ServerView"
icon="resources/hadoop-logo-16x16.png"
id="org.apache.hadoop.eclipse.view.servers"
name="Map/Reduce Locations">
</view>
</extension>
<!-- ??? -->
<!-- extension
point="org.eclipse.ui.cheatsheets.cheatSheetContent">
<category
id="org.apache.hadoop.eclipse.cheatsheet.Examples"
name="MapReduce"/>
<cheatsheet
category="org.apache.hadoop.eclipse.cheatsheet.Examples"
composite="true"
contentFile="resources/HelloWorld.xml"
id="org.apache.hadoop.eclipse.cheatsheet"
name="Write a MapReduce application"/>
</extension -->
<!-- DFS Browser -->
<extension
point="org.eclipse.ui.navigator.navigatorContent">
<navigatorContent
activeByDefault="true"
contentProvider="org.apache.hadoop.eclipse.dfs.DFSContentProvider"
icon="resources/elephantblue16x16.gif"
id="org.apache.hadoop.eclipse.views.dfscontent"
labelProvider="org.apache.hadoop.eclipse.dfs.DFSContentProvider"
name="Hadoop Distributed File Systems"
priority="normal"
providesSaveables="false">
<triggerPoints>
<or>
<instanceof value="org.apache.hadoop.eclipse.dfs.DFSPath"/>
<adapt type="org.eclipse.core.resources.IResource">
<test
forcePluginActivation="true"
property="mapreduce.deployable"/>
</adapt>
</or>
</triggerPoints>
<actionProvider class="org.apache.hadoop.eclipse.dfs.ActionProvider">
</actionProvider>
<possibleChildren>
<or>
<instanceof value="org.eclipse.wst.server.core.IServer"/>
<instanceof value="org.apache.hadoop.eclipse.dfs.DFSLocationsRoot"/>
<instanceof value="org.apache.hadoop.eclipse.dfs.DFSLocation"/>
<instanceof value="org.apache.hadoop.eclipse.dfs.DFSPath"/>
</or>
</possibleChildren>
</navigatorContent>
</extension>
<!-- DFS Actions -->
<extension
point="org.eclipse.ui.navigator.viewer">
<viewer
viewerId="org.apache.hadoop.eclipse.dfs.DFSViewer">
<popupMenu
allowsPlatformContributions="true"
id="org.apache.hadoop.eclipse.dfs.DFSViewer#PopupMenu">
<insertionPoint name="group.new"/>
<insertionPoint
name="group.open"
separator="true"/>
<insertionPoint name="group.openWith"/>
<insertionPoint name="group.edit"
separator="true"/>
<insertionPoint name="group.reorganize" />
<insertionPoint
name="group.port"
separator="true"/>
<insertionPoint
name="group.build"
separator="true"/>
<insertionPoint
name="group.generate"
separator="true"/>
<insertionPoint
name="group.search"
separator="true"/>
<insertionPoint
name="additions"
separator="true"/>
<insertionPoint
name="group.properties"
separator="true"/>
</popupMenu>
</viewer>
<viewerContentBinding viewerId="org.eclipse.ui.navigator.ProjectExplorer">
<includes>
<contentExtension
isRoot="false"
pattern="org.apache.hadoop.eclipse.views.dfscontent"/>
<actionExtension pattern="org.apache.hadoop.eclipse.views.dfscontent.*"/>
</includes>
</viewerContentBinding>
</extension>
<!-- HDFS FileSystem registration [TODO] -->
<!-- extension
point="org.eclipse.core.filesystem.filesystems">
<filesystem scheme="hdfs">
<run class="org.apache.hadoop.eclipse.dfs.FileSystem"/>
</filesystem>
</extension -->
<!--
<extension
point="org.eclipse.ui.popupMenus">
<viewerContribution
id="Hadouken.viewerContribution1"
targetID="org.eclipse.ui.navigator.ProjectExplorer#PopupMenu"/>
</extension> -->
<!-- Preferences -->
<extension
point="org.eclipse.ui.preferencePages">
<page
class="org.apache.hadoop.eclipse.preferences.MapReducePreferencePage"
id="org.apache.hadoop.eclipse.preferences.MapReducePreferencePage"
name="Hadoop Map/Reduce"/>
</extension>
<extension
point="org.eclipse.core.runtime.preferences">
<initializer class="org.apache.hadoop.eclipse.preferences.PreferenceInitializer"/>
</extension>
</plugin>

[7 binary image files (plugin icons and graphics) added; contents not shown]

View File

@ -0,0 +1,32 @@
<?xml version="1.0" encoding="UTF-8"?>
<cheatsheet title="Set default Hadoop path tutorial">
<intro>
<description>
This tutorial explains how to connect to a MapReduce
cluster and browse the files in its DFS.
</description>
</intro>
<item title="Create MapReduce Cluster" skip="true">
<description>
Define a MapReduce cluster [if you have not done so already]
by opening the MapReduce Servers view and clicking on the
blue elephant in the upper right.
Use the following embedded command to create a new Hadoop Server:
</description>
<action pluginId="com.ibm.hipods.mapreduce"
class="org.apache.hadoop.eclipse.actions.NewServerAction" />
</item>
<item title="Open and Explore DFS Tree">
<description>
The Project Explorer view shows an elephant icon for each
defined server. Opening a server entry connects to the root
of that server's DFS tree, which you can then explore.
</description>
</item>
</cheatsheet>
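The cheat sheet above walks through browsing a cluster's DFS from the plugin UI. For reference, a minimal programmatic sketch of the same idea is given below, assuming the org.apache.hadoop.fs.FileSystem API of this Hadoop generation; the class name and the namenode address are illustrative placeholders, not part of the plugin.

// Minimal sketch: list the root of a DFS programmatically, assuming the
// org.apache.hadoop.fs API of this era. The namenode address is hypothetical;
// the plugin obtains it from the location defined in the Map/Reduce Locations view.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class DfsLister {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical namenode location.
    conf.set("fs.default.name", "hdfs://localhost:9000");
    FileSystem fs = FileSystem.get(conf);
    // Print each entry directly under the DFS root.
    for (FileStatus status : fs.listStatus(new Path("/"))) {
      System.out.println(status.getPath() + (status.isDir() ? "/" : ""));
    }
    fs.close();
  }
}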

View File

@ -0,0 +1,62 @@
<?xml version="1.0" encoding="UTF-8"?>
<cheatsheet title="MapReduce project creation tutorial">
<intro>
<description>
This tutorial guides you through the creation of a simple
MapReduce project with three MapReduce classes: a Mapper, a
Reducer, and a Driver.
</description>
</intro>
<item title="Open the MapReduce Perspective">
<action pluginId="org.eclipse.ui.cheatsheets"
class="org.eclipse.ui.internal.cheatsheets.actions.OpenPerspective"
param1="org.apache.hadoop.eclipse.Perspective" />
<description>
Select <b>Window->Open Perspective->MapReduce</b> in the menubar at
the top of the workbench. This step changes the perspective
to set up the Eclipse workbench for MapReduce development.
</description>
</item>
<item title="Create a MapReduce project" skip="true">
<action pluginId="com.ibm.hipods.mapreduce"
class="org.apache.hadoop.eclipse.actions.OpenNewMRProjectAction" />
<description>
The first thing you will need is a MapReduce Project. If you
already have a MapReduce project in your workspace that you
would like to use, you may skip this step by clicking the
"Click to Skip" button. If not, select <b>File->New->Project</b>
and choose MapReduce Project in the list. Complete the
subsequent pages as required.
</description>
</item>
<item title="Create a MapReduce package" skip="true">
<action pluginId="org.eclipse.jdt.ui"
class="org.eclipse.jdt.ui.actions.OpenNewPackageWizardAction" />
<description>
You should now have a MapReduce project in your workspace.
The next thing to do is to create a package. Use the Eclipse
tools by selecting the <b>File -> New -> Package</b> action. Specify
the source folder (the project containing the package). Then
give the package a name, such as "mapreduce.test", and click
the "Finish" button. If you already have a project with a
package, you may skip this step.
</description>
</item>
<item title="Create the MapReduce application classes" skip="true">
<description>
Now you should be set up for creating your MapReduce
application. The MapReduce application consists of three
classes: a Mapper class, a Reducer class and a Driver class.
In this step you will create the three classes. Use the
class wizard by selecting <b>File -> New -> Class</b>.
Repeat this for every class.
</description>
<repeated-subitem values="Mapper,Reducer,Driver">
<subitem label="Create the class ${this}.">
<action pluginId="com.ibm.hipods.mapreduce"
class="org.apache.hadoop.eclipse.actions.OpenNewMRClassWizardAction"
param1="${this}" />
</subitem>
</repeated-subitem>
</item>
</cheatsheet>
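The cheat sheet above has the user generate a Mapper, a Reducer and a Driver with the class wizards. For orientation, a minimal word-count-style sketch of those three pieces is given below, assuming the classic org.apache.hadoop.mapred API of this Hadoop generation; the package, class names and logic are illustrative, not the wizards' actual output.

// Illustrative sketch only: a minimal Mapper, Reducer and Driver using the old
// org.apache.hadoop.mapred API. Names and the word-count logic are examples.
package mapreduce.test;

import java.io.IOException;
import java.util.Iterator;
import java.util.StringTokenizer;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;

public class WordCountDriver {

  /** Mapper: emits (word, 1) for every token in a line of input. */
  public static class TokenMapper extends MapReduceBase
      implements Mapper<LongWritable, Text, Text, IntWritable> {
    private static final IntWritable ONE = new IntWritable(1);
    private final Text word = new Text();

    public void map(LongWritable key, Text value,
        OutputCollector<Text, IntWritable> output, Reporter reporter)
        throws IOException {
      StringTokenizer tokens = new StringTokenizer(value.toString());
      while (tokens.hasMoreTokens()) {
        word.set(tokens.nextToken());
        output.collect(word, ONE);
      }
    }
  }

  /** Reducer: sums the counts emitted for each word. */
  public static class SumReducer extends MapReduceBase
      implements Reducer<Text, IntWritable, Text, IntWritable> {
    public void reduce(Text key, Iterator<IntWritable> values,
        OutputCollector<Text, IntWritable> output, Reporter reporter)
        throws IOException {
      int sum = 0;
      while (values.hasNext()) {
        sum += values.next().get();
      }
      output.collect(key, new IntWritable(sum));
    }
  }

  /** Driver: wires the job together and submits it. */
  public static void main(String[] args) throws IOException {
    JobConf conf = new JobConf(WordCountDriver.class);
    conf.setJobName("wordcount");
    conf.setOutputKeyClass(Text.class);
    conf.setOutputValueClass(IntWritable.class);
    conf.setMapperClass(TokenMapper.class);
    conf.setReducerClass(SumReducer.class);
    FileInputFormat.setInputPaths(conf, new Path(args[0]));
    FileOutputFormat.setOutputPath(conf, new Path(args[1]));
    JobClient.runJob(conf);
  }
}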

[17 binary image files (plugin icons and graphics) added; contents not shown]

View File

@ -0,0 +1,121 @@
<?xml version="1.0" encoding="UTF-8"?>
<compositeCheatsheet name="IBM MapReduce Tools for Eclipse">
<taskGroup name="Develop Hadoop Applications" kind="set">
<intro
href="http://www.alphaworks.ibm.com/tech/mapreducetools">
IBM(R) MapReduce Tools for Eclipse enables you to write
distributed applications based on the MapReduce paradigm
using the Apache Hadoop runtime. This cheat sheet will walk
you through the steps needed to write a MapReduce
application and run it on a Hadoop server.
</intro>
<onCompletion>
</onCompletion>
<taskGroup name="Initial Setup" kind="sequence" skip="true">
<intro>
This task takes you through the steps to set up the
Hadoop environment with the MapReduce Tools. If you
already have Hadoop installed and linked to Eclipse, you
can skip this task.
</intro>
<onCompletion>
Congratulations! You have now installed Hadoop on your
computer and linked it with the MapReduce Tools.
</onCompletion>
<task kind="cheatsheet"
name="Download and unzip Apache Hadoop distribution">
<intro>
Hadoop must be downloaded to a place where Eclipse
can access its libraries. This task covers the steps
needed to do so.
</intro>
<param name="showIntro" value="false" />
<param name="path" value="Setup.xml" />
<onCompletion>
The plugin currently supports Hadoop v0.7.2 through
0.12.2. Click the link for the most recent release that you
feel comfortable installing.
</onCompletion>
</task>
<task kind="cheatsheet"
name="Specify path to Apache Hadoop distribution">
...
<intro>
This tutorial shows you how to set the default
Hadoop directory for the plugin.
</intro>
<param name="showIntro" value="false" />
<param name="path" value="SetHadoopPath.xml" />
</task>
</taskGroup>
<taskGroup name="Create and run a MapReduce project"
kind="sequence" skip="true">
<intro>
This section walks you through the steps to create and
run your MapReduce project.
</intro>
<task kind="cheatsheet" name="Create a MapReduce project"
skip="true">
<intro>
This tutorial guides you through the creation of a
simple MapReduce project with three MapReduce
classes: a Mapper, a Reducer, and a Driver.
</intro>
<param name="showIntro" value="false" />
<param name="path" value="CreateProj.xml" />
<onCompletion>
Congratulations! You have now mastered the steps for
creating a Hadoop project.
</onCompletion>
</task>
<task kind="cheatsheet"
name="Run a MapReduce application">
<param name="path" value="RunProj.xml" />
<onCompletion>
Congratulations! You have now mastered the steps for
running a Hadoop application.
</onCompletion>
</task>
</taskGroup>
<taskGroup name="Using a MapReduce cluster" kind="set"
skip="true">
<intro>
The MapReduce Tools for Eclipse plugin lets you
browse and upload files to the DFS of a MapReduce cluster.
</intro>
<onCompletion>
Congratulations! You have completed the tutorials on using a
MapReduce Cluster.
</onCompletion>
<task kind="cheatsheet"
name="Connect to a MapReduce cluster" skip="true">
<intro>
This tutorial explains how to show files in the DFS of a
MapReduce cluster.
</intro>
<param name="showIntro" value="false" />
<param name="path" value="ConnectDFS.xml" />
</task>
<task kind="cheatsheet" id="viewFiles"
name="Viewing file contents on the Hadoop Distributed File System (HDFS)">
<intro>
Simply double-click on any file in the DFS in the Project
Explorer view.
</intro>
</task>
<task kind="cheatsheet"
name="Transfer files to the Hadoop Distributed File System (HDFS)">
<intro>
Right-click on an existing directory in the DFS.<br />
Choose the <b>Import from local directory</b> option.
<br />
Note that files can only be uploaded to the HDFS at this time.
</intro>
</task>
</taskGroup>
</taskGroup>
</compositeCheatsheet>

[2 binary image files (plugin icons and graphics) added; contents not shown]

View File

@ -0,0 +1,24 @@
<?xml version="1.0" encoding="UTF-8"?>
<cheatsheet title="MapReduce project run tutorial">
<intro>
<description>
This tutorial shows you how to run your newly created
MapReduce Project in one of two fashions: locally as a Java
Application, or on a Hadoop Server.
</description>
</intro>
<item title="Run as Java Application">
<description>
To run your MapReduce application locally, right-click on
your Driver class in the Package Explorer and select <b>Run as
/ Java Application</b>.
</description>
</item>
<item title="Run on Hadoop Server">
<description>
To run your MapReduce application on a Hadoop server, right-click on
your Driver class in the Package Explorer and select <b>Run as
/ Run on Hadoop</b>.
</description>
</item>
</cheatsheet>
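The cheat sheet above distinguishes running locally from running on a Hadoop server. As a rough sketch of what that difference amounts to in configuration terms, the fragment below points a JobConf at either the local job runner or a cluster; the property names are the classic mapred.job.tracker and fs.default.name keys of this Hadoop generation, while the host names and ports are hypothetical placeholders.

// Sketch only: switching a job between local and cluster execution via
// configuration. Host names and ports are hypothetical placeholders.
import org.apache.hadoop.mapred.JobConf;

public class RunModes {
  /** Configure conf so the job runs in-process with the local job runner. */
  static void configureLocal(JobConf conf) {
    conf.set("mapred.job.tracker", "local");   // local job runner
    conf.set("fs.default.name", "file:///");   // local file system
  }

  /** Configure conf so the job is submitted to a (hypothetical) cluster. */
  static void configureCluster(JobConf conf) {
    conf.set("mapred.job.tracker", "jobtracker.example.com:9001");    // JobTracker
    conf.set("fs.default.name", "hdfs://namenode.example.com:9000");  // HDFS
  }
}

In practice the "Run on Hadoop" shortcut is intended to supply these values from the server definition, so they normally do not need to be hard-coded in the driver.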

View File

@ -0,0 +1,25 @@
<?xml version="1.0" encoding="UTF-8"?>
<cheatsheet title="Set default Hadoop path tutorial">
<intro>
<description>
This tutorial shows you how to set the default Hadoop
directory for the plugin.
</description>
</intro>
<item title="Open Plugin Preferences window">
<description>
To set the default Hadoop directory, open the plugin
preferences from the menu option
<b>Window > Preferences</b>. <br />
Go to the <b>Hadoop Home Directory</b>
preference, and enter the installation directory there.
Use the following embedded command to open the Preferences
window:
</description>
<action pluginId="org.eclipse.jdt.ui"
class="org.eclipse.ui.internal.OpenPreferencesAction" />
</item>
</cheatsheet>

View File

@ -0,0 +1,18 @@
<?xml version="1.0" encoding="UTF-8"?>
<cheatsheet title="Open Browser">
<intro>
<description>This cheat sheet launches a browser to the Hadoop website.</description>
</intro>
<item title="Open Browser">
<description>
Go to http://hadoop.apache.org/core/, and follow
links to download the latest stable distribution of
Hadoop.
Use the following embedded command to launch the Hadoop Web site
in a browser:</description>
<command serialization=
"org.eclipse.ui.browser.openBrowser(url=http://hadoop.apache.org/core)"/>
</item>
</cheatsheet>

[31 binary image files (plugin icons and graphics) added; contents not shown]

View File

@ -0,0 +1,77 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.eclipse;
import org.apache.hadoop.eclipse.servers.ServerRegistry;
import org.eclipse.ui.plugin.AbstractUIPlugin;
import org.osgi.framework.BundleContext;
/**
* The activator class controls the plug-in life cycle
*/
public class Activator extends AbstractUIPlugin {
/**
* The plug-in ID
*/
public static final String PLUGIN_ID = "org.apache.hadoop.eclipse";
/**
* The shared unique instance (singleton)
*/
private static Activator plugin;
/**
* Constructor
*/
public Activator() {
synchronized (Activator.class) {
if (plugin != null) {
// Not a singleton!?
throw new RuntimeException("Activator for " + PLUGIN_ID
+ " is not a singleton");
}
plugin = this;
}
}
/* @inheritDoc */
@Override
public void start(BundleContext context) throws Exception {
super.start(context);
}
/* @inheritDoc */
@Override
public void stop(BundleContext context) throws Exception {
ServerRegistry.getInstance().dispose();
plugin = null;
super.stop(context);
}
/**
* Returns the shared unique instance (singleton)
*
* @return the shared unique instance (singleton)
*/
public static Activator getDefault() {
return plugin;
}
}

View File

@ -0,0 +1,45 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.eclipse;
import org.eclipse.jface.dialogs.MessageDialog;
import org.eclipse.swt.widgets.Display;
/**
* Error dialog helper
*/
public class ErrorMessageDialog {
public static void display(final String title, final String message) {
Display.getDefault().syncExec(new Runnable() {
public void run() {
MessageDialog.openError(Display.getDefault().getActiveShell(),
title, message);
}
});
}
public static void display(Exception e) {
display("An exception has occured!", "Exception description:\n"
+ e.getLocalizedMessage());
}
}

View File

@ -0,0 +1,95 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.eclipse;
import org.eclipse.debug.ui.IDebugUIConstants;
import org.eclipse.jdt.ui.JavaUI;
import org.eclipse.ui.IFolderLayout;
import org.eclipse.ui.IPageLayout;
import org.eclipse.ui.IPerspectiveFactory;
import org.eclipse.ui.console.IConsoleConstants;
/**
* Creates links to the new MapReduce-based wizards and views for a MapReduce
* perspective
*
*/
public class HadoopPerspectiveFactory implements IPerspectiveFactory {
public void createInitialLayout(IPageLayout layout) {
layout.addNewWizardShortcut("org.apache.hadoop.eclipse.NewDriverWizard");
layout.addNewWizardShortcut("org.apache.hadoop.eclipse.NewMapperWizard");
layout
.addNewWizardShortcut("org.apache.hadoop.eclipse.NewReducerWizard");
IFolderLayout left =
layout.createFolder("org.apache.hadoop.eclipse.perspective.left",
IPageLayout.LEFT, 0.2f, layout.getEditorArea());
left.addView("org.eclipse.ui.navigator.ProjectExplorer");
IFolderLayout bottom =
layout.createFolder("org.apache.hadoop.eclipse.perspective.bottom",
IPageLayout.BOTTOM, 0.7f, layout.getEditorArea());
bottom.addView(IPageLayout.ID_PROBLEM_VIEW);
bottom.addView(IPageLayout.ID_TASK_LIST);
bottom.addView(JavaUI.ID_JAVADOC_VIEW);
bottom.addView("org.apache.hadoop.eclipse.view.servers");
bottom.addPlaceholder(JavaUI.ID_SOURCE_VIEW);
bottom.addPlaceholder(IPageLayout.ID_PROGRESS_VIEW);
bottom.addPlaceholder(IConsoleConstants.ID_CONSOLE_VIEW);
bottom.addPlaceholder(IPageLayout.ID_BOOKMARKS);
IFolderLayout right =
layout.createFolder("org.apache.hadoop.eclipse.perspective.right",
IPageLayout.RIGHT, 0.8f, layout.getEditorArea());
right.addView(IPageLayout.ID_OUTLINE);
right.addView("org.eclipse.ui.cheatsheets.views.CheatSheetView");
// right.addView(layout.ID); .. cheat sheet here
layout.addActionSet(IDebugUIConstants.LAUNCH_ACTION_SET);
layout.addActionSet(JavaUI.ID_ACTION_SET);
layout.addActionSet(JavaUI.ID_CODING_ACTION_SET);
layout.addActionSet(JavaUI.ID_ELEMENT_CREATION_ACTION_SET);
layout.addActionSet(IPageLayout.ID_NAVIGATE_ACTION_SET);
layout.addActionSet(JavaUI.ID_SEARCH_ACTION_SET);
layout
.addNewWizardShortcut("org.eclipse.jdt.ui.wizards.NewPackageCreationWizard");
layout
.addNewWizardShortcut("org.eclipse.jdt.ui.wizards.NewClassCreationWizard");
layout
.addNewWizardShortcut("org.eclipse.jdt.ui.wizards.NewInterfaceCreationWizard");
layout
.addNewWizardShortcut("org.eclipse.jdt.ui.wizards.NewEnumCreationWizard");
layout
.addNewWizardShortcut("org.eclipse.jdt.ui.wizards.NewAnnotationCreationWizard");
layout
.addNewWizardShortcut("org.eclipse.jdt.ui.wizards.NewSourceFolderCreationWizard");
layout
.addNewWizardShortcut("org.eclipse.jdt.ui.wizards.NewSnippetFileCreationWizard");
layout.addNewWizardShortcut("org.eclipse.ui.wizards.new.folder");
layout.addNewWizardShortcut("org.eclipse.ui.wizards.new.file");
layout
.addNewWizardShortcut("org.eclipse.ui.editors.wizards.UntitledTextFileWizard");
// CheatSheetViewerFactory.createCheatSheetView().setInput("org.apache.hadoop.eclipse.cheatsheet");
}
}

View File

@ -0,0 +1,252 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.eclipse;
import java.net.URL;
import java.util.HashMap;
import java.util.Map;
import org.eclipse.core.runtime.FileLocator;
import org.eclipse.core.runtime.Path;
import org.eclipse.jface.resource.ImageDescriptor;
import org.eclipse.swt.graphics.Image;
import org.eclipse.ui.ISharedImages;
import org.eclipse.ui.PlatformUI;
import org.eclipse.ui.plugin.AbstractUIPlugin;
import org.osgi.framework.Bundle;
/**
* Icons manager
*/
public class ImageLibrary {
private final Bundle bundle = Activator.getDefault().getBundle();
/**
* Singleton instance
*/
private static volatile ImageLibrary instance = null;
private ISharedImages sharedImages =
PlatformUI.getWorkbench().getSharedImages();
/**
* Where resources (icons, images...) are available in the Bundle
*/
private static final String RESOURCE_DIR = "resources/";
/**
* Public access to image descriptors
*
* @param name
* @return the image descriptor
*/
public static ImageDescriptor get(String name) {
return getInstance().getImageDescriptorByName(name);
}
/**
* Public access to images
*
* @param name
* @return the image
*/
public static Image getImage(String name) {
return getInstance().getImageByName(name);
}
/**
* Singleton access
*
* @return the Image library
*/
public static ImageLibrary getInstance() {
if (instance == null) {
synchronized (ImageLibrary.class) {
if (instance == null)
instance = new ImageLibrary();
}
}
return instance;
}
/**
* Map of registered resources (ImageDescriptor and Image)
*/
private Map<String, ImageDescriptor> descMap =
new HashMap<String, ImageDescriptor>();
private Map<String, Image> imageMap = new HashMap<String, Image>();
/**
* Image library constructor: put image definitions here.
*/
private ImageLibrary() {
/*
* Servers view
*/
newImage("server.view.location.entry", "Elephant-24x24.png");
newImage("server.view.job.entry", "job.gif");
newImage("server.view.action.location.new", "location-new-16x16.png");
newImage("server.view.action.location.edit", "location-edit-16x16.png");
newSharedImage("server.view.action.delete",
ISharedImages.IMG_TOOL_DELETE);
/*
* DFS Browser
*/
newImage("dfs.browser.root.entry", "files.gif");
newImage("dfs.browser.location.entry", "Elephant-16x16.png");
newSharedImage("dfs.browser.folder.entry", ISharedImages.IMG_OBJ_FOLDER);
newSharedImage("dfs.browser.file.entry", ISharedImages.IMG_OBJ_FILE);
// DFS files in editor
newSharedImage("dfs.file.editor", ISharedImages.IMG_OBJ_FILE);
// Actions
newImage("dfs.browser.action.mkdir", "new-folder.png");
newImage("dfs.browser.action.download", "download.png");
newImage("dfs.browser.action.upload_files", "upload.png");
newImage("dfs.browser.action.upload_dir", "upload.png");
newSharedImage("dfs.browser.action.delete",
ISharedImages.IMG_TOOL_DELETE);
newImage("dfs.browser.action.refresh", "refresh.png");
/*
* Wizards
*/
newImage("wizard.mapper.new", "mapwiz.png");
newImage("wizard.reducer.new", "reducewiz.png");
newImage("wizard.driver.new", "driverwiz.png");
newImage("wizard.mapreduce.project.new", "projwiz.png");
}
/**
* Accessor to registered image descriptors
*
* @param name name the image descriptor was registered under
* @return the image descriptor, or null if none is registered
*/
private ImageDescriptor getImageDescriptorByName(String name) {
return this.descMap.get(name);
}
/**
* Accessor to registered images
*
* @param name name the image was registered under
* @return the image, or null if none is registered
*/
private Image getImageByName(String name) {
return this.imageMap.get(name);
}
/**
* Access to platform shared images
*
* @param name shared image name (a constant from {@link ISharedImages})
* @return the shared image descriptor, or null if not found
*/
private ImageDescriptor getSharedByName(String name) {
return sharedImages.getImageDescriptor(name);
}
/**
* Load and register a new image. If the image resource does not exist or
* fails to load, a default "error" resource is supplied.
*
* @param name name of the image
* @param filename name of the file containing the image
* @return whether the image has correctly been loaded
*/
private boolean newImage(String name, String filename) {
ImageDescriptor id;
boolean success;
try {
URL fileURL =
FileLocator.find(bundle, new Path(RESOURCE_DIR + filename), null);
id = ImageDescriptor.createFromURL(FileLocator.toFileURL(fileURL));
success = true;
} catch (Exception e) {
e.printStackTrace();
id = ImageDescriptor.getMissingImageDescriptor();
// id = getSharedByName(ISharedImages.IMG_OBJS_ERROR_TSK);
success = false;
}
descMap.put(name, id);
imageMap.put(name, id.createImage(true));
return success;
}
/**
* Register an image from the workspace shared image pool. If the image
* resource does not exist or fails to load, a default "error" resource is
* supplied.
*
* @param name name of the image
* @param sharedName name of the shared image ({@link ISharedImages})
* @return whether the image has correctly been loaded
*/
private boolean newSharedImage(String name, String sharedName) {
boolean success = true;
ImageDescriptor id = getSharedByName(sharedName);
if (id == null) {
id = ImageDescriptor.getMissingImageDescriptor();
// id = getSharedByName(ISharedImages.IMG_OBJS_ERROR_TSK);
success = false;
}
descMap.put(name, id);
imageMap.put(name, id.createImage(true));
return success;
}
/**
* Register an image bundled in another plug-in. If the image resource does
* not exist or fails to load, a default "error" resource is supplied.
*
* @param name name of the image
* @param pluginId identifier of the plug-in that contains the image
* @param filename path of the image file within that plug-in
* @return whether the image has correctly been loaded
*/
private boolean newPluginImage(String name, String pluginId,
String filename) {
boolean success = true;
ImageDescriptor id =
AbstractUIPlugin.imageDescriptorFromPlugin(pluginId, filename);
if (id == null) {
id = ImageDescriptor.getMissingImageDescriptor();
// id = getSharedByName(ISharedImages.IMG_OBJS_ERROR_TSK);
success = false;
}
descMap.put(name, id);
imageMap.put(name, id.createImage(true));
return success;
}
}
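
For context, a minimal usage sketch (not part of this commit; the label-provider class name is made up): client UI code fetches the descriptors and images registered above by their symbolic keys, e.g. the DFS browser icons.

package org.apache.hadoop.eclipse;

import org.eclipse.jface.viewers.LabelProvider;
import org.eclipse.swt.graphics.Image;

/** Hypothetical label provider reusing icons registered by ImageLibrary. */
class DfsEntryLabelProviderSketch extends LabelProvider {
  @Override
  public Image getImage(Object element) {
    // Keys are the ones registered in ImageLibrary's constructor above.
    boolean isFolder = (element instanceof String)
        && ((String) element).endsWith("/");
    return ImageLibrary.getImage(isFolder ? "dfs.browser.folder.entry"
        : "dfs.browser.file.entry");
  }
}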

View File

@ -0,0 +1,146 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.eclipse;
import java.io.File;
import java.io.FileFilter;
import java.net.URL;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.logging.Level;
import java.util.logging.Logger;
import org.eclipse.core.resources.IProject;
import org.eclipse.core.resources.IProjectNature;
import org.eclipse.core.runtime.CoreException;
import org.eclipse.core.runtime.NullProgressMonitor;
import org.eclipse.core.runtime.Path;
import org.eclipse.core.runtime.QualifiedName;
import org.eclipse.jdt.core.IClasspathEntry;
import org.eclipse.jdt.core.IJavaProject;
import org.eclipse.jdt.core.JavaCore;
/**
* Class to configure and deconfigure an Eclipse project with the MapReduce
* project nature.
*/
public class MapReduceNature implements IProjectNature {
public static final String ID = "org.apache.hadoop.eclipse.Nature";
private IProject project;
static Logger log = Logger.getLogger(MapReduceNature.class.getName());
/**
* Configures an Eclipse project as a Map/Reduce project by adding the
* Hadoop libraries to a project's classpath.
*/
public void configure() throws CoreException {
String path =
project.getPersistentProperty(new QualifiedName(Activator.PLUGIN_ID,
"hadoop.runtime.path"));
File dir = new File(path);
final ArrayList<File> coreJars = new ArrayList<File>();
dir.listFiles(new FileFilter() {
public boolean accept(File pathname) {
String fileName = pathname.getName();
// get the hadoop core jar without touching test or examples
// older versions of hadoop don't use the word "core" -- eyhung
if ((fileName.indexOf("hadoop") != -1) && (fileName.endsWith("jar"))
&& (fileName.indexOf("test") == -1)
&& (fileName.indexOf("examples") == -1)) {
coreJars.add(pathname);
}
return false; // we don't care what this returns
}
});
File dir2 = new File(path + File.separatorChar + "lib");
if (dir2.exists() && dir2.isDirectory()) {
dir2.listFiles(new FileFilter() {
public boolean accept(File pathname) {
if ((!pathname.isDirectory())
&& (pathname.getName().endsWith("jar"))) {
coreJars.add(pathname);
}
return false; // we don't care what this returns
}
});
}
// Add Hadoop libraries onto classpath
IJavaProject javaProject = JavaCore.create(getProject());
// Bundle bundle = Activator.getDefault().getBundle();
try {
IClasspathEntry[] currentCp = javaProject.getRawClasspath();
IClasspathEntry[] newCp =
new IClasspathEntry[currentCp.length + coreJars.size()];
System.arraycopy(currentCp, 0, newCp, 0, currentCp.length);
final Iterator<File> i = coreJars.iterator();
int count = 0;
while (i.hasNext()) {
// for (int i = 0; i < s_coreJarNames.length; i++) {
final File f = (File) i.next();
// URL url = FileLocator.toFileURL(FileLocator.find(bundle, new
// Path("lib/" + s_coreJarNames[i]), null));
URL url = f.toURI().toURL();
log.finer("hadoop library url.getPath() = " + url.getPath());
newCp[newCp.length - 1 - count] =
JavaCore.newLibraryEntry(new Path(url.getPath()), null, null);
count++;
}
javaProject.setRawClasspath(newCp, new NullProgressMonitor());
} catch (Exception e) {
log.log(Level.SEVERE, "Exception generated in "
+ this.getClass().getCanonicalName(), e);
}
}
/**
* Deconfigure a project from MapReduce status. Currently unimplemented.
*/
public void deconfigure() throws CoreException {
// TODO Auto-generated method stub
}
/**
* Returns the project to which this project nature applies.
*/
public IProject getProject() {
return this.project;
}
/**
* Sets the project to which this nature applies. Used when instantiating
* this project nature at runtime.
*/
public void setProject(IProject project) {
this.project = project;
}
}
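
As a side note, here is a self-contained sketch of the jar-selection rule that configure() applies; the helper name and standalone form are made up so the rule can be exercised outside Eclipse, but the filtering is the same.

package org.apache.hadoop.eclipse;

import java.io.File;
import java.util.ArrayList;
import java.util.List;

/** Hypothetical helper mirroring the jar filtering in MapReduceNature.configure(). */
class HadoopJarScanSketch {
  /** Collects hadoop core jars from dir plus every jar under dir/lib. */
  static List<File> collectHadoopJars(File dir) {
    List<File> jars = new ArrayList<File>();
    File[] top = dir.listFiles();
    if (top != null) {
      for (File f : top) {
        String name = f.getName();
        // same rule as configure(): a hadoop jar, but not a test or examples jar
        if (name.indexOf("hadoop") != -1 && name.endsWith("jar")
            && name.indexOf("test") == -1 && name.indexOf("examples") == -1) {
          jars.add(f);
        }
      }
    }
    File[] libJars = new File(dir, "lib").listFiles();
    if (libJars != null) {
      for (File f : libJars) {
        if (!f.isDirectory() && f.getName().endsWith("jar")) {
          jars.add(f);
        }
      }
    }
    return jars;
  }
}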

View File

@ -0,0 +1,99 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.eclipse;
import org.eclipse.core.resources.IFile;
import org.eclipse.core.runtime.CoreException;
import org.eclipse.core.runtime.IProgressMonitor;
import org.eclipse.jdt.core.IJavaElement;
import org.eclipse.jdt.internal.ui.wizards.NewElementWizard;
import org.eclipse.jface.operation.IRunnableWithProgress;
import org.eclipse.jface.viewers.IStructuredSelection;
import org.eclipse.ui.INewWizard;
import org.eclipse.ui.IWorkbench;
/**
* Wizard for creating a new Driver class (a class that runs a MapReduce job).
*
*/
public class NewDriverWizard extends NewElementWizard implements INewWizard,
IRunnableWithProgress {
private NewDriverWizardPage page;
/*
* @Override public boolean performFinish() { }
*/
public void run(IProgressMonitor monitor) {
try {
page.createType(monitor);
} catch (CoreException e) {
// TODO Auto-generated catch block
e.printStackTrace();
} catch (InterruptedException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
}
public NewDriverWizard() {
setWindowTitle("New MapReduce Driver");
}
@Override
public void init(IWorkbench workbench, IStructuredSelection selection) {
super.init(workbench, selection);
page = new NewDriverWizardPage();
addPage(page);
page.setSelection(selection);
}
@Override
/**
* Performs any actions appropriate in response to the user having pressed
* the Finish button, or refuses if finishing now is not permitted.
*/
public boolean performFinish() {
if (super.performFinish()) {
if (getCreatedElement() != null) {
selectAndReveal(page.getModifiedResource());
openResource((IFile) page.getModifiedResource());
}
return true;
} else {
return false;
}
}
@Override
/**
* Runs the type creation (see {@link #run(IProgressMonitor)}) when the
* wizard page finishes.
*/
protected void finishPage(IProgressMonitor monitor)
throws InterruptedException, CoreException {
this.run(monitor);
}
@Override
public IJavaElement getCreatedElement() {
return page.getCreatedType().getPrimaryElement();
}
}

View File

@ -0,0 +1,263 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.eclipse;
import java.io.IOException;
import java.util.ArrayList;
import org.eclipse.core.runtime.CoreException;
import org.eclipse.core.runtime.FileLocator;
import org.eclipse.core.runtime.IProgressMonitor;
import org.eclipse.core.runtime.IStatus;
import org.eclipse.core.runtime.Path;
import org.eclipse.jdt.core.IType;
import org.eclipse.jdt.core.JavaModelException;
import org.eclipse.jdt.core.search.SearchEngine;
import org.eclipse.jdt.ui.IJavaElementSearchConstants;
import org.eclipse.jdt.ui.JavaUI;
import org.eclipse.jdt.ui.wizards.NewTypeWizardPage;
import org.eclipse.jface.dialogs.ProgressMonitorDialog;
import org.eclipse.jface.resource.ImageDescriptor;
import org.eclipse.jface.viewers.IStructuredSelection;
import org.eclipse.jface.window.Window;
import org.eclipse.swt.SWT;
import org.eclipse.swt.layout.GridData;
import org.eclipse.swt.layout.GridLayout;
import org.eclipse.swt.widgets.Button;
import org.eclipse.swt.widgets.Composite;
import org.eclipse.swt.widgets.Event;
import org.eclipse.swt.widgets.Label;
import org.eclipse.swt.widgets.Listener;
import org.eclipse.swt.widgets.Text;
import org.eclipse.ui.dialogs.SelectionDialog;
/**
* Pre-fills the new MapReduce driver class with a template.
*
*/
public class NewDriverWizardPage extends NewTypeWizardPage {
private Button isCreateMapMethod;
private Text reducerText;
private Text mapperText;
private final boolean showContainerSelector;
public NewDriverWizardPage() {
this(true);
}
public NewDriverWizardPage(boolean showContainerSelector) {
super(true, "MapReduce Driver");
this.showContainerSelector = showContainerSelector;
setTitle("MapReduce Driver");
setDescription("Create a new MapReduce driver.");
setImageDescriptor(ImageLibrary.get("wizard.driver.new"));
}
public void setSelection(IStructuredSelection selection) {
initContainerPage(getInitialJavaElement(selection));
initTypePage(getInitialJavaElement(selection));
}
@Override
/**
* Creates the new type using the entered field values.
*/
public void createType(IProgressMonitor monitor) throws CoreException,
InterruptedException {
super.createType(monitor);
}
@Override
protected void createTypeMembers(final IType newType, ImportsManager imports,
final IProgressMonitor monitor) throws CoreException {
super.createTypeMembers(newType, imports, monitor);
imports.addImport("org.apache.hadoop.fs.Path");
imports.addImport("org.apache.hadoop.io.Text");
imports.addImport("org.apache.hadoop.io.IntWritable");
imports.addImport("org.apache.hadoop.mapred.JobClient");
imports.addImport("org.apache.hadoop.mapred.JobConf");
imports.addImport("org.apache.hadoop.mapred.Reducer");
imports.addImport("org.apache.hadoop.mapred.Mapper");
/**
* TODO(jz) - move most code out of the runnable
*/
getContainer().getShell().getDisplay().syncExec(new Runnable() {
public void run() {
String method = "public static void main(String[] args) {\n JobClient client = new JobClient();";
method += "JobConf conf = new JobConf("
+ newType.getFullyQualifiedName() + ".class);\n\n";
method += "// TODO: specify output types\nconf.setOutputKeyClass(Text.class);\nconf.setOutputValueClass(IntWritable.class);\n\n";
method += "// TODO: specify input and output DIRECTORIES (not files)\nconf.setInputPath(new Path(\"src\"));\nconf.setOutputPath(new Path(\"out\"));\n\n";
if (mapperText.getText().length() > 0) {
method += "conf.setMapperClass(" + mapperText.getText()
+ ".class);\n\n";
} else {
method += "// TODO: specify a mapper\nconf.setMapperClass(org.apache.hadoop.mapred.lib.IdentityMapper.class);\n\n";
}
if (reducerText.getText().length() > 0) {
method += "conf.setReducerClass(" + reducerText.getText()
+ ".class);\n\n";
} else {
method += "// TODO: specify a reducer\nconf.setReducerClass(org.apache.hadoop.mapred.lib.IdentityReducer.class);\n\n";
}
method += "client.setConf(conf);\n";
method += "try {\n\tJobClient.runJob(conf);\n} catch (Exception e) {\n"
+ "\te.printStackTrace();\n}\n";
method += "}\n";
try {
newType.createMethod(method, null, false, monitor);
} catch (JavaModelException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
}
});
}
public void createControl(Composite parent) {
// super.createControl(parent);
initializeDialogUnits(parent);
Composite composite = new Composite(parent, SWT.NONE);
GridLayout layout = new GridLayout();
layout.numColumns = 4;
composite.setLayout(layout);
createContainerControls(composite, 4);
createPackageControls(composite, 4);
createSeparator(composite, 4);
createTypeNameControls(composite, 4);
createSuperClassControls(composite, 4);
createSuperInterfacesControls(composite, 4);
createSeparator(composite, 4);
createMapperControls(composite);
createReducerControls(composite);
if (!showContainerSelector) {
setPackageFragmentRoot(null, false);
setSuperClass("java.lang.Object", false);
setSuperInterfaces(new ArrayList(), false);
}
setControl(composite);
setFocus();
handleFieldChanged(CONTAINER);
// setSuperClass("org.apache.hadoop.mapred.MapReduceBase", true);
// setSuperInterfaces(Arrays.asList(new String[]{
// "org.apache.hadoop.mapred.Mapper" }), true);
}
@Override
protected void handleFieldChanged(String fieldName) {
super.handleFieldChanged(fieldName);
validate();
}
private void validate() {
if (showContainerSelector) {
updateStatus(new IStatus[] { fContainerStatus, fPackageStatus,
fTypeNameStatus, fSuperClassStatus, fSuperInterfacesStatus });
} else {
updateStatus(new IStatus[] { fTypeNameStatus, });
}
}
private void createMapperControls(Composite composite) {
this.mapperText = createBrowseClassControl(composite, "Ma&pper:",
"&Browse...", "org.apache.hadoop.mapred.Mapper", "Mapper Selection");
}
private void createReducerControls(Composite composite) {
this.reducerText = createBrowseClassControl(composite, "&Reducer:",
"Browse&...", "org.apache.hadoop.mapred.Reducer", "Reducer Selection");
}
private Text createBrowseClassControl(final Composite composite,
final String string, String browseButtonLabel,
final String baseClassName, final String dialogTitle) {
Label label = new Label(composite, SWT.NONE);
GridData data = new GridData(GridData.FILL_HORIZONTAL);
label.setText(string);
label.setLayoutData(data);
final Text text = new Text(composite, SWT.SINGLE | SWT.BORDER);
GridData data2 = new GridData(GridData.FILL_HORIZONTAL);
data2.horizontalSpan = 2;
text.setLayoutData(data2);
Button browse = new Button(composite, SWT.NONE);
browse.setText(browseButtonLabel);
GridData data3 = new GridData(GridData.FILL_HORIZONTAL);
browse.setLayoutData(data3);
browse.addListener(SWT.Selection, new Listener() {
public void handleEvent(Event event) {
IType baseType;
try {
baseType = getPackageFragmentRoot().getJavaProject().findType(
baseClassName);
// edit this to limit the scope
SelectionDialog dialog = JavaUI.createTypeDialog(
composite.getShell(), new ProgressMonitorDialog(composite
.getShell()), SearchEngine.createHierarchyScope(baseType),
IJavaElementSearchConstants.CONSIDER_CLASSES, false);
dialog.setMessage("&Choose a type:");
dialog.setBlockOnOpen(true);
dialog.setTitle(dialogTitle);
dialog.open();
if ((dialog.getReturnCode() == Window.OK)
&& (dialog.getResult().length > 0)) {
IType type = (IType) dialog.getResult()[0];
text.setText(type.getFullyQualifiedName());
}
} catch (JavaModelException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
}
});
if (!showContainerSelector) {
label.setEnabled(false);
text.setEnabled(false);
browse.setEnabled(false);
}
return text;
}
}
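
To make the string template above easier to read: for a driver type named, say, WordCountDriver (a hypothetical name), with neither a mapper nor a reducer chosen in the dialog, the generated class looks roughly like this (whitespace approximate; the wizard also adds imports for Mapper and Reducer that the default body does not use).

package sample; // hypothetical package chosen in the wizard

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;

public class WordCountDriver {
  public static void main(String[] args) {
    JobClient client = new JobClient();
    JobConf conf = new JobConf(WordCountDriver.class);

    // TODO: specify output types
    conf.setOutputKeyClass(Text.class);
    conf.setOutputValueClass(IntWritable.class);

    // TODO: specify input and output DIRECTORIES (not files)
    conf.setInputPath(new Path("src"));
    conf.setOutputPath(new Path("out"));

    // TODO: specify a mapper
    conf.setMapperClass(org.apache.hadoop.mapred.lib.IdentityMapper.class);

    // TODO: specify a reducer
    conf.setReducerClass(org.apache.hadoop.mapred.lib.IdentityReducer.class);

    client.setConf(conf);
    try {
      JobClient.runJob(conf);
    } catch (Exception e) {
      e.printStackTrace();
    }
  }
}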

View File

@ -0,0 +1,411 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.eclipse;
import java.io.File;
import java.io.FilenameFilter;
import java.lang.reflect.InvocationTargetException;
import java.util.logging.Level;
import java.util.logging.Logger;
import org.apache.hadoop.eclipse.preferences.MapReducePreferencePage;
import org.apache.hadoop.eclipse.preferences.PreferenceConstants;
import org.eclipse.core.resources.IProject;
import org.eclipse.core.resources.IProjectDescription;
import org.eclipse.core.resources.ResourcesPlugin;
import org.eclipse.core.runtime.CoreException;
import org.eclipse.core.runtime.IConfigurationElement;
import org.eclipse.core.runtime.IExecutableExtension;
import org.eclipse.core.runtime.IProgressMonitor;
import org.eclipse.core.runtime.NullProgressMonitor;
import org.eclipse.core.runtime.Path;
import org.eclipse.core.runtime.QualifiedName;
import org.eclipse.core.runtime.SubProgressMonitor;
import org.eclipse.jdt.ui.wizards.NewJavaProjectWizardPage;
import org.eclipse.jface.dialogs.IDialogConstants;
import org.eclipse.jface.operation.IRunnableWithProgress;
import org.eclipse.jface.preference.PreferenceDialog;
import org.eclipse.jface.preference.PreferenceManager;
import org.eclipse.jface.preference.PreferenceNode;
import org.eclipse.jface.viewers.IStructuredSelection;
import org.eclipse.jface.wizard.IWizardPage;
import org.eclipse.jface.wizard.Wizard;
import org.eclipse.swt.SWT;
import org.eclipse.swt.events.SelectionEvent;
import org.eclipse.swt.events.SelectionListener;
import org.eclipse.swt.layout.GridData;
import org.eclipse.swt.layout.GridLayout;
import org.eclipse.swt.widgets.Button;
import org.eclipse.swt.widgets.Composite;
import org.eclipse.swt.widgets.DirectoryDialog;
import org.eclipse.swt.widgets.Group;
import org.eclipse.swt.widgets.Link;
import org.eclipse.swt.widgets.Text;
import org.eclipse.ui.IWorkbench;
import org.eclipse.ui.IWorkbenchWizard;
import org.eclipse.ui.PlatformUI;
import org.eclipse.ui.dialogs.WizardNewProjectCreationPage;
import org.eclipse.ui.wizards.newresource.BasicNewProjectResourceWizard;
/**
* Wizard for creating a new MapReduce Project
*
*/
public class NewMapReduceProjectWizard extends Wizard implements
IWorkbenchWizard, IExecutableExtension {
static Logger log =
Logger.getLogger(NewMapReduceProjectWizard.class.getName());
private HadoopFirstPage firstPage;
private NewJavaProjectWizardPage javaPage;
public NewDriverWizardPage newDriverPage;
private IConfigurationElement config;
public NewMapReduceProjectWizard() {
setWindowTitle("New MapReduce Project Wizard");
}
public void init(IWorkbench workbench, IStructuredSelection selection) {
}
@Override
public boolean canFinish() {
return firstPage.isPageComplete() && javaPage.isPageComplete()
// && ((!firstPage.generateDriver.getSelection())
// || newDriverPage.isPageComplete()
;
}
@Override
public IWizardPage getNextPage(IWizardPage page) {
// if (page == firstPage
// && firstPage.generateDriver.getSelection()
// )
// {
// return newDriverPage; // if "generate mapper" checked, second page is
// new driver page
// }
// else
// {
IWizardPage answer = super.getNextPage(page);
if (answer == newDriverPage) {
return null; // don't flip to new driver page unless "generate
// driver" is checked
} else if (answer == javaPage) {
return answer;
} else {
return answer;
}
// }
}
@Override
public IWizardPage getPreviousPage(IWizardPage page) {
if (page == newDriverPage) {
return firstPage; // newDriverPage, if it appears, is the second
// page
} else {
return super.getPreviousPage(page);
}
}
static class HadoopFirstPage extends WizardNewProjectCreationPage
implements SelectionListener {
public HadoopFirstPage() {
super("New Hadoop Project");
setImageDescriptor(ImageLibrary.get("wizard.mapreduce.project.new"));
}
private Link openPreferences;
private Button workspaceHadoop;
private Button projectHadoop;
private Text location;
private Button browse;
private String path;
public String currentPath;
// private Button generateDriver;
@Override
public void createControl(Composite parent) {
super.createControl(parent);
setTitle("MapReduce Project");
setDescription("Create a MapReduce project.");
Group group = new Group((Composite) getControl(), SWT.NONE);
group.setLayoutData(new GridData(GridData.FILL_HORIZONTAL));
group.setText("Hadoop MapReduce Library Installation Path");
GridLayout layout = new GridLayout(3, true);
layout.marginLeft =
convertHorizontalDLUsToPixels(IDialogConstants.HORIZONTAL_MARGIN);
layout.marginRight =
convertHorizontalDLUsToPixels(IDialogConstants.HORIZONTAL_MARGIN);
layout.marginTop =
convertHorizontalDLUsToPixels(IDialogConstants.VERTICAL_MARGIN);
layout.marginBottom =
convertHorizontalDLUsToPixels(IDialogConstants.VERTICAL_MARGIN);
group.setLayout(layout);
workspaceHadoop = new Button(group, SWT.RADIO);
GridData d =
new GridData(GridData.BEGINNING, GridData.BEGINNING, false, false);
d.horizontalSpan = 2;
workspaceHadoop.setLayoutData(d);
// workspaceHadoop.setText("Use default workbench Hadoop library
// location");
workspaceHadoop.setSelection(true);
updateHadoopDirLabelFromPreferences();
openPreferences = new Link(group, SWT.NONE);
openPreferences
.setText("<a>Configure Hadoop install directory...</a>");
openPreferences.setLayoutData(new GridData(GridData.END,
GridData.CENTER, false, false));
openPreferences.addSelectionListener(this);
projectHadoop = new Button(group, SWT.RADIO);
projectHadoop.setLayoutData(new GridData(GridData.BEGINNING,
GridData.CENTER, false, false));
projectHadoop.setText("Specify Hadoop library location");
location = new Text(group, SWT.SINGLE | SWT.BORDER);
location.setText("");
d = new GridData(GridData.END, GridData.CENTER, true, false);
d.horizontalSpan = 1;
d.widthHint = 250;
d.grabExcessHorizontalSpace = true;
location.setLayoutData(d);
location.setEnabled(false);
browse = new Button(group, SWT.NONE);
browse.setText("Browse...");
browse.setLayoutData(new GridData(GridData.BEGINNING, GridData.CENTER,
false, false));
browse.setEnabled(false);
browse.addSelectionListener(this);
projectHadoop.addSelectionListener(this);
workspaceHadoop.addSelectionListener(this);
// generateDriver = new Button((Composite) getControl(), SWT.CHECK);
// generateDriver.setText("Generate a MapReduce driver");
// generateDriver.addListener(SWT.Selection, new Listener()
// {
// public void handleEvent(Event event) {
// getContainer().updateButtons(); }
// });
}
@Override
public boolean isPageComplete() {
boolean validHadoop = validateHadoopLocation();
if (!validHadoop && isCurrentPage()) {
setErrorMessage("Invalid Hadoop Runtime specified; please click 'Configure Hadoop install directory' or fill in library location input field");
} else {
setErrorMessage(null);
}
return super.isPageComplete() && validHadoop;
}
private boolean validateHadoopLocation() {
FilenameFilter gotHadoopJar = new FilenameFilter() {
public boolean accept(File dir, String name) {
return (name.startsWith("hadoop") && name.endsWith(".jar")
&& (name.indexOf("test") == -1) && (name.indexOf("examples") == -1));
}
};
if (workspaceHadoop.getSelection()) {
this.currentPath = path;
return new Path(path).toFile().exists()
&& (new Path(path).toFile().list(gotHadoopJar).length > 0);
} else {
this.currentPath = location.getText();
File file = new Path(location.getText()).toFile();
return file.exists()
&& (new Path(location.getText()).toFile().list(gotHadoopJar).length > 0);
}
}
private void updateHadoopDirLabelFromPreferences() {
path =
Activator.getDefault().getPreferenceStore().getString(
PreferenceConstants.P_PATH);
if ((path != null) && (path.length() > 0)) {
workspaceHadoop.setText("Use default Hadoop");
} else {
workspaceHadoop.setText("Use default Hadoop (currently not set)");
}
}
public void widgetDefaultSelected(SelectionEvent e) {
}
public void widgetSelected(SelectionEvent e) {
if (e.getSource() == openPreferences) {
PreferenceManager manager = new PreferenceManager();
manager.addToRoot(new PreferenceNode(
"Hadoop Installation Directory", new MapReducePreferencePage()));
PreferenceDialog dialog =
new PreferenceDialog(this.getShell(), manager);
dialog.create();
dialog.setMessage("Select Hadoop Installation Directory");
dialog.setBlockOnOpen(true);
dialog.open();
updateHadoopDirLabelFromPreferences();
} else if (e.getSource() == browse) {
DirectoryDialog dialog = new DirectoryDialog(this.getShell());
dialog
.setMessage("Select a hadoop installation, containing hadoop-X-core.jar");
dialog.setText("Select Hadoop Installation Directory");
String directory = dialog.open();
if (directory != null) {
location.setText(directory);
if (!validateHadoopLocation()) {
setErrorMessage("No Hadoop jar found in specified directory");
} else {
setErrorMessage(null);
}
}
} else if (projectHadoop.getSelection()) {
location.setEnabled(true);
browse.setEnabled(true);
} else {
location.setEnabled(false);
browse.setEnabled(false);
}
getContainer().updateButtons();
}
}
@Override
public void addPages() {
/*
* firstPage = new HadoopFirstPage(); addPage(firstPage ); addPage( new
* JavaProjectWizardSecondPage(firstPage) );
*/
firstPage = new HadoopFirstPage();
javaPage =
new NewJavaProjectWizardPage(ResourcesPlugin.getWorkspace()
.getRoot(), firstPage);
// newDriverPage = new NewDriverWizardPage(false);
// newDriverPage.setPageComplete(false); // ensure finish button
// initially disabled
addPage(firstPage);
addPage(javaPage);
// addPage(newDriverPage);
}
@Override
public boolean performFinish() {
try {
PlatformUI.getWorkbench().getProgressService().runInUI(
this.getContainer(), new IRunnableWithProgress() {
public void run(IProgressMonitor monitor) {
try {
monitor.beginTask("Create Hadoop Project", 300);
javaPage.getRunnable().run(
new SubProgressMonitor(monitor, 100));
// if( firstPage.generateDriver.getSelection())
// {
// newDriverPage.setPackageFragmentRoot(javaPage.getNewJavaProject().getAllPackageFragmentRoots()[0],
// false);
// newDriverPage.getRunnable().run(new
// SubProgressMonitor(monitor,100));
// }
IProject project =
javaPage.getNewJavaProject().getResource().getProject();
IProjectDescription description = project.getDescription();
String[] existingNatures = description.getNatureIds();
String[] natures = new String[existingNatures.length + 1];
for (int i = 0; i < existingNatures.length; i++) {
natures[i + 1] = existingNatures[i];
}
natures[0] = MapReduceNature.ID;
description.setNatureIds(natures);
project.setPersistentProperty(new QualifiedName(
Activator.PLUGIN_ID, "hadoop.runtime.path"),
firstPage.currentPath);
project.setDescription(description,
new NullProgressMonitor());
String[] natureIds = project.getDescription().getNatureIds();
for (int i = 0; i < natureIds.length; i++) {
log.fine("Nature id # " + i + " > " + natureIds[i]);
}
monitor.worked(100);
monitor.done();
BasicNewProjectResourceWizard.updatePerspective(config);
} catch (CoreException e) {
// TODO Auto-generated catch block
log.log(Level.SEVERE, "CoreException thrown.", e);
} catch (InvocationTargetException e) {
// TODO Auto-generated catch block
e.printStackTrace();
} catch (InterruptedException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
}
}, null);
} catch (InvocationTargetException e) {
// TODO Auto-generated catch block
e.printStackTrace();
} catch (InterruptedException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
return true;
}
public void setInitializationData(IConfigurationElement config,
String propertyName, Object data) throws CoreException {
this.config = config;
}
}
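
A minimal sketch (assuming workbench context; the helper class name is made up) of how other plug-in code can recognize a project created by this wizard and read back the Hadoop path that performFinish() stores as a persistent property:

package org.apache.hadoop.eclipse;

import org.eclipse.core.resources.IProject;
import org.eclipse.core.runtime.CoreException;
import org.eclipse.core.runtime.QualifiedName;

/** Hypothetical helper for querying projects configured by NewMapReduceProjectWizard. */
class MapReduceProjectQuerySketch {
  /** True if the project carries the MapReduce nature added in performFinish(). */
  static boolean isMapReduceProject(IProject project) throws CoreException {
    return project.isOpen() && project.hasNature(MapReduceNature.ID);
  }

  /** Returns the Hadoop install path stored on the project, or null if never set. */
  static String getHadoopRuntimePath(IProject project) throws CoreException {
    return project.getPersistentProperty(
        new QualifiedName(Activator.PLUGIN_ID, "hadoop.runtime.path"));
  }
}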

Some files were not shown because too many files have changed in this diff.