Outputting MapReduce Results to HBase
This post writes MapReduce results directly into HBase, using Hadoop 1.0.4 and HBase 0.94, both installed in pseudo-distributed mode.
1. Hadoop installation is not covered here.
2. HBase installation is described below.
First, extract the HBase package into the /home/hadoop directory.
Configure the hosts file as follows:
  192.168.0.101 hadoop.master
Then configure hbase-site.xml with the following content:
  <configuration>
    <property>
      <name>hbase.rootdir</name>
      <value>hdfs://hadoop.master:9000/hbase</value>
    </property>
    <property>
      <name>hbase.cluster.distributed</name>
      <value>true</value>
    </property>
    <property>
      <name>hbase.zookeeper.quorum</name>
      <value>hadoop.master</value>
    </property>
    <property>
      <name>hbase.zookeeper.property.dataDir</name>
      <value>/home/hadoop/zookeeper</value>
    </property>
    <property>
      <name>hbase.regionserver.handler.count</name>
      <value>100</value>
    </property>
    <property>
      <name>hbase.hregion.max.filesize</name>
      <value>8589934592</value>
    </property>
    <property>
      <name>hfile.block.cache.size</name>
      <value>0.3</value>
    </property>
    <property>
      <name>dfs.replication</name>
      <value>1</value>
    </property>
  </configuration>
With hbase-site.xml done, configure hbase-env.sh; only the settings I changed are shown here:
  export JAVA_HOME=/usr/jdk1.6.0_22
  export HBASE_OPTS="-XX:+UseConcMarkSweepGC"
  export HBASE_MANAGES_ZK=true
The last option must be enabled: with HBASE_MANAGES_ZK=true, HBase starts and manages its own ZooKeeper instance.

Finally, configure regionservers as follows:
  hadoop.master
Note: if you need to connect to HBase from a local client, stop the firewall with /sbin/service iptables stop.

Next, start HBase and create the table TestCars with a column family named Car, as shown below.
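From the HBase shell, this looks roughly as follows (a minimal sketch; commands are run from the HBase install directory):

  bin/start-hbase.sh          # start the HBase daemons
  bin/hbase shell             # open the HBase shell
  create 'TestCars', 'Car'    # inside the shell: table TestCars, column family Car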
Prepare the data:
  Acura,Integra,Small
  Acura,Legend,Midsize
  Audi,90,Compact
  Audi,100,Midsize
  BMW,535i,Midsize
  Buick,Century,Midsize
  Buick,LeSabre,Large
  Buick,Roadmaster,Large
  Buick,Riviera,Midsize
  Cadillac,DeVille,Large
  Cadillac,Seville,Midsize
Upload the data to the Hadoop file system:
  hadoop fs -copyFromLocal /home/hadoop/Car.txt /home/hadoop/input
Before running the MapReduce job, hbase-0.94.6.jar, zookeeper-3.4.5.jar, and protobuf-java-2.4.0a.jar must be added to Hadoop's lib directory; alternatively, ship these three jars with the job when you submit it.
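Copying the jars into Hadoop's lib directory could look like this (a sketch only; the source paths assume HBase 0.94.6 is installed under /home/hadoop and will differ on other setups):

  # make the HBase client classes visible to Hadoop and its tasks
  cp /home/hadoop/hbase-0.94.6/hbase-0.94.6.jar $HADOOP_HOME/lib/
  cp /home/hadoop/hbase-0.94.6/lib/zookeeper-3.4.5.jar $HADOOP_HOME/lib/
  cp /home/hadoop/hbase-0.94.6/lib/protobuf-java-2.4.0a.jar $HADOOP_HOME/lib/
  # restart the Hadoop daemons so the TaskTrackers pick up the new jars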
The full implementation:
  package com.duplicate;

  import java.io.IOException;
  import java.util.ArrayList;
  import java.util.List;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.client.HTableInterface;
  import org.apache.hadoop.hbase.client.HTablePool;
  import org.apache.hadoop.hbase.client.Put;
  import org.apache.hadoop.hbase.util.Bytes;
  import org.apache.hadoop.io.Text;
  import org.apache.hadoop.mapreduce.Job;
  import org.apache.hadoop.mapreduce.Mapper;
  import org.apache.hadoop.mapreduce.Reducer;
  import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
  import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;

  public class OutputHbase {

    private static Logger logger = LoggerFactory.getLogger(OutputHbase.class);

    // Splits each CSV line into brand, model, size and emits <brand, "model,size">.
    public static class Map extends Mapper<Object, Text, Text, Text> {
      private Text outKey = new Text();
      private Text outVal = new Text();

      public void map(Object key, Text value, Context context)
          throws IOException, InterruptedException {
        String[] valueSplitted = value.toString().split(",");
        if (valueSplitted.length == 3) {
          String brand = valueSplitted[0];
          String model = valueSplitted[1];
          String size = valueSplitted[2];
          outKey.set(brand);
          outVal.set(model + "," + size);
          context.write(outKey, outVal);
        }
      }
    }

    // Writes one row per car into the TestCars table, keyed by "brand,model",
    // with the brand, model and size columns in the Car column family.
    public static class Reduce extends Reducer<Text, Text, Text, Text> {
      private HTablePool pool = null;
      private HTableInterface testHTable = null;
      private List<Put> testListPut = new ArrayList<Put>();

      @Override
      public void setup(Context context) {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "192.168.0.101");
        pool = new HTablePool(conf, 10);
        testHTable = pool.getTable("TestCars");
      }

      @Override
      public void reduce(Text key, Iterable<Text> values, Context context)
          throws IOException, InterruptedException {
        String brand = key.toString();

        for (Text tx : values) {
          String[] valueSplitted = tx.toString().split(",");
          if (valueSplitted.length == 2) {
            String model = valueSplitted[0];
            String size = valueSplitted[1];

            byte[] putKey = Bytes.toBytes(brand + "," + model);
            byte[] putFamily = Bytes.toBytes("Car");
            Put put = new Put(putKey);

            put.add(putFamily, Bytes.toBytes("brand"), Bytes.toBytes(brand));
            put.add(putFamily, Bytes.toBytes("model"), Bytes.toBytes(model));
            put.add(putFamily, Bytes.toBytes("size"), Bytes.toBytes(size));
            testListPut.add(put);
          }
        } // end for

        testHTable.put(testListPut);
        testHTable.flushCommits();
        // Clear the batch so the same puts are not sent again for the next key.
        testListPut.clear();
      }

      @Override
      public void cleanup(Context context) throws IOException {
        if (null != testHTable) {
          testHTable.close();
        }
        if (null != pool) {
          pool.close();
        }
      }
    }

    public static void main(String[] args)
        throws IOException, InterruptedException, ClassNotFoundException {
      Configuration conf = new Configuration();

      Job job = new Job(conf, "OutputHbase");
      // TableMapReduceUtil.addDependencyJars(job);
      job.setJarByClass(OutputHbase.class);
      job.setMapperClass(Map.class);
      job.setReducerClass(Reduce.class);

      job.setOutputKeyClass(Text.class);
      job.setOutputValueClass(Text.class);
      FileInputFormat.addInputPath(job, new Path(args[0]));
      FileOutputFormat.setOutputPath(job, new Path(args[1]));
      System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
  }
Run the job with:
  hadoop jar /home/hadoop/dedup.jar com.duplicate.OutputHbase /home/hadoop/input/* /home/hadoop/output
There are two ways to check the results: directly in the HBase shell, or by reading the rows back programmatically.
HBase shell query:
  scan 'TestCars', {COLUMNS => 'Car'}
Java query (this example only reads back the row keys):
  package com.duplicate.local;

  import java.io.IOException;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.client.HTableInterface;
  import org.apache.hadoop.hbase.client.HTablePool;
  import org.apache.hadoop.hbase.client.Result;
  import org.apache.hadoop.hbase.client.ResultScanner;
  import org.apache.hadoop.hbase.client.Scan;
  import org.apache.hadoop.hbase.util.Bytes;

  public class ConnectionHbase {
    private static HTablePool pool = null;

    public static void main(String[] args) {
      ConnectionHbase hbase = new ConnectionHbase();
      hbase.run();
    }

    // Scans the TestCars table and prints each row key.
    public void run() {
      Configuration conf = HBaseConfiguration.create();
      HTableInterface testHTable = null;
      conf.set("hbase.zookeeper.quorum", "192.168.0.101");
      pool = new HTablePool(conf, 10);

      testHTable = pool.getTable("TestCars");
      Scan scan = new Scan();
      try {
        ResultScanner res = testHTable.getScanner(scan);
        for (Result rs : res) {
          System.out.println(Bytes.toString(rs.getRow()));
        }
        res.close();
      } catch (IOException e) {
        e.printStackTrace();
      }
    }
  }
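Run against the sample data above, this should print the eleven brand,model row keys the job created (Acura,Integra, Acura,Legend, Audi,100, and so on), in HBase's lexicographic row-key order.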