(6) A Simple Network-Disk Application Based on Hadoop, Part 2
2019-02-12 12:36:03

This article originally appeared on the https://shuwoom.com blog. You are welcome to visit!

1. Calling the Hadoop API to implement file upload, download, deletion, directory creation, and listing

(1) Add the required Hadoop JARs.

A. First, extract hadoop-1.1.2.tar.gz to a local disk.

B. Right-click the project and choose Build Path → Configure Build Path;

C. Add the JARs under the hadoop-1.1.2 folder;

Also add all the JARs under the lib folder (note: do not include jasper-compiler-5.5.12.jar or jasper-runtime-5.5.12.jar, or you will get errors).

Note: after adding these JARs to the build path, you also need to copy them into the WEB-INF/lib directory, which can be done as follows:

Select the project, right-click and choose “Properties”, then select Deployment Assembly.

Click Add and choose Java Build Path Entries.

Then select all the JARs you just added and click Finish.

D. Create the Java project

Create the HdfsDAO class:

package com.model;

import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.JobConf;

public class HdfsDAO {

    //HDFS access address
    private static final String HDFS = "hdfs://192.168.1.104:9000";

    public HdfsDAO(Configuration conf) {
        this(HDFS, conf);
    }

    public HdfsDAO(String hdfs, Configuration conf) {
        this.hdfsPath = hdfs;
        this.conf = conf;
    }

    //HDFS path
    private String hdfsPath;
    //Hadoop configuration
    private Configuration conf;

     
    
    //entry point for testing
    public static void main(String[] args) throws IOException {
        JobConf conf = config();
        HdfsDAO hdfs = new HdfsDAO(conf);
        //hdfs.mkdirs("/Tom");
        //hdfs.copyFile("C:\\files", "/wgc/");
         hdfs.ls("hdfs://192.168.1.104:9000/wgc/files");
        //hdfs.rmr("/wgc/files");
        //hdfs.download("/wgc/(3)windows下hadoop+eclipse环境搭建.docx", "c:\\");
        //System.out.println("success!");
    }        
    
    //load the Hadoop configuration files
    public  static JobConf config(){
        JobConf conf = new JobConf(HdfsDAO.class);
        conf.setJobName("HdfsDAO");
        conf.addResource("classpath:/hadoop/core-site.xml");
        conf.addResource("classpath:/hadoop/hdfs-site.xml");
        conf.addResource("classpath:/hadoop/mapred-site.xml");
        return conf;
    }

    //create a folder under the root directory
    public void mkdirs(String folder) throws IOException {
        Path path = new Path(folder);
        FileSystem fs = FileSystem.get(URI.create(hdfsPath), conf);
        if (!fs.exists(path)) {
            fs.mkdirs(path);
            System.out.println("Create: " + folder);
        }
        fs.close();
    }
    
    //list the files in a folder
    public FileStatus[] ls(String folder) throws IOException {
        Path path = new Path(folder);
        FileSystem fs = FileSystem.get(URI.create(hdfsPath), conf);
        FileStatus[] list = fs.listStatus(path);
        System.out.println("ls: " + folder);
        System.out.println("==========================================================");
        if (list != null) {
            for (FileStatus f : list) {
                //System.out.printf("name: %s, folder: %s, size: %d\n", f.getPath(), f.isDir(), f.getLen());
                System.out.printf("%s, folder: %s, size: %dK\n", f.getPath().getName(), (f.isDir() ? "directory" : "file"), f.getLen() / 1024);
            }
        }
        System.out.println("==========================================================");
        fs.close();
        
        return  list;
    }
    
    
    //upload a local file or folder to HDFS
    public void copyFile(String local, String remote) throws IOException {
        FileSystem fs = FileSystem.get(URI.create(hdfsPath), conf);
        //remote: /<user>/<file or folder under that user>
        fs.copyFromLocalFile(new Path(local), new Path(remote));
         System.out.println("copy from: " + local + " to " + remote);
        fs.close();
    }
    
    //delete a file or folder
    public void rmr(String folder) throws IOException {
        Path path = new Path(folder);
        FileSystem fs = FileSystem.get(URI.create(hdfsPath), conf);
        //delete immediately and recursively (deleteOnExit would only delete when the FileSystem closes)
        fs.delete(path, true);
        System.out.println("Delete: " + folder);
        fs.close();
    }
    
    
    //download a file to the local filesystem
    public void download(String remote, String local) throws IOException {
        Path path = new Path(remote);
        FileSystem fs = FileSystem.get(URI.create(hdfsPath), conf);
        fs.copyToLocalFile(path, new Path(local));
        System.out.println("download: from" + remote + " to " + local);
        fs.close();
    }
    
    
}
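The config() method loads the Hadoop configuration files from the classpath (hadoop/core-site.xml and friends). As a minimal sketch, a core-site.xml consistent with the HDFS constant in the class might look like this; the address is inferred from that constant rather than copied from the original project:

<?xml version="1.0"?>
<!-- minimal sketch of classpath:/hadoop/core-site.xml; the NameNode address
     is an assumption based on the HDFS constant in HdfsDAO -->
<configuration>
  <property>
    <name>fs.default.name</name>
    <value>hdfs://192.168.1.104:9000</value>
  </property>
</configuration>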

Before testing, make sure Hadoop is running;
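With Hadoop 1.x, starting the daemons from the command line typically looks like the sketch below; the HADOOP_HOME path is an assumption:

# a minimal sketch, assuming HADOOP_HOME points at the extracted hadoop-1.1.2 directory
$HADOOP_HOME/bin/start-all.sh    # starts NameNode, DataNode, JobTracker, TaskTracker
jps                              # verify that the daemons are running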

Run the program to test it:

The other methods also tested successfully; they are not listed one by one here.
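For reference, exercising the remaining methods is just a matter of uncommenting the corresponding calls in main; a quick sketch, reusing the example paths from those commented-out lines:

        JobConf conf = HdfsDAO.config();
        HdfsDAO hdfs = new HdfsDAO(conf);
        hdfs.mkdirs("/Tom");                  //create a folder
        hdfs.copyFile("C:\\files", "/wgc/");  //upload a local file
        hdfs.rmr("/wgc/files");               //delete a file or folder
        hdfs.download("/wgc/(3)windows下hadoop+eclipse环境搭建.docx", "c:\\");  //download to the local disk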

2. Wiring the web front end to the Hadoop API

Open the UploadServlet file and modify it as follows:

package com.controller;

import java.io.File;
import java.io.IOException;
import java.util.Iterator;
import java.util.List;

import javax.servlet.ServletContext;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

import org.apache.commons.fileupload.FileItem;
import org.apache.commons.fileupload.disk.DiskFileItemFactory;
import org.apache.commons.fileupload.servlet.ServletFileUpload;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.mapred.JobConf;

import com.model.HdfsDAO;
 

/**
 * Servlet implementation class UploadServlet
 */
public class UploadServlet extends HttpServlet {
 
	/**
	 * @see HttpServlet#doGet(HttpServletRequest request, HttpServletResponse response)
	 */
	protected void doGet(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException {
		this.doPost(request, response);
	}

	/**
	 * @see HttpServlet#doPost(HttpServletRequest request, HttpServletResponse response)
	 */
	protected void doPost(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException {
		   request.setCharacterEncoding("UTF-8");
		   File file;
		   int maxFileSize = 50 * 1024 *1024;  //50M
		   int maxMemSize = 50 * 1024 *1024;    //50M
		   ServletContext context = getServletContext();
		   String filePath = context.getInitParameter("file-upload");
			System.out.println("source file path:"+filePath+"");
		   // 验证上传内容了类型
		   String contentType = request.getContentType();
		   if ((contentType.indexOf("multipart/form-data") >= 0)) {

		      DiskFileItemFactory factory = new DiskFileItemFactory();
		      // maximum size of a file kept in memory
		      factory.setSizeThreshold(maxMemSize);
		      // files larger than maxMemSize are written to this local directory
		      factory.setRepository(new File("c:\\temp"));

		      // create a new file upload handler
		      ServletFileUpload upload = new ServletFileUpload(factory);
		      // set the maximum upload file size
		      upload.setSizeMax( maxFileSize );
		      try{ 
		         // parse the request into file items
		         List fileItems = upload.parseRequest(request);

		         // process the uploaded files
		         Iterator i = fileItems.iterator();

		         System.out.println("begin to upload file to tomcat server</p>"); 
		         while ( i.hasNext () ) 
		         {
		            FileItem fi = (FileItem)i.next();
		            if ( !fi.isFormField () )	
		            {
		            // get the uploaded file's parameters
		            String fieldName = fi.getFieldName();
		            String fileName = fi.getName();
		            
		            String fn = fileName.substring( fileName.lastIndexOf("\\")+1);
		            System.out.println("<br>"+fn+"<br>");
		            boolean isInMemory = fi.isInMemory();
		            long sizeInBytes = fi.getSize();
		            // write the file under the configured path
		            if( fileName.lastIndexOf("\\") >= 0 ){
		            file = new File( filePath , 
		            fileName.substring( fileName.lastIndexOf("\\"))) ;
		            //out.println("filename"+fileName.substring( fileName.lastIndexOf("\\"))+"||||||");
		            }else{
		            file = new File( filePath ,
		            fileName.substring(fileName.lastIndexOf("\\")+1)) ;
		            }
		            fi.write( file ) ;
		            System.out.println("upload file to tomcat server success!");
		            
		            System.out.println("begin to upload file to hadoop hdfs");
		            //upload the file from the Tomcat server to HDFS
		            JobConf conf = HdfsDAO.config();
		            HdfsDAO hdfs = new HdfsDAO(conf);
		            hdfs.copyFile(filePath + "\\" + fn, "/wgc/" + fn);
		            System.out.println("upload file to hadoop hdfs success!");
		            	
		            request.getRequestDispatcher("index.jsp").forward(request, response);
		            
		            }
		         }
		      }catch(Exception ex) {
		         System.out.println(ex);
		      }
		   }else{
		      System.out.println("No file uploaded");
		   }
	}

}
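Note that the servlet reads its upload directory from the servlet context init parameter "file-upload" via context.getInitParameter("file-upload"). A minimal web.xml sketch wiring this up could look like the following; the param-value path and url-pattern are assumptions, not taken from the original project:

<!-- minimal sketch of WEB-INF/web.xml -->
<web-app>
  <context-param>
    <param-name>file-upload</param-name>
    <!-- local directory where Tomcat first stores uploads; an assumption -->
    <param-value>c:\temp\upload</param-value>
  </context-param>
  <servlet>
    <servlet-name>UploadServlet</servlet-name>
    <servlet-class>com.controller.UploadServlet</servlet-class>
  </servlet>
  <servlet-mapping>
    <servlet-name>UploadServlet</servlet-name>
    <url-pattern>/UploadServlet</url-pattern>
  </servlet-mapping>
</web-app>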


Start the Tomcat server to test:

Before uploading, the listing of the wgc folder in HDFS is as follows:

Next we upload: (4)通过调用hadoop的java api实现本地文件上传到hadoop文件系统上.docx

On the Tomcat server, we can see the file we just uploaded:

Open http://hadoop:50070/ to browse the filesystem, where the newly uploaded file is visible:

At this point, a bare-bones network-disk upload feature is working. Next we will do some styling work on it to make it look nicer.

References:

http://blog.fens.me/hadoop-hdfs-api/

