  • Java API operations on HDFS in Hadoop

    package cn.itcast.bigdata.hdfs;
    
    import java.net.URI;
    import java.util.Iterator;
    import java.util.Map.Entry;
    
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.LocatedFileStatus;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.RemoteIterator;
    import org.junit.Before;
    import org.junit.Test;
    
    /**
     * A client operating on HDFS always acts under some user identity.
     * By default the HDFS client API reads that identity from a JVM
     * parameter: -DHADOOP_USER_NAME=hadoop
     *
     * The identity can also be passed in directly when constructing
     * the client FileSystem object.
     */
    public class HdfsClientDemo {
        FileSystem fs = null;
        Configuration conf = null;
    
        @Before
        public void init() throws Exception {
            conf = new Configuration();
            conf.set("fs.defaultFS", "hdfs://master:9000");
    
            // Obtain a client instance for file system operations.
            /*fs = FileSystem.get(conf);*/
            // The URI and the user identity can also be passed in directly;
            // the last argument is the user name.
            fs = FileSystem.get(new URI("hdfs://master:9000"), conf, "hadoop");
        }
    
        @Test
        public void testUpload() throws Exception {
            Thread.sleep(2000);
            fs.copyFromLocalFile(new Path("G:/access.log"), new Path("/access.log.copy"));
            fs.close();
        }
    
        @Test
        public void testDownload() throws Exception {
            fs.copyToLocalFile(new Path("/access.log.copy"), new Path("d:/"));
            fs.close();
        }
    
        @Test
        public void testConf() {
            // Print every key/value pair the Configuration has loaded.
            Iterator<Entry<String, String>> iterator = conf.iterator();
            while (iterator.hasNext()) {
                Entry<String, String> entry = iterator.next();
                System.out.println(entry.getKey() + "--" + entry.getValue());
            }
        }
    
        /**
         * Create a directory.
         */
        @Test
        public void mkdirTest() throws Exception {
            boolean mkdirs = fs.mkdirs(new Path("/aaa/bbb"));
            System.out.println(mkdirs);
        }
    
        /**
         * Delete a path.
         */
        @Test
        public void deleteTest() throws Exception {
            boolean delete = fs.delete(new Path("/aaa"), true); // true: delete recursively
            System.out.println(delete);
        }
    
        @Test
        public void listTest() throws Exception {
            // List the immediate children of the root directory.
            FileStatus[] listStatus = fs.listStatus(new Path("/"));
            for (FileStatus fileStatus : listStatus) {
                System.err.println(fileStatus.getPath() + "=================" + fileStatus.toString());
            }
            // Recursively find all files under the root directory.
            RemoteIterator<LocatedFileStatus> listFiles = fs.listFiles(new Path("/"), true);
            while (listFiles.hasNext()) {
                LocatedFileStatus next = listFiles.next();
                String name = next.getPath().getName();
                Path path = next.getPath();
                System.out.println(name + "---" + path.toString());
            }
        }
    
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", "hdfs://master:9000");
            // Obtain a client instance for file system operations.
            FileSystem fs = FileSystem.get(conf);
    
            fs.copyFromLocalFile(new Path("G:/access.log"), new Path("/access.log.copy"));
            fs.close();
        }
    }
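
    As the class comment notes, the client's user identity can also come from the JVM itself rather than the three-argument FileSystem.get. A minimal sketch of that alternative, assuming the same hdfs://master:9000 cluster and "hadoop" user as above; the class name HdfsUserPropertyDemo is made up for illustration:

    package cn.itcast.bigdata.hdfs;
    
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    
    // Hypothetical companion to HdfsClientDemo: sets the user identity
    // through the HADOOP_USER_NAME system property instead of passing
    // it to FileSystem.get.
    public class HdfsUserPropertyDemo {
        public static void main(String[] args) throws Exception {
            // Equivalent to launching the JVM with -DHADOOP_USER_NAME=hadoop.
            // Must be set before the first FileSystem.get call, because the
            // client resolves its user identity when it is created.
            System.setProperty("HADOOP_USER_NAME", "hadoop");
    
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", "hdfs://master:9000");
            FileSystem fs = FileSystem.get(conf);
    
            System.out.println(fs.exists(new Path("/access.log.copy")));
            fs.close();
        }
    }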
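
    The demo above moves whole files with copyFromLocalFile/copyToLocalFile. The FileSystem API can also read and write HDFS file contents directly through streams, via open() and create(). A minimal sketch, assuming the same cluster and user as the demo; the /streamdemo.txt path and the class name HdfsStreamDemo are made up for illustration:

    package cn.itcast.bigdata.hdfs;
    
    import java.net.URI;
    import java.nio.charset.StandardCharsets;
    
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.IOUtils;
    
    public class HdfsStreamDemo {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            FileSystem fs = FileSystem.get(new URI("hdfs://master:9000"), conf, "hadoop");
    
            // Write bytes straight into a new HDFS file.
            FSDataOutputStream out = fs.create(new Path("/streamdemo.txt"));
            out.write("hello hdfs\n".getBytes(StandardCharsets.UTF_8));
            out.close();
    
            // Read the file back and copy its bytes to stdout.
            FSDataInputStream in = fs.open(new Path("/streamdemo.txt"));
            IOUtils.copyBytes(in, System.out, 4096, false);
            in.close();
    
            fs.close();
        }
    }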
  • Original post: https://www.cnblogs.com/Eddyer/p/6641778.html