Reading and Writing Parquet Files on HDFS

An in-depth look at the Parquet file format: https://www.infoq.cn/article/in-depth-analysis-of-parquet-column-storage-format

That article mainly covers the internals of Parquet, which is rather low-level for application work. What you need to know to read and write Parquet files from Java is only a subset of it: the compression codecs are already implemented inside the library, so you just pick one and call it (a Snappy variant is sketched after the code listing below).


Reading and writing Parquet files on HDFS from Java:

First, start HDFS:

start-dfs.sh

Then write the following Java code (main() below assumes the NameNode is listening at hdfs://127.0.0.1:9000):

package cn.edu.nju.zyf.parquetDemo01;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.log4j.Logger;
import org.apache.parquet.example.data.Group;
import org.apache.parquet.example.data.simple.SimpleGroupFactory;
import org.apache.parquet.hadoop.ParquetFileWriter;
import org.apache.parquet.hadoop.ParquetWriter;
import org.apache.parquet.hadoop.ParquetReader;
import org.apache.parquet.hadoop.example.GroupReadSupport;
import org.apache.parquet.hadoop.example.GroupWriteSupport;
import org.apache.parquet.hadoop.metadata.CompressionCodecName;
import org.apache.parquet.schema.*;

import java.io.IOException;


/**
 * @author zhuyuanfu
 * @version 2018-01-03
 * @description just a simple demo for writing and reading parquet files.
 */
public class ParquetWriteAndReadDemo {

    //One way to build a MessageType: parse a schema string.
    //(This parsed schema is only an illustration; the demo below actually uses
    // the Types builder in getMessageTypeFromCode().)
    private static Logger logger = Logger.getLogger(ParquetWriteAndReadDemo.class);
    private static String schemaString = "message schema {" + "optional int64 log_id;"
            + "optional binary idc_id;" + "optional int64 house_id;"
            + "optional int64 src_ip_long;" + "optional int64 dest_ip_long;"
            + "optional int64 src_port;" + "optional int64 dest_port;"
            + "optional int32 protocol_type;" + "optional binary url64;"
            + "optional binary access_time;}";
    private static MessageType schema = MessageTypeParser.parseMessageType(schemaString);

    private static MessageType getMessageTypeFromCode (){
        MessageType messageType = Types.buildMessage()
                        .required(PrimitiveType.PrimitiveTypeName.BINARY).as(OriginalType.UTF8).named("id")
                        .required(PrimitiveType.PrimitiveTypeName.BINARY).as(OriginalType.UTF8).named("name")
                        .required(PrimitiveType.PrimitiveTypeName.INT32).named("age")
                        .requiredGroup()
                        .required(PrimitiveType.PrimitiveTypeName.BINARY).as(OriginalType.UTF8).named("test1")
                        .required(PrimitiveType.PrimitiveTypeName.BINARY).as(OriginalType.UTF8).named("test2")
                        .named("group1")
                        .named("trigger");
        //System.out.println(messageType.toString());
        return messageType;
    }

    private static void writeParquetOnDisk(String fileName){

        //1. Build the Parquet MessageType (schema)
        MessageType messageType = getMessageTypeFromCode();

        //2. Set up the ParquetWriter
        Path path = new Path("/tmp/"+fileName);
        System.out.println(path);
        Configuration configuration = new Configuration();
        GroupWriteSupport.setSchema(messageType,configuration);
        GroupWriteSupport writeSupport = new GroupWriteSupport();

        //3. Write the data
        ParquetWriter<Group> writer = null;
        try{
            writer = new ParquetWriter<Group>(path,
                    ParquetFileWriter.Mode.OVERWRITE,
                    writeSupport,
                    CompressionCodecName.UNCOMPRESSED,
                    128*1024*1024,   // row group (block) size: 128 MB
                    5*1024*1024,     // page size: 5 MB
                    5*1024*1024,     // dictionary page size: 5 MB
                    ParquetWriter.DEFAULT_IS_DICTIONARY_ENABLED,
                    ParquetWriter.DEFAULT_IS_VALIDATING_ENABLED,
                    ParquetWriter.DEFAULT_WRITER_VERSION,
                    configuration);

            //4. Build each record as a Group and hand it to the writer
            for(int i = 0; i < 10; i ++){
                Group group = new SimpleGroupFactory(messageType).newGroup();
                group.append("name",i+"@ximalaya.com")
                        .append("id",i+"@id")
                        .append("age",i)
                        .addGroup("group1")
                        .append("test1","test1"+i)
                        .append("test2","test2"+i);
                writer.write(group);
            }
        }catch(IOException ioe){
            ioe.printStackTrace();
        }finally{
            if(writer != null) {
                try{
                    writer.close();
                }catch(IOException ioe){
                    ioe.printStackTrace();
                }
            }
        }
    }

    private static void readParquetFromDisk(String fileName){

        //1. Set up the GroupReadSupport
        GroupReadSupport groupReadSupport = new GroupReadSupport();
        Path path = new Path("/tmp/"+fileName);

        //2. Read the file back through a ParquetReader
        ParquetReader<Group> reader = null;
        try{
            reader = ParquetReader.builder(groupReadSupport,path).build();
            Group group = null;
            while((group = reader.read()) !=null){
                System.out.println(group);
            }
        }catch(IOException ioe){
            ioe.printStackTrace();
        }finally{
            if(reader != null) {
                try{
                    reader.close();
                }catch(IOException ioe){
                    ioe.printStackTrace();
                }
            }
        }
    }

    public static void writeParquetToHDFS(String ipAddr, String port, String filePath,String fileName){

        //1. Build the Parquet MessageType (schema)
        MessageType messageType = getMessageTypeFromCode();

        //2. Set up the ParquetWriter
        Path path = new Path("hdfs://"+ipAddr+":"+port+"/"+filePath+"/"+fileName);
        System.out.println(path);
        Configuration configuration = new Configuration();
        GroupWriteSupport.setSchema(messageType,configuration);
        GroupWriteSupport writeSupport = new GroupWriteSupport();

        //3. Write the data
        ParquetWriter<Group> writer = null;
        try{
            writer = new ParquetWriter<Group>(path,
                    ParquetFileWriter.Mode.OVERWRITE,
                    writeSupport,
                    CompressionCodecName.UNCOMPRESSED,
                    128*1024*1024,   // row group (block) size: 128 MB
                    5*1024*1024,     // page size: 5 MB
                    5*1024*1024,     // dictionary page size: 5 MB
                    ParquetWriter.DEFAULT_IS_DICTIONARY_ENABLED,
                    ParquetWriter.DEFAULT_IS_VALIDATING_ENABLED,
                    ParquetWriter.DEFAULT_WRITER_VERSION,
                    configuration);

            //4. Build each record as a Group and hand it to the writer
            for(int i = 0; i < 10; i ++){
                Group group = new SimpleGroupFactory(messageType).newGroup();
                group.append("name",i+"@ximalaya.com")
                        .append("id",i+"@id")
                        .append("age",i)
                        .addGroup("group1")
                        .append("test1","test1"+i)
                        .append("test2","test2"+i);
                writer.write(group);
            }
        }catch(IOException ioe){
            ioe.printStackTrace();
        }finally{
            if(writer != null) {
                try{
                    writer.close();
                }catch(IOException ioe){
                    ioe.printStackTrace();
                }
            }
        }
    }

    public static void readParquetFromHDFS(String ipAddr, String port, String filePath,String fileName){
        //1. Set up the GroupReadSupport
        GroupReadSupport groupReadSupport = new GroupReadSupport();
        Path path = new Path("hdfs://"+ipAddr+":"+port+"/"+filePath+"/"+fileName);

        //2. Read the file back through a ParquetReader
        ParquetReader<Group> reader = null;
        try{
            reader = ParquetReader.builder(groupReadSupport,path).build();
            Group group = null;
            while((group = reader.read()) !=null){
                System.out.println(group);
            }
        }catch(IOException ioe){
            ioe.printStackTrace();
        }finally{
            if(reader != null) {
                try{
                    reader.close();
                }catch(IOException ioe){
                    ioe.printStackTrace();
                }
            }
        }
    }

    public static void main(String[] args){
        //writeParquetOnDisk("test1.parq");
        //readParquetFromDisk("test1.parq");
        writeParquetToHDFS("127.0.0.1","9000","/tmp","test1.parq");
        readParquetFromHDFS("127.0.0.1","9000","/tmp","test1.parq");
    }
}
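
As noted at the top, the compression codecs ship with the library: to write Snappy-compressed files, the only change needed is the codec argument in step //3 above. A minimal sketch of that change (snappy-java already comes in transitively with parquet-hadoop 1.7.0, so no native Hadoop codec should be required):

            writer = new ParquetWriter<Group>(path,
                    ParquetFileWriter.Mode.OVERWRITE,
                    writeSupport,
                    CompressionCodecName.SNAPPY,   // the only change: Snappy instead of UNCOMPRESSED
                    128*1024*1024,                 // row group (block) size
                    5*1024*1024,                   // page size
                    5*1024*1024,                   // dictionary page size
                    ParquetWriter.DEFAULT_IS_DICTIONARY_ENABLED,
                    ParquetWriter.DEFAULT_IS_VALIDATING_ENABLED,
                    ParquetWriter.DEFAULT_WRITER_VERSION,
                    configuration);

The reader side does not change at all: the codec is recorded in the file footer and is picked up automatically when the file is read back.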

pom.xml dependencies:

    <dependencies>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-client</artifactId>
            <version>2.8.1</version>
        </dependency>
        <dependency>
            <groupId>org.apache.parquet</groupId>
            <artifactId>parquet-hadoop</artifactId>
            <version>1.7.0</version>
        </dependency>
    </dependencies>

Run output:

/Library/Java/JavaVirtualMachines/jdk1.8.0_192.jdk/Contents/Home/bin/java -Dfile.encoding=UTF-8 -classpath <project classes plus hadoop-client 2.8.1, parquet-hadoop 1.7.0 and their transitive jars> cn.edu.nju.zyf.parquetDemo01.ParquetWriteAndReadDemo
hdfs://127.0.0.1:9000/tmp/test1.parq
SLF4J: Failed to load class "org.slf4j.impl.StaticLoggerBinder".
SLF4J: Defaulting to no-operation (NOP) logger implementation
SLF4J: See http://www.slf4j.org/codes.html#StaticLoggerBinder for further details.
id: 0@id
name: 0@ximalaya.com
age: 0
group1
  test1: test10
  test2: test20

id: 1@id
name: 1@ximalaya.com
age: 1
group1
  test1: test11
  test2: test21

id: 2@id
name: 2@ximalaya.com
age: 2
group1
  test1: test12
  test2: test22

id: 3@id
name: 3@ximalaya.com
age: 3
group1
  test1: test13
  test2: test23

id: 4@id
name: 4@ximalaya.com
age: 4
group1
  test1: test14
  test2: test24

id: 5@id
name: 5@ximalaya.com
age: 5
group1
  test1: test15
  test2: test25

id: 6@id
name: 6@ximalaya.com
age: 6
group1
  test1: test16
  test2: test26

id: 7@id
name: 7@ximalaya.com
age: 7
group1
  test1: test17
  test2: test27

id: 8@id
name: 8@ximalaya.com
age: 8
group1
  test1: test18
  test2: test28

id: 9@id
name: 9@ximalaya.com
age: 9
group1
  test1: test19
  test2: test29

2019-1-4 18:17:21 INFO: org.apache.parquet.hadoop.InternalParquetRecordWriter: Flushing mem columnStore to file. allocated memory: 700
2019-1-4 18:17:21 INFO: org.apache.parquet.hadoop.ColumnChunkPageWriteStore: written 115B for [id] BINARY: 10 values, 80B raw, 80B comp, 1 pages, encodings: [BIT_PACKED, PLAIN]
2019-1-4 18:17:21 INFO: org.apache.parquet.hadoop.ColumnChunkPageWriteStore: written 235B for [name] BINARY: 10 values, 180B raw, 180B comp, 1 pages, encodings: [BIT_PACKED, PLAIN]
2019-1-4 18:17:21 INFO: org.apache.parquet.hadoop.ColumnChunkPageWriteStore: written 73B for [age] INT32: 10 values, 40B raw, 40B comp, 1 pages, encodings: [BIT_PACKED, PLAIN]
2019-1-4 18:17:21 INFO: org.apache.parquet.hadoop.ColumnChunkPageWriteStore: written 139B for [group1, test1] BINARY: 10 values, 100B raw, 100B comp, 1 pages, encodings: [BIT_PACKED, PLAIN]
2019-1-4 18:17:21 INFO: org.apache.parquet.hadoop.ColumnChunkPageWriteStore: written 139B for [group1, test2] BINARY: 10 values, 100B raw, 100B comp, 1 pages, encodings: [BIT_PACKED, PLAIN]
2019-1-4 18:17:21 INFO: org.apache.parquet.hadoop.ParquetFileReader: Initiating action with parallelism: 5
2019-1-4 18:17:21 INFO: org.apache.parquet.hadoop.ParquetFileReader: reading another 1 footers
2019-1-4 18:17:21 INFO: org.apache.parquet.hadoop.ParquetFileReader: Initiating action with parallelism: 5
2019-1-4 18:17:21 INFO: org.apache.parquet.hadoop.InternalParquetRecordReader: RecordReader initialized will read a total of 10 records.
2019-1-4 18:17:21 INFO: org.apache.parquet.hadoop.InternalParquetRecordReader: at row 0. reading next block
2019-1-4 18:17:21 INFO: org.apache.parquet.hadoop.InternalParquetRecordReader: block read in memory in 14 ms. row count = 10

Process finished with exit code 0

Not bad.
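
One more note: the readers above always materialize every column. Since Parquet is a column store, you can hand GroupReadSupport a projection schema via the parquet.read.schema property and read only the columns you need. A minimal sketch (assuming the ReadSupport.PARQUET_READ_SCHEMA constant and the ParquetReader.Builder.withConf(...) method shipped with parquet-hadoop 1.7.0; ParquetProjectionReadDemo is just a throwaway class name):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.parquet.example.data.Group;
import org.apache.parquet.hadoop.ParquetReader;
import org.apache.parquet.hadoop.api.ReadSupport;
import org.apache.parquet.hadoop.example.GroupReadSupport;

public class ParquetProjectionReadDemo {

    public static void main(String[] args) throws Exception {
        // Projection schema: a subset of the "trigger" schema written above.
        // GroupReadSupport picks it up from the parquet.read.schema property.
        Configuration configuration = new Configuration();
        configuration.set(ReadSupport.PARQUET_READ_SCHEMA,
                "message trigger {"
                        + "required binary id (UTF8);"
                        + "required int32 age;}");

        Path path = new Path("hdfs://127.0.0.1:9000/tmp/test1.parq");
        ParquetReader<Group> reader = ParquetReader
                .builder(new GroupReadSupport(), path)
                .withConf(configuration)
                .build();
        try {
            Group group;
            while ((group = reader.read()) != null) {
                System.out.println(group);   // each Group now contains only id and age
            }
        } finally {
            reader.close();
        }
    }
}

If this behaves as expected, only the id and age column chunks are fetched, which is where the columnar layout described in the InfoQ article starts to pay off.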
