MapReduce Advanced Case Study ③

Compressing and Decompressing Data Streams

CompressionCodec provides two methods that make it easy to compress or decompress data. To compress data being written to an output stream, call createOutputStream(OutputStream out) to obtain a CompressionOutputStream, which writes data to the underlying stream in compressed form. Conversely, to decompress data read from an input stream, call createInputStream(InputStream in) to obtain a CompressionInputStream, which reads uncompressed data from the underlying stream.

Format      Codec class
DEFLATE     org.apache.hadoop.io.compress.DefaultCodec
gzip        org.apache.hadoop.io.compress.GzipCodec
bzip2       org.apache.hadoop.io.compress.BZip2Codec
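
The essence of the two calls, as a minimal sketch (the class name CodecSketch and the file demo.bz2 are illustrative; the full test class in the next section does the same against real files):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.compress.BZip2Codec;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionInputStream;
import org.apache.hadoop.io.compress.CompressionOutputStream;
import org.apache.hadoop.util.ReflectionUtils;

import java.io.*;

public class CodecSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // ReflectionUtils injects the Configuration into the codec instance
        CompressionCodec codec = ReflectionUtils.newInstance(BZip2Codec.class, conf);

        // Compress: wrap the raw output stream and write through the wrapper
        CompressionOutputStream cos = codec.createOutputStream(new FileOutputStream("demo.bz2"));
        cos.write("hello hadoop".getBytes());
        cos.close();

        // Decompress: wrap the raw input stream and read the original bytes back
        CompressionInputStream cis = codec.createInputStream(new FileInputStream("demo.bz2"));
        IOUtils.copyBytes(cis, System.out, 4096, true);
    }
}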

Test

package com.kami.demo03;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;
import org.apache.hadoop.io.compress.CompressionOutputStream;
import org.apache.hadoop.util.ReflectionUtils;

import java.io.*;

/**
 * @version v 1.0
 * @Author kamisamak
 * @Date 2020/6/15
 */
public class TestCompress {
    public static void main(String[] args) throws IOException, ClassNotFoundException {
//        compress("data\\d03\\102468-106.jpg", "org.apache.hadoop.io.compress.BZip2Codec");
        decompress("data\\d03\\102468-106.jpg.bz2");
    }

    /**
     * Compress a file.
     * filename: path of the file to compress
     * method: fully qualified class name of the codec to use (e.g. org.apache.hadoop.io.compress.BZip2Codec)
     */
    public static void compress(String filename, String method) throws IOException, ClassNotFoundException {
        // Input stream for the file to be compressed
        BufferedInputStream bis = new BufferedInputStream(new FileInputStream(new File(filename)));
        // Load the codec class by its fully qualified name
        Class<?> codecClass = Class.forName(method);
        Configuration conf = new Configuration();
        // Instantiate the codec via ReflectionUtils so its Configuration is injected
        CompressionCodec compressionCodec = (CompressionCodec) ReflectionUtils.newInstance(codecClass, conf);
        // Output file: original name plus the codec's default extension (".bz2" for bzip2)
        File fileOut = new File(filename + compressionCodec.getDefaultExtension());
        FileOutputStream fileOutputStream = new FileOutputStream(fileOut);
        CompressionOutputStream cout = compressionCodec.createOutputStream(fileOutputStream);
        // Copy the streams with a 10 MB buffer; false means copyBytes leaves the streams open
        IOUtils.copyBytes(bis, cout, 1024 * 1024 * 10, false);
        // Close resources explicitly (closing cout finalizes the compressed output)
        bis.close();
        cout.close();
        fileOutputStream.close();
    }

    /**
     * Decompress a file.
     * filename: path of the compressed file
     */
    public static void decompress(String filename) throws IOException {
        Configuration conf = new Configuration();
        CompressionCodecFactory factory = new CompressionCodecFactory(conf);
        // Infer the codec from the file extension
        CompressionCodec codec = factory.getCodec(new Path(filename));
        // Bail out if no codec matches the extension
        if (null == codec) {
            System.out.println("Cannot find codec for file " + filename);
            return;
        }
        // Input stream that decompresses the file's contents on the fly
        BufferedInputStream bis = new BufferedInputStream(codec.createInputStream(new FileInputStream(filename)));
        // Output stream for the decompressed copy
        File fout = new File(filename + ".decoded");
        BufferedOutputStream bos = new BufferedOutputStream(new FileOutputStream(fout));
        // Copy the streams with a 10 MB buffer
        IOUtils.copyBytes(bis, bos, 1024 * 1024 * 10, false);
        bis.close();
        bos.close();
    }
}
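
Running compress("data\\d03\\102468-106.jpg", "org.apache.hadoop.io.compress.BZip2Codec") produces 102468-106.jpg.bz2 next to the original file; decompress("data\\d03\\102468-106.jpg.bz2") then writes the restored bytes to 102468-106.jpg.bz2.decoded.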

Enabling Compression on the Map Output

Even if your MapReduce job's input and output files are uncompressed, you can still compress the intermediate output of the map tasks. Because that data is written to local disk and shipped across the network to the reduce nodes, compressing it can yield a substantial performance gain, and it takes only two configuration properties:
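
ruaDriver01 below sets mapreduce.map.output.compress and mapreduce.map.output.compress.codec per job through the Configuration object; the same two keys can also be set cluster-wide in mapred-site.xml so that every job inherits them.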

ruaDriver01

package com.kami.demo03;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.BZip2Codec;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

/**
 * @version v 1.0
 * @Author kamisamak
 * @Date 2020/6/15
 */
public class ruaDriver01 {
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        Configuration configuration = new Configuration();

        // Enable map-side output compression
        configuration.setBoolean("mapreduce.map.output.compress", true);
        // Set the map-side output compression codec
        configuration.setClass("mapreduce.map.output.compress.codec", BZip2Codec.class, CompressionCodec.class);

        Job job = Job.getInstance(configuration);

        job.setJarByClass(ruaDriver01.class);

        job.setMapperClass(ruaMap01.class);
        job.setReducerClass(ruaReduce01.class);

        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

//        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileInputFormat.setInputPaths(job, new Path("data\\d01"));
//        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        FileOutputFormat.setOutputPath(job, new Path("output\\d08"));

        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}
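
Note that map-output compression is transparent to the rest of the job: only the intermediate data written during the shuffle is compressed, and the final files in output\\d08 are still plain text.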

ruaMap01

package com.kami.demo03;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

/**
 * @version v 1.0
 * @Author kamisamak
 * @Date 2020/6/15
 */
public class ruaMap01 extends Mapper<LongWritable, Text, Text, IntWritable> {
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // Tokenize the line on spaces and emit (word, 1) for each token
        String line = value.toString();
        String[] words = line.split(" ");
        for (String word : words) {
            context.write(new Text(word), new IntWritable(1));
        }
    }
}
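
As a refinement not in the original post: Hadoop serializes key/value objects at the moment context.write is called, so a mapper can safely reuse a single Text and IntWritable instead of allocating new ones per record. A sketch (the class name ruaMap01Reuse is illustrative; imports as in ruaMap01 above):

public class ruaMap01Reuse extends Mapper<LongWritable, Text, Text, IntWritable> {
    private final Text word = new Text();
    private final IntWritable one = new IntWritable(1);

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        for (String token : value.toString().split(" ")) {
            word.set(token);          // overwrite the reused Text in place
            context.write(word, one); // safe: the framework serializes on write
        }
    }
}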

ruaReduce01

package com.kami.demo03;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

/**
 * @version v 1.0
 * @Author Toenc
 * @Date 2020/6/15
 */
public class ruaReduce01 extends Reducer<Text, IntWritable, Text, IntWritable> {
    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
        // Sum the 1s emitted by the mapper for this word
        int count = 0;
        for (IntWritable value : values) {
            count += value.get();
        }
        context.write(key, new IntWritable(count));
    }
}

Enabling Compression on the Reduce Output

Compression of a job's final output is configured through FileOutputFormat rather than Configuration properties:

ruaDriver02

package com.kami.demo03;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.BZip2Codec;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

/**
 * @version v 1.0
 * @Author kamisamak
 * @Date 2020/6/15
 */
public class ruaDriver02 {
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        Configuration configuration = new Configuration();

        Job job = Job.getInstance(configuration);
        job.setJarByClass(ruaDriver02.class);

        job.setMapperClass(ruaMap01.class);
        job.setReducerClass(ruaReduce01.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

//        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileInputFormat.setInputPaths(job, new Path("data\\d01"));
//        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        FileOutputFormat.setOutputPath(job, new Path("output\\d09"));

        // Enable compression of the job's final (reduce) output
        FileOutputFormat.setCompressOutput(job, true);
        // Set the compression codec
        FileOutputFormat.setOutputCompressorClass(job, BZip2Codec.class);
//        FileOutputFormat.setOutputCompressorClass(job, GzipCodec.class);
//        FileOutputFormat.setOutputCompressorClass(job, DefaultCodec.class);

        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}
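
Because BZip2Codec is set, the reducer's part files in output\\d09 are written with the codec's default extension (e.g. part-r-00000.bz2). The commented-out lines show how to switch to gzip or DEFLATE; those variants would also need imports for GzipCodec and DefaultCodec.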

ruaMap01 and ruaReduce01 are reused unchanged from the map-output example above.

Source: https://www.cnblogs.com/frankdeng/p/9255935.html
