Date: 2014-05-16
[Hadoop] Hive Performance
http://wiki.apache.org/hadoop/Hive/HBaseBulkLoad
Generate test data
/home/bmb/jdk1.6.0_16/bin/java -cp examples.zip examples.CreateLogFile 1 1000000
/home/bmb/jdk1.6.0_16/bin/java -cp examples.zip examples.CreateLogFile 1000000 2000000
/home/bmb/jdk1.6.0_16/bin/java -cp examples.zip examples.CreateLogFile 2000000 3000000
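The source of examples.CreateLogFile inside examples.zip is not shown in this article; the sketch below is a hypothetical reconstruction based on the arguments used above and on the comma-delimited (id, content, time) schema of the table created next. The file name and content format are assumptions.

import java.io.BufferedWriter;
import java.io.FileWriter;
import java.text.SimpleDateFormat;
import java.util.Date;

// Hypothetical sketch of examples.CreateLogFile (not the original source).
// Writes one comma-separated line per id so the output matches the
// p_test_data schema: id,content,time.
public class CreateLogFile {
    public static void main(String[] args) throws Exception {
        long start = Long.parseLong(args[0]); // first id (inclusive)
        long end = Long.parseLong(args[1]);   // last id (exclusive)
        String time = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").format(new Date());
        BufferedWriter out = new BufferedWriter(
                new FileWriter("test_" + start + "_" + end + ".log")); // assumed file name
        for (long id = start; id < end; id++) {
            out.write(id + ",content-" + id + "," + time); // id,content,time
            out.newLine();
        }
        out.close();
    }
}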
Create the performance test table
drop table p_test_data;
CREATE TABLE p_test_data (
  id INT,
  content STRING,
  time STRING)
ROW FORMAT DELIMITED
FIELDS TERMINATED BY ','
STORED AS TEXTFILE;
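As a quick check that the table was created with the expected layout, Hive's DESCRIBE statement can be used (output not shown in the original article):

DESCRIBE p_test_data;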
Load the data
LOAD DATA LOCAL INPATH '/home/iic/hadoop-0.20.2/test_0_1000000.log' INTO TABLE p_test_data;
LOAD DATA LOCAL INPATH '/home/iic/hadoop-0.20.2/test_1000000_2000000.log' INTO TABLE p_test_data;
LOAD DATA LOCAL INPATH '/home/iic/hadoop-0.20.2/test_2000000_3000000.log' INTO TABLE p_test_data;
set mapred.reduce.tasks=1;
select count(a.id) from p_test_data a;
Time taken: 27.265 seconds
select a.id,a.content,a.time from p_test_data a where a.id=1;
Time taken: 18.086 seconds
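p_test_data has no partitions or indexes, so even this single-row filter is executed as a full table scan, which is why it takes a similar amount of time to the count above. To confirm the plan Hive chooses, the query can be prefixed with EXPLAIN:

EXPLAIN select a.id,a.content,a.time from p_test_data a where a.id=1;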
INSERT OVERWRITE DIRECTORY '/tmp/p_test_data_out'
select a.time,count(1) from p_test_data a group by a.time;
Time taken: 32.899 seconds
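The exported results can be inspected directly on HDFS; INSERT OVERWRITE DIRECTORY writes plain text files whose columns are separated by Hive's default ^A delimiter (the exact file names depend on the job):

hadoop fs -ls /tmp/p_test_data_out
hadoop fs -cat '/tmp/p_test_data_out/*'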
(When the framework detects that an input file ends in .gz or .lzo, it uses the corresponding CompressionCodec to decompress it automatically.)

drop table p_com_test_data;
CREATE TABLE p_com_test_data (
  id INT,
  content STRING,
  time STRING)
ROW FORMAT DELIMITED
FIELDS TERMINATED BY ','
STORED AS TEXTFILE;

tar cvf 0_100W.tar test_0_1000000.log
gzip 0_100W.tar
tar cvf 100_200W.tar test_1000000_2000000.log
gzip 100_200W.tar
tar cvf 200_300W.tar test_2000000_3000000.log
gzip 200_300W.tar

Load the data
LOAD DATA LOCAL INPATH '/home/iic/hadoop-0.20.2/0_100W.tar.gz' INTO TABLE p_com_test_data;
LOAD DATA LOCAL INPATH '/home/iic/hadoop-0.20.2/100_200W.tar.gz' INTO TABLE p_com_test_data;
LOAD DATA LOCAL INPATH '/home/iic/hadoop-0.20.2/200_300W.tar.gz' INTO TABLE p_com_test_data;
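Before running the timed query, a quick smoke test (not part of the original timings) confirms that Hive can read the gzip-compressed files at all; .gz is handled out of the box by GzipCodec, while .lzo support requires a separately installed codec:

select count(a.id) from p_com_test_data a;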
select a.time,count(1) from p_com_test_data a group by a.time;
Time taken: 26.31 seconds
This example compares compressed and uncompressed input on a small amount of data, so it does not represent a definitive result, but in this test the compressed input was more efficient (26.31 seconds versus 32.899 seconds for the same group-by query).
A possible reason is that each compressed file is handed to a map task as a whole block, reducing the detection and analysis of InputSplits.