Date: 2014-05-20  Views: 20951

Map/Reduce fails with "Error in configuring object", please help!
I'm new to Map/Reduce programming and am trying to use DBInputFormat to read from and write to an Oracle database, but the job keeps failing with "Error in configuring object".

By the way, does the DBInputFormat in Hadoop 0.20.0 actually support Oracle? Any pointers would be greatly appreciated.
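For reference, "Error in configuring object" is typically a RuntimeException raised while the framework calls configure() on the mapper or input format; with DBInputFormat it most often means the JDBC driver class could not be loaded on the task nodes (the Oracle JDBC jar has to be on every task's classpath). Below is a minimal, hypothetical sketch of how such a job is usually wired up with the old mapred API; the runJob() method name, the "teacher" and "teacher_sum" table names, and the column list are assumptions for illustration, not taken from the original code.

// Hypothetical driver sketch (old mapred API, Hadoop 0.20.x). Assumes the
// Oracle JDBC jar is available to every task and that a "teacher" table with
// "name" and "age" columns exists; adjust names to match the real schema.
public static void runJob() throws IOException {
  JobConf job = new JobConf(WordCountDB.class);
  job.setJobName("teacher age sum");

  // Register driver class, connection URL and credentials with the job.
  DBConfiguration.configureDB(job,
      "oracle.jdbc.driver.OracleDriver",
      "jdbc:oracle:thin:@127.0.0.1:1521:BUS",
      "manage_bus", "its312");

  // Read rows of the source table as TeacherRecord values.
  DBInputFormat.setInput(job, TeacherRecord.class,
      "teacher", null /* conditions */, "name" /* orderBy */,
      new String[] { "name", "age" });

  job.setMapperClass(TokenizerMapper.class);
  job.setReducerClass(IntSumReducer.class);
  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(DoubleWritable.class);
  job.setOutputKeyClass(TeacherRecord.class);
  job.setOutputValueClass(NullWritable.class);

  // Write the aggregated records back out through DBOutputFormat.
  DBOutputFormat.setOutput(job, "teacher_sum", new String[] { "name", "age" });

  JobClient.runJob(job);
}

In the old API, setInput() and setOutput() also register DBInputFormat and DBOutputFormat as the job's input and output formats, so they do not need to be set separately.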

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.Iterator;
import java.util.Random;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.lib.LongSumReducer;
import org.apache.hadoop.mapred.lib.db.DBConfiguration;
import org.apache.hadoop.mapred.lib.db.DBInputFormat;
import org.apache.hadoop.mapred.lib.db.DBOutputFormat;
import org.apache.hadoop.mapred.lib.db.DBWritable;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.hsqldb.Server;

public class WordCountDB {


  private static final String DRIVER_CLASS = "oracle.jdbc.driver.OracleDriver"; // Oracle JDBC driver class
  private static final String DB_URL = "jdbc:oracle:thin:@127.0.0.1:1521:BUS"; // database URL (thin driver)
  private static final String DB_USER = "manage_bus"; // database user
  private static final String DB_PASSWD = "its312"; // password

  private static final String[] FieldNames = {"name", "age"};

  private static Connection connection; // database connection
  public static boolean initialized = false; // whether the connection has been set up

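  // Mapper: for each TeacherRecord row handed in by DBInputFormat, emit a
  // (name, age) pair so the ages can be summed per name in the reducer.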
  public static class TokenizerMapper extends MapReduceBase implements
      Mapper<LongWritable, TeacherRecord, Text, DoubleWritable> {

    @Override
    public void map(LongWritable key, TeacherRecord value,
        OutputCollector<Text, DoubleWritable> output, Reporter reporter)
        throws IOException {
      System.out.println("entering map()"); // debug trace
      output.collect(new Text(value.name), new DoubleWritable(value.age));
    }
  }
  
  
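  // Reducer: sums the ages seen for each name and emits the total as a
  // TeacherRecord key (with a NullWritable value) for DBOutputFormat to write.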
  public static class IntSumReducer extends MapReduceBase implements
      Reducer<Text, DoubleWritable, TeacherRecord, NullWritable> {

    NullWritable n = NullWritable.get();

    @Override
    public void reduce(Text key, Iterator<DoubleWritable> values,
        OutputCollector<TeacherRecord, NullWritable> output, Reporter reporter)
        throws IOException {
      System.out.println("entering reduce()"); // debug trace

      double sum = 0;
      while (values.hasNext()) {
        sum += values.next().get();
      }

      output.collect(new TeacherRecord(key.toString(), sum), n);
    }
  }

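  // Record type used on both the input and the output side: Writable so it can
  // be shuffled by the framework, DBWritable so DBInputFormat/DBOutputFormat
  // can map it to database columns (the FieldNames constant above lists
  // "name" and "age").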
  static class TeacherRecord implements Writable, DBWritable {
    String name;
    double age;

    public TeacherRecord(String m_name, double m_age) {