zl程序教程

您现在的位置是:首页 >  数据库

当前栏目

通过虚拟机的hdfs读取本地mysql上的文件

标签：mysql · 文件 · 虚拟机 · HDFS · 读取 · 本地
2023-09-27 14:27:54 时间

读取时会出现一些问题：
比如虚拟机上无法 ping 通本地 ip；
还需要在驱动类中加入：
job.addFileToClassPath(new Path("/mysql-connector-java-5.1.43-bin.jar"));
这个 jar 包需要事先上传到 HDFS 中；
同时在解压安装的 hadoop 的 share/hadoop/common/lib/ 下也要放入该 jar 包，
且每个子节点都得配置一下，
以避免出错。

public class ReadFormDB {

public static class ReadFromDBMap extends Mapper LongWritable, WordCountDBWritable, Text, NullWritable {

 private final NullWritable outValue = NullWritable.get();

 private Text outKey = new Text();

 @Override

 protected void map(LongWritable key, WordCountDBWritable value,

 Mapper LongWritable, WordCountDBWritable, Text, NullWritable .Context context)

 throws IOException, InterruptedException {

 outKey.set(value.toString());

 context.write(outKey, outValue);

//GRANT ALL PRIVILEGES ON *.* TO root@%IDENTIFIED BY 123456 WITH GRANT OPTION; 

public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {

 Configuration configuration =new Configuration();

// DBConfiguration.configureDB(configuration, "com.mysql.jdbc.Driver", "jdbc:mysql://localhost/test","root","123456");

 DBConfiguration.configureDB(configuration, "com.mysql.jdbc.Driver", 

 "jdbc:mysql://192.168.111.1:3306/test?useSSL=false autoReconnect=true failOverReadOnly=false","root","123456");

 Job job=Job.getInstance(configuration);

 job.setJarByClass(ReadFormDB.class);

 job.setJobName("读取从mysql中");

 job.setMapperClass(ReadFromDBMap.class);

 job.setNumReduceTasks(0);

 job.setMapOutputKeyClass(Text.class);

 job.setMapOutputValueClass(NullWritable.class);

 job.addFileToClassPath(new Path("/mysql-connector-java-5.1.43-bin.jar"));

 DBInputFormat.setInput(job, WordCountDBWritable.class, "word_count", "wc_count","wc_count asc","*");

 Path outputPath =new Path("/ReadFormD");

 outputPath.getFileSystem(configuration).delete(outputPath, true);

 FileOutputFormat.setOutputPath(job, outputPath);

 System.exit(job.waitForCompletion(true)?0:1);

}

}

把wordcount 的结果写入到mysql中
// NOTE(review): this class was mangled by web extraction — the closing braces
// after each getter/setter are missing, and everything from
// write(PreparedStatement) onward was collapsed into the single comment line
// at the bottom (it contains readFields(ResultSet), write(DataOutput),
// readFields(DataInput) and the start of WriteToDBMap; the map() body is lost).
// Code is left byte-identical; it will NOT compile as-is.
public class WriteToDB {

// Target table word_count — create table(wc_word varchar(255) )
// (DDL looks truncated in the original; a wc_count int column is also read below — verify.)

// Bean bridging Hadoop serialization (Writable) and JDBC access (DBWritable).
public static class WordCountDBWritable implements DBWritable, Writable {

 // word text (column wc_word)
 private String word;

 // occurrence count (column wc_count)
 private int count;

 public String getWord() {

 return word;

 public void setWord(String word) {

 this.word = word;

 public int getCount() {

 return count;

 public void setCount(int count) {

 this.count = count;


// insert into word_count(wc_word,wc_count)value(?,?) public void write(PreparedStatement statement) throws SQLException { statement.setString(1, this.word); statement.setInt(2, this.count); // 从数据库中读取数据 public void readFields(ResultSet resultSet) throws SQLException { this.word = resultSet.getString("wc_word"); this.count = resultSet.getInt("wc_count"); public void write(DataOutput out) throws IOException { out.writeUTF(this.word); out.writeInt(this.count); public void readFields(DataInput in) throws IOException { this.word = in.readUTF(); this.count = in.readInt(); public static class WriteToDBMap extends Mapper LongWritable, Text, Text, IntWritable { private final IntWritable ONE = new IntWritable(1); private Text oKey = new Text(); private String[] infos;

上面是读文件的方式

下面是关于如何写入到本地mysql中


通过Sqoop实现Mysql / Oracle 与HDFS / Hbase互导数据。下文将重点说明通过Sqoop实现Mysql与HDFS互导数据；Mysql与Hbase、Oracle与Hbase的互导最后给出命令。 一、Mysql与HDFS互导数据 宿主机器操作系统为Win7，Mysql安装在宿主机上，宿主机地址为192.168.66.96；3台虚拟机操作系统为Ubuntu-12.04.1-32位。三台虚拟机已成功安装hadoop，并实现免密钥互访，配hosts为: 192.168.66.91 masternode 192.168.66.92 slavenode1 192.168.66.93 slavenode2 /etc/profile已配置好必备环境变量HADOOP_HOME