package hbaseCURD;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;
/**
 * Demo entry point: opens the HBase table "testtable" and prints the value of
 * cell cf1:c1 for rowkey "row1".
 *
 * <p>NOTE(review): this file contains two public top-level classes and two
 * {@code package} declarations; {@code test} and {@code TableManager} belong
 * in separate source files.
 */
public class test {
public static void main(String[] args) throws IOException {
    TableManager tm = new TableManager();
    // Must run first: populates the static cluster Configuration used below.
    tm.getConf();
    // tm.createTable("testtable", "cf1", "cf2");
    HTable mytable = tm.getTableObj("testtable");
    if (mytable == null) {
        // getTableObj() returns null when the connection fails; bail out
        // instead of throwing an uninformative NullPointerException.
        System.err.println("failed to open table testtable");
        return;
    }
    try {
        // Single-row read by rowkey.
        Get get = new Get(Bytes.toBytes("row1"));
        Result result = mytable.get(get);
        System.out.println("get result:" + Bytes.toString(result.getValue(Bytes.toBytes("cf1"), Bytes.toBytes("c1"))));
        // Result[] results = mytable.get(gets); // gets is a List<Get>: multi-row fetch
    } finally {
        mytable.close(); // release the client connection/buffers
    }
}
}
package hbaseCURD;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.util.Bytes;
/**
 * Thin administrative wrapper around the HBase client API: open, create,
 * delete and modify tables.
 *
 * <p>{@link #getConf()} must be called before any other method, since every
 * operation reads the shared static {@link Configuration}.
 */
public class TableManager {
/** Cluster configuration shared by all methods; set by {@link #getConf()}. */
private static Configuration conf;

/**
 * Initializes the cluster configuration (ZooKeeper quorum and HMaster
 * address). Call once before using any other method.
 */
public void getConf() {
    conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "master,slave1,slave2");
    conf.set("hbase.master", "master:60000");
}

/**
 * Opens a client handle for an existing table.
 *
 * @param tablename name of the table to open
 * @return the table handle, or {@code null} if the connection failed
 *         (callers must check; the caller is responsible for closing it)
 */
public HTable getTableObj(String tablename) {
    try {
        return new HTable(conf, tablename);
    } catch (IOException e) {
        e.printStackTrace();
        return null;
    }
}

/**
 * Creates a table with the given column families.
 *
 * @param tableName name of the table to create
 * @param args      one or more column-family names
 */
public void createTable(String tableName, String... args) throws MasterNotRunningException, ZooKeeperConnectionException {
    HBaseAdmin admin = new HBaseAdmin(conf);
    try {
        HTableDescriptor htd = new HTableDescriptor(tableName);
        for (String family : args) {
            htd.addFamily(new HColumnDescriptor(family));
        }
        admin.createTable(htd);
    } catch (IOException e) {
        e.printStackTrace();
    } finally {
        closeQuietly(admin); // original leaked the admin connection
    }
}

/**
 * Disables and then drops the given table. HBase requires a table to be
 * disabled before it can be deleted.
 */
public void deleteTable(String tableName) {
    try {
        HBaseAdmin admin = new HBaseAdmin(conf);
        try {
            admin.disableTable(Bytes.toBytes(tableName));
            admin.deleteTable(Bytes.toBytes(tableName));
        } finally {
            closeQuietly(admin);
        }
    } catch (IOException e) {
        e.printStackTrace();
    }
}

/**
 * Replaces the schema of column family {@code newcoloumf} on the given
 * table (disable, modify, re-enable).
 *
 * <p>Bug fix: the original ignored the {@code newcoloumf} parameter and
 * always modified the hard-coded family "cf1".
 */
public void modifyTable(String tableName, String newcoloumf) {
    try {
        HBaseAdmin admin = new HBaseAdmin(conf);
        try {
            admin.disableTable(Bytes.toBytes(tableName));
            admin.modifyColumn(tableName, new HColumnDescriptor(newcoloumf));
            admin.enableTable(tableName);
        } finally {
            closeQuietly(admin);
        }
    } catch (IOException e) {
        e.printStackTrace();
    }
}

/** Closes the admin handle, logging (not propagating) any close failure. */
private static void closeQuietly(HBaseAdmin admin) {
    try {
        admin.close();
    } catch (IOException e) {
        e.printStackTrace();
    }
}
}
HBase的访问方式
1、Native Java API:最常规和高效的访问方式;
2、HBase Shell:HBase的命令行工具,最简单的接口,适合HBase管理使用;
3、Thrift Gateway:利用Thrift序列化技术,支持C++,PHP,Python等多种语言,适合其他异构系统在线访问HBase表数据;
4、REST Gateway:支持REST 风格的Http API访问HBase, 解除了语言限制;
5、MapReduce:直接使用MapReduce作业处理Hbase数据;
6、使用Pig/hive处理Hbase数据。
常用Java API的用法:
1、加载配置
[java]
Configuration config = HBaseConfiguration.create();
//可以自定义配置,也可以从自定义配置文件中读取
/*config.set("hbase.zookeeper.property.clientPort", "4181");
config.set("hbase.zookeeper.quorum", "hadoop.datanode5.com,hadoop.datanode2.com,hadoop.datanode3.com");
config.set("hbase.master", "hadoop.datanode3.com:600000");*/
2、表的创建、表信息修改、表删除
[java]
HBaseAdmin admin = new HBaseAdmin(config);
//创建表
HTableDescriptor htd = new HTableDescriptor(tableName);
htd.addFamily(new HColumnDescriptor("cf1"));
htd.addFamily(new HColumnDescriptor("cf2"));
admin.createTable(htd);
//修改表信息
admin.disableTable(tableName);
// modifying existing ColumnFamily
admin.modifyColumn(tableName, new HColumnDescriptor("cf1"));
admin.enableTable(tableName);
//删除表
admin.disableTable(Bytes.toBytes(tableName));
admin.deleteTable(Bytes.toBytes(tableName));
3、添加记录
[java]
/** 在多次使用时,建议用HTablePool
HTable table = new HTable(config, tableName);
=>
HTablePool pool = new HTablePool(config, 1000);
HTableInterface table = pool.getTable(tableName);*/
HTable table = new HTable(config, tableName);
/**
* 在插入操作时,默认不使用任何缓存
* 可自定义使用缓存,以及缓存大小
* 每个任务最后需要手工调用 flushCommits();
*/
/*table.setAutoFlush(false);
table.setWriteBufferSize(1024);*/
Put put1 = new Put(Bytes.toBytes(rowKey));
if (ts == 0) {
put1.add(Bytes.toBytes(family), Bytes.toBytes(qualifier), Bytes.toBytes(value));
} else {
//自定义版本时,从自定义的版本号,类型为long
put1.add(Bytes.toBytes(family), Bytes.toBytes(qualifier), ts,Bytes.toBytes(value));
}
table.put(put1);
//table.flushCommits();
4、查询,根据Rowkey查询
[java]
Get get1 = new Get(Bytes.toBytes(rowKey));
Result result = table.get(get1);
System.out.println("get result:" + Bytes.toString(result.getValue(Bytes.toBytes(family), Bytes.toBytes(qualifier))));
Result[] results = table.get(gets);//gets 为 List&lt;Get&gt;,查询指定Rowkey的多条记录
5、查询,指定条件和rowkey区间查询
[java]
Scan scan = new Scan();
//默认缓存大小为1,设置成一个合理的值,可以减少scan过程中next()的时间开销,代价是客户端的内存
scan.setCaching(500);
scan.setCacheBlocks(false);
//根据startRowKey、endRowKey查询
//Scan scan = new Scan(Bytes.toBytes("startRowKey"), Bytes.toBytes("endRowKey"));
//rowKey之外的过滤条件,在List中可以add;
/**List&lt;Filter&gt; filters = new ArrayList&lt;Filter&gt;();
Filter filter = new SingleColumnValueFilter("familyName".getBytes(),
"qualifierName".getBytes(),
CompareOp.EQUAL,
Bytes.toBytes("value"));
filters.add(filter);
scan.setFilter(new FilterList(filters));*/
ResultScanner scanner = table.getScanner(scan);
System.out.println("scan result list:");
for (Result result : scanner) {
System.out.println(Bytes.toString(result.getRow()));
System.out.println(Bytes.toString(result.getValue(Bytes.toBytes("data"), Bytes.toBytes("data1"))));
System.out.println(Bytes.toString(result.getValue(Bytes.toBytes("data"), Bytes.toBytes("data2"))));
}
scanner.close();
参考:
1、http://www.taobaotest.com/blogs/1605
2、http://abloz.com/hbase/book.html#data_model_operations(官网示例)