This article walks through example code for reading HBase data with Spark. Many people run into questions when wiring the two together in day-to-day work, so this post pulls the key steps (Spark setup, HBase scan configuration, and reading the results as an RDD) into one simple, runnable program. Follow along with the code below!
package hgs.spark.hbase

// Reference: https://blog.csdn.net/mlljava1111/article/details/52675901
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.mapreduce.TableInputFormat
import org.apache.hadoop.hbase.client.Scan
import org.apache.hadoop.hbase.filter.FilterList
import org.apache.hadoop.hbase.filter.FilterList.Operator
import org.apache.hadoop.hbase.filter.RowFilter
import org.apache.hadoop.hbase.filter.RegexStringComparator
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp
import org.apache.hadoop.hbase.filter.LongComparator
import org.apache.hadoop.hbase.protobuf.ProtobufUtil
import org.apache.hadoop.hbase.util.Base64
import org.apache.hadoop.hbase.util.Bytes

object HbaseToSpark {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf
    // Use Kryo: HBase classes such as Result are not Java-serializable,
    // and the repartition below shuffles them across the cluster.
    conf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
    conf.setMaster("local").setAppName("hbasedata")
    val context = new SparkContext(conf)

    // HBase configuration: ZooKeeper quorum and the table to read.
    val hconf = HBaseConfiguration.create()
    hconf.set("hbase.zookeeper.quorum", "bigdata00:2181,bigdata01:2181,bigdata02:2181")
    hconf.set("hbase.zookeeper.property.clientPort", "2181")
    hconf.set(TableInputFormat.INPUT_TABLE, "test")

    // Bound the scan by start/stop rowkeys (the stop row is exclusive).
    val scan = new Scan
    scan.setStartRow("1991".getBytes)
    scan.setStopRow("3000".getBytes)

    // A filter-based variant of the same rowkey bound, left disabled:
    //val list = new FilterList(Operator.MUST_PASS_ALL)
    //val filter1 = new RowFilter(CompareOp.GREATER_OR_EQUAL, new LongComparator(1991))
    //val filter2 = new RowFilter(CompareOp.LESS_OR_EQUAL, new RegexStringComparator("3000*"))
    //list.addFilter(filter1)
    //list.addFilter(filter2)
    //scan.setFilter(list)

    // TableInputFormat expects the scan as a Base64-encoded protobuf string.
    hconf.set(TableInputFormat.SCAN, convertScanToString(scan))

    val hrdd = context.newAPIHadoopRDD(
      hconf,
      classOf[TableInputFormat],
      classOf[org.apache.hadoop.hbase.io.ImmutableBytesWritable],
      classOf[org.apache.hadoop.hbase.client.Result])
    val resultrdd = hrdd.repartition(2)

    // Print each row's key plus the name/age columns from family cf1.
    resultrdd.foreach { case (_, result) =>
      val key  = Bytes.toString(result.getRow)
      val name = Bytes.toString(result.getValue("cf1".getBytes, "name".getBytes))
      val age  = Bytes.toString(result.getValue("cf1".getBytes, "age".getBytes))
      println("rowkey:" + key + " name:" + name + " age:" + age)
    }

    context.stop()
  }

  // Serialize a Scan into the Base64 protobuf string TableInputFormat expects.
  def convertScanToString(scan: Scan): String = {
    val proto = ProtobufUtil.toScan(scan)
    Base64.encodeBytes(proto.toByteArray)
  }
}
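A note on the commented-out FilterList in the listing: LongComparator compares rowkeys as serialized 8-byte longs, so it would not match string rowkeys such as "1991" written with getBytes. Below is a minimal sketch of the same rowkey bound done with filters, assuming the same HBase 1.x client API as the listing; swapping in a byte-wise BinaryComparator makes it the closer equivalent of setStartRow/setStopRow:

import org.apache.hadoop.hbase.client.Scan
import org.apache.hadoop.hbase.filter.{BinaryComparator, FilterList, RowFilter}
import org.apache.hadoop.hbase.filter.FilterList.Operator
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp

val scan = new Scan
// Keep rows whose rowkey falls byte-wise between "1991" and "3000" (both inclusive).
val list = new FilterList(Operator.MUST_PASS_ALL)
list.addFilter(new RowFilter(CompareOp.GREATER_OR_EQUAL, new BinaryComparator("1991".getBytes)))
list.addFilter(new RowFilter(CompareOp.LESS_OR_EQUAL, new BinaryComparator("3000".getBytes)))
scan.setFilter(list)

One behavioral difference worth noting: setStopRow is exclusive, while the LESS_OR_EQUAL filter above is inclusive, so the filter version also keeps row "3000" itself.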
That wraps up the walkthrough of the example code for reading HBase data with Spark; hopefully it has cleared up your questions. Pairing theory with hands-on practice is the best way to learn, so give it a try. For more practical articles like this one, keep following the 億速云 site!