Skip to main content

Deployment Validation

Hadoop

# Validate basic Hadoop FileSystem operations against the JuiceFS volume:
# list the volume root, create a test directory, then remove it again.
# ${JFS_NAME} must be set to the JuiceFS volume name.
hadoop fs -ls jfs://${JFS_NAME}/
hadoop fs -mkdir jfs://${JFS_NAME}/jfs-test
hadoop fs -rm -r jfs://${JFS_NAME}/jfs-test

Hive, SparkSQL, Impala

-- Validate Hive / SparkSQL / Impala read-write access on the JuiceFS volume:
-- create a table whose data lives on jfs://, write, overwrite, read back,
-- then clean up. ${JFS_NAME} must be substituted with the volume name.
create table if not exists person(
  name string,
  age int
)
location 'jfs://${JFS_NAME}/tmp/person';

-- basic write path
insert into table person values('tom',25);
-- overwrite path (reads and rewrites the same table)
insert overwrite table person select name, age from person;
-- read path
select name, age from person;
-- cleanup
drop table person;

HBase

# Validate HBase on JuiceFS from the HBase shell:
# create table 'test' with one column family 'cf', write and read back a
# single cell, then delete the table.
create 'test', 'cf'
list 'test'
put 'test', 'row1', 'cf:a', 'value1'
scan 'test'
get 'test', 'row1'
# a table must be disabled before it can be dropped
disable 'test'
drop 'test'

Flume

# Flume agent "jfs": stress-test source -> memory channel -> HDFS sink
# writing onto the JuiceFS volume. Whitespace around '=' is normalized;
# java.util.Properties trims it, so this does not change any value.

# Source: generates 10 synthetic events of 10240 bytes, 10 per batch.
jfs.sources = r1
jfs.sources.r1.type = org.apache.flume.source.StressSource
jfs.sources.r1.size = 10240
jfs.sources.r1.maxTotalEvents = 10
jfs.sources.r1.batchSize = 10
jfs.sources.r1.channels = c1

# Channel: in-memory buffer between source and sink.
jfs.channels = c1
jfs.channels.c1.type = memory
jfs.channels.c1.capacity = 100
jfs.channels.c1.transactionCapacity = 100

# Sink: HDFS sink pointed at a jfs:// path, writing plain-text streams.
jfs.sinks = k1
jfs.sinks.k1.type = hdfs
jfs.sinks.k1.channel = c1
jfs.sinks.k1.hdfs.path = jfs://${JFS_NAME}/tmp/flume
jfs.sinks.k1.hdfs.writeFormat = Text
jfs.sinks.k1.hdfs.fileType = DataStream
# Flink validation (section heading appears to be lost in extraction):
# stage a sample input file on JuiceFS, then run the bundled WordCount
# example on YARN, reading from and writing to jfs:// paths.
echo 'hello world' > /tmp/jfs_test
hadoop fs -put /tmp/jfs_test jfs://${JFS_NAME}/tmp/
# the local copy is no longer needed once uploaded
rm -f /tmp/jfs_test
./bin/flink run -m yarn-cluster ./examples/batch/WordCount.jar --input jfs://${JFS_NAME}/tmp/jfs_test --output jfs://${JFS_NAME}/tmp/result