Java源码示例:storm.trident.TridentTopology
示例1
public StormTopology getTopology(Config config) {
    // Replay a fixed set of two-word sentences, 20 tuples per batch.
    this.spout = new FixedBatchSpout(new Fields("sentence"), 20,
            new Values("one two"),
            new Values("two three"),
            new Values("three four"),
            new Values("four five"),
            new Values("five six"));

    TridentTopology topology = new TridentTopology();

    // sentence stream -> split into words -> persistent per-word count in memory.
    topology.newStream("wordcount", spout)
            .name("sentence")
            .parallelismHint(1)
            .shuffle()
            .each(new Fields("sentence"), new Split(), new Fields("word"))
            .parallelismHint(1)
            .groupBy(new Fields("word"))
            .persistentAggregate(new MemoryMapState.Factory(), new Count(), new Fields("count"))
            .parallelismHint(1);

    return topology.build();
}
示例2
@Override
protected StormTopology buildTopology() {
    // Cycling spout that keeps re-emitting five fixed sentences, 3 per batch.
    FixedBatchSpout sentenceSpout = new FixedBatchSpout(new Fields("sentence"), 3,
            new Values("the cow jumped over the moon"),
            new Values("the man went to the store and bought some candy"),
            new Values("four score and seven years ago"),
            new Values("how many apples can you eat"),
            new Values("to be or not to be the person"));
    sentenceSpout.setCycle(true);

    ESIndexState.Factory<Tweet> indexFactory =
            new ESIndexState.Factory<>(getLocalClient(), Tweet.class);

    TridentTopology topology = new TridentTopology();

    // Ingest path: write every sentence into the Elasticsearch-backed state.
    TridentState indexState = topology.newStream("tweets", sentenceSpout)
            .partitionPersist(indexFactory, new Fields("sentence"),
                    new ESIndexUpdater(new MyTridentTupleMapper()));

    // Query path: DRPC "search" parses the request args, queries the index,
    // drops null hits and returns each matching tweet as JSON.
    topology.newDRPCStream("search", drpc)
            .each(new Fields("args"), new ExtractSearchArgs(), new Fields("query", "indices", "types"))
            .groupBy(new Fields("query", "indices", "types"))
            .stateQuery(indexState, new Fields("query", "indices", "types"),
                    new QuerySearchIndexQuery(), new Fields("tweet"))
            .each(new Fields("tweet"), new FilterNull())
            .each(new Fields("tweet"), new CreateJson(), new Fields("json"))
            .project(new Fields("json"));

    return topology.build();
}
示例3
public static StormTopology buildTopology() {
    LOG.info("Building topology.");
    TridentTopology topology = new TridentTopology();

    // Seed the scoring queue with one randomly played-out game.
    GameState exampleRecursiveState = GameState.playAtRandom(new Board(), "X");
    LOG.info("SIMULATED LEAF NODE : [" + exampleRecursiveState.getBoard() + "] w/ state [" + exampleRecursiveState + "]");

    // Scoring Queue / Spout
    LocalQueueEmitter<GameState> scoringEmitter = new LocalQueueEmitter<GameState>("ScoringQueue");
    scoringEmitter.enqueue(exampleRecursiveState);
    LocalQueueSpout<GameState> scoringSpout = new LocalQueueSpout<GameState>(scoringEmitter);

    // End-game states are scored and the scores written back via ScoreUpdater.
    topology.newStream("scoring", scoringSpout)
            .each(new Fields("gamestate"), new isEndGame())
            .each(new Fields("gamestate"), new ScoreFunction(), new Fields("board", "score", "player"))
            .each(new Fields("board", "score", "player"), new ScoreUpdater(), new Fields());

    return topology.build();
}
示例4
/**
 * Runs the move-finding DRPC topology on a local cluster and asks it for the
 * best move for "O" on a sample board.
 */
public static void main(String[] args) throws Exception {
    final LocalCluster cluster = new LocalCluster();
    final Config conf = new Config();
    LocalDRPC client = new LocalDRPC();

    TridentTopology drpcTopology = new TridentTopology();
    // args -> game state -> candidate boards -> scored moves -> best move per state.
    drpcTopology.newDRPCStream("drpc", client)
            .each(new Fields("args"), new ArgsFunction(), new Fields("gamestate"))
            .each(new Fields("gamestate"), new GenerateBoards(), new Fields("children"))
            .each(new Fields("children"), new ScoreFunction(), new Fields("board", "score", "player"))
            .groupBy(new Fields("gamestate"))
            .aggregate(new Fields("board", "score"), new FindBestMove(), new Fields("bestMove"))
            .project(new Fields("bestMove"));

    cluster.submitTopology("drpcTopology", conf, drpcTopology.build());

    // Sample mid-game position: X and O have two marks each.
    Board board = new Board();
    board.board[1][1] = "O";
    board.board[2][2] = "X";
    board.board[0][1] = "O";
    board.board[0][0] = "X";
    LOG.info("Determining best move for O on:" + board.toString()); // fixed typo "Determing"
    LOG.info("RECEIVED RESPONSE [" + client.execute("drpc", board.toKey()) + "]");

    // Release the local DRPC server and cluster so the JVM can exit cleanly.
    client.shutdown();
    cluster.shutdown();
}
示例5
public static StormTopology buildTopology() {
    LOG.info("Building topology.");
    TridentTopology topology = new TridentTopology();

    StateFactory clickThruMemory = new MemoryMapState.Factory();
    ClickThruSpout spout = new ClickThruSpout();
    Stream events = topology.newStream("clithru", spout);

    // Count distinct click-through events per campaign.
    TridentState clickThruState = events
            .each(new Fields("username", "campaign", "product", "click"), new Filter("click", "true"))
            .each(new Fields("username", "campaign", "product", "click"), new Distinct())
            .groupBy(new Fields("campaign"))
            .persistentAggregate(clickThruMemory, new Count(), new Fields("click_thru_count"));

    // Count impressions per campaign, then join in the click-through count to
    // drive the campaign-effectiveness calculation.
    events.groupBy(new Fields("campaign"))
            .persistentAggregate(new MemoryMapState.Factory(), new Count(), new Fields("impression_count"))
            .newValuesStream()
            .stateQuery(clickThruState, new Fields("campaign"), new MapGet(), new Fields("click_thru_count"))
            .each(new Fields("campaign", "impression_count", "click_thru_count"),
                    new CampaignEffectiveness(), new Fields(""));

    return topology.build();
}
示例6
/**
 * Builds the Trident word-count benchmark topology: sentences consumed from
 * Kafka are split into words and counted into an in-memory map state.
 *
 * @param config benchmark configuration supplying spout/split/count parallelism
 * @return the built topology
 */
@Override
public StormTopology getTopology(Config config) {
    final int spoutNum = BenchmarkUtils.getInt(config, SPOUT_NUM, DEFAULT_SPOUT_NUM);
    final int splitNum = BenchmarkUtils.getInt(config, SPLIT_NUM, DEFAULT_SPLIT_BOLT_NUM);
    final int countNum = BenchmarkUtils.getInt(config, COUNT_NUM, DEFAULT_COUNT_BOLT_NUM);

    spout = new TransactionalTridentKafkaSpout(
            KafkaUtils.getTridentKafkaConfig(config, new SchemeAsMultiScheme(new StringScheme())));

    TridentTopology trident = new TridentTopology();
    // sentence -> words -> grouped persistent counts, with per-stage parallelism.
    trident.newStream("wordcount", spout).name("sentence").parallelismHint(spoutNum).shuffle()
            .each(new Fields(StringScheme.STRING_SCHEME_KEY), new WordSplit(), new Fields("word"))
            .parallelismHint(splitNum)
            .groupBy(new Fields("word"))
            .persistentAggregate(new MemoryMapState.Factory(), new Count(), new Fields("count"))
            .parallelismHint(countNum);
    return trident.build();
}
示例7
public static StormTopology buildTopology(LocalDRPC drpc) {
    // Cycle through five fixed sentences, three tuples per batch.
    FixedBatchSpout spout = new FixedBatchSpout(new Fields("sentence"), 3,
            new Values("the cow jumped over the moon"),
            new Values("the man went to the store and bought some candy"),
            new Values("four score and seven years ago"),
            new Values("how many apples can you eat"),
            new Values("to be or not to be the person"));
    spout.setCycle(true);

    TridentTopology topology = new TridentTopology();

    // Persistent per-word counts fed from the sentence stream.
    TridentState wordCounts = topology.newStream("spout1", spout)
            .parallelismHint(16)
            .each(new Fields("sentence"), new Split(), new Fields("word"))
            .groupBy(new Fields("word"))
            .persistentAggregate(new MemoryMapState.Factory(), new Count(), new Fields("count"))
            .parallelismHint(16);

    // DRPC "words": sum the stored counts of the words in the request.
    topology.newDRPCStream("words", drpc)
            .each(new Fields("args"), new Split(), new Fields("word"))
            .groupBy(new Fields("word"))
            .stateQuery(wordCounts, new Fields("word"), new MapGet(), new Fields("count"))
            .each(new Fields("count"), new FilterNull())
            .aggregate(new Fields("count"), new Sum(), new Fields("sum"));

    return topology.build();
}
示例8
public static StormTopology buildTopology(TransactionalTridentKafkaSpout spout) throws IOException {
    TridentTopology topology = new TridentTopology();

    // Persist tweet counts keyed by (followerClass, contentName), restricted
    // to hashtag content from English-speaking users.
    TridentState hashtagCounts = topology.newStream("tweets", spout)
            .each(new Fields("str"), new ParseTweet(), new Fields("text", "content", "user"))
            .project(new Fields("content", "user"))
            .each(new Fields("content"), new OnlyHashtags())
            .each(new Fields("user"), new OnlyEnglish())
            .each(new Fields("content", "user"), new ExtractFollowerClassAndContentName(),
                    new Fields("followerClass", "contentName"))
            .groupBy(new Fields("followerClass", "contentName"))
            .persistentAggregate(new MemoryMapState.Factory(), new Count(), new Fields("count"));

    // DRPC "hashtag_count": enumerate every stored key, fetch its count and
    // return the single top hashtag per follower class.
    topology.newDRPCStream("hashtag_count")
            .stateQuery(hashtagCounts, new TupleCollectionGet(), new Fields("followerClass", "contentName"))
            .stateQuery(hashtagCounts, new Fields("followerClass", "contentName"), new MapGet(), new Fields("count"))
            .groupBy(new Fields("followerClass"))
            .aggregate(new Fields("contentName", "count"),
                    new FirstN.FirstNSortedAgg(1, "count", true),
                    new Fields("contentName", "count"));

    return topology.build();
}
示例9
public static StormTopology buildTopology(TransactionalTridentKafkaSpout spout) throws IOException {
    TridentTopology topology = new TridentTopology();

    // Persist tweet counts keyed by (followerClass, contentName), restricted
    // to hashtag content from English-speaking users.
    TridentState hashtagCounts = topology.newStream("tweets", spout)
            .each(new Fields("str"), new ParseTweet(), new Fields("text", "content", "user"))
            .project(new Fields("content", "user"))
            .each(new Fields("content"), new OnlyHashtags())
            .each(new Fields("user"), new OnlyEnglish())
            .each(new Fields("content", "user"), new ExtractFollowerClassAndContentName(),
                    new Fields("followerClass", "contentName"))
            .groupBy(new Fields("followerClass", "contentName"))
            .persistentAggregate(new MemoryMapState.Factory(), new Count(), new Fields("count"));

    // DRPC "top_hashtags": enumerate all stored keys, look up their counts and
    // keep the global top five hashtags by count.
    topology.newDRPCStream("top_hashtags")
            .stateQuery(hashtagCounts, new TupleCollectionGet(), new Fields("followerClass", "contentName"))
            .stateQuery(hashtagCounts, new Fields("followerClass", "contentName"), new MapGet(), new Fields("count"))
            .aggregate(new Fields("contentName", "count"),
                    new FirstN.FirstNSortedAgg(5, "count", true),
                    new Fields("contentName", "count"));

    return topology.build();
}
示例10
public static StormTopology buildTopology(TransactionalTridentKafkaSpout spout) throws IOException {
    TridentTopology topology = new TridentTopology();

    // Persist hashtag counts keyed by (country, contentName) for geo-tagged tweets.
    TridentState locationCounts = topology.newStream("tweets", spout)
            .each(new Fields("str"), new ParseTweet(), new Fields("status", "content", "user"))
            .project(new Fields("content", "user", "status"))
            .each(new Fields("content"), new OnlyHashtags())
            .each(new Fields("status"), new OnlyGeo())
            .each(new Fields("status", "content"), new ExtractLocation(), new Fields("country", "contentName"))
            .groupBy(new Fields("country", "contentName"))
            .persistentAggregate(new MemoryMapState.Factory(), new Count(), new Fields("count"));

    // DRPC "location_hashtag_count": per country, return the top three hashtags.
    topology.newDRPCStream("location_hashtag_count")
            .stateQuery(locationCounts, new TupleCollectionGet(), new Fields("country", "contentName"))
            .stateQuery(locationCounts, new Fields("country", "contentName"), new MapGet(), new Fields("count"))
            .groupBy(new Fields("country"))
            .aggregate(new Fields("contentName", "count"),
                    new FirstN.FirstNSortedAgg(3, "count", true),
                    new Fields("contentName", "count"));

    return topology.build();
}
示例11
public static StormTopology buildTopology(TransactionalTridentKafkaSpout spout) throws IOException {
    TridentTopology topology = new TridentTopology();

    // Hashtag counts per (followerClass, contentName), persisted in a
    // Hazelcast-backed state with parallelism 3 on both stages.
    TridentState followerClassCounts = topology.newStream("tweets", spout)
            .each(new Fields("str"), new ParseTweet(), new Fields("text", "content", "user"))
            .project(new Fields("content", "user"))
            .each(new Fields("content"), new OnlyHashtags())
            .each(new Fields("user"), new OnlyEnglish())
            .each(new Fields("content", "user"), new ExtractFollowerClassAndContentName(),
                    new Fields("followerClass", "contentName"))
            .parallelismHint(3)
            .groupBy(new Fields("followerClass", "contentName"))
            .persistentAggregate(new HazelCastStateFactory(), new Count(), new Fields("count"))
            .parallelismHint(3);

    // DRPC "hashtag_count": fan each request out across every follower class
    // and look up the requested hashtag's count for each one.
    topology.newDRPCStream("hashtag_count")
            .each(new Constants<String>("< 100", "< 10K", "< 100K", ">= 100K"), new Fields("followerClass"))
            .stateQuery(followerClassCounts, new Fields("followerClass", "args"), new MapGet(), new Fields("count"));

    return topology.build();
}
示例12
/**
 * Builds and submits the per-actor tweet-count topology.
 *
 * @param args args[0] must be the topology name
 */
public static void main(String[] args) throws Exception {
    // Fail fast with a clear message instead of an ArrayIndexOutOfBoundsException.
    if (args.length < 1) {
        throw new IllegalArgumentException("Usage: topology name required as first argument");
    }
    String topologyName = args[0];

    Config conf = new Config();
    conf.setNumWorkers(8); // Our Vagrant environment has 8 workers

    FakeTweetsBatchSpout fakeTweets = new FakeTweetsBatchSpout(10);

    TridentTopology topology = new TridentTopology();
    // Persistent per-actor tweet counts.
    TridentState countState = topology
            .newStream("spout", fakeTweets)
            .groupBy(new Fields("actor"))
            .persistentAggregate(new MemoryMapState.Factory(), new Count(), new Fields("count"));
    // DRPC "count_per_actor": look up the stored count for the requested actor.
    topology
            .newDRPCStream("count_per_actor")
            .stateQuery(countState, new Fields("args"), new MapGet(), new Fields("count"));

    StormSubmitter.submitTopology(topologyName, conf, topology.build());
}
示例13
public static StormTopology buildTopology(LocalDRPC drpc) {
    FixedBatchSpout spout = new FixedBatchSpout(new Fields("sentence"), 3,
            new Values("the cow jumped over the moon"),
            new Values("the man went to the store and bought some candy"),
            new Values("four score and seven years ago"),
            new Values("how many apples can you eat"),
            new Values("to be or not to be the person"));
    spout.setCycle(true);

    TridentTopology topology = new TridentTopology();

    // Split each sentence and keep a persistent count per word.
    Stream sentences = topology.newStream("spout1", spout).parallelismHint(16);
    Stream words = sentences.each(new Fields("sentence"), new Split(), new Fields("word"));
    TridentState wordCounts = words
            .groupBy(new Fields("word"))
            .persistentAggregate(new MemoryMapState.Factory(), new Count(), new Fields("count"))
            .parallelismHint(16);

    // DRPC "words": total the stored counts over all words in the request.
    Stream requestWords = topology.newDRPCStream("words", drpc)
            .each(new Fields("args"), new Split(), new Fields("word"));
    requestWords.groupBy(new Fields("word"))
            .stateQuery(wordCounts, new Fields("word"), new MapGet(), new Fields("count"))
            .each(new Fields("count"), new FilterNull())
            .aggregate(new Fields("count"), new Sum(), new Fields("sum"));

    return topology.build();
}
示例14
public static StormTopology buildTopology(LocalDRPC drpc) {
    FixedBatchSpout spout = new FixedBatchSpout(new Fields("word"), 3,
            new Values("the cow jumped over the moon"),
            new Values("the man went to the store and bought some candy"),
            new Values("four score and seven years ago"),
            new Values("how many apples can you eat"),
            new Values("to be or not to be the person"));
    spout.setCycle(true);

    TridentTopology topology = new TridentTopology();

    // Debug peek that prints the first field of every tuple passing through.
    Consumer printFirstField = new Consumer() {
        @Override
        public void accept(TridentTuple input) {
            System.out.println(input.getString(0));
        }
    };

    // split -> upper-case -> filter -> peek -> persistent per-word counts.
    TridentState wordCounts = topology.newStream("spout1", spout)
            .parallelismHint(16)
            .flatMap(split)
            .map(toUpper)
            .filter(theFilter)
            .peek(printFirstField)
            .groupBy(new Fields("word"))
            .persistentAggregate(new MemoryMapState.Factory(), new Count(), new Fields("count"))
            .parallelismHint(16);

    // DRPC "words": sum the stored counts of the requested words.
    topology.newDRPCStream("words", drpc)
            .flatMap(split)
            .groupBy(new Fields("args"))
            .stateQuery(wordCounts, new Fields("args"), new MapGet(), new Fields("count"))
            .filter(new FilterNull())
            .aggregate(new Fields("count"), new Sum(), new Fields("sum"));

    return topology.build();
}
示例15
public static StormTopology buildTopology(WindowsStoreFactory windowStore, WindowConfig windowConfig)
        throws Exception {
    FixedBatchSpout spout = new FixedBatchSpout(new Fields("sentence"), 3,
            new Values("the cow jumped over the moon"),
            new Values("the man went to the store and bought some candy"),
            new Values("four score and seven years ago"),
            new Values("how many apples can you eat"),
            new Values("to be or not to be the person"));
    spout.setCycle(true);

    TridentTopology topology = new TridentTopology();

    // Split sentences into words, count them per window, and log each result.
    topology.newStream("spout1", spout)
            .parallelismHint(16)
            .each(new Fields("sentence"), new Split(), new Fields("word"))
            .window(windowConfig, windowStore, new Fields("word"), new CountAsAggregator(), new Fields("count"))
            .peek(new Consumer() {
                @Override
                public void accept(TridentTuple input) {
                    LOG.info("Received tuple: [{}]", input);
                }
            });

    return topology.build();
}
示例16
/**
 * Creates a topology with device-id and count (which are whole numbers) as
 * tuple fields in a stream, and finally generates result streams based on
 * min and max of the device-id and count values.
 */
public static StormTopology buildDevicesTopology() {
    String deviceID = "device-id";
    String count = "count";
    Fields allFields = new Fields(deviceID, count);

    RandomNumberGeneratorSpout spout = new RandomNumberGeneratorSpout(allFields, 10, 1000);

    TridentTopology topology = new TridentTopology();
    Stream devices = topology.newStream("devicegen-spout", spout)
            .each(allFields, new Debug("##### devices"));

    // Emit the device with the smallest id and the device with the largest count.
    devices.minBy(deviceID).each(allFields, new Debug("#### device with min id"));
    devices.maxBy(count).each(allFields, new Debug("#### device with max count"));

    return topology.build();
}
示例17
/**
 * Creates a topology which demonstrates min/max operations on tuples of a
 * stream which contain vehicle and driver fields with values
 * {@link TridentMinMaxOfDevicesTopology.Vehicle} and
 * {@link TridentMinMaxOfDevicesTopology.Driver} respectively.
 */
public static StormTopology buildVehiclesTopology() {
    Fields driverField = new Fields(Driver.FIELD_NAME);
    Fields vehicleField = new Fields(Vehicle.FIELD_NAME);
    Fields allFields = new Fields(Vehicle.FIELD_NAME, Driver.FIELD_NAME);

    FixedBatchSpout spout = new FixedBatchSpout(allFields, 10, Vehicle.generateVehicles(20));
    spout.setCycle(true);

    TridentTopology topology = new TridentTopology();
    Stream vehicles = topology.newStream("spout1", spout).each(allFields, new Debug("##### vehicles"));

    // Slowest vehicle and its driver.
    Stream slowest = vehicles.min(new SpeedComparator())
            .each(vehicleField, new Debug("#### slowest vehicle"));
    slowest.project(driverField).each(driverField, new Debug("##### slowest driver"));

    // Fastest vehicle and its driver.
    vehicles.max(new SpeedComparator())
            .each(vehicleField, new Debug("#### fastest vehicle"))
            .project(driverField)
            .each(driverField, new Debug("##### fastest driver"));

    // Most fuel-efficient vehicle.
    vehicles.max(new EfficiencyComparator()).each(vehicleField, new Debug("#### efficient vehicle"));

    return topology.build();
}
示例18
@Test
public void testTridentSlidingCountWindow() {
    WindowsStoreFactory windowsStoreFactory = new InMemoryWindowsStoreFactory();
    FixedLimitBatchSpout spout = new FixedLimitBatchSpout(SPOUT_LIMIT, new Fields("sentence"), SPOUT_BATCH_SIZE,
            new Values("the cow jumped over the moon"),
            new Values("the man went to the store and bought some candy"),
            new Values("four score and seven years ago"),
            new Values("how many apples can you eat"),
            new Values("to be or not to be the person"));

    TridentTopology tridentTopology = new TridentTopology();
    // Split sentences into words and count them over a sliding count window;
    // ValidateConsumer checks every windowed result.
    tridentTopology.newStream("spout1", spout).parallelismHint(16)
            .each(new Fields("sentence"), new Split(), new Fields("word"))
            .window(windowConfig, windowsStoreFactory, new Fields("word"), new CountAsAggregator(), new Fields("count"))
            .peek(new ValidateConsumer());

    // Fix: config was previously built but null was passed to submitTopology,
    // so the topology name was never applied. Also use a typed Map.
    Map<String, Object> config = new HashMap<String, Object>();
    config.put(Config.TOPOLOGY_NAME, "TridentSlidingCountWindowTest");
    JStormUnitTestRunner.submitTopology(tridentTopology.build(), config, 120, null);
}
示例19
@Test
public void testTridentMinMaxOfDevices() {
    Fields fields = new Fields("device-id", "count");
    // Build SPOUT_BATCH_SIZE single-value tuples holding 1..SPOUT_BATCH_SIZE.
    List<Values> content = new ArrayList<Values>();
    for (int i = 0; i < SPOUT_BATCH_SIZE; i++) {
        content.add(new Values(i + 1));
    }

    ShuffleValuesBatchSpout spout = new ShuffleValuesBatchSpout(fields, content, content);
    TridentTopology tridentTopology = new TridentTopology();
    Stream stream = tridentTopology.newStream("device-gen-spout", spout)
            .each(fields, new Debug("#### devices"));

    // The asserting Debug filters validate the min/max of every batch.
    stream.minBy("device-id").each(fields, new AssertMinDebug());
    stream.maxBy("count").each(fields, new AssertMaxDebug());

    Map<String, Object> config = new HashMap<String, Object>();
    config.put(Config.TOPOLOGY_NAME, "TridentMinMaxOfDevicesTest");
    //the test can pass if the 2 AssertDebug pass throughout the test
    JStormUnitTestRunner.submitTopology(tridentTopology.build(), config, 120, null);
}
示例20
@Test
public void testTridentTumblingCountWindow() {
    WindowsStoreFactory windowsStoreFactory = new InMemoryWindowsStoreFactory();
    FixedLimitBatchSpout spout = new FixedLimitBatchSpout(SPOUT_LIMIT, new Fields("sentence"), SPOUT_BATCH_SIZE,
            new Values("the cow jumped over the moon"),
            new Values("the man went to the store and bought some candy"),
            new Values("four score and seven years ago"),
            new Values("how many apples can you eat"),
            new Values("to be or not to be the person"));

    TridentTopology tridentTopology = new TridentTopology();
    // Split sentences into words and count them over a tumbling count window;
    // ValidateConsumer checks every windowed result.
    tridentTopology.newStream("spout1", spout).parallelismHint(16)
            .each(new Fields("sentence"), new Split(), new Fields("word"))
            .window(windowConfig, windowsStoreFactory, new Fields("word"), new CountAsAggregator(), new Fields("count"))
            .peek(new ValidateConsumer());

    // Fix: config was previously built but null was passed to submitTopology,
    // so the topology name was never applied. Also use a typed Map.
    Map<String, Object> config = new HashMap<String, Object>();
    config.put(Config.TOPOLOGY_NAME, "TridentTumblingCountWindowTest");
    JStormUnitTestRunner.submitTopology(tridentTopology.build(), config, 120, null);
}
示例21
@Test
public void testTridentTumblingDurationWindow() {
    WindowsStoreFactory windowsStoreFactory = new InMemoryWindowsStoreFactory();
    FixedLimitBatchSpout spout = new FixedLimitBatchSpout(SPOUT_LIMIT, new Fields("sentence"), SPOUT_BATCH_SIZE,
            new Values("the cow jumped over the moon"),
            new Values("the man went to the store and bought some candy"),
            new Values("four score and seven years ago"),
            new Values("how many apples can you eat"),
            new Values("to be or not to be the person"));

    TridentTopology tridentTopology = new TridentTopology();
    // Split sentences into words and count them over a tumbling duration
    // window; ValidateConsumer checks every windowed result.
    tridentTopology.newStream("spout1", spout).parallelismHint(16)
            .each(new Fields("sentence"), new Split(), new Fields("word"))
            .window(windowConfig, windowsStoreFactory, new Fields("word"), new CountAsAggregator(), new Fields("count"))
            .peek(new ValidateConsumer());

    // Fix: config was previously built but null was passed to submitTopology,
    // so the topology name was never applied. Also use a typed Map.
    Map<String, Object> config = new HashMap<String, Object>();
    config.put(Config.TOPOLOGY_NAME, "TridentTumblingDurationWindowTest");
    JStormUnitTestRunner.submitTopology(tridentTopology.build(), config, 120, null);
}
示例22
@Test
public void testTridentSlidingDurationWindow() {
    WindowsStoreFactory windowsStoreFactory = new InMemoryWindowsStoreFactory();
    FixedLimitBatchSpout spout = new FixedLimitBatchSpout(SPOUT_LIMIT, new Fields("sentence"), SPOUT_BATCH_SIZE,
            new Values("the cow jumped over the moon"),
            new Values("the man went to the store and bought some candy"),
            new Values("four score and seven years ago"),
            new Values("how many apples can you eat"),
            new Values("to be or not to be the person"));

    TridentTopology tridentTopology = new TridentTopology();
    // Split sentences into words and count them over a sliding duration
    // window; ValidateConsumer checks every windowed result.
    tridentTopology.newStream("spout1", spout).parallelismHint(16)
            .each(new Fields("sentence"), new Split(), new Fields("word"))
            .window(windowConfig, windowsStoreFactory, new Fields("word"), new CountAsAggregator(), new Fields("count"))
            .peek(new ValidateConsumer());

    // Fix: config was previously built but null was passed to submitTopology,
    // so the topology name was never applied. Also use a typed Map.
    Map<String, Object> config = new HashMap<String, Object>();
    config.put(Config.TOPOLOGY_NAME, "TridentSlidingDurationWindowTest");
    JStormUnitTestRunner.submitTopology(tridentTopology.build(), config, 120, null);
}
示例23
public StormTopology getTopology(Config config) {
    // Two-word sentence spout, replayed in batches of 20 tuples.
    Fields sentenceField = new Fields("sentence");
    this.spout = new FixedBatchSpout(sentenceField, 20,
            new Values("one two"),
            new Values("two three"),
            new Values("three four"),
            new Values("four five"),
            new Values("five six"));

    TridentTopology wordCountTopology = new TridentTopology();

    // Shuffle the sentences, split into words, then keep a persistent count
    // per word in an in-memory map state.
    wordCountTopology.newStream("wordcount", spout)
            .name("sentence")
            .parallelismHint(1)
            .shuffle()
            .each(sentenceField, new Split(), new Fields("word"))
            .parallelismHint(1)
            .groupBy(new Fields("word"))
            .persistentAggregate(new MemoryMapState.Factory(), new Count(), new Fields("count"))
            .parallelismHint(1);

    return wordCountTopology.build();
}
示例24
/**
 * Submits a moving-average topology to a local cluster: parses stock ticks
 * into prices and aggregates an average per batch.
 */
public static void main(String[] args)
        throws Exception {
    Config conf = new Config();
    LocalCluster cluster = new LocalCluster();

    TridentTopology topology = new TridentTopology();
    // ticks -> parsed price -> per-batch average (emitted as field "count").
    // Note: the returned Stream was previously bound to an unused local; the
    // binding has been removed.
    topology.newStream("ticks-spout", buildSpout())
            .each(new Fields("stock-ticks"), new TickParser(), new Fields("price"))
            .aggregate(new Fields("price"), new CalculateAverage(), new Fields("count"));

    cluster.submitTopology("moving-avg", conf, topology.build());
}
示例25
@Override
public StormTopology buildTopology( ) {
    // Non-transactional Elasticsearch-backed map state for persisted tweets.
    ESIndexMapState.Factory<Tweet> tweetState = ESIndexMapState.nonTransactional(getLocalClient(), Tweet.class);

    FixedBatchSpout spout = new FixedBatchSpout(new Fields("sentence"), 3,
            new Values("the cow jumped over the moon"),
            new Values("the man went to the store and bought some candy"),
            new Values("four score and seven years ago"),
            new Values("how many apples can you eat"),
            new Values("to be or not to be the person"));
    spout.setCycle(true);

    TridentTopology topology = new TridentTopology();
    TridentState searchState = topology.newStaticState(new ESIndexState.Factory<>(getLocalClient(), Tweet.class));

    // Ingest path: build a document per sentence, key it by (index, type, id)
    // and persist the aggregated tweet.
    topology.newStream("tweets", spout)
            .each(new Fields("sentence"), new DocumentBuilder(), new Fields("document"))
            .each(new Fields("document"), new ExtractDocumentInfo(), new Fields("id", "index", "type"))
            .groupBy(new Fields("index", "type", "id"))
            .persistentAggregate(tweetState, new Fields("document"), new TweetBuilder(), new Fields("tweet"))
            .parallelismHint(1);

    // Query path: DRPC "search" parses the args, queries the static index
    // state, drops null hits and returns each matching tweet as JSON.
    topology.newDRPCStream("search", drpc)
            .each(new Fields("args"), new ExtractSearchArgs(), new Fields("query", "indices", "types"))
            .groupBy(new Fields("query", "indices", "types"))
            .stateQuery(searchState, new Fields("query", "indices", "types"), new QuerySearchIndexQuery(), new Fields("tweet"))
            .each(new Fields("tweet"), new FilterNull())
            .each(new Fields("tweet"), new CreateJson(), new Fields("json"))
            .project(new Fields("json"));

    return topology.build();
}
示例26
public static StormTopology buildTopology() {
    LOG.info("Building topology.");
    TridentTopology topology = new TridentTopology();

    // Work Queue / Spout
    LocalQueueEmitter<GameState> workEmitter = new LocalQueueEmitter<GameState>("WorkQueue");
    LocalQueueSpout<GameState> workSpout = new LocalQueueSpout<GameState>(workEmitter);
    GameState initialState = new GameState(new Board(), new ArrayList<Board>(), "X");
    workEmitter.enqueue(initialState);

    // Scoring Queue / Spout
    LocalQueueEmitter<GameState> scoringEmitter = new LocalQueueEmitter<GameState>("ScoringQueue");

    Stream gameStates = topology.newStream("gamestate", workSpout);

    // Finished games are forwarded to the scoring queue...
    gameStates.each(new Fields("gamestate"), new isEndGame())
            .each(new Fields("gamestate"),
                    new LocalQueuerFunction<GameState>(scoringEmitter),
                    new Fields(""));

    // ...while every state is expanded into child boards re-queued as new work.
    gameStates.each(new Fields("gamestate"), new GenerateBoards(), new Fields("children"))
            .each(new Fields("children"),
                    new LocalQueuerFunction<GameState>(workEmitter),
                    new Fields());

    return topology.build();
}
示例27
public static StormTopology buildTopology() {
    LOG.info("Building topology.");
    TridentTopology topology = new TridentTopology();
    TwitterSpout spout = new TwitterSpout();
    Stream tweets = topology.newStream("nlp", spout);
    try {
        // tweet -> words -> word-frequency baseline -> persisted locally, then
        // written out to Druid via partitionPersist.
        tweets.each(new Fields("tweet"), new TweetSplitterFunction(), new Fields("word"))
                .each(new Fields("searchphrase", "tweet", "word"), new WordFrequencyFunction(), new Fields("baseline"))
                .each(new Fields("searchphrase", "tweet", "word", "baseline"), new PersistenceFunction(), new Fields("none"))
                .partitionPersist(new DruidStateFactory(), new Fields("searchphrase", "tweet", "word", "baseline"), new DruidStateUpdater());
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
    return topology.build();
}
示例28
public static StormTopology buildTopology() {
    TridentTopology topology = new TridentTopology();
    DiagnosisEventSpout spout = new DiagnosisEventSpout();
    Stream events = topology.newStream("event", spout);

    // Filter relevant diseases, bucket each event by city and hour, keep a
    // persistent count per bucket, then raise and dispatch an alert whenever a
    // count looks like an outbreak.
    events.each(new Fields("event"), new DiseaseFilter())
            .each(new Fields("event"), new CityAssignment(), new Fields("city"))
            .each(new Fields("event", "city"), new HourAssignment(), new Fields("hour", "cityDiseaseHour"))
            .groupBy(new Fields("cityDiseaseHour"))
            .persistentAggregate(new OutbreakTrendFactory(), new Count(), new Fields("count"))
            .newValuesStream()
            .each(new Fields("cityDiseaseHour", "count"), new OutbreakDetector(), new Fields("alert"))
            .each(new Fields("alert"), new DispatchAlert(), new Fields());

    return topology.build();
}
示例29
public static StormTopology buildTopology() {
    LOG.info("Building topology.");
    TridentTopology topology = new TridentTopology();
    FixEventSpout spout = new FixEventSpout();

    // Keep only the FIX message types of interest and persist them to Druid.
    topology.newStream("message", spout)
            .each(new Fields("message"), new MessageTypeFilter())
            .partitionPersist(new DruidStateFactory(), new Fields("message"), new DruidStateUpdater());

    return topology.build();
}
示例30
public static StormTopology buildTopology() {
    LOG.info("Building topology.");
    TridentTopology topology = new TridentTopology();
    SalesSpout spout = new SalesSpout();
    SalesMapper mapper = new SalesMapper();

    // Incrementally sum sales tuples (price, state, product) into Cassandra.
    topology.newStream("sales", spout)
            .partitionPersist(
                    new CassandraCqlIncrementalStateFactory<String, Number>(new Sum(), mapper),
                    new Fields("price", "state", "product"),
                    new CassandraCqlIncrementalStateUpdater<String, Number>());

    return topology.build();
}