Java Code Examples: com.jayway.restassured.response.Header
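The following examples show how to use com.jayway.restassured.response.Header, drawn from open source test code. Throughout, a Header is constructed from a name and a value and attached to a request with .header(...); example 12 also reads headers back out of a response. Before the project-specific examples, here is a minimal sketch of the small surface of the class that the examples rely on (the demo class name is invented for illustration):

import com.jayway.restassured.response.Header;

public class HeaderDemo {
    public static void main(String[] args) {
        // a Header is a simple name/value pair
        final Header acceptEncoding = new Header("Accept-Encoding", "gzip");
        System.out.println(acceptEncoding.getName());  // Accept-Encoding
        System.out.println(acceptEncoding.getValue()); // gzip
    }
}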
Example 1
@Test(timeout = 10000)
public void whenAcceptEncodingGzipReceiveCompressedStream()
        throws ExecutionException, InterruptedException {
    // ARRANGE //
    // push events to one of the partitions
    final int eventsPushed = 2;
    kafkaHelper.writeMultipleMessageToPartition(TEST_PARTITION, topicName, DUMMY_EVENT, eventsPushed);

    // ACT //
    final Response response = RestAssured.given()
            .header(new Header("X-nakadi-cursors", xNakadiCursors))
            .header(new Header("Accept-Encoding", "gzip"))
            .param("batch_limit", "5")
            .param("stream_timeout", "2")
            .param("batch_flush_timeout", "2")
            .when()
            .get(streamEndpoint);

    // ASSERT //
    response.then().statusCode(HttpStatus.OK.value()).header(HttpHeaders.TRANSFER_ENCODING, "chunked");
    response.then().header("Content-Encoding", "gzip");
}
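The request advertises gzip support through an Accept-Encoding header, and the assertions confirm that the server answered with a chunked, gzip-compressed stream.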
Example 2
@Test(timeout = 10000)
public void whenReadEventsConsumerIsBlocked() throws Exception {
    // block the streaming client after 3 seconds
    new Thread(() -> {
        try {
            ThreadUtils.sleep(3000);
            SettingsControllerAT.blacklist(eventType.getName(), BlacklistService.Type.CONSUMER_ET);
        } catch (final Exception e) {
            e.printStackTrace();
        }
    }).start();

    try {
        // read events from the stream until we are blocked; otherwise a
        // TestTimedOutException is thrown and the test is considered failed
        RestAssured.given()
                .header(new Header("X-nakadi-cursors", xNakadiCursors))
                .param("batch_limit", "1")
                .param("stream_timeout", "60")
                .param("batch_flush_timeout", "10")
                .when()
                .get(streamEndpoint);
    } finally {
        SettingsControllerAT.whitelist(eventType.getName(), BlacklistService.Type.CONSUMER_ET);
    }
}
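The stream_timeout of 60 seconds exceeds the 10-second test timeout, so the test can only pass if the blacklist entry actually terminates the open stream; the finally block restores the whitelist so later tests are unaffected.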
Example 3
private static String[] readCursors(final String eventTypeName, final String startOffset, final int streamLimit)
        throws IOException {
    final Response response = given()
            .header(new Header("X-nakadi-cursors", "[{\"partition\": \"0\", \"offset\": \"" + startOffset + "\"}]"))
            .param("batch_limit", "1")
            .param("batch_flush_timeout", "1")
            .param("stream_limit", streamLimit)
            .param("stream_timeout", 60)
            .when()
            .get("/event-types/" + eventTypeName + "/events");
    response
            .then()
            .statusCode(HttpStatus.SC_OK);

    // each line of the streamed body is a standalone JSON batch
    final String[] events = response.print().split("\n");
    final List<String> result = new ArrayList<>();
    final ObjectMapper mapper = new ObjectMapper();
    for (final String event : events) {
        final ObjectNode batch = (ObjectNode) mapper.readTree(event);
        // keep-alive batches carry no "events" field; skip them
        if (batch.get("events") == null) {
            continue;
        }
        final ObjectNode cursor = (ObjectNode) batch.get("cursor");
        result.add(cursor.get("offset").asText());
    }
    return result.toArray(new String[result.size()]);
}
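Each line of the body is one batch, roughly of the form below (the payload is illustrative); batches flushed purely by batch_flush_timeout omit the "events" array, which is why the loop skips them:

{"cursor":{"partition":"0","offset":"5"},"events":[{"foo":"bar"}]}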
Example 4
@Test(timeout = 5000)
public void whenIncorrectCursorsFormatThenBadRequest() {
    RestAssured.given()
            .header(new Header("X-nakadi-cursors", "this_is_definitely_not_a_json"))
            .when()
            .get(streamEndpoint)
            .then()
            .statusCode(HttpStatus.BAD_REQUEST.value())
            .and()
            .contentType(Matchers.equalTo("application/problem+json"))
            .and()
            .body("detail", Matchers.equalTo("incorrect syntax of X-nakadi-cursors header"));
}
Example 5
@Test(timeout = 5000)
public void whenInvalidCursorsThenPreconditionFailed() {
    RestAssured.given()
            .header(new Header("X-nakadi-cursors", "[{\"partition\":\"very_wrong_partition\",\"offset\":\"3\"}]"))
            .when()
            .get(streamEndpoint)
            .then()
            .statusCode(HttpStatus.PRECONDITION_FAILED.value())
            .and()
            .contentType(Matchers.equalTo("application/problem+json"))
            .and()
            .body("detail", Matchers.equalTo("non existing partition very_wrong_partition"));
}
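Taken together, examples 4 and 5 distinguish the two failure modes: a header that is not valid JSON yields 400 Bad Request, while well-formed JSON naming a non-existing partition yields 412 Precondition Failed; both errors arrive as application/problem+json.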
Example 6
private Response readEvents() {
    return RestAssured.given()
            .header(new Header("X-nakadi-cursors", xNakadiCursors))
            .param("batch_limit", "5")
            .param("stream_timeout", "2")
            .param("batch_flush_timeout", "2")
            .when()
            .get(streamEndpoint);
}
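Extracting the common request into a helper keeps the individual tests focused on their assertions; the short stream_timeout and batch_flush_timeout make each call return quickly.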
Example 7
@Test
public void testConsumptionFromErroredPositionBlocked() {
    given()
            .header(new Header(
                    "X-nakadi-cursors", "[{\"partition\": \"0\", \"offset\": \"001-0001-000000000000000004\"}]"))
            .param("batch_limit", "1")
            .param("batch_flush_timeout", "1")
            .param("stream_limit", "5")
            .param("stream_timeout", "1")
            .when()
            .get("/event-types/" + eventType.getName() + "/events")
            .then()
            .statusCode(HttpStatus.SC_PRECONDITION_FAILED);
}
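Here the cursor is syntactically valid but, per the test name, points at a position the server treats as errored, so consumption is rejected with the same 412 precondition semantics as example 5.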
示例8
/**
* Upload a text dataset into dataprep.
*
* @param filename the file to upload
* @param datasetName the dataset basename
* @return the response
* @throws java.io.IOException if creation isn't possible
*/
public Response uploadTextDataset(String filename, String datasetName) throws java.io.IOException {
return given() //
.header(new Header(HttpHeaders.CONTENT_TYPE, "text/plain; charset=UTF-8")) //
.body(IOUtils.toString(OSDataPrepAPIHelper.class.getResourceAsStream(filename),
Charset.defaultCharset())) //
.queryParam(NAME, datasetName) //
.when() //
.post("/api/datasets");
}
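A hypothetical call site for this helper (the helper instance, file path, and dataset name are invented for illustration):

final Response response = helper.uploadTextDataset("/data/dataset.csv", "my_dataset");
response.then().statusCode(200);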
Example 9
/**
 * Upload a binary dataset into dataprep.
 *
 * @param filename the file to upload
 * @param datasetName the dataset basename
 * @return the response
 * @throws java.io.IOException if creation isn't possible
 */
public Response uploadBinaryDataset(String filename, String datasetName) throws java.io.IOException {
    return given() //
            .header(new Header(HttpHeaders.CONTENT_TYPE, HTTP.PLAIN_TEXT_TYPE)) //
            .body(IOUtils.toByteArray(OSDataPrepAPIHelper.class.getResourceAsStream(filename))) //
            .when() //
            .queryParam(NAME, datasetName) //
            .post("/api/datasets");
}
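The binary variant reads the resource with IOUtils.toByteArray instead of IOUtils.toString, so the payload bytes are sent untouched rather than being decoded and re-encoded through a charset.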
Example 10
/**
 * Update an existing dataset with the given file.
 *
 * @param filename the file used to update the dataset
 * @param datasetName the dataset name to update
 * @param datasetId the id of the dataset to update
 * @return the response
 * @throws IOException if the file cannot be read
 */
public Response updateDataset(String filename, String datasetName, String datasetId) throws IOException {
    return given() //
            .header(new Header(HttpHeaders.CONTENT_TYPE, HTTP.PLAIN_TEXT_TYPE)) //
            .body(IOUtils.toString(OSDataPrepAPIHelper.class.getResourceAsStream(filename),
                    Charset.defaultCharset())) //
            .when() //
            .queryParam(NAME, datasetName) //
            .put("/api/datasets/{datasetId}", datasetId);
}
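REST Assured substitutes the {datasetId} placeholder in the path with the trailing argument to put(), so the dataset id never has to be concatenated into the URL by hand.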
Example 11
public Response applyAggragate(Aggregate aggregate) throws Exception {
    return given() //
            .header(new Header(HttpHeaders.CONTENT_TYPE, MediaType.APPLICATION_JSON)) //
            .when() //
            .body(mapper.writeValueAsString(aggregate)) //
            .post("/api/aggregate");
}
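The aggregate is serialized to a JSON string up front with Jackson's writeValueAsString, and the explicit Content-Type header tells the server to parse the string body as application/json.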
Example 12
protected Values headersToValues(Headers headers) {
    final Values headerValues = new Values();
    for (final Header header : headers) {
        headerValues.addValue(header.getName(), header.getValue());
    }
    return headerValues;
}
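Headers implements Iterable<Header>, which is what makes the for-each loop possible; each name/value pair is copied into the caller's own Values collection. A hypothetical call site (the response variable is assumed):

final Values values = headersToValues(response.getHeaders());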
Example 13
@Test(timeout = 10000)
@SuppressWarnings("unchecked")
public void whenPushedAmountOfEventsMoreThanBatchSizeAndReadThenGetEventsInMultipleBatches()
        throws ExecutionException, InterruptedException {
    // ARRANGE //
    // push events to one of the partitions so that they don't fit into one batch
    final int batchLimit = 5;
    final int eventsPushed = 8;
    kafkaHelper.writeMultipleMessageToPartition(TEST_PARTITION, topicName, DUMMY_EVENT, eventsPushed);

    // ACT //
    final Response response = RestAssured.given()
            .header(new Header("X-nakadi-cursors", xNakadiCursors))
            .param("batch_limit", batchLimit)
            .param("stream_timeout", "2")
            .param("batch_flush_timeout", "2")
            .when()
            .get(streamEndpoint);

    // ASSERT //
    response.then().statusCode(HttpStatus.OK.value()).header(HttpHeaders.TRANSFER_ENCODING, "chunked");

    final String body = response.print();
    final List<Map<String, Object>> batches = deserializeBatches(body);

    // validate the number of batches and the structure of each batch;
    // for the partition with events we should get 2 batches
    Assert.assertThat(batches, Matchers.hasSize(PARTITIONS_NUM + 1));
    batches.forEach(batch -> validateBatchStructure(batch, DUMMY_EVENT));

    // find the batches where we expect to see the messages we pushed
    final List<Map<String, Object>> batchesToCheck = batches
            .stream()
            .filter(isForPartition(TEST_PARTITION))
            .collect(Collectors.toList());
    Assert.assertThat(batchesToCheck, Matchers.hasSize(2));

    // calculate the offsets we expect to see in the stream
    final Cursor partitionCursor = kafkaInitialNextOffsets.stream()
            .filter(cursor -> TEST_PARTITION.equals(cursor.getPartition()))
            .findFirst()
            .orElseThrow(() -> new AssertionError("Failed to find cursor for needed partition"));
    final String expectedOffset1 =
            TestUtils.toTimelineOffset(Long.parseLong(partitionCursor.getOffset()) - 1 + batchLimit);
    final String expectedOffset2 =
            TestUtils.toTimelineOffset(Long.parseLong(partitionCursor.getOffset()) - 1 + eventsPushed);

    // check that the batches have the offset, partition and number of events we expect
    validateBatch(batchesToCheck.get(0), TEST_PARTITION, expectedOffset1, batchLimit);
    validateBatch(batchesToCheck.get(1), TEST_PARTITION, expectedOffset2, eventsPushed - batchLimit);
}
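With 8 events pushed and a batch_limit of 5, the test partition yields one full batch of 5 events followed by a flushed batch of the remaining 3, while every other partition contributes a single keep-alive batch, giving PARTITIONS_NUM + 1 batches in total. The expected end offsets follow the same arithmetic: the partition's initial next-offset minus one, plus 5 for the first batch and plus 8 for the second.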