Consumer.java
package org.example;

import com.opencsv.CSVWriter;
import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.errors.WakeupException;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.FileWriter;
import java.io.IOException;
import java.time.Duration;
import java.util.Arrays;
import java.util.Properties;
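
/**
 * Simple Kafka consumer that reads forex rate records from the
 * "forex-rates-topic" topic and appends them to a CSV file.
 */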
public class Consumer {

    private static final Logger log = LoggerFactory.getLogger(Consumer.class.getSimpleName());

    public static void main(String[] args) throws IOException {
        log.info("Kafka Simple Consumer");

        String groupId = "my-application-bigdata-projekat";
        String topic = "forex-rates-topic";

        // create consumer properties
        Properties properties = new Properties();
        properties.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "127.0.0.1:9092");
        properties.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        properties.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        properties.setProperty(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        // start from the earliest offset when the group has no committed offset yet
        properties.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        // cooperative rebalancing: partitions are reassigned incrementally instead of all being revoked at once
        properties.setProperty(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, CooperativeStickyAssignor.class.getName());

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties);
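        // subscribing with a group id lets Kafka manage partition assignment and committed offsets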
        consumer.subscribe(Arrays.asList(topic));
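
        // Each record uses the date as its key and a tab-separated value with five
        // fields (Name, Open, High, Low, Close), matching the CSV header below.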
        CSVWriter writer = new CSVWriter(new FileWriter("C:\\Users\\matij\\OneDrive\\Desktop\\currency-data.csv", false));
        // CSVWriter writer = new CSVWriter(new FileWriter("C:\\Users\\matij\\OneDrive\\Desktop\\rsd-to-eur-data.csv", false));

        // write the CSV header
        String[] header = {"Date", "Name", "Open", "High", "Low", "Close"};
        writer.writeNext(header);

        // KafkaConsumer is not thread-safe: closing it from the shutdown-hook thread while the
        // main thread is still polling is unsafe. The hook only calls wakeup(), which makes the
        // next poll() throw WakeupException on the main thread, then waits for cleanup to finish.
        final Thread mainThread = Thread.currentThread();
        Runtime.getRuntime().addShutdownHook(new Thread(() -> {
            log.info("Shutdown detected, waking up the consumer...");
            consumer.wakeup();
            try {
                mainThread.join();
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        }));

        try {
            // poll for new data
            while (true) {
                log.info("Polling...");
                ConsumerRecords<String, String> consumerRecords = consumer.poll(Duration.ofMillis(2000));

                for (ConsumerRecord<String, String> record : consumerRecords) {
                    String[] columns = record.value().split("\t");
                    String[] row = {record.key(), columns[0], columns[1], columns[2], columns[3], columns[4]};
                    writer.writeNext(row);
                    writer.flush();
                    log.info("Key: " + record.key() + ", Value: " + record.value()
                            + "\nPartition: " + record.partition() + "\nOffset: " + record.offset());
                }
            }
        } catch (WakeupException e) {
            // expected during shutdown; triggered by consumer.wakeup() in the shutdown hook
            log.info("Consumer is shutting down");
        } catch (IOException e) {
            throw new RuntimeException(e);
        } finally {
            // pad the file with placeholder rows (six columns, matching the header), then release resources
            for (int i = 0; i < 65; i++) {
                writer.writeNext(new String[]{"0", "0", "0", "0", "0", "0"});
            }
            consumer.close();
            writer.close();
        }
    }
}