Add Kafka, API sync
parent f7ac52bb4c
commit 338287c995
pom.xml (11 additions)
@@ -60,6 +60,17 @@
             <version>${lombok.version}</version>
             <scope>provided</scope>
         </dependency>
+
+        <dependency>
+            <groupId>org.springframework.kafka</groupId>
+            <artifactId>spring-kafka</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>com.alibaba</groupId>
+            <artifactId>fastjson</artifactId>
+            <version>1.2.73</version>
+        </dependency>
     </dependencies>
     <dependencyManagement>
         <dependencies>
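Note that spring-kafka is declared without a <version>; assuming the build inherits a Spring Boot parent (or imports the Boot BOM through the <dependencyManagement> section shown above), its version is resolved from there. fastjson 1.2.73 is what the new consumer uses to parse messages. A minimal sketch of the fastjson calls it relies on (payload values are illustrative, not from the commit):

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;

public class FastjsonParseSketch {
    public static void main(String[] args) {
        // A payload of the shape the consumer below expects: nested "api" and "system" objects.
        String payload = "{\"api\":{\"apiId\":\"a-1\",\"title\":\"Demo API\",\"url\":\"/demo/{id}\"},"
                + "\"system\":{\"requestSchema\":\"http\",\"ipAddress\":\"127.0.0.1:8080\",\"systemContext\":\"/demo\"}}";
        JSONObject publishObject = JSON.parseObject(payload);
        JSONObject apiObject = publishObject.getJSONObject("api");
        System.out.println(apiObject.getString("title")); // prints: Demo API
    }
}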
src/main/java/ink/wgink/gateway/component/ApiPublishConsumer.java (60 additions, new file)
@@ -0,0 +1,60 @@
+package ink.wgink.gateway.component;
+
+import com.alibaba.fastjson.JSON;
+import com.alibaba.fastjson.JSONObject;
+import ink.wgink.gateway.dao.route.IRouteDao;
+import ink.wgink.gateway.pojo.route.Route;
+import ink.wgink.gateway.util.DateUtil;
+import ink.wgink.gateway.util.RegexUtil;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.springframework.cloud.gateway.event.RefreshRoutesEvent;
+import org.springframework.context.ApplicationEventPublisher;
+import org.springframework.context.ApplicationEventPublisherAware;
+import org.springframework.data.mongodb.core.MongoTemplate;
+import org.springframework.kafka.annotation.KafkaListener;
+import org.springframework.stereotype.Component;
+
+/**
+ * @ClassName: ApiPublishConsumer
+ * @Description: API publish consumer
+ * @Author: wanggeng
+ * @Date: 2021/8/22 9:31 PM
+ * @Version: 1.0
+ */
+@Component
+public class ApiPublishConsumer implements ApplicationEventPublisherAware {
+
+    private ApplicationEventPublisher applicationEventPublisher;
+    private final MongoTemplate mongoTemplate;
+
+    public ApiPublishConsumer(MongoTemplate mongoTemplate) {
+        this.mongoTemplate = mongoTemplate;
+    }
+
+    @KafkaListener(topics = {"apiPublish"})
+    public void onPublish(ConsumerRecord<?, ?> record) {
+        // The message body is a JSON document with nested "api" and "system" objects.
+        JSONObject publishObject = JSON.parseObject(record.value().toString());
+        JSONObject apiObject = publishObject.getJSONObject("api");
+        JSONObject systemObject = publishObject.getJSONObject("system");
+        System.out.println(apiObject);
+        // Build a gateway route: uri = scheme://host, path = system context plus the
+        // API url with its path parameters replaced by "*" wildcards.
+        Route route = new Route();
+        route.setUuid(apiObject.getString("apiId"));
+        route.setTitle(apiObject.getString("title"));
+        route.setUri(String.format("%s://%s", systemObject.getString("requestSchema"), systemObject.getString("ipAddress")));
+        route.setPath(String.format("%s%s", systemObject.getString("systemContext"), RegexUtil.replacePathParams(apiObject.getString("url"), "*")));
+
+        String datetime = DateUtil.getTime();
+        route.setCreator("1");
+        route.setGmtCreate(datetime);
+        route.setModifier("1");
+        route.setGmtModified(datetime);
+
+        // Persist the route, then ask Spring Cloud Gateway to reload its route table.
+        mongoTemplate.save(route, IRouteDao.COLLECTION_NAME);
+        this.applicationEventPublisher.publishEvent(new RefreshRoutesEvent(this));
+    }
+
+    @Override
+    public void setApplicationEventPublisher(ApplicationEventPublisher applicationEventPublisher) {
+        this.applicationEventPublisher = applicationEventPublisher;
+    }
+}
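The listener above dictates the message shape on the apiPublish topic. For context, a hypothetical producer-side counterpart (not part of this commit; the class name and sample values are illustrative) that sends a compatible payload through Spring Boot's auto-configured KafkaTemplate:

import com.alibaba.fastjson.JSONObject;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.stereotype.Component;

@Component
public class ApiPublishProducer {

    private final KafkaTemplate<String, String> kafkaTemplate;

    public ApiPublishProducer(KafkaTemplate<String, String> kafkaTemplate) {
        this.kafkaTemplate = kafkaTemplate;
    }

    public void publish() {
        JSONObject api = new JSONObject();
        api.put("apiId", "a-1");            // becomes Route.uuid
        api.put("title", "Demo API");       // becomes Route.title
        api.put("url", "/demo/{id}");       // path params get replaced with "*"

        JSONObject system = new JSONObject();
        system.put("requestSchema", "http");        // scheme of Route.uri
        system.put("ipAddress", "127.0.0.1:8080");  // host of Route.uri
        system.put("systemContext", "/demo");       // prefix of Route.path

        JSONObject message = new JSONObject();
        message.put("api", api);
        message.put("system", system);
        kafkaTemplate.send("apiPublish", message.toJSONString());
    }
}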
@@ -11,7 +11,27 @@ spring:
   data:
     mongodb:
       uri: mongodb://mongo:27017/gateway
+
+  kafka:
+    bootstrap-servers: 127.0.0.1:9092
+    producer:
+      # Number of retries on write failure. When the leader node fails, a replica takes over as leader, and writes may fail during the transition.
+      # With retries: 0 the producer does not resend; with retries enabled, the record is resent once the replica has fully become the leader, so no message is lost.
+      retries: 0
+      # Number of acknowledgements the producer requires the leader to receive before considering a request complete; this controls the durability of sent records. Possible values:
+      # acks=0: the producer does not wait for any acknowledgement from the server; the record is added to the socket buffer and treated as sent. There is no guarantee the server received it, the retries setting has no effect, and the offset returned for each record is always -1.
+      # acks=1: the leader writes the record to its local log and responds without waiting for full acknowledgement from all replicas; if the leader fails right after acknowledging but before the data is replicated to all replicas, the record is lost.
+      # acks=all: the leader waits for the full set of in-sync replicas to acknowledge the record; the record is not lost as long as at least one in-sync replica remains alive. This is the strongest guarantee, equivalent to acks=-1.
+      # Allowed values: all, -1, 0, 1
+      acks: 1
+    consumer:
+      group-id: WgGateway
+      # earliest: with no committed offset, start reading from the beginning of the log; latest: start from the end. (The legacy consumer called these smallest/largest.) We normally use earliest.
+      auto-offset-reset: earliest
+      # Enable automatic offset commits
+      enable-auto-commit: true
+      # Frequency in milliseconds at which consumer offsets are auto-committed to Kafka when 'enable.auto.commit' is true; the default is 5000.
+      auto-commit-interval: 100
+      max-poll-records: 5
 logging:
   level:
     org.springframework: error
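For reference, the spring.kafka.* keys above map onto the plain Kafka client properties shown in this sketch (not part of the commit; the String deserializers match Spring Boot's defaults but are spelled out here as an assumption):

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class PlainConsumerSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "127.0.0.1:9092");  // spring.kafka.bootstrap-servers
        props.put("group.id", "WgGateway");                // spring.kafka.consumer.group-id
        props.put("auto.offset.reset", "earliest");        // spring.kafka.consumer.auto-offset-reset
        props.put("enable.auto.commit", "true");           // spring.kafka.consumer.enable-auto-commit
        props.put("auto.commit.interval.ms", "100");       // spring.kafka.consumer.auto-commit-interval
        props.put("max.poll.records", "5");                // spring.kafka.consumer.max-poll-records
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("apiPublish"));
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
            records.forEach(r -> System.out.println(r.value()));
        }
    }
}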
src/main/resources/application-test.yml (39 additions, new file)
@@ -0,0 +1,39 @@
+server:
+  port: 8888
+spring:
+  cloud:
+    gateway:
+      enabled: true
+      httpclient:
+        # Support forwarding to https backends
+        ssl:
+          use-insecure-trust-manager: true
+  data:
+    mongodb:
+      uri: mongodb://127.0.0.1:27017/gateway
+  kafka:
+    bootstrap-servers: 127.0.0.1:9092
+    producer:
+      # Number of retries on write failure. When the leader node fails, a replica takes over as leader, and writes may fail during the transition.
+      # With retries: 0 the producer does not resend; with retries enabled, the record is resent once the replica has fully become the leader, so no message is lost.
+      retries: 0
+      # Number of acknowledgements the producer requires the leader to receive before considering a request complete; this controls the durability of sent records. Possible values:
+      # acks=0: the producer does not wait for any acknowledgement from the server; the record is added to the socket buffer and treated as sent. There is no guarantee the server received it, the retries setting has no effect, and the offset returned for each record is always -1.
+      # acks=1: the leader writes the record to its local log and responds without waiting for full acknowledgement from all replicas; if the leader fails right after acknowledging but before the data is replicated to all replicas, the record is lost.
+      # acks=all: the leader waits for the full set of in-sync replicas to acknowledge the record; the record is not lost as long as at least one in-sync replica remains alive. This is the strongest guarantee, equivalent to acks=-1.
+      # Allowed values: all, -1, 0, 1
+      acks: 1
+    consumer:
+      group-id: WgGateway
+      # earliest: with no committed offset, start reading from the beginning of the log; latest: start from the end. (The legacy consumer called these smallest/largest.) We normally use earliest.
+      auto-offset-reset: earliest
+      # Enable automatic offset commits
+      enable-auto-commit: true
+      # Frequency in milliseconds at which consumer offsets are auto-committed to Kafka when 'enable.auto.commit' is true; the default is 5000.
+      auto-commit-interval: 100
+      max-poll-records: 5
+
+logging:
+  level:
+    org.springframework: debug
+    ink.wgink: debug
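application-test.yml only takes effect when the test profile is active, e.g. via --spring.profiles.active=test. A hypothetical launcher sketch with the same effect (GatewayApplication is an assumed name for the project's main class):

import org.springframework.boot.SpringApplication;

public class TestProfileLauncher {
    public static void main(String[] args) {
        // Activate the "test" profile so Spring Boot layers application-test.yml
        // over the base configuration. GatewayApplication is an assumed main class.
        SpringApplication app = new SpringApplication(GatewayApplication.class);
        app.setAdditionalProfiles("test");
        app.run(args);
    }
}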