前言
在Spring Boot集群部署中使用Nacos共用配置文件进行分布式系统的唯一ID生成,常见的做法是利用雪花算法(Snowflake)来生成唯一ID。雪花算法依赖于机器ID、数据中心ID等信息来保证在集群环境下生成的ID的唯一性。
如果多个微服务实例共享同一份配置文件,workerId 就需要在各实例之间保持差异(唯一),否则会生成重复的 ID。
以下是配置和实现的步骤:
方案1
对于集群中的每个节点,在启动时通过环境变量或命令行参数设置 WORKER_ID。
```yaml
snowflake:
  datacenter-id: 1
  worker-id: ${WORKER_ID}
```
```bash
java -DWORKER_ID=1 -jar your-app.jar
```
确保每个节点的 WORKER_ID 是唯一的。
方案2
通过在Nacos中动态管理workerId来实现节点间workerId的递增配置
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139
package cn.idea360.assistant.dev.nacos;
import cn.hutool.core.lang.Snowflake;
import cn.hutool.core.util.IdUtil;
import com.alibaba.boot.nacos.discovery.properties.NacosDiscoveryProperties;
import com.alibaba.nacos.api.NacosFactory;
import com.alibaba.nacos.api.exception.NacosException;
import com.alibaba.nacos.api.naming.NamingService;
import com.alibaba.nacos.api.naming.pojo.Instance;
import com.alibaba.nacos.client.naming.utils.NetUtils;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.context.event.ApplicationReadyEvent;
import org.springframework.context.ApplicationListener;
import org.springframework.stereotype.Component;

import javax.annotation.PostConstruct;
import javax.annotation.Resource;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
@Slf4j @Component public class DistributedSnowflake implements ApplicationListener<ApplicationReadyEvent> {
@Getter private long workerId;
@Value("${snowflake.datacenterId:1}") private long datacenterId;
@Resource private NacosDiscoveryProperties nacosDiscoveryProperties;
private NamingService namingService;
private String serviceName;
private static final String WORKER_ID = "workerId";
private final ConcurrentHashMap<String, Snowflake> distributeIds = new ConcurrentHashMap<>();
private long allocateWorkerId() throws Exception { List<Instance> instances = namingService.getAllInstances(serviceName); for (long workerId = 0; workerId <= 31; workerId++) { long finalWorkerId = workerId; boolean isUsed = instances.stream() .anyMatch(instance -> instance.getMetadata().containsKey(WORKER_ID) && Long.parseLong(instance.getMetadata().get(WORKER_ID)) == finalWorkerId); if (!isUsed) { return workerId; } } throw new RuntimeException("No available workerId found"); }
@Override public void onApplicationEvent(ApplicationReadyEvent event) { try { this.workerId = allocateWorkerId();
String currentIp = nacosDiscoveryProperties.getRegister().getIp(); int currentPort = nacosDiscoveryProperties.getRegister().getPort();
Instance currentInstance = namingService.selectInstances(serviceName, true) .stream() .filter(instance -> instance.getIp().equals(currentIp) && instance.getPort() == currentPort) .findFirst() .orElseThrow(() -> new RuntimeException("Current instance not found"));
currentInstance.getMetadata().put(WORKER_ID, String.valueOf(workerId));
namingService.registerInstance(serviceName, currentInstance); log.info("实例[{}]注册workerId: {}", NetUtils.localIP(), workerId);
} catch (Exception e) { log.error("更新Metadata异常", e); } }
private Snowflake getSnowflake(String type) { return this.distributeIds.computeIfAbsent(type, k -> IdUtil.getSnowflake(workerId, datacenterId)); }
public long nextId(String type) { return this.getSnowflake(type).nextId(); }
}
|