[Experience Sharing] Implementing a Distributed Shared Lock with ZooKeeper

Posted on 2017-4-19 10:46:59
Distributed systems frequently need to synchronize work across multiple processes, JVMs, or machines. Thanks to ZooKeeper, this post implements a distributed shared lock that coordinates cooperation and synchronization between systems when several servers compete for the same resource. The scheme is the standard ZooKeeper lock recipe: every client creates an ephemeral sequential znode under a shared parent node, the client whose znode has the smallest sequence number owns the lock, and every other client watches only the znode immediately before its own and waits until that node is deleted.
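Before the full class, here is a minimal, self-contained sketch of that recipe using only raw ZooKeeper calls. It is illustrative, not part of the original code: the class name LockRecipeSketch, the /locks path, and the resource_lock_ node prefix are made up for this sketch, and it assumes an already-connected ZooKeeper handle plus an existing /locks parent node. The DistributedLock class below adds connection handling, timeouts, and the java.util.concurrent.locks.Lock interface.

package com.zookeeper.lock.sketch;

import java.util.Collections;
import java.util.List;
import java.util.concurrent.CountDownLatch;

import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class LockRecipeSketch {
    /**
     * Blocks until this client owns the lock. Illustrative sketch only:
     * assumes zk is an open session and the /locks parent node already exists.
     */
    public static void acquire(final ZooKeeper zk) throws Exception {
        // 1. Create an ephemeral sequential node under the shared parent.
        String myNode = zk.create("/locks/resource_lock_", new byte[0],
                ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL_SEQUENTIAL);
        while (true) {
            // 2. List all competing nodes and sort them by sequence number.
            List<String> children = zk.getChildren("/locks", false);
            Collections.sort(children);
            // 3. The smallest node owns the lock.
            if (myNode.equals("/locks/" + children.get(0))) {
                return;
            }
            // 4. Otherwise watch only the node immediately before our own and
            //    wait for it to be deleted, then re-check.
            int idx = Collections.binarySearch(children,
                    myNode.substring("/locks/".length()));
            String previous = "/locks/" + children.get(idx - 1);
            final CountDownLatch latch = new CountDownLatch(1);
            Watcher watcher = new Watcher() {
                public void process(WatchedEvent event) {
                    latch.countDown();
                }
            };
            if (zk.exists(previous, watcher) != null) {
                latch.await();
            }
        }
    }
}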

package com.zookeeper.lock.server;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.Lock;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;
/**
 * Distributed shared lock based on ZooKeeper. Usage:
 * <pre>
 * DistributedLock lock = null;
 * try {
 *     lock = new DistributedLock("127.0.0.1:2182", "test");
 *     lock.lock();
 *     // do something...
 * } catch (Exception e) {
 *     e.printStackTrace();
 * } finally {
 *     if (lock != null)
 *         lock.unlock();
 * }
 * </pre>
 *
 * @author xueliang
 */
public class DistributedLock implements Lock, Watcher {
    private ZooKeeper zk;
    private String root = "/locks";     // root node under which all lock nodes are created
    private String lockName;            // identifies the resource being competed for
    private String waitNode;            // the predecessor node this client waits on
    private String myZnode;             // the node created by this client
    private CountDownLatch latch;       // used to wait for the predecessor node to be deleted
    private int sessionTimeout = 30000;
    private List<Exception> exception = new ArrayList<Exception>();

    /**
     * Creates a distributed lock. Make sure the ZooKeeper service configured in
     * config is reachable before using it.
     * @param config   ZooKeeper connection string, e.g. 127.0.0.1:2181
     * @param lockName identifies the resource being competed for; must not contain "_lock_"
     */
    public DistributedLock(String config, String lockName) {
        this.lockName = lockName;
        // open a connection to the ZooKeeper server
        try {
            zk = new ZooKeeper(config, sessionTimeout, this);
            Stat stat = zk.exists(root, false);
            if (stat == null) {
                // create the root node if it does not exist yet
                zk.create(root, new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
            }
        } catch (IOException e) {
            exception.add(e);
        } catch (KeeperException e) {
            exception.add(e);
        } catch (InterruptedException e) {
            exception.add(e);
        }
    }
    /**
     * Watcher callback for ZooKeeper node events.
     */
    public void process(WatchedEvent event) {
        if (this.latch != null) {
            this.latch.countDown();
        }
    }

    public void lock() {
        if (exception.size() > 0) {
            throw new LockException(exception.get(0));
        }
        try {
            if (this.tryLock()) {
                System.out.println("Thread " + Thread.currentThread().getId() + " " + myZnode + " get lock true");
                return;
            } else {
                // wait (at most sessionTimeout ms) for the predecessor to release the lock
                waitForLock(waitNode, sessionTimeout);
            }
        } catch (KeeperException e) {
            throw new LockException(e);
        } catch (InterruptedException e) {
            throw new LockException(e);
        }
    }
    public boolean tryLock() {
        try {
            String splitStr = "_lock_";
            if (lockName.contains(splitStr))
                throw new LockException("lockName can not contain " + splitStr);
            // create an ephemeral sequential child node
            myZnode = zk.create(root + "/" + lockName + splitStr, new byte[0],
                    ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL_SEQUENTIAL);
            System.out.println(myZnode + " is created ");
            // list all child nodes of the root
            List<String> subNodes = zk.getChildren(root, false);
            // keep only the nodes that compete for this lockName
            List<String> lockObjNodes = new ArrayList<String>();
            for (String node : subNodes) {
                String _node = node.split(splitStr)[0];
                if (_node.equals(lockName)) {
                    lockObjNodes.add(node);
                }
            }
            Collections.sort(lockObjNodes);
            System.out.println(myZnode + "==" + lockObjNodes.get(0));
            if (myZnode.equals(root + "/" + lockObjNodes.get(0))) {
                // this client created the smallest node, so it owns the lock
                return true;
            }
            // otherwise remember the node immediately before our own
            String subMyZnode = myZnode.substring(myZnode.lastIndexOf("/") + 1);
            waitNode = lockObjNodes.get(Collections.binarySearch(lockObjNodes, subMyZnode) - 1);
        } catch (KeeperException e) {
            throw new LockException(e);
        } catch (InterruptedException e) {
            throw new LockException(e);
        }
        return false;
    }
    public boolean tryLock(long time, TimeUnit unit) {
        try {
            if (this.tryLock()) {
                return true;
            }
            // convert the caller's time unit to the milliseconds expected by waitForLock
            return waitForLock(waitNode, unit.toMillis(time));
        } catch (Exception e) {
            e.printStackTrace();
        }
        return false;
    }

    private boolean waitForLock(String lower, long waitTime) throws InterruptedException, KeeperException {
        // check whether the predecessor node still exists, registering a watch on it;
        // if it is already gone there is no need to wait for the lock
        Stat stat = zk.exists(root + "/" + lower, true);
        if (stat != null) {
            System.out.println("Thread " + Thread.currentThread().getId() + " waiting for " + root + "/" + lower);
            this.latch = new CountDownLatch(1);
            boolean acquired = this.latch.await(waitTime, TimeUnit.MILLISECONDS);
            this.latch = null;
            return acquired;
        }
        return true;
    }
    public void unlock() {
        try {
            System.out.println("unlock " + myZnode);
            zk.delete(myZnode, -1);
            myZnode = null;
            zk.close();
        } catch (InterruptedException e) {
            e.printStackTrace();
        } catch (KeeperException e) {
            e.printStackTrace();
        }
    }

    public void lockInterruptibly() throws InterruptedException {
        this.lock();
    }

    public Condition newCondition() {
        return null;
    }
    public class LockException extends RuntimeException {
        private static final long serialVersionUID = 1L;

        public LockException(String e) {
            super(e);
        }

        public LockException(Exception e) {
            super(e);
        }
    }
}


package com.zookeeper.lock.client;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicInteger;
/**
 * Simple concurrency test harness. Usage:
 * <pre>
 * ConcurrentTask[] task = new ConcurrentTask[5];
 * for (int i = 0; i < task.length; i++) {
 *     task[i] = new ConcurrentTask() {
 *         public void run() {
 *             System.out.println("==============");
 *         }
 *     };
 * }
 * new ConcurrentClient(task);
 * </pre>
 *
 * @author xueliang
 */
public class ConcurrentClient {
    private CountDownLatch startSignal = new CountDownLatch(1);  // start gate
    private CountDownLatch doneSignal = null;                    // finish gate
    private CopyOnWriteArrayList<Long> list = new CopyOnWriteArrayList<Long>();
    private AtomicInteger err = new AtomicInteger();             // atomic error counter
    private ConcurrentTask[] task = null;

    public ConcurrentClient(ConcurrentTask... task) {
        this.task = task;
        if (task == null) {
            System.out.println("task can not be null");
            System.exit(1);
        }
        doneSignal = new CountDownLatch(task.length);
        start();
    }
    /**
     * Runs all tasks concurrently and reports timing statistics.
     */
    private void start() {
        // create the worker threads and park them at the start gate
        createThread();
        // open the gate: count down the latch, releasing all waiting threads
        startSignal.countDown();
        try {
            doneSignal.await(); // wait until every task has finished
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
        // report execution times
        getExeTime();
    }
    /**
     * Creates all worker threads and parks them at the start gate.
     */
    private void createThread() {
        long len = doneSignal.getCount();
        for (int i = 0; i < len; i++) {
            final int j = i;
            new Thread(new Runnable() {
                public void run() {
                    try {
                        startSignal.await(); // block until the start gate is opened
                        long start = System.currentTimeMillis();
                        task[j].run();
                        long end = (System.currentTimeMillis() - start);
                        list.add(end);
                    } catch (Exception e) {
                        err.getAndIncrement(); // count the failure
                    }
                    doneSignal.countDown();
                }
            }).start();
        }
    }
    /**
     * Prints the minimum, maximum and average task execution time plus the error count.
     */
    private void getExeTime() {
        int size = list.size();
        List<Long> _list = new ArrayList<Long>(size);
        _list.addAll(list);
        Collections.sort(_list);
        long min = _list.get(0);
        long max = _list.get(size - 1);
        long sum = 0L;
        for (Long t : _list) {
            sum += t;
        }
        long avg = sum / size;
        System.out.println("min: " + min);
        System.out.println("max: " + max);
        System.out.println("avg: " + avg);
        System.out.println("err: " + err.get());
    }

    public interface ConcurrentTask {
        void run();
    }
}


package com.zookeeper.lock.test;
import com.zookeeper.lock.client.ConcurrentClient;
import com.zookeeper.lock.client.ConcurrentClient.ConcurrentTask;
import com.zookeeper.lock.server.DistributedLock;

public class Test {
    public static void main(String[] args) {
        Runnable task1 = new Runnable() {
            public void run() {
                DistributedLock lock = null;
                try {
                    lock = new DistributedLock("127.0.0.1:2182", "test1");
                    //lock = new DistributedLock("127.0.0.1:2182", "test2");
                    lock.lock();
                    Thread.sleep(3000);
                    System.out.println("===Thread " + Thread.currentThread().getId() + " running");
                } catch (Exception e) {
                    e.printStackTrace();
                } finally {
                    if (lock != null)
                        lock.unlock();
                }
            }
        };
        new Thread(task1).start();

        try {
            Thread.sleep(1000);
        } catch (InterruptedException e1) {
            e1.printStackTrace();
        }

        ConcurrentTask[] tasks = new ConcurrentTask[5];
        for (int i = 0; i < tasks.length; i++) {
            ConcurrentTask task3 = new ConcurrentTask() {
                public void run() {
                    DistributedLock lock = null;
                    try {
                        lock = new DistributedLock("127.0.0.1:2181", "test2");
                        lock.lock();
                        System.out.println("Thread " + Thread.currentThread().getId() + " running");
                    } catch (Exception e) {
                        e.printStackTrace();
                    } finally {
                        if (lock != null)
                            lock.unlock();
                    }
                }
            };
            tasks[i] = task3;
        }
        new ConcurrentClient(tasks);
    }
}

  Test output:

/locks/test1_lock_0000004356 is created
/locks/test1_lock_0000004356==test1_lock_0000004356
Thread 8 /locks/test1_lock_0000004356 get lock true
/locks/test2_lock_0000004357 is created
/locks/test2_lock_0000004359 is created
/locks/test2_lock_0000004358 is created
/locks/test2_lock_0000004363 is created
/locks/test2_lock_0000004361 is created
/locks/test2_lock_0000004360 is created
/locks/test2_lock_0000004362 is created
/locks/test2_lock_0000004366 is created
/locks/test2_lock_0000004365 is created
/locks/test2_lock_0000004364 is created
/locks/test2_lock_0000004357==test2_lock_0000004357
Thread 14 /locks/test2_lock_0000004357 get lock true
Thread 14 running
unlock /locks/test2_lock_0000004357
/locks/test2_lock_0000004358==test2_lock_0000004357
/locks/test2_lock_0000004361==test2_lock_0000004357
/locks/test2_lock_0000004359==test2_lock_0000004357
/locks/test2_lock_0000004362==test2_lock_0000004357
Thread 12 waiting for /locks/test2_lock_0000004360
/locks/test2_lock_0000004366==test2_lock_0000004357
Thread 18 waiting for /locks/test2_lock_0000004357
/locks/test2_lock_0000004363==test2_lock_0000004357
Thread 18 running
unlock /locks/test2_lock_0000004358
Thread 13 waiting for /locks/test2_lock_0000004362
/locks/test2_lock_0000004365==test2_lock_0000004358
Thread 16 waiting for /locks/test2_lock_0000004361
Thread 19 waiting for /locks/test2_lock_0000004358
/locks/test2_lock_0000004360==test2_lock_0000004358
Thread 15 waiting for /locks/test2_lock_0000004365
/locks/test2_lock_0000004364==test2_lock_0000004358
Thread 11 waiting for /locks/test2_lock_0000004364
Thread 20 waiting for /locks/test2_lock_0000004359
Thread 19 running
unlock /locks/test2_lock_0000004359
Thread 17 waiting for /locks/test2_lock_0000004363
Thread 20 running
unlock /locks/test2_lock_0000004360
Thread 12 running
unlock /locks/test2_lock_0000004361
Thread 16 running
unlock /locks/test2_lock_0000004362
Thread 13 running
unlock /locks/test2_lock_0000004363
Thread 17 running
unlock /locks/test2_lock_0000004364
Thread 11 running
unlock /locks/test2_lock_0000004365
Thread 15 running
unlock /locks/test2_lock_0000004366
min: 506
max: 1481
avg: 968
err: 0
===Thread 8 running
unlock /locks/test1_lock_0000004356
  A very good article on ZooKeeper, which this distributed shared lock follows:
  https://www.ibm.com/developerworks/cn/opensource/os-cn-zookeeper/
  Source: http://my.oschina.net/shenxueliang/blog/135865
