Analysis of the Redis AOF Data Persistence Mechanism, Part 1 (repost)
Sources: http://blog.iyunv.com/acceptedxukai/article/details/18136903 and http://blog.iyunv.com/acceptedxukai/article/details/18181563
All source code quoted in this article is from Redis 2.8.2.
The implementation of Redis AOF persistence lives mainly in redis.c, redis.h, aof.c, bio.c, rio.c and config.c.
Before reading this article, please read the companion post on the AOF configuration parameters, which explains how the AOF-related options are parsed: http://blog.iyunv.com/acceptedxukai/article/details/18135219
When reposting, please credit the original article: http://blog.iyunv.com/acceptedxukai/article/details/18136903
The sections below describe how AOF persistence is implemented.
Loading AOF data at server startup
The call path for loading AOF data at startup is main() -> initServerConfig() -> loadServerConfig() -> initServer() -> loadDataFromDisk(). initServerConfig() initializes the default AOF settings; loadServerConfig() then parses the AOF options in redis.conf and overrides those defaults, and if appendonly yes is configured, AOF persistence is enabled and server.aof_state is set to REDIS_AOF_ON. loadDataFromDisk() checks whether server.aof_state == REDIS_AOF_ON; if so, it calls loadAppendOnlyFile() to load the data from the AOF file. Loading simply means reading the file: because the AOF stores data in exactly the same format as client requests (the Redis protocol), the server creates a fake client (fakeClient), parses the file, and replays each parsed command through cmd->proc(fakeClient), thereby rebuilding the database contents from the AOF file.
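For illustration only (the key and value here are made up, not taken from the source): an AOF file that recorded SET foo bar against database 0 would contain Redis-protocol text such as

    *2\r\n$6\r\nSELECT\r\n$1\r\n0\r\n
    *3\r\n$3\r\nSET\r\n$3\r\nfoo\r\n$3\r\nbar\r\n

where every \r\n stands for an actual CR/LF pair in the file; loadAppendOnlyFile() below parses exactly this format.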
/* Function called at startup to load RDB or AOF file in memory. */
void loadDataFromDisk(void) {
    long long start = ustime();
    if (server.aof_state == REDIS_AOF_ON) {
        if (loadAppendOnlyFile(server.aof_filename) == REDIS_OK)
            redisLog(REDIS_NOTICE,"DB loaded from append only file: %.3f seconds",(float)(ustime()-start)/1000000);
    } else {
        if (rdbLoad(server.rdb_filename) == REDIS_OK) {
            redisLog(REDIS_NOTICE,"DB loaded from disk: %.3f seconds",
                (float)(ustime()-start)/1000000);
        } else if (errno != ENOENT) {
            redisLog(REDIS_WARNING,"Fatal error loading the DB: %s. Exiting.",strerror(errno));
            exit(1);
        }
    }
}
The server checks for and loads the AOF file first because the data in the AOF file is newer than the data in the RDB file.
int loadAppendOnlyFile(char *filename) {
    struct redisClient *fakeClient;
    FILE *fp = fopen(filename,"r");
    struct redis_stat sb;
    int old_aof_state = server.aof_state;
    long loops = 0;

    /* redis_fstat is fstat64: fileno(fp) yields the file descriptor, and the
     * file's status is stored in sb (see the stat man page); st_size is the
     * file size in bytes. */
    if (fp && redis_fstat(fileno(fp),&sb) != -1 && sb.st_size == 0) {
        server.aof_current_size = 0;
        fclose(fp);
        return REDIS_ERR;
    }

    if (fp == NULL) { /* failed to open the file */
        redisLog(REDIS_WARNING,"Fatal error: can't open the append log file for reading: %s",strerror(errno));
        exit(1);
    }

    /* Temporarily disable AOF, to prevent EXEC from feeding a MULTI
     * to the same file we're about to read. */
    server.aof_state = REDIS_AOF_OFF;

    fakeClient = createFakeClient(); /* create the fake client */
    startLoading(fp); /* defined in rdb.c, updates the server's loading state */

    while(1) {
        int argc, j;
        unsigned long len;
        robj **argv;
        char buf[128];
        sds argsds;
        struct redisCommand *cmd;

        /* Serve the clients from time to time */
        /* Every 1000 iterations report progress and handle pending events;
         * ftello() returns the current offset within the file. */
        if (!(loops++ % 1000)) {
            loadingProgress(ftello(fp)); /* record how far into the AOF file we are */
            aeProcessEvents(server.el, AE_FILE_EVENTS|AE_DONT_WAIT); /* process events */
        }
        /* Read the AOF file line by line */
        if (fgets(buf,sizeof(buf),fp) == NULL) {
            if (feof(fp)) /* end of file reached */
                break;
            else
                goto readerr;
        }
        /* Parse the command stored in the AOF according to the Redis protocol */
        if (buf[0] != '*') goto fmterr;
        argc = atoi(buf+1); /* number of arguments */
        if (argc < 1) goto fmterr;

        argv = zmalloc(sizeof(robj*)*argc); /* argument vector */
        for (j = 0; j < argc; j++) {
            if (fgets(buf,sizeof(buf),fp) == NULL) goto readerr;
            if (buf[0] != '$') goto fmterr;
            len = strtol(buf+1,NULL,10); /* length of this bulk string */
            argsds = sdsnewlen(NULL,len); /* allocate an empty sds of that length */
            /* read exactly len bytes of the bulk string */
            if (len && fread(argsds,len,1,fp) == 0) goto fmterr;
            argv[j] = createObject(REDIS_STRING,argsds);
            if (fread(buf,2,1,fp) == 0) goto fmterr; /* discard CRLF */
        }

        /* Command lookup */
        cmd = lookupCommand(argv[0]->ptr);
        if (!cmd) {
            redisLog(REDIS_WARNING,"Unknown command '%s' reading the append only file", (char*)argv[0]->ptr);
            exit(1);
        }
        /* Run the command in the context of a fake client */
        fakeClient->argc = argc;
        fakeClient->argv = argv;
        cmd->proc(fakeClient); /* execute the command */

        /* The fake client should not have a reply */
        redisAssert(fakeClient->bufpos == 0 && listLength(fakeClient->reply) == 0);
        /* The fake client should never get blocked */
        redisAssert((fakeClient->flags & REDIS_BLOCKED) == 0);

        /* Clean up. Command code may have changed argv/argc so we use the
         * argv/argc of the client instead of the local variables. */
        for (j = 0; j < fakeClient->argc; j++)
            decrRefCount(fakeClient->argv[j]);
        zfree(fakeClient->argv);
    }

    /* This point can only be reached when EOF is reached without errors.
     * If the client is in the middle of a MULTI/EXEC, log error and quit. */
    if (fakeClient->flags & REDIS_MULTI) goto readerr;

    fclose(fp);
    freeFakeClient(fakeClient);
    server.aof_state = old_aof_state;
    stopLoading();
    aofUpdateCurrentSize(); /* update server.aof_current_size, the AOF file size */
    server.aof_rewrite_base_size = server.aof_current_size;
    return REDIS_OK;
    …………
}
The previous post on the AOF configuration parameters left one question open: how server.aof_current_size is initialized. Let's resolve that now.
void aofUpdateCurrentSize(void) {
    struct redis_stat sb;

    if (redis_fstat(server.aof_fd,&sb) == -1) {
        redisLog(REDIS_WARNING,"Unable to obtain the AOF file length. stat: %s",
            strerror(errno));
    } else {
        server.aof_current_size = sb.st_size;
    }
}
redis_fstat is the author's alias for the Linux fstat64 function; it retrieves a file's metadata (see the stat man page for details), and sb.st_size is the current size of the AOF file in bytes. Note that server.aof_fd, the AOF file descriptor, must already be valid here; it is initialized in initServer():
/* Open the AOF file if needed. */
if (server.aof_state == REDIS_AOF_ON) {
    server.aof_fd = open(server.aof_filename,O_WRONLY|O_APPEND|O_CREAT,0644);
    if (server.aof_fd == -1) {
        redisLog(REDIS_WARNING, "Can't open the append-only file: %s",strerror(errno));
        exit(1);
    }
}
That completes the process by which the Redis server loads AOF data from disk at startup.
How new data in the server's database is persisted to disk
When a client executes a command such as SET that modifies the database, the modified data must be appended to the AOF file in a timely manner and flushed to disk according to a configurable fsync policy, so that it cannot be lost.
The previous post introduced the three fsync policies: appendfsync always, appendfsync everysec, and appendfsync no. The policy in effect is stored in server.aof_fsync.
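For reference, the corresponding redis.conf settings look roughly like this (illustrative values, not taken from any particular configuration):

    appendonly yes
    # appendfsync always     # fsync after every write command
    appendfsync everysec     # fsync once per second
    # appendfsync no         # let the OS decide when to flush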
First, let's look at how Redis appends a command that modified data to server.aof_buf.
The call path is call() -> propagate() -> feedAppendOnlyFile(); call() determines whether executing the command actually modified any data.
feedAppendOnlyFile() first checks whether AOF is enabled. If it is, the command that modified the data is re-encoded, following the Redis protocol, into the same string a client request would have produced, with special handling for expire-related commands, and that string is appended to server.aof_buf. Pay particular attention to the last two lines of the function: if server.aof_child_pid != -1, the server is currently rewriting the AOF file, so the modified data must also be appended to the server.aof_rewrite_buf_blocks list and, once the rewrite finishes, appended to the new AOF file. See the comments in the code below for details.
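For instance, a client command such as SETEX mykey 10 hello (a made-up key and value, used only for illustration) is not appended verbatim: as the translation code below shows, it lands in the AOF as the equivalent of SET mykey hello followed by PEXPIREAT mykey <absolute-expiry-in-milliseconds>, both encoded in the Redis protocol.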
/* Propagate the specified command (in the context of the specified database id)
 * to AOF and Slaves.
 *
 * flags are an xor between:
 * + REDIS_PROPAGATE_NONE (no propagation of command at all)
 * + REDIS_PROPAGATE_AOF (propagate into the AOF file if is enabled)
 * + REDIS_PROPAGATE_REPL (propagate into the replication link)
 */
void propagate(struct redisCommand *cmd, int dbid, robj **argv, int argc,
               int flags)
{
    /* Feed the data modified by cmd into the AOF */
    if (server.aof_state != REDIS_AOF_OFF && flags & REDIS_PROPAGATE_AOF)
        feedAppendOnlyFile(cmd,dbid,argv,argc);
    if (flags & REDIS_PROPAGATE_REPL)
        replicationFeedSlaves(server.slaves,dbid,argv,argc);
}
/* cmd modified the data: first write the updated data into server.aof_buf */
void feedAppendOnlyFile(struct redisCommand *cmd, int dictid, robj **argv, int argc) {
    sds buf = sdsempty();
    robj *tmpargv[3];

    /* The DB this command was targeting is not the same as the last command
     * we appended. To issue a SELECT command is needed. */
    /* The current db is not the AOF's selected db: emit a SELECT to switch. */
    if (dictid != server.aof_selected_db) {
        char seldb[64];

        snprintf(seldb,sizeof(seldb),"%d",dictid);
        buf = sdscatprintf(buf,"*2\r\n$6\r\nSELECT\r\n$%lu\r\n%s\r\n",
            (unsigned long)strlen(seldb),seldb);
        server.aof_selected_db = dictid;
    }

    if (cmd->proc == expireCommand || cmd->proc == pexpireCommand ||
        cmd->proc == expireatCommand) {
        /* Translate EXPIRE/PEXPIRE/EXPIREAT into PEXPIREAT */
        buf = catAppendOnlyExpireAtCommand(buf,cmd,argv[1],argv[2]);
    } else if (cmd->proc == setexCommand || cmd->proc == psetexCommand) {
        /* Translate SETEX/PSETEX to SET and PEXPIREAT */
        tmpargv[0] = createStringObject("SET",3);
        tmpargv[1] = argv[1];
        tmpargv[2] = argv[3];
        buf = catAppendOnlyGenericCommand(buf,3,tmpargv);
        decrRefCount(tmpargv[0]);
        buf = catAppendOnlyExpireAtCommand(buf,cmd,argv[1],argv[2]);
    } else { /* all other commands are appended as-is */
        /* All the other commands don't need translation or need the
         * same translation already operated in the command vector
         * for the replication itself. */
        buf = catAppendOnlyGenericCommand(buf,argc,argv);
    }

    /* Append to the AOF buffer. This will be flushed on disk just before
     * of re-entering the event loop, so before the client will get a
     * positive reply about the operation performed. */
    /* Append buf to the server's aof_buf; beforeSleep() writes it to the AOF
     * file and fsyncs it to disk according to the configured policy. */
    if (server.aof_state == REDIS_AOF_ON)
        server.aof_buf = sdscatlen(server.aof_buf,buf,sdslen(buf));

    /* If a background append only file rewriting is in progress we want to
     * accumulate the differences between the child DB and the current one
     * in a buffer, so that when the child process will do its work we
     * can append the differences to the new append only file. */
    /* If server.aof_child_pid is not -1, a child process is writing the
     * snapshot to a temporary file (a rewrite has started), so the data
     * modified by commands received in the meantime must be buffered and
     * appended to the AOF once the child finishes, to avoid losing it.
     * This is why aof_rewrite_buf_blocks is needed: while the rewrite walks
     * all databases, some data already written to the new AOF may be modified
     * again by clients, creating a difference. */
    if (server.aof_child_pid != -1)
        aofRewriteBufferAppend((unsigned char*)buf,sdslen(buf));
    /* The difference between server.aof_buf and server.aof_rewrite_buf_blocks:
     * aof_buf is the normal path: while the AOF file is open its contents are
     * continuously written to the AOF file.
     * aof_rewrite_buf_blocks is used when the user actively triggers an AOF
     * rewrite, for example with "config set appendonly yes": Redis forks a
     * background process, which writes a snapshot of the data to a temporary
     * file. Commands received during that time must be recorded; once the
     * child has finished the temporary AOF, the serverCron timer notices the
     * exit and calls backgroundRewriteDoneHandler, which calls
     * aofRewriteBufferWrite to append the data in aof_rewrite_buf_blocks (the
     * diff) to the temporary AOF file, which then replaces the normal AOF.
     * So aof_buf is usually smaller than aof_rewrite_buf_blocks, although at
     * the start aof_buf may contain some earlier data the latter does not. */

    sdsfree(buf);
}
Before each iteration of the event loop the server calls beforeSleep(); let's see what that function does.
/* This function gets called every time Redis is entering the
 * main loop of the event driven library, that is, before to sleep
 * for ready file descriptors. */
void beforeSleep(struct aeEventLoop *eventLoop) {
    REDIS_NOTUSED(eventLoop);
    listNode *ln;
    redisClient *c;

    /* Run a fast expire cycle (the called function will return
     * ASAP if a fast cycle is not needed). */
    if (server.active_expire_enabled && server.masterhost == NULL)
        activeExpireCycle(ACTIVE_EXPIRE_CYCLE_FAST);

    /* Try to process pending commands for clients that were just unblocked. */
    while (listLength(server.unblocked_clients)) {
        ln = listFirst(server.unblocked_clients);
        redisAssert(ln != NULL);
        c = ln->value;
        listDelNode(server.unblocked_clients,ln);
        c->flags &= ~REDIS_UNBLOCKED;

        /* Process remaining data in the input buffer. */
        /* Handle requests that arrived from the client while it was blocked */
        if (c->querybuf && sdslen(c->querybuf) > 0) {
            server.current_client = c;
            processInputBuffer(c);
            server.current_client = NULL;
        }
    }

    /* Write the AOF buffer on disk */
    /* Append server.aof_buf to the AOF file and fsync it to disk */
    flushAppendOnlyFile(0);
}
As the code and comments above show, beforeSleep() does three things: (1) it runs a fast cycle to expire keys, (2) it processes requests that arrived while clients were blocked, and (3) it appends the contents of server.aof_buf to the AOF file and fsyncs it to disk as needed. flushAppendOnlyFile() takes a force parameter indicating whether the write must happen immediately: 0 means the write may be postponed, 1 means it is forced.
void flushAppendOnlyFile(int force) {
    ssize_t nwritten;
    int sync_in_progress = 0;

    if (sdslen(server.aof_buf) == 0) return;

    /* Check how many fsync jobs are still pending in the background */
    if (server.aof_fsync == AOF_FSYNC_EVERYSEC)
        sync_in_progress = bioPendingJobsOfType(REDIS_BIO_AOF_FSYNC) != 0;

    /* AOF mode is fsync-every-second and force is not set: postpone the
     * flush if possible */
    if (server.aof_fsync == AOF_FSYNC_EVERYSEC && !force) {
        /* With this append fsync policy we do background fsyncing.
         * If the fsync is still in progress we can try to delay
         * the write for a couple of seconds. */
        /* There is already an fsync waiting in the background queue */
        if (sync_in_progress) {
            /* No flush was postponed before: remember when we started
             * postponing, then return */
            if (server.aof_flush_postponed_start == 0) {
                /* No previous write postponinig, remember that we are
                 * postponing the flush and return. */
                server.aof_flush_postponed_start = server.unixtime;
                return;
            } else if (server.unixtime - server.aof_flush_postponed_start < 2) {
                /* Postponing the flush for less than two seconds is allowed */
                /* We were already waiting for fsync to finish, but for less
                 * than two seconds this is still ok. Postpone again. */
                return;
            }
            /* Otherwise fall trough, and go write since we can't wait
             * over two seconds. */
            server.aof_delayed_fsync++;
            redisLog(REDIS_NOTICE,"Asynchronous AOF fsync is taking too long (disk is busy?). Writing the AOF buffer without waiting for fsync to complete, this may slow down Redis.");
        }
    }
    /* If you are following this code path, then we are going to write so
     * set reset the postponed flush sentinel to zero. */
    server.aof_flush_postponed_start = 0;

    /* We want to perform a single write. This should be guaranteed atomic
     * at least if the filesystem we are writing is a real physical one.
     * While this will save us against the server being killed I don't think
     * there is much to do about the whole server stopping for power problems
     * or alike */
    /* Write the AOF buffer to the file; with luck the write is atomic */
    nwritten = write(server.aof_fd,server.aof_buf,sdslen(server.aof_buf));
    if (nwritten != (signed)sdslen(server.aof_buf)) { /* error or short write */
        /* Ooops, we are in troubles. The best thing to do for now is
         * aborting instead of giving the illusion that everything is
         * working as expected. */
        if (nwritten == -1) {
            redisLog(REDIS_WARNING,"Exiting on error writing to the append-only file: %s",strerror(errno));
        } else {
            redisLog(REDIS_WARNING,"Exiting on short write while writing to "
                                   "the append-only file: %s (nwritten=%ld, "
                                   "expected=%ld)",
                                   strerror(errno),
                                   (long)nwritten,
                                   (long)sdslen(server.aof_buf));

            if (ftruncate(server.aof_fd, server.aof_current_size) == -1) {
                redisLog(REDIS_WARNING, "Could not remove short write "
                         "from the append-only file.  Redis may refuse "
                         "to load the AOF the next time it starts.  "
                         "ftruncate: %s", strerror(errno));
            }
        }
        exit(1);
    }
    server.aof_current_size += nwritten;

    /* Re-use AOF buffer when it is small enough. The maximum comes from the
     * arena size of 4k minus some overhead (but is otherwise arbitrary). */
    /* If the aof buffer is small enough, reuse it; otherwise free it and
     * allocate a new empty one */
    if ((sdslen(server.aof_buf)+sdsavail(server.aof_buf)) < 4000) {
        sdsclear(server.aof_buf);
    } else {
        sdsfree(server.aof_buf);
        server.aof_buf = sdsempty();
    }

    /* Don't fsync if no-appendfsync-on-rewrite is set to yes and there are
     * children doing I/O in the background. */
    /* If fsync during rewrites is disabled and an AOF/RDB child is running,
     * return right away. The data has been written to the AOF file but has
     * not yet been flushed to disk. */
    if (server.aof_no_fsync_on_rewrite &&
        (server.aof_child_pid != -1 || server.rdb_child_pid != -1))
            return;

    /* Perform the fsync if needed. */
    if (server.aof_fsync == AOF_FSYNC_ALWAYS) { /* always fsync: do it right here */
        /* aof_fsync is defined as fdatasync() for Linux in order to avoid
         * flushing metadata. */
        aof_fsync(server.aof_fd); /* Let's try to get this data on the disk */
        server.aof_last_fsync = server.unixtime;
    } else if ((server.aof_fsync == AOF_FSYNC_EVERYSEC &&
                server.unixtime > server.aof_last_fsync)) {
        if (!sync_in_progress) aof_background_fsync(server.aof_fd); /* fsync in a background thread */
        server.aof_last_fsync = server.unixtime;
    }
}
Note the server.aof_fsync parameter in the code above; it selects the policy Redis uses to fsync the AOF file to disk. With AOF_FSYNC_ALWAYS the fsync is performed directly in the main process; with AOF_FSYNC_EVERYSEC it is handed off to a background thread, whose code lives in bio.c.
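For completeness, aof_background_fsync() in aof.c essentially just queues an fsync job for those background threads. A simplified sketch (treat the exact body as an approximation of the 2.8 source, not a verbatim quote):

    /* Queue an fsync of fd on a bio.c background thread so the main
     * thread never blocks waiting for the disk. */
    void aof_background_fsync(int fd) {
        bioCreateBackgroundJob(REDIS_BIO_AOF_FSYNC,(void*)(long)fd,NULL,NULL);
    }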
Summary
At this point we have covered how the server loads the AOF file at startup, how new data produced by client requests is appended to the AOF file, and how, depending on the configured fsync policy, the newly appended data is flushed to disk either directly in the main process or from a background thread.
What remains of AOF persistence is how the AOF is rewritten, including handling the BGREWRITEAOF command sent by clients; that part is analyzed in the next post.
Many thanks to this blog post, which helped me enormously in understanding Redis AOF persistence: http://chenzhenianqing.cn/articles/786.html
My annotated Redis 2.8.2 source is on GitHub for anyone who wants it, and I will keep updating it: https://github.com/xkeyideal/annotated-redis-2.8.2
I am not very good with Git yet, so I would appreciate any pointers.
--------------------------------------------------------------------------------------------------------------------------------------------------------------
All source code quoted in this article is from Redis 2.8.2.
The implementation of Redis AOF persistence lives mainly in redis.c, redis.h, aof.c, bio.c, rio.c and config.c.
Before reading this article, please read the companion post on the AOF configuration parameters: http://blog.iyunv.com/acceptedxukai/article/details/18135219
Continuing from the previous article, this one describes how Redis implements the AOF rewrite.
When reposting, please credit the original article: http://blog.iyunv.com/acceptedxukai/article/details/18181563
What triggers an AOF rewrite
If Redis simply kept appending every database-modifying command to the AOF file, the file would grow without bound, because commands are stored verbatim and never consolidated. The simplest remedy is to rewrite the AOF once certain conditions are met: the rewrite walks the current in-memory database, writes its contents to a temporary AOF file, and replaces the old AOF file with it once the write is complete.
There are three ways an AOF rewrite can be triggered in Redis:
1. The server receives a BGREWRITEAOF command from a client. If no AOF rewrite or RDB save is currently running, the rewrite starts immediately; if an RDB save is running, the rewrite is scheduled to run once it finishes (if an AOF rewrite is already running, the command simply returns an error).
2. auto-aof-rewrite-percentage and auto-aof-rewrite-min-size are set in redis.conf, the current AOF size server.aof_current_size exceeds auto-aof-rewrite-min-size (server.aof_rewrite_min_size), and the growth of the AOF file exceeds auto-aof-rewrite-percentage (server.aof_rewrite_perc); the rewrite is then triggered automatically (see the example configuration below).
3. The user enables AOF with "config set appendonly yes"; startAppendOnly() then triggers a rewrite.
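An illustrative redis.conf fragment for the automatic trigger in item 2 (the values shown are the usual defaults, given here only as an example):

    auto-aof-rewrite-percentage 100   # rewrite once the file has grown by 100%
    auto-aof-rewrite-min-size 64mb    # ...but never while it is smaller than 64 MB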
Each of these three mechanisms is discussed below.
Handling the BGREWRITEAOF command
void bgrewriteaofCommand(redisClient *c) {
    /* An AOF rewrite is already in progress: just return an error */
    if (server.aof_child_pid != -1) {
        addReplyError(c,"Background append only file rewriting already in progress");
    } else if (server.rdb_child_pid != -1) {
        /* No AOF rewrite is running, but an RDB save is: mark the AOF rewrite
         * as scheduled so it runs once the RDB save finishes */
        server.aof_rewrite_scheduled = 1;
        addReplyStatus(c,"Background append only file rewriting scheduled");
    } else if (rewriteAppendOnlyFileBackground() == REDIS_OK) {
        /* Start the AOF rewrite right away */
        addReplyStatus(c,"Background append only file rewriting started");
    } else {
        addReply(c,shared.err);
    }
}
When an AOF rewrite request has been scheduled in this way, serverCron() takes care of it:
/* Start a scheduled AOF rewrite if this was requested by the user while
 * a BGSAVE was in progress. */
/* When the user runs BGREWRITEAOF while an RDB save is in progress,
 * server.aof_rewrite_scheduled is set to 1; the AOF rewrite is started here
 * once the RDB save has finished. */
if (server.rdb_child_pid == -1 && server.aof_child_pid == -1 &&
    server.aof_rewrite_scheduled)
{
    rewriteAppendOnlyFileBackground();
}
Automatic AOF rewrite by the server
serverCron() periodically checks whether a rewrite should be started:
/* Trigger an AOF rewrite if needed */
/* Rewrite the AOF file once the conditions are met */
if (server.rdb_child_pid == -1 &&
    server.aof_child_pid == -1 &&
    server.aof_rewrite_perc &&
    server.aof_current_size > server.aof_rewrite_min_size)
{
    long long base = server.aof_rewrite_base_size ?
                     server.aof_rewrite_base_size : 1;
    long long growth = (server.aof_current_size*100/base) - 100;
    if (growth >= server.aof_rewrite_perc) {
        redisLog(REDIS_NOTICE,"Starting automatic rewriting of AOF on %lld%% growth",growth);
        rewriteAppendOnlyFileBackground();
    }
}
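A quick worked example of the check above: if server.aof_rewrite_base_size is 64 MB and server.aof_current_size has grown to 130 MB, then growth = (130*100/64) - 100 = 103; with auto-aof-rewrite-percentage set to 100 this satisfies growth >= server.aof_rewrite_perc, so rewriteAppendOnlyFileBackground() is called.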
config set appendonly yes
When a client sends this command, configSetCommand() in config.c handles it, and startAppendOnly() performs the AOF rewrite:
if (!strcasecmp(c->argv[2]->ptr,"appendonly")) {
    int enable = yesnotoi(o->ptr);

    if (enable == -1) goto badfmt;
    if (enable == 0 && server.aof_state != REDIS_AOF_OFF) { /* appendonly no: turn AOF off */
        stopAppendOnly();
    } else if (enable && server.aof_state == REDIS_AOF_OFF) { /* appendonly yes: rewrite the AOF */
        if (startAppendOnly() == REDIS_ERR) {
            addReplyError(c,
                "Unable to turn on AOF. Check server logs.");
            return;
        }
    }
}
int startAppendOnly(void) {
    server.aof_last_fsync = server.unixtime;
    server.aof_fd = open(server.aof_filename,O_WRONLY|O_APPEND|O_CREAT,0644);
    redisAssert(server.aof_state == REDIS_AOF_OFF);
    if (server.aof_fd == -1) {
        redisLog(REDIS_WARNING,"Redis needs to enable the AOF but can't open the append only file: %s",strerror(errno));
        return REDIS_ERR;
    }
    if (rewriteAppendOnlyFileBackground() == REDIS_ERR) { /* trigger the rewrite */
        close(server.aof_fd);
        redisLog(REDIS_WARNING,"Redis needs to enable the AOF but can't trigger a background AOF rewrite operation. Check the above logs for more info about the error.");
        return REDIS_ERR;
    }
    /* We correctly switched on AOF, now wait for the rewrite to be complete
     * in order to append data on disk. */
    server.aof_state = REDIS_AOF_WAIT_REWRITE;
    return REDIS_OK;
}
How the AOF rewrite is implemented
As the analysis above shows, every trigger ends up in rewriteAppendOnlyFileBackground(), so let's look at that function. The code below shows that Redis forks a child process to perform the rewrite; the child calls rewriteAppendOnlyFile(), which writes the data to a temporary file named temp-rewriteaof-bg-%d.aof. When the child is done it notifies the parent by calling exit(0); serverCron() collects the child's exit status with wait3() and then performs the remaining clean-up work for the rewrite, which is analyzed further below.
The parent's work mainly consists of clearing the server.aof_rewrite_scheduled flag, recording the child's PID (server.aof_child_pid = childpid), and recording the rewrite start time (server.aof_rewrite_time_start = time(NULL)), among other bookkeeping.
int rewriteAppendOnlyFileBackground(void) {
    pid_t childpid;
    long long start;

    /* A background rewrite is already in progress */
    if (server.aof_child_pid != -1) return REDIS_ERR;
    start = ustime();
    if ((childpid = fork()) == 0) {
        char tmpfile[256];

        /* Child */
        closeListeningSockets(0);
        redisSetProcTitle("redis-aof-rewrite");
        snprintf(tmpfile,256,"temp-rewriteaof-bg-%d.aof", (int) getpid());
        if (rewriteAppendOnlyFile(tmpfile) == REDIS_OK) {
            size_t private_dirty = zmalloc_get_private_dirty();

            if (private_dirty) {
                redisLog(REDIS_NOTICE,
                    "AOF rewrite: %zu MB of memory used by copy-on-write",
                    private_dirty/(1024*1024));
            }
            exitFromChild(0);
        } else {
            exitFromChild(1);
        }
    } else {
        /* Parent */
        server.stat_fork_time = ustime()-start;
        if (childpid == -1) {
            redisLog(REDIS_WARNING,
                "Can't rewrite append only file in background: fork: %s",
                strerror(errno));
            return REDIS_ERR;
        }
        redisLog(REDIS_NOTICE,
            "Background append only file rewriting started by pid %d",childpid);
        server.aof_rewrite_scheduled = 0;
        server.aof_rewrite_time_start = time(NULL);
        server.aof_child_pid = childpid;
        updateDictResizePolicy();
        /* We set appendseldb to -1 in order to force the next call to the
         * feedAppendOnlyFile() to issue a SELECT command, so the differences
         * accumulated by the parent into server.aof_rewrite_buf will start
         * with a SELECT statement and it will be safe to merge. */
        server.aof_selected_db = -1;
        replicationScriptCacheFlush();
        return REDIS_OK;
    }
    return REDIS_OK; /* unreached */
}
Next comes rewriteAppendOnlyFile() itself. Its job is to iterate over all databases, writing every entry to a temporary file named temp-rewriteaof-%d.aof using the fairly simple write helpers defined in rio.c, then flush the data to disk and rename the file to the temporary name supplied by its caller. Read the code carefully: at this point the file has not yet been renamed to the real AOF file name.
While writing, if server.aof_rewrite_incremental_fsync is set, rioWrite() fsyncs the data written with fwrite to disk incrementally, a chunk at a time, rather than leaving everything for one large flush at the end.
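This behavior maps to a redis.conf option; assuming the 2.8 option name aof-rewrite-incremental-fsync (shown here only as an illustration), the setting looks like:

    # Fsync the rewrite output incrementally (every 32 MB, i.e.
    # REDIS_AOF_AUTOSYNC_BYTES) instead of in one large burst at the end.
    aof-rewrite-incremental-fsync yes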
int rewriteAppendOnlyFile(char *filename) {
    dictIterator *di = NULL;
    dictEntry *de;
    rio aof;
    FILE *fp;
    char tmpfile[256];
    int j;
    long long now = mstime();

    /* Note that we have to use a different temp name here compared to the
     * one used by rewriteAppendOnlyFileBackground() function. */
    snprintf(tmpfile,256,"temp-rewriteaof-%d.aof", (int) getpid());
    fp = fopen(tmpfile,"w");
    if (!fp) {
        redisLog(REDIS_WARNING, "Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s", strerror(errno));
        return REDIS_ERR;
    }

    rioInitWithFile(&aof,fp); /* initialize the read/write helpers, rio.c */
    /* Sets r->io.file.autosync = bytes; flush to disk every 32 MB */
    if (server.aof_rewrite_incremental_fsync)
        rioSetAutoSync(&aof,REDIS_AOF_AUTOSYNC_BYTES);
    for (j = 0; j < server.dbnum; j++) { /* iterate over every database */
        char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
        redisDb *db = server.db+j;
        dict *d = db->dict;
        if (dictSize(d) == 0) continue;
        di = dictGetSafeIterator(d);
        if (!di) {
            fclose(fp);
            return REDIS_ERR;
        }

        /* SELECT the new DB */
        if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
        if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;

        /* Iterate this DB writing every entry */
        while((de = dictNext(di)) != NULL) {
            sds keystr;
            robj key, *o;
            long long expiretime;

            keystr = dictGetKey(de);
            o = dictGetVal(de);
            initStaticStringObject(key,keystr);

            expiretime = getExpire(db,&key);

            /* If this key is already expired skip it */
            if (expiretime != -1 && expiretime < now) continue;

            /* Save the key and associated value */
            if (o->type == REDIS_STRING) {
                /* Emit a SET command */
                char cmd[]="*3\r\n$3\r\nSET\r\n";
                if (rioWrite(&aof,cmd,sizeof(cmd)-1) == 0) goto werr;
                /* Key and value */
                if (rioWriteBulkObject(&aof,&key) == 0) goto werr;
                if (rioWriteBulkObject(&aof,o) == 0) goto werr;
            } else if (o->type == REDIS_LIST) {
                if (rewriteListObject(&aof,&key,o) == 0) goto werr;
            } else if (o->type == REDIS_SET) {
                if (rewriteSetObject(&aof,&key,o) == 0) goto werr;
            } else if (o->type == REDIS_ZSET) {
                if (rewriteSortedSetObject(&aof,&key,o) == 0) goto werr;
            } else if (o->type == REDIS_HASH) {
                if (rewriteHashObject(&aof,&key,o) == 0) goto werr;
            } else {
                redisPanic("Unknown object type");
            }
            /* Save the expire time */
            if (expiretime != -1) {
                char cmd[]="*3\r\n$9\r\nPEXPIREAT\r\n";
                if (rioWrite(&aof,cmd,sizeof(cmd)-1) == 0) goto werr;
                if (rioWriteBulkObject(&aof,&key) == 0) goto werr;
                if (rioWriteBulkLongLong(&aof,expiretime) == 0) goto werr;
            }
        }
        dictReleaseIterator(di);
    }

    /* Make sure data will not remain on the OS's output buffers */
    fflush(fp);
    aof_fsync(fileno(fp)); /* flush the temp file to disk */
    fclose(fp);

    /* Use RENAME to make sure the DB file is changed atomically only
     * if the generate DB file is ok. */
    /* Rename the file; note that the name after this rename is still a
     * temporary one, chosen by the caller */
    if (rename(tmpfile,filename) == -1) {
        redisLog(REDIS_WARNING,"Error moving temp append only file on the final destination: %s", strerror(errno));
        unlink(tmpfile);
        return REDIS_ERR;
    }
    redisLog(REDIS_NOTICE,"SYNC append only file rewrite performed");
    return REDIS_OK;

werr:
    fclose(fp);
    unlink(tmpfile);
    redisLog(REDIS_WARNING,"Write error writing append only file on disk: %s", strerror(errno));
    if (di) dictReleaseIterator(di);
    return REDIS_ERR;
}
At this point the rewrite work is only half done. The previous article showed that as long as server.aof_state != REDIS_AOF_OFF, data modified by client commands keeps being fed into the AOF through feedAppendOnlyFile(); since the AOF is being rewritten at the same time, the difference that accumulates during the rewrite has to be dealt with. Recall this fragment of feedAppendOnlyFile():
if (server.aof_child_pid != -1)
    aofRewriteBufferAppend((unsigned char*)buf,sdslen(buf));
So while an AOF rewrite is in progress, the strings for data-modifying commands are also stored in the server.aof_rewrite_buf_blocks list, to be processed after the rewrite child exits; the code that handles this lives in serverCron(). (I should point out that I am not familiar with wait3(), so the comments about it below may be slightly off.)
/* Check if a background saving or AOF rewrite in progress terminated. */
/* If an RDB bgsave or AOF rewrite child has finished, fetch its exit status
 * and perform the follow-up work */
if (server.rdb_child_pid != -1 || server.aof_child_pid != -1) {
    int statloc;
    pid_t pid;

    if ((pid = wait3(&statloc,WNOHANG,NULL)) != 0) {
        int exitcode = WEXITSTATUS(statloc); /* exit status of the child */
        int bysignal = 0;

        if (WIFSIGNALED(statloc)) bysignal = WTERMSIG(statloc);

        if (pid == server.rdb_child_pid) {
            backgroundSaveDoneHandler(exitcode,bysignal);
        } else if (pid == server.aof_child_pid) {
            backgroundRewriteDoneHandler(exitcode,bysignal);
        } else {
            redisLog(REDIS_WARNING,
                "Warning, detected child with unmatched pid: %ld",
                (long)pid);
        }
        /* Once BGSAVE and BGREWRITEAOF have both finished, rehashing may resume */
        updateDictResizePolicy();
    }
}
For the difference data that accumulates during the rewrite, the server uses backgroundRewriteDoneHandler() to append the contents of the server.aof_rewrite_buf_blocks list to the new AOF file.
backgroundRewriteDoneHandler() proceeds in these steps:
1. Check the child's exit status. A successful run ends with exit(0), so exitcode is 0 and bysignal (taken from WTERMSIG when the child was killed by a signal) is 0; only in that case does the handler do the real work.
2. From the analysis of rewriteAppendOnlyFileBackground() we know the rewritten temporary AOF file is named temp-rewriteaof-bg-%d.aof (%d = server.aof_child_pid); open this temporary file.
3. Call aofRewriteBufferWrite() to write the accumulated diff in server.aof_rewrite_buf_blocks to that temporary file.
4. If the old AOF file is not currently open (AOF disabled), open it and keep its descriptor in the temporary variable oldfd.
5. Rename the temporary AOF file to the real AOF file name.
6. If the old AOF file was not open, simply close the new file; server.aof_rewrite_buf_blocks should be empty in that case. If the old AOF was open, point server.aof_fd at newfd and flush the data to disk according to the configured fsync policy.
7. Call aofUpdateCurrentSize() to recompute the AOF file size and update server.aof_rewrite_base_size, which the automatic-rewrite check in serverCron() depends on.
8. If the state was REDIS_AOF_WAIT_REWRITE, set server.aof_state to REDIS_AOF_ON. Only "config set appendonly yes" sets that state, since AOF must be switched on as soon as the snapshot has been written, whereas BGREWRITEAOF does not need to turn AOF on.
9. Have a background thread close the old AOF file.
The annotated code of backgroundRewriteDoneHandler() follows.
/* A background append only file rewriting (BGREWRITEAOF) terminated its work.
 * Handle this. */
void backgroundRewriteDoneHandler(int exitcode, int bysignal) {
    if (!bysignal && exitcode == 0) { /* the child exited successfully */
        int newfd, oldfd;
        char tmpfile[256];
        long long now = ustime();

        redisLog(REDIS_NOTICE,
            "Background AOF rewrite terminated with success");

        /* Flush the differences accumulated by the parent to the
         * rewritten AOF. */
        snprintf(tmpfile,256,"temp-rewriteaof-bg-%d.aof",
            (int)server.aof_child_pid);
        newfd = open(tmpfile,O_WRONLY|O_APPEND);
        if (newfd == -1) {
            redisLog(REDIS_WARNING,
                "Unable to open the temporary AOF produced by the child: %s", strerror(errno));
            goto cleanup;
        }
        /* Write the diff accumulated in server.aof_rewrite_buf_blocks */
        if (aofRewriteBufferWrite(newfd) == -1) {
            redisLog(REDIS_WARNING,
                "Error trying to flush the parent diff to the rewritten AOF: %s", strerror(errno));
            close(newfd);
            goto cleanup;
        }

        redisLog(REDIS_NOTICE,
            "Parent diff successfully flushed to the rewritten AOF (%lu bytes)", aofRewriteBufferSize());

        /* The only remaining thing to do is to rename the temporary file to
         * the configured file and switch the file descriptor used to do AOF
         * writes. We don't want close(2) or rename(2) calls to block the
         * server on old file deletion.
         *
         * There are two possible scenarios:
         *
         * 1) AOF is DISABLED and this was a one time rewrite. The temporary
         * file will be renamed to the configured file. When this file already
         * exists, it will be unlinked, which may block the server.
         *
         * 2) AOF is ENABLED and the rewritten AOF will immediately start
         * receiving writes. After the temporary file is renamed to the
         * configured file, the original AOF file descriptor will be closed.
         * Since this will be the last reference to that file, closing it
         * causes the underlying file to be unlinked, which may block the
         * server.
         *
         * To mitigate the blocking effect of the unlink operation (either
         * caused by rename(2) in scenario 1, or by close(2) in scenario 2), we
         * use a background thread to take care of this. First, we
         * make scenario 1 identical to scenario 2 by opening the target file
         * when it exists. The unlink operation after the rename(2) will then
         * be executed upon calling close(2) for its descriptor. Everything to
         * guarantee atomicity for this switch has already happened by then, so
         * we don't care what the outcome or duration of that close operation
         * is, as long as the file descriptor is released again. */
        if (server.aof_fd == -1) {
            /* AOF disabled */

            /* Don't care if this fails: oldfd will be -1 and we handle that.
             * One notable case of -1 return is if the old file does
             * not exist. */
            oldfd = open(server.aof_filename,O_RDONLY|O_NONBLOCK);
        } else {
            /* AOF enabled */
            oldfd = -1; /* We'll set this to the current AOF filedes later. */
        }

        /* Rename the temporary file. This will not unlink the target file if
         * it exists, because we reference it with "oldfd". */
        /* Rename the temporary file to the real AOF file name. Because oldfd
         * may still reference the previous file, no unlink happens here; the
         * old file is removed by the kernel only when its last descriptor is
         * closed. */
        if (rename(tmpfile,server.aof_filename) == -1) {
            redisLog(REDIS_WARNING,
                "Error trying to rename the temporary AOF file: %s", strerror(errno));
            close(newfd);
            if (oldfd != -1) close(oldfd);
            goto cleanup;
        }
        /* If AOF is disabled we only have to deal with the new file: just
         * close it. Could this block the server? newfd should be the last
         * descriptor of the temporary file, but no: this function will not
         * write to it here, because stopAppendOnly() empties the
         * aof_rewrite_buf_blocks list. */
        if (server.aof_fd == -1) {
            /* AOF disabled, we don't need to set the AOF file descriptor
             * to this new file, so we can close it. */
            close(newfd);
        } else {
            /* AOF enabled, replace the old fd with the new one. */
            oldfd = server.aof_fd;
            /* Point to the new fd; thanks to the rename above this descriptor
             * now refers to the file carrying the normal AOF name */
            server.aof_fd = newfd;
            /* fsync to disk according to the configured policy */
            if (server.aof_fsync == AOF_FSYNC_ALWAYS)
                aof_fsync(newfd);
            else if (server.aof_fsync == AOF_FSYNC_EVERYSEC)
                aof_background_fsync(newfd);
            server.aof_selected_db = -1; /* Make sure SELECT is re-issued */
            aofUpdateCurrentSize();
            server.aof_rewrite_base_size = server.aof_current_size;

            /* Clear regular AOF buffer since its contents was just written to
             * the new AOF from the background rewrite buffer. */
            /* The rewrite produced the newest data, so whatever is left in
             * aof_buf is no longer meaningful: just discard it */
            sdsfree(server.aof_buf);
            server.aof_buf = sdsempty();
        }

        server.aof_lastbgrewrite_status = REDIS_OK;

        redisLog(REDIS_NOTICE, "Background AOF rewrite finished successfully");
        /* Change state from WAIT_REWRITE to ON if needed */
        /* Decide whether AOF needs to be switched on; bgrewriteaofCommand,
         * for example, does not need to turn AOF on. */
        if (server.aof_state == REDIS_AOF_WAIT_REWRITE)
            server.aof_state = REDIS_AOF_ON;

        /* Asynchronously close the overwritten AOF. */
        /* Let a background thread close the old AOF fd; closing it is enough,
         * the unlink follows automatically because of the rename above */
        if (oldfd != -1) bioCreateBackgroundJob(REDIS_BIO_CLOSE_FILE,(void*)(long)oldfd,NULL,NULL);

        redisLog(REDIS_VERBOSE,
            "Background AOF rewrite signal handler took %lldus", ustime()-now);
    } else if (!bysignal && exitcode != 0) {
        server.aof_lastbgrewrite_status = REDIS_ERR;

        redisLog(REDIS_WARNING,
            "Background AOF rewrite terminated with error");
    } else {
        server.aof_lastbgrewrite_status = REDIS_ERR;

        redisLog(REDIS_WARNING,
            "Background AOF rewrite terminated by signal %d", bysignal);
    }

cleanup:
    aofRewriteBufferReset();
    aofRemoveTempFile(server.aof_child_pid);
    server.aof_child_pid = -1;
    server.aof_rewrite_time_last = time(NULL)-server.aof_rewrite_time_start;
    server.aof_rewrite_time_start = -1;
    /* Schedule a new rewrite if we are waiting for it to switch the AOF ON. */
    if (server.aof_state == REDIS_AOF_WAIT_REWRITE)
        server.aof_rewrite_scheduled = 1;
}
That concludes AOF persistence; what remains are details and a deeper understanding of Linux library functions such as rename, unlink and wait3, for which Google is your friend.
Summary
Over three articles we have analyzed the implementation of Redis AOF persistence in some detail, though only from the source-code point of view. There are many discussions online of the strengths and weaknesses of AOF persistence, and the official Redis site has an English introduction as well. Redis also has a second persistence mechanism, RDB; more on RDB next time.
Again, many thanks to http://chenzhenianqing.cn/articles/786.html, whose analysis of AOF is extremely detailed and helped me greatly.