First, the example source:
#include <stdio.h>
#include <uv.h>
uv_loop_t *loop;
uv_fs_t stdin_watcher;
uv_idle_t idler;
char buffer[1024];
void crunch_away(uv_idle_t* handle) {
// Compute extra-terrestrial life
// fold proteins
// compute another digit of PI
// or similar
fprintf(stderr, "Computing PI...\n");
// just to avoid overwhelming your terminal emulator
uv_idle_stop(handle);
}
void on_type(uv_fs_t *req) {
if (stdin_watcher.result > 0) {
buffer[stdin_watcher.result] = '\0';
printf("Typed %s\n", buffer);
uv_buf_t buf = uv_buf_init(buffer, 1024);
uv_fs_read(loop, &stdin_watcher, 0, &buf, 1, -1, on_type);
uv_idle_start(&idler, crunch_away);
}
else if (stdin_watcher.result < 0) {
fprintf(stderr, "error opening file: %s\n", uv_strerror(req->result));
}
}
int main() {
loop = uv_default_loop();
uv_idle_init(loop, &idler);
uv_buf_t buf = uv_buf_init(buffer, 1024);
uv_fs_read(loop, &stdin_watcher, 0, &buf, 1, -1, on_type);
uv_idle_start(&idler, crunch_away);
return uv_run(loop, UV_RUN_DEFAULT);
}
What it does is simple:
1. Initialize the loop
2. Initialize an idle handle
3. Initialize the read buffer
4. Initialize an fs_read request (uv_fs_t), binding the target fd (0, i.e. stdin) and the callback
5. Start the idle handle created above
6. Run the loop; a pool thread blocks reading the fd, and once data arrives the callback runs on the loop thread
typedef struct uv_buf_t {
char* base;
size_t len;
} uv_buf_t;
uv_buf_t uv_buf_init(char* base, unsigned int len) {
uv_buf_t buf;
buf.base = base;
buf.len = len;
return buf;
}
uv_buf_init initializes a read-buffer descriptor, binding it to a previously allocated region of memory.
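A minimal usage sketch (the heap allocation here is illustrative, not taken from the example above):
char *mem = malloc(64 * 1024); /* caller owns this memory */
uv_buf_t buf = uv_buf_init(mem, 64 * 1024);
/* uv_buf_init copies only the pointer and length; it never
 * allocates or copies the data itself. */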
#define UV_REQ_FIELDS \
/* public */ \
void* data; \
/* read-only */ \
uv_req_type type; \
/* private */ \
void* reserved[6]; \
UV_REQ_PRIVATE_FIELDS \
/* Abstract base class of all requests. */
struct uv_req_s {
UV_REQ_FIELDS
};
#define UV_REQ_PRIVATE_FIELDS /* empty */
#define UV_FS_PRIVATE_FIELDS \
const char *new_path; \
uv_file file; \
int flags; \
mode_t mode; \
unsigned int nbufs; \
uv_buf_t* bufs; \
off_t off; \
uv_uid_t uid; \
uv_gid_t gid; \
double atime; \
double mtime; \
struct uv__work work_req; \
uv_buf_t bufsml[4]; \
struct uv__work {
void (*work)(struct uv__work *w);
void (*done)(struct uv__work *w, int status);
struct uv_loop_s* loop;
void* wq[2];
};
struct uv_fs_s {
UV_REQ_FIELDS
uv_fs_type fs_type;
uv_loop_t* loop;
uv_fs_cb cb;
ssize_t result;
void* ptr;
const char* path;
uv_stat_t statbuf; /* Stores the result of uv_fs_stat() and uv_fs_fstat(). */
UV_FS_PRIVATE_FIELDS
};
Take a look at the fields of uv_fs_t; a few deserve particular attention:
type: the request type
cb: the user callback
result: the operation's result (byte count, or a negative error code)
work_req: the work item that goes onto the thread pool's work queue
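work_req matters because the thread pool only ever sees a struct uv__work; libuv recovers the enclosing uv_fs_t from it with the classic container_of trick. A sketch of the macro as it appears in libuv's internal headers:
#define container_of(ptr, type, member) \
  ((type *) ((char *) (ptr) - offsetof(type, member)))
/* Given a pointer w to the embedded work_req member, subtracting the
 * member's offset yields the enclosing request: */
uv_fs_t* req = container_of(w, uv_fs_t, work_req);
We will see exactly this call inside uv__fs_work below.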
#define INIT(subtype) \
do { \
if (req == NULL) \
return UV_EINVAL; \
UV_REQ_INIT(req, UV_FS); \
req->fs_type = UV_FS_ ## subtype; \
req->result = 0; \
req->ptr = NULL; \
req->loop = loop; \
req->path = NULL; \
req->new_path = NULL; \
req->bufs = NULL; \
req->cb = cb; \
} \
while (0)
int uv_fs_read(uv_loop_t* loop, uv_fs_t* req,
uv_file file,
const uv_buf_t bufs[],
unsigned int nbufs,
int64_t off,
uv_fs_cb cb) {
INIT(READ);
if (bufs == NULL || nbufs == 0)
return UV_EINVAL;
req->file = file;
req->nbufs = nbufs;
req->bufs = req->bufsml;
if (nbufs > ARRAY_SIZE(req->bufsml))
req->bufs = uv__malloc(nbufs * sizeof(*bufs));
if (req->bufs == NULL)
return UV_ENOMEM;
memcpy(req->bufs, bufs, nbufs * sizeof(*bufs));
req->off = off;
POST;
}
uv_fs_read does three things: INIT binds the callback and sets the request type, the uv_buf_t descriptors (not the data they point to) are copied into the request, and POST submits the work item.
The interesting part is POST:
#define POST \
do { \
if (cb != NULL) { \
uv__req_register(loop, req); \
uv__work_submit(loop, \
&req->work_req, \
UV__WORK_FAST_IO, \
uv__fs_work, \
uv__fs_done); \
return 0; \
} \
else { \
uv__fs_work(&req->work_req); \
return req->result; \
} \
} \
while (0)
First the no-callback case: without a cb the request is treated as synchronous, so it is executed directly on the calling thread and the result returned immediately.
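A usage sketch of that synchronous form (fd assumed already open; uv_fs_req_cleanup must still be called to release request resources):
uv_fs_t req;
uv_buf_t b = uv_buf_init(buffer, sizeof(buffer));
/* NULL callback: uv__fs_work runs on this thread and the call
 * returns req.result directly (bytes read, or a negative error). */
int n = uv_fs_read(loop, &req, fd, &b, 1, -1, NULL);
uv_fs_req_cleanup(&req);
Either way, the real work happens in uv__fs_work: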
static void uv__fs_work(struct uv__work* w) {
int retry_on_eintr;
uv_fs_t* req;
ssize_t r;
req = container_of(w, uv_fs_t, work_req);
retry_on_eintr = !(req->fs_type == UV_FS_CLOSE ||
req->fs_type == UV_FS_READ);
do {
errno = 0;
#define X(type, action) \
case UV_FS_ ## type: \
r = action; \
break;
switch (req->fs_type) {
X(ACCESS, access(req->path, req->flags));
X(CHMOD, chmod(req->path, req->mode));
X(CHOWN, chown(req->path, req->uid, req->gid));
X(CLOSE, uv__fs_close(req->file));
X(COPYFILE, uv__fs_copyfile(req));
X(FCHMOD, fchmod(req->file, req->mode));
X(FCHOWN, fchown(req->file, req->uid, req->gid));
X(LCHOWN, lchown(req->path, req->uid, req->gid));
X(FDATASYNC, uv__fs_fdatasync(req));
X(FSTAT, uv__fs_fstat(req->file, &req->statbuf));
X(FSYNC, uv__fs_fsync(req));
X(FTRUNCATE, ftruncate(req->file, req->off));
X(FUTIME, uv__fs_futime(req));
X(LUTIME, uv__fs_lutime(req));
X(LSTAT, uv__fs_lstat(req->path, &req->statbuf));
X(LINK, link(req->path, req->new_path));
X(MKDIR, mkdir(req->path, req->mode));
X(MKDTEMP, uv__fs_mkdtemp(req));
X(MKSTEMP, uv__fs_mkstemp(req));
X(OPEN, uv__fs_open(req));
X(READ, uv__fs_read(req));
X(SCANDIR, uv__fs_scandir(req));
X(OPENDIR, uv__fs_opendir(req));
X(READDIR, uv__fs_readdir(req));
X(CLOSEDIR, uv__fs_closedir(req));
X(READLINK, uv__fs_readlink(req));
X(REALPATH, uv__fs_realpath(req));
X(RENAME, rename(req->path, req->new_path));
X(RMDIR, rmdir(req->path));
X(SENDFILE, uv__fs_sendfile(req));
X(STAT, uv__fs_stat(req->path, &req->statbuf));
X(STATFS, uv__fs_statfs(req));
X(SYMLINK, symlink(req->path, req->new_path));
X(UNLINK, unlink(req->path));
X(UTIME, uv__fs_utime(req));
X(WRITE, uv__fs_write_all(req));
default: abort();
}
#undef X
} while (r == -1 && errno == EINTR && retry_on_eintr);
if (r == -1)
req->result = UV__ERR(errno);
else
req->result = r;
if (r == 0 && (req->fs_type == UV_FS_STAT ||
req->fs_type == UV_FS_FSTAT ||
req->fs_type == UV_FS_LSTAT)) {
req->ptr = &req->statbuf;
}
}
static ssize_t uv__fs_read(uv_fs_t* req) {
#if defined(__linux__)
static int no_preadv;
#endif
unsigned int iovmax;
ssize_t result;
iovmax = uv__getiovmax();
if (req->nbufs > iovmax)
req->nbufs = iovmax;
if (req->off < 0) {
if (req->nbufs == 1)
result = read(req->file, req->bufs[0].base, req->bufs[0].len);
else
result = readv(req->file, (struct iovec*) req->bufs, req->nbufs);
} else {
if (req->nbufs == 1) {
result = pread(req->file, req->bufs[0].base, req->bufs[0].len, req->off);
goto done;
}
#if HAVE_PREADV
result = preadv(req->file, (struct iovec*) req->bufs, req->nbufs, req->off);
#else
# if defined(__linux__)
if (uv__load_relaxed(&no_preadv)) retry:
# endif
{
result = uv__fs_preadv(req->file, req->bufs, req->nbufs, req->off);
}
# if defined(__linux__)
else {
result = uv__preadv(req->file,
(struct iovec*)req->bufs,
req->nbufs,
req->off);
if (result == -1 && errno == ENOSYS) {
uv__store_relaxed(&no_preadv, 1);
goto retry;
}
}
# endif
#endif
}
done:
/* Early cleanup of bufs allocation, since we're done with it. */
if (req->bufs != req->bufsml)
uv__free(req->bufs);
req->bufs = NULL;
req->nbufs = 0;
#ifdef __PASE__
/* PASE returns EOPNOTSUPP when reading a directory, convert to EISDIR */
if (result == -1 && errno == EOPNOTSUPP) {
struct stat buf;
ssize_t rc;
rc = fstat(req->file, &buf);
if (rc == 0 && S_ISDIR(buf.st_mode)) {
errno = EISDIR;
}
}
#endif
return result;
}
The work function uv__fs_work is simple: it dispatches on the operation type and calls the corresponding implementation. uv__fs_read picks the appropriate read syscall for the platform (read/readv/pread/preadv); you can think of it as simply calling read() on the fd. The number of bytes read (or a negative error code) ends up in the result field.
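For reference, the multi-buffer, no-offset case reduces to a plain readv(2); a sketch with two hypothetical buffers buf1 and buf2:
#include <sys/uio.h>
struct iovec iov[2] = {
  { .iov_base = buf1, .iov_len = sizeof(buf1) },
  { .iov_base = buf2, .iov_len = sizeof(buf2) },
};
/* Scatter read: the kernel fills iov[0] first, then iov[1]. */
ssize_t n = readv(fd, iov, 2);
Back on the submission side, the callback path registers the request and hands it to the thread pool: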
#define uv__req_register(loop, req) \
do { \
(loop)->active_reqs.count++; \
} \
while (0)
void uv__work_submit(uv_loop_t* loop,
struct uv__work* w,
enum uv__work_kind kind,
void (*work)(struct uv__work* w),
void (*done)(struct uv__work* w, int status)) {
uv_once(&once, init_once);
w->loop = loop;
w->work = work;
w->done = done;
post(&w->wq, kind);
}
With a cb present, the task has to be placed on the pending work queue, processed by a thread-pool worker, and once done the main loop is notified so that done can invoke the user callback.
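Note that w->wq, declared as void* wq[2], is a node of libuv's intrusive QUEUE: a circular doubly-linked list made of just two pointers embedded in the struct. The core macros from libuv's queue.h look roughly like this:
typedef void *QUEUE[2];
#define QUEUE_NEXT(q)  (*(QUEUE **) &((*(q))[0]))
#define QUEUE_PREV(q)  (*(QUEUE **) &((*(q))[1]))
/* An empty queue is a node that points at itself. */
#define QUEUE_EMPTY(q) \
  ((const QUEUE *) (q) == (const QUEUE *) QUEUE_NEXT(q))
uv_once guarantees init_once runs exactly once, lazily creating the pool: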
static void init_once(void) {
#ifndef _WIN32
/* Re-initialize the threadpool after fork.
* Note that this discards the global mutex and condition as well
* as the work queue.
*/
if (pthread_atfork(NULL, NULL, &reset_once))
abort();
#endif
init_threads();
}
static void init_threads(void) {
unsigned int i;
const char* val;
uv_sem_t sem;
nthreads = ARRAY_SIZE(default_threads);
val = getenv("UV_THREADPOOL_SIZE");
if (val != NULL)
nthreads = atoi(val);
if (nthreads == 0)
nthreads = 1;
if (nthreads > MAX_THREADPOOL_SIZE)
nthreads = MAX_THREADPOOL_SIZE;
threads = default_threads;
if (nthreads > ARRAY_SIZE(default_threads)) {
threads = uv__malloc(nthreads * sizeof(threads[0]));
if (threads == NULL) {
nthreads = ARRAY_SIZE(default_threads);
threads = default_threads;
}
}
if (uv_cond_init(&cond))
abort();
if (uv_mutex_init(&mutex))
abort();
QUEUE_INIT(&wq);
QUEUE_INIT(&slow_io_pending_wq);
QUEUE_INIT(&run_slow_work_message);
if (uv_sem_init(&sem, 0))
abort();
for (i = 0; i < nthreads; i++)
if (uv_thread_create(threads + i, worker, &sem))
abort();
for (i = 0; i < nthreads; i++)
uv_sem_wait(&sem);
uv_sem_destroy(&sem);
}
init_threads creates the configured number of worker threads (we'll look at the worker function shortly; for now just know each worker blocks waiting for tasks to show up on the wq pending queue).
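A practical aside: the pool size is read from UV_THREADPOOL_SIZE exactly once, on first use, so it has to be set before the first request is submitted. A sketch (the default is 4 threads, capped at MAX_THREADPOOL_SIZE):
/* Must run before the first thread-pool request in the process
 * (e.g. a uv_fs_* call with a callback), because init_threads
 * only ever runs once. */
setenv("UV_THREADPOOL_SIZE", "8", 1);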
First, how post submits a work item to wq:
static void post(QUEUE* q, enum uv__work_kind kind) {
uv_mutex_lock(&mutex);
if (kind == UV__WORK_SLOW_IO) {
/* Insert into a separate queue. */
QUEUE_INSERT_TAIL(&slow_io_pending_wq, q);
if (!QUEUE_EMPTY(&run_slow_work_message)) {
/* Running slow I/O tasks is already scheduled => Nothing to do here.
The worker that runs said other task will schedule this one as well. */
uv_mutex_unlock(&mutex);
return;
}
q = &run_slow_work_message;
}
QUEUE_INSERT_TAIL(&wq, q);
if (idle_threads > 0)
uv_cond_signal(&cond);
uv_mutex_unlock(&mutex);
}
So this is where the pending task q is inserted into wq, and uv_cond_signal wakes one worker blocked on the condition variable to process it.
Note the slow-I/O handling: slow tasks go onto a separate queue (slow_io_pending_wq) so that fast (non-slow) I/O gets priority. This is implemented by inserting a single run_slow_work_message marker into wq instead of the slow tasks themselves.
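The threshold that gates slow I/O caps it at roughly half the pool; in libuv's threadpool.c it is computed like this:
static unsigned int slow_work_thread_threshold(void) {
  /* At most half the threads (rounded up) may run slow I/O at once. */
  return (nthreads + 1) / 2;
}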
Now let's see what the worker threads are doing:
static void worker(void* arg) {
struct uv__work* w;
QUEUE* q;
int is_slow_work;
// Post the semaphore: tells init_threads() this worker has started, so the main thread can finish waiting and destroy the semaphore.
uv_sem_post((uv_sem_t*) arg);
arg = NULL;
// Mutex paired with uv_cond_wait: uv_cond_wait atomically unlocks it, blocks until signaled, then re-locks it, so each iteration observes a consistent queue state.
uv_mutex_lock(&mutex);
for (;;) {
/* `mutex` should always be locked at this point. */
/* Keep waiting while either no work is present or only slow I/O
and we're at the threshold for that. */
// Prefer fast I/O: keep waiting while wq is empty, or while it holds only the slow-I/O message and the number of running slow-I/O tasks has already reached the threshold (about half the pool).
while (QUEUE_EMPTY(&wq) ||
(QUEUE_HEAD(&wq) == &run_slow_work_message &&
QUEUE_NEXT(&run_slow_work_message) == &wq &&
slow_io_work_running >= slow_work_thread_threshold())) {
idle_threads += 1;
uv_cond_wait(&cond, &mutex);
idle_threads -= 1;
}
// Leaving the while loop means there is work to process.
// If it's the exit message, re-signal so other workers see it too, then quit.
q = QUEUE_HEAD(&wq);
if (q == &exit_message) {
uv_cond_signal(&cond);
uv_mutex_unlock(&mutex);
break;
}
QUEUE_REMOVE(q);
QUEUE_INIT(q); /* Signal uv_cancel() that the work req is executing. */
// Whether this iteration is running a slow-I/O task.
is_slow_work = 0;
if (q == &run_slow_work_message) {
/* If we're at the slow I/O threshold, re-schedule until after all
other work in the queue is done. */
if (slow_io_work_running >= slow_work_thread_threshold()) {
QUEUE_INSERT_TAIL(&wq, q);
continue;
}
/* If we encountered a request to run slow I/O work but there is none
to run, that means it's cancelled => Start over. */
if (QUEUE_EMPTY(&slow_io_pending_wq))
continue;
is_slow_work = 1;
slow_io_work_running++;
// Take one task off the slow-I/O pending queue.
q = QUEUE_HEAD(&slow_io_pending_wq);
QUEUE_REMOVE(q);
QUEUE_INIT(q);
/* If there is more slow I/O work, schedule it to be run as well. */
// If more slow I/O is pending, re-insert the slow-I/O message at the tail of wq so queued fast I/O still runs first; the next worker re-checks the threshold itself.
if (!QUEUE_EMPTY(&slow_io_pending_wq)) {
QUEUE_INSERT_TAIL(&wq, &run_slow_work_message);
if (idle_threads > 0)
uv_cond_signal(&cond);
}
}
uv_mutex_unlock(&mutex);
// Run the work function; in our example this is the blocking read of the fd.
w = QUEUE_DATA(q, struct uv__work, wq);
w->work(w);
uv_mutex_lock(&w->loop->wq_mutex);
w->work = NULL; /* Signal uv_cancel() that the work req is done
executing. */
// Insert into the loop's wq, the completed-work queue.
QUEUE_INSERT_TAIL(&w->loop->wq, &w->wq);
// Notify the loop's async I/O watcher that a task has finished and its done callback can run.
uv_async_send(&w->loop->wq_async);
uv_mutex_unlock(&w->loop->wq_mutex);
/* Lock `mutex` since that is expected at the start of the next
* iteration. */
uv_mutex_lock(&mutex);
if (is_slow_work) {
/* `slow_io_work_running` is protected by `mutex`. */
slow_io_work_running--;
}
// Back to the top of the loop: process more queued work if any, otherwise wait.
}
}
Now look at uv_async_send:
int uv_async_send(uv_async_t* handle) {
/* Do a cheap read first. */
if (ACCESS_ONCE(int, handle->pending) != 0)
return 0;
// Set handle->pending from 0 to 1: mark it as having a pending wake-up.
/* Tell the other thread we're busy with the handle. */
if (cmpxchgi(&handle->pending, 0, 1) != 0)
return 0;
/* Wake up the other thread's event loop. */
uv__async_send(handle->loop);
/* Tell the other thread we're done. */
if (cmpxchgi(&handle->pending, 1, 2) != 1)
abort();
return 0;
}
static void uv__async_send(uv_loop_t* loop) {
const void* buf;
ssize_t len;
int fd;
int r;
buf = "";
len = 1;
fd = loop->async_wfd;
#if defined(__linux__)
if (fd == -1) {
static const uint64_t val = 1;
buf = &val;
len = sizeof(val);
fd = loop->async_io_watcher.fd; /* eventfd */
}
#endif
do
r = write(fd, buf, len);
while (r == -1 && errno == EINTR);
if (r == len)
return;
if (r == -1)
if (errno == EAGAIN || errno == EWOULDBLOCK)
return;
abort();
}
uv__async_send makes the async fd readable, which wakes the event loop so the async I/O watcher can run uv__work_done.
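The wake-up primitive in miniature (a Linux sketch using eventfd; on platforms without it, a pipe plays the same role):
#include <sys/eventfd.h>
int efd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
uint64_t one = 1;
write(efd, &one, sizeof(one));  /* worker thread: makes efd readable */
uint64_t val;
read(efd, &val, sizeof(val));   /* loop thread: drains the counter */
When the watcher fires, uv__work_done drains the completed-work queue: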
void uv__work_done(uv_async_t* handle) {
struct uv__work* w;
uv_loop_t* loop;
QUEUE* q;
QUEUE wq;
int err;
loop = container_of(handle, uv_loop_t, wq_async);
uv_mutex_lock(&loop->wq_mutex);
QUEUE_MOVE(&loop->wq, &wq);
uv_mutex_unlock(&loop->wq_mutex);
while (!QUEUE_EMPTY(&wq)) {
q = QUEUE_HEAD(&wq);
QUEUE_REMOVE(q);
w = container_of(q, struct uv__work, wq);
err = (w->work == uv__cancelled) ? UV_ECANCELED : 0;
w->done(w, err);
}
}
uv__work_done synchronously moves every finished work item off the loop's completed wq and calls its done function, which runs the user callback. That completes the walk through how libuv performs asynchronous I/O via the thread pool.
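The same thread-pool path is exposed directly through the public API as uv_queue_work; a minimal sketch (the two callbacks are illustrative):
static void work_cb(uv_work_t* req) {
  /* Runs on a thread-pool worker, just like uv__fs_work above. */
}
static void after_cb(uv_work_t* req, int status) {
  /* Runs back on the loop thread, delivered via uv_async_send and
   * uv__work_done; status is UV_ECANCELED if the work was cancelled. */
}
/* On the loop thread: */
uv_work_t work;
uv_queue_work(uv_default_loop(), &work, work_cb, after_cb);
Returning to the example, the read-completion callback on_type (shown again below) re-arms the whole cycle: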
void on_type(uv_fs_t *req) {
if (stdin_watcher.result > 0) {
buffer[stdin_watcher.result] = '\0';
printf("Typed %s\n", buffer);
uv_buf_t buf = uv_buf_init(buffer, 1024);
uv_fs_read(loop, &stdin_watcher, 0, &buf, 1, -1, on_type);
uv_idle_start(&idler, crunch_away);
}
else if (stdin_watcher.result < 0) {
fprintf(stderr, "error opening file: %s\n", uv_strerror(req->result));
}
}
After each completed read, on_type calls uv_fs_read and uv_idle_start again, repeating the whole process (submit task, process task, ...) until the read returns 0 (EOF) or an error.