Startup flow of the Net module:
main.cc
main()--->UnixNetProcessor::start()
Startup of the Net module:
int
UnixNetProcessor::start(int, size_t)
{
EventType etype = ET_NET;
//allocate space for the NetHandler instance
netHandler_offset = eventProcessor.allocate(sizeof(NetHandler));
//allocate space for the PollCont instance
pollCont_offset = eventProcessor.allocate(sizeof(PollCont));
//UnixNetProcessor's event type is ET_NET; for sslNetProcessor it is ET_SSL
upgradeEtype(etype);
//get the number of net threads from eventProcessor (this was set up when the event module was initialized)
n_netthreads = eventProcessor.n_threads_for_type[etype];
//get the net thread array from eventProcessor
netthreads = eventProcessor.eventthread[etype];
//initialize every net thread
for (int i = 0; i < n_netthreads; ++i) {
initialize_thread_for_net(netthreads[i]);
#ifndef STANDALONE_IOCORE
extern void initialize_thread_for_http_sessions(EThread *thread, int thread_index);
initialize_thread_for_http_sessions(netthreads[i], i);
#endif
}
RecData d;
d.rec_int = 0;
//set the throttle on the number of network connections
change_net_connections_throttle(NULL, RECD_INT, d, NULL);
//SOCKS-related; rarely used and not covered here
if (!netProcessor.socks_conf_stuff) {
socks_conf_stuff = NEW(new socks_conf_struct);
loadSocksConfiguration(socks_conf_stuff);
if (!socks_conf_stuff->socks_needed && socks_conf_stuff->accept_enabled) {
Warning("We can not have accept_enabled and socks_needed turned off" " disabling Socks accept\n");
socks_conf_stuff->accept_enabled = 0;
}
} else {
socks_conf_stuff = netProcessor.socks_conf_stuff;
}
//register the stats page that shows Net-related statistics
#ifdef NON_MODULAR
extern Action *register_ShowNet(Continuation * c, HTTPHdr * h);
if (etype == ET_NET)
statPagesManager.register_http("net", register_ShowNet);
#endif
return 1;
}
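A side note on the two allocate() calls above: eventProcessor.allocate() reserves space inside each EThread's per-thread storage, and the returned offsets are later turned back into per-thread pointers by get_NetHandler()/get_PollCont(); the NetHandler and PollCont objects are placement-new'ed into that space in initialize_thread_for_net() below. Roughly, the accessors look like the following sketch (simplified from P_UnixNet.h; the exact form varies across ATS versions):
static inline NetHandler *get_NetHandler(EThread *t) {
  // offset is relative to the start of the EThread object
  return (NetHandler *)((char *)t + unix_netProcessor.netHandler_offset);
}
static inline PollCont *get_PollCont(EThread *t) {
  return (PollCont *)((char *)t + unix_netProcessor.pollCont_offset);
}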
main()--->UnixNetProcessor::start()--->initialize_thread_for_net()
As the name suggests, this function initializes a thread for network I/O:
void
initialize_thread_for_net(EThread *thread)
{
//create the NetHandler and PollCont instances
//NetHandler: handles all Net-related events
//PollCont: a poll continuation (in the ATS design sense) holding pointers to the NetHandler and the PollDescriptor
//PollDescriptor: the wrapper structure describing the poll state
new((ink_dummy_for_new *) get_NetHandler(thread)) NetHandler();
new((ink_dummy_for_new *) get_PollCont(thread)) PollCont(thread->mutex, get_NetHandler(thread));
get_NetHandler(thread)->mutex = new_ProxyMutex();
PollCont *pc = get_PollCont(thread);
PollDescriptor *pd = pc->pollDescriptor;
//schedule the NetHandler to start; this eventually gets NetHandler::mainNetEvent() called periodically (every NET_PERIOD)
thread->schedule_imm(get_NetHandler(thread));
#ifndef INACTIVITY_TIMEOUT
//create the InactivityCop instance; InactivityCop periodically (every second) checks each connection (vc) to decide whether it can be closed, and closes it if so
InactivityCop *inactivityCop = NEW(new InactivityCop(get_NetHandler(thread)->mutex));
//periodically schedule inactivityCop's check_inactivity() function
thread->schedule_every(inactivityCop, HRTIME_SECONDS(1));
#endif
//register the signal hook function
thread->signal_hook = net_signal_hook_function;
//create and initialize the EventIO instance
thread->ep = (EventIO*)ats_malloc(sizeof(EventIO));
thread->ep->type = EVENTIO_ASYNC_SIGNAL;
#if HAVE_EVENTFD
//start the EventIO instance: register a read event with epoll (go read up on epoll first if you are not familiar with it)
thread->ep->start(pd, thread->evfd, 0, EVENTIO_READ);
#else
thread->ep->start(pd, thread->evpipe[0], 0, EVENTIO_READ);
#endif
}
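For reference: on the epoll build, the thread->ep->start(...) call above essentially boils down to registering the eventfd (or pipe) with epoll_ctl(), storing the EventIO itself in the epoll user data. A simplified sketch of the idea (not the exact ATS code, which also handles kqueue/event ports and the non-edge-triggered case):
int EventIO::start(PollDescriptor *pd, int afd, Continuation *c, int e) {
  fd = afd;
  struct epoll_event ev;
  memset(&ev, 0, sizeof(ev));
  ev.events = e | EPOLLET;  // edge-triggered when USE_EDGE_TRIGGER is defined
  ev.data.ptr = this;       // mainNetEvent() gets this pointer back via get_ev_data()
  return epoll_ctl(pd->epoll_fd, EPOLL_CTL_ADD, fd, &ev);
}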
Initialization of NetHandler
main()--->UnixNetProcessor::start()--->NetHandler::NetHandler()
Sets NetHandler's handler to NetHandler::startNetEvent:
NetHandler::NetHandler():Continuation(NULL), trigger_event(0)
{
SET_HANDLER((NetContHandler) & NetHandler::startNetEvent);
}
Sets NetHandler's handler to NetHandler::mainNetEvent and schedules it to run periodically:
int
NetHandler::startNetEvent(int event, Event *e)
{
(void) event;
SET_HANDLER((NetContHandler) & NetHandler::mainNetEvent);
e->schedule_every(NET_PERIOD);
trigger_event = e;
return EVENT_CONT;
}
Initialization of PollCont
main()--->UnixNetProcessor::start()--->PollCont::PollCont()
PollCont::PollCont(ProxyMutex *m, NetHandler *nh, int pt):Continuation(m), net_handler(nh), poll_timeout(pt)
{
//create the PollDescriptor instance
pollDescriptor = NEW(new PollDescriptor);
//initialize the PollDescriptor instance
pollDescriptor->init();
//set PollCont's handler to PollCont::pollEvent
SET_HANDLER(&PollCont::pollEvent);
}
Initialization of PollDescriptor
main()--->UnixNetProcessor::start()--->PollCont::PollCont()--->init()
PollDescriptor *init()
{
result = 0;
#if TS_USE_EPOLL
nfds = 0;
//create the file descriptor used by epoll
epoll_fd = epoll_create(POLL_DESCRIPTOR_SIZE);
memset(ePoll_Triggered_Events, 0, sizeof(ePoll_Triggered_Events));
memset(pfd, 0, sizeof(pfd));
#endif
......
return this;
}
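The ePoll_Triggered_Events array filled in by epoll_wait() is what mainNetEvent() later reads through the get_ev_data()/get_ev_events() macros. On the epoll path they are roughly the following (a sketch; the other poll backends have their own definitions):
#define get_ev_data(a, x)   ((a)->ePoll_Triggered_Events[(x)].data.ptr)  /* the EventIO* stored at registration time */
#define get_ev_events(a, x) ((a)->ePoll_Triggered_Events[(x)].events)    /* the epoll event mask (EPOLLIN/EPOLLOUT/...) */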
main()--->UnixNetProcessor::start()--->initialize_thread_for_net()--->NetHandler::mainNetEvent()
This function looks a bit long, so here is what it does up front. First it calls epoll_wait() to wait for events, then handles each event according to its type: EVENTIO_READWRITE_VC (read/write events), EVENTIO_DNS_CONNECTION (DNS connect events) and EVENTIO_ASYNC_SIGNAL (async signal events). Receiving a normal HTTP request and sending its response fall under EVENTIO_READWRITE_VC. As mentioned in the DNS request-sending flow, when connect() is called to send a DNS request, epoll_ctl() is called to register the corresponding event; that is EVENTIO_DNS_CONNECTION. We will not worry about EVENTIO_ASYNC_SIGNAL for now.
Finally it walks the handler's read-ready and write-ready queues, calls read and write to do the actual I/O, and then notifies the upper layer.
int
NetHandler::mainNetEvent(int event, Event *e)
{
ink_assert(trigger_event == e && (event == EVENT_INTERVAL || event == EVENT_POLL));
(void) event;
(void) e;
EventIO *epd = NULL;
int poll_timeout = net_config_poll_timeout;
//bump the stats counter
NET_INCREMENT_DYN_STAT(net_handler_run_stat);
//process the UnixNetVConnection events on NetHandler's read/write enable lists; for our purposes you can treat this as a no-op
process_enabled_list(this);
if (likely(!read_ready_list.empty() || !write_ready_list.empty() || !read_enable_list.empty() || !write_enable_list.empty()))
poll_timeout = 0;
else
poll_timeout = net_config_poll_timeout;
PollDescriptor *pd = get_PollDescriptor(trigger_event->ethread);
UnixNetVConnection *vc = NULL;
#if TS_USE_EPOLL
//call epoll_wait() to collect events
pd->result = epoll_wait(pd->epoll_fd, pd->ePoll_Triggered_Events, POLL_DESCRIPTOR_SIZE, poll_timeout);
NetDebug("iocore_net_main_poll", "[NetHandler::mainNetEvent] epoll_wait(%d,%d), result=%d", pd->epoll_fd,poll_timeout,pd->result);
......
//handle all returned events
vc = NULL;
for (int x = 0; x < pd->result; x++) {
epd = (EventIO*) get_ev_data(pd,x);
// Handling EVENTIO_READWRITE_VC events: a read event enqueues the vc on NetHandler's readable list read_ready_list, a write event enqueues it on the writable list write_ready_list
if (epd->type == EVENTIO_READWRITE_VC) {
vc = epd->data.vc;
if (get_ev_events(pd,x) & (EVENTIO_READ|EVENTIO_ERROR)) {
vc->read.triggered = 1;
if (!read_ready_list.in(vc))
read_ready_list.enqueue(vc);
else if (get_ev_events(pd,x) & EVENTIO_ERROR) {
// check for unhandled epoll events that should be handled
Debug("iocore_net_main", "Unhandled epoll event on read: 0x%04x read.enabled=%d closed=%d read.netready_queue=%d",
get_ev_events(pd,x), vc->read.enabled, vc->closed, read_ready_list.in(vc));
}
}
vc = epd->data.vc;
if (get_ev_events(pd,x) & (EVENTIO_WRITE|EVENTIO_ERROR)) {
vc->write.triggered = 1;
if (!write_ready_list.in(vc))
write_ready_list.enqueue(vc);
else if (get_ev_events(pd,x) & EVENTIO_ERROR) {
Debug("iocore_net_main",
"Unhandled epoll event on write: 0x%04x write.enabled=%d closed=%d write.netready_queue=%d",
get_ev_events(pd,x), vc->write.enabled, vc->closed, write_ready_list.in(vc));
}
} else if (!get_ev_events(pd,x) & EVENTIO_ERROR) {
Debug("iocore_net_main", "Unhandled epoll event: 0x%04x", get_ev_events(pd,x));
}
//Handling EVENTIO_DNS_CONNECTION events: add the connection to the DNSHandler's triggered queue
} else if (epd->type == EVENTIO_DNS_CONNECTION) {
if (epd->data.dnscon != NULL) {
epd->data.dnscon->trigger();
#if defined(USE_EDGE_TRIGGER)
epd->refresh(EVENTIO_READ);
#endif
}
} else if (epd->type == EVENTIO_ASYNC_SIGNAL)
net_signal_hook_callback(trigger_event->ethread);
ev_next_event(pd,x);
}
pd->result = 0;
#if defined(USE_EDGE_TRIGGER)
//walk the vcs on the handler's read-ready queue and call net_read_io on each one; net_read_io calls read to receive data and then notifies the upper layer (HttpSM)
while ((vc = read_ready_list.dequeue())) {
if (vc->closed)
close_UnixNetVConnection(vc, trigger_event->ethread);
else if (vc->read.enabled && vc->read.triggered)
vc->net_read_io(this, trigger_event->ethread);
else if (!vc->read.enabled) {
read_ready_list.remove(vc);
}
}
//walk the vcs on the handler's write-ready queue and call write_to_net on each one; write_to_net calls write to send data and then notifies the upper layer (HttpSM)
while ((vc = write_ready_list.dequeue())) {
if (vc->closed)
close_UnixNetVConnection(vc, trigger_event->ethread);
else if (vc->write.enabled && vc->write.triggered)
write_to_net(this, vc, trigger_event->ethread);
else if (!vc->write.enabled) {
write_ready_list.remove(vc);
}
}
return EVENT_CONT;
}
Do not forget the InactivityCop structure.
main()--->UnixNetProcessor::start()--->initialize_thread_for_net()--->InactivityCop()
Its handler is set to InactivityCop::check_inactivity, which is called once per second:
struct InactivityCop : public Continuation {
InactivityCop(ProxyMutex *m):Continuation(m) {
SET_HANDLER(&InactivityCop::check_inactivity);
}
main()--->UnixNetProcessor::start()--->initialize_thread_for_net()--->InactivityCop()---> InactivityCop::check_inactivity()
int check_inactivity(int event, Event *e) {
(void) event;
ink_hrtime now = ink_get_hrtime();
NetHandler *nh = get_NetHandler(this_ethread());
//walk NetHandler's open-connection list; every vc that belongs to this thread is pushed onto NetHandler's cop_list
forl_LL(UnixNetVConnection, vc, nh->open_list) {
if (vc->thread == this_ethread())
nh->cop_list.push(vc);
}
while (UnixNetVConnection *vc = nh->cop_list.pop()) {
// If we cannot get the lock, don't stop, just keep cleaning
MUTEX_TRY_LOCK(lock, vc->mutex, this_ethread());
if (!lock.lock_acquired) {
NET_INCREMENT_DYN_STAT(inactivity_cop_lock_acquire_failure_stat);
continue;
}
//if the vc has been marked closed, call close_UnixNetVConnection() to actually close it
if (vc->closed) {
close_UnixNetVConnection(vc, e->ethread);
continue;
}
if (vc->next_inactivity_timeout_at && vc->next_inactivity_timeout_at < now)
//call the vc's handler (UnixNetVConnection::mainEvent) to handle the timeout
vc->handleEvent(EVENT_IMMEDIATE, e);
}
return 0;
}
That completes the analysis of the NetProcessor startup flow. It can be summarized simply: starting NetProcessor mainly initializes a number of threads that periodically call epoll_wait() to wait for read/write events; when a read event arrives, read is called and the received data is handed to the upper layer, and when a write event arrives, write is called to send data and the upper layer is notified of the result. So where do these read/write events come from? As network programming experience tells us, a server generally has to accept before it can read and write, and that is exactly where these events originate, so next we analyze NetProcessor's accept. NetProcessor's accept is invoked when HttpProxyServer starts, or more precisely from the main_accept() function.
main()--->start_HttpProxyServer()
void
start_HttpProxyServer()
{
//create one acceptor per configured port (by default there is only one: 8080)
for ( int i = 0 , n = proxy_ports.length() ; i < n ; ++i ) {
HttpProxyAcceptor& acceptor = HttpProxyAcceptors[i];
HttpProxyPort& port = proxy_ports[i];
......
if (NULL == netProcessor.main_accept(acceptor._accept, port.m_fd, acceptor._net_opt))
return;
}
......
}
}
main()--->start_HttpProxyServer()--->NetProcessor::main_accept()
Action *
NetProcessor::main_accept(Continuation *cont, SOCKET fd, AcceptOptions const& opt)
{
UnixNetProcessor* this_unp = static_cast<UnixNetProcessor*>(this);
Debug("iocore_net_processor", "NetProcessor::main_accept - port %d,recv_bufsize %d, send_bufsize %d, sockopt 0x%0x",
opt.local_port, opt.recv_bufsize, opt.send_bufsize, opt.sockopt_flags);
//simply forwards to UnixNetProcessor::accept_internal()
return this_unp->accept_internal(cont, fd, opt);
}
main()--->start_HttpProxyServer()--->NetProcessor::main_accept()--->UnixNetProcessor::accept_internal()
Action *
UnixNetProcessor::accept_internal(Continuation *cont, int fd, AcceptOptions const& opt)
{
EventType et = opt.etype;
//create the NetAccept instance
NetAccept *na = createNetAccept();
EThread *thread = this_ethread();
ProxyMutex *mutex = thread->mutex;
int accept_threads = opt.accept_threads;
IpEndpoint accept_ip;
upgradeEtype(et);
//opt holds the network-related settings read from the configuration and serves as the Net options; see the ATS configuration documentation for the individual records
if (opt.accept_threads < 0) {
REC_ReadConfigInteger(accept_threads, "proxy.config.accept_threads");
}
NET_INCREMENT_DYN_STAT(net_accepts_currently_open_stat);
//set the server (listen) address according to the configured mode
if (opt.localhost_only) {
accept_ip.setToLoopback(opt.ip_family);
} else if (opt.local_ip.isValid()) {
accept_ip.assign(opt.local_ip);
} else {
accept_ip.setToAnyAddr(opt.ip_family);
}
ink_assert(0 < opt.local_port && opt.local_port < 65536);
accept_ip.port() = htons(opt.local_port);
na->accept_fn = net_accept;
na->server.fd = fd;
ats_ip_copy(&na->server.accept_addr, &accept_ip);
na->server.f_inbound_transparent = opt.f_inbound_transparent;
//transparent proxy
if (opt.f_inbound_transparent) {
Debug( "http_tproxy", "Marking accept server %p on port %d as inbound transparent", na, opt.local_port);
}
int should_filter_int = 0;
na->server.http_accept_filter = false;
//per this config record's description, a connection is only accepted once data has arrived; by default the kernel gives up after about 45 seconds. This is implemented further down by calling setsockopt() on the listening socket.
REC_ReadConfigInteger(should_filter_int, "proxy.config.net.defer_accept");
if (should_filter_int > 0 && opt.etype == ET_NET)
na->server.http_accept_filter = true;
na->action_ = NEW(new NetAcceptAction());
*na->action_ = cont;//points to the upper-layer continuation, e.g. HttpAccept
//the following initializes the receive buffer size and other network parameters
na->action_->server = &na->server;
na->callback_on_open = opt.f_callback_on_open;
na->recv_bufsize = opt.recv_bufsize;
na->send_bufsize = opt.send_bufsize;
na->sockopt_flags = opt.sockopt_flags;
na->packet_mark = opt.packet_mark;
na->packet_tos = opt.packet_tos;
na->etype = opt.etype;
na->backdoor = opt.backdoor;
if (na->callback_on_open)
na->mutex = cont->mutex;
//frequent (ongoing) accept
if (opt.frequent_accept) {
//number of accept threads from the configuration
if (accept_threads > 0) {
//set up the listening socket (socket options, listen)
if (0 == na->do_listen(BLOCKING, opt.f_inbound_transparent)) {
NetAccept *a;
//for each accept thread, create a NetAccept instance, copy the na built above into it, and call NetAccept::init_accept_loop() to enter the accept loop
for (int i=1; i < accept_threads; ++i) {
a = createNetAccept();
*a = *na;
a->init_accept_loop();
Debug("iocore_net_accept", "Created accept thread #%d for port %d", i, ats_ip_port_host_order(&accept_ip));
}
Debug("iocore_net_accept", "Created accept thread #%d for port %d", accept_threads, ats_ip_port_host_order(&accept_ip));
na->init_accept_loop();
}
} else {
na->init_accept_per_thread();
}
} else
na->init_accept();
//per this config record's description, a connection is only accepted once data has arrived; by default the kernel gives up after about 45 seconds, implemented here via setsockopt()
#ifdef TCP_DEFER_ACCEPT
if (should_filter_int > 0) {
setsockopt(na->server.fd, IPPROTO_TCP, TCP_DEFER_ACCEPT, &should_filter_int, sizeof(int));
}
#endif
return na->action_;
}
main()--->start_HttpProxyServer()--->NetProcessor::main_accept()--->UnixNetProcessor::accept_internal()--->NetAccept::init_accept_loop()
This function spawns a dedicated thread whose handler is NetAccept::acceptLoopEvent:
void
NetAccept::init_accept_loop()
{
size_t stacksize;
//thread stack size
REC_ReadConfigInteger(stacksize, "proxy.config.thread.default.stacksize");
SET_CONTINUATION_HANDLER(this, &NetAccept::acceptLoopEvent);
eventProcessor.spawn_thread(this, "[ACCEPT]", stacksize);
}
main()--->start_HttpProxyServer()--->NetProcessor::main_accept()--->UnixNetProcessor::accept_internal()--->NetAccept::init_accept_loop()--->NetAccept::acceptLoopEvent()
int
NetAccept::acceptLoopEvent(int event, Event * e)
{
(void) event;
(void) e;
EThread *t = this_ethread();
//at last, the endless accept loop (the listen() was of course already done earlier)
while (1)
do_blocking_accept(t);
NET_DECREMENT_DYN_STAT(net_accepts_currently_open_stat);
delete this;
return EVENT_DONE;
}
main()--->start_HttpProxyServer()--->NetProcessor::main_accept()--->UnixNetProcessor::accept_internal()--->NetAccept::init_accept_loop()--->NetAccept::acceptLoopEvent()--->NetAccept::do_blocking_accept()
This function loops calling accept to take incoming connections and has the event system schedule the handling of each resulting UnixNetVConnection:
int
NetAccept::do_blocking_accept(EThread * t)
{
int res = 0;
int loop = accept_till_done;
UnixNetVConnection *vc = NULL;
do {
//create a UnixNetVConnection instance representing one connection
vc = (UnixNetVConnection *)alloc_cache;
if (likely(!vc)) {
vc = allocateGlobal();
vc->from_accept_thread = true;
vc->id = net_next_connection_number();
alloc_cache = vc;
}
//throttling
ink_hrtime now = ink_get_hrtime();
while (!backdoor && check_net_throttle(ACCEPT, now)) {
check_throttle_warning();
if (!unix_netProcessor.throttle_error_message) {
safe_delay(NET_THROTTLE_DELAY);
} else if (send_throttle_message(this) < 0) {
goto Lerror;
}
now = ink_get_hrtime();
}
//call accept to take a new connection
if ((res = server.accept(&vc->con)) < 0) {
//error handling
Lerror:
int seriousness = accept_error_seriousness(res);
if (seriousness >= 0) {
if (!seriousness)
check_transient_accept_error(res);
safe_delay(NET_THROTTLE_DELAY);
return 0;
}
if (!action_->cancelled) {
MUTEX_LOCK(lock, action_->mutex, t);
action_->continuation->handleEvent(EVENT_ERROR, (void *)(intptr_t)res);
MUTEX_UNTAKE_LOCK(action_->mutex, t);
Warning("accept thread received fatal error: errno = %d", errno);
}
return -1;
}
//throttling
check_emergency_throttle(vc->con);
alloc_cache = NULL;
NET_SUM_GLOBAL_DYN_STAT(net_connections_currently_open_stat, 1);
//set the vc's submit time and its server_addr (copied from the accepted connection's address)
vc->submit_time = now;
ats_ip_copy(&vc->server_addr, &vc->con.addr);
//transparent proxy flag
vc->set_is_transparent(server.f_inbound_transparent);
vc->mutex = new_ProxyMutex();
vc->action_ = *action_;
//set the UnixNetVConnection's handler to UnixNetVConnection::acceptEvent
SET_CONTINUATION_HANDLER(vc, (NetVConnHandler) & UnixNetVConnection::acceptEvent);
//have the event system schedule the vc's handler
eventProcessor.schedule_imm_signal(vc, getEtype());
} while (loop);
return 1;
}
main()--->start_HttpProxyServer()--->NetProcessor::main_accept()--->UnixNetProcessor::accept_internal()--->NetAccept::init_accept_loop()--->NetAccept::acceptLoopEvent()--->NetAccept::do_blocking_accept()--->UnixNetVConnection::acceptEvent()
This function takes over one new connection: it registers read/write events with the NetHandler so that the NetHandler will receive this connection's data and send out the response to the request (that is, after all, the NetHandler's job), and finally calls the upper layer's (HttpSM's) handler (HttpAccept::mainEvent) to accept the connection:
int
UnixNetVConnection::acceptEvent(int event, Event *e)
{
thread = e->ethread;
MUTEX_TRY_LOCK(lock, get_NetHandler(thread)->mutex, e->ethread);
if (!lock) {
if (event == EVENT_NONE) {
thread->schedule_in(this, NET_RETRY_DELAY);
return EVENT_DONE;
} else {
e->schedule_in(NET_RETRY_DELAY);
return EVENT_CONT;
}
}
if (action_.cancelled) {
free(thread);
return EVENT_DONE;
}
//set the UnixNetVConnection's handler to UnixNetVConnection::mainEvent
SET_HANDLER((NetVConnHandler) & UnixNetVConnection::mainEvent);
//get the pointer to the NetHandler (introduced earlier)
nh = get_NetHandler(thread);
//get the pointer to the PollDescriptor (introduced earlier)
PollDescriptor *pd = get_PollDescriptor(thread);
//register epoll read and write events; this ties back into the mainNetEvent flow above
if (ep.start(pd, this, EVENTIO_READ|EVENTIO_WRITE) < 0) {
Debug("iocore_net", "acceptEvent : failed EventIO::start\n");
close_UnixNetVConnection(this, e->ethread);
return EVENT_DONE;
}
//add the vc to NetHandler's open-connection list open_list
nh->open_list.enqueue(this);
//set the corresponding timeouts used to close the connection
if (inactivity_timeout_in)
UnixNetVConnection::set_inactivity_timeout(inactivity_timeout_in);
if (active_timeout_in)
UnixNetVConnection::set_active_timeout(active_timeout_in);
//call the upper-layer handler (e.g. HttpAccept::mainEvent) to handle this connection; how that works is left for the HTTP flow analysis
action_.continuation->handleEvent(NET_EVENT_ACCEPT, this);
return EVENT_DONE;
}
HttpSM     <---------------------------- HttpAccept
   ^                                          ^
   |                                          |
   |       register READ/WRITE events         |
NetHandler <---------------------------- NetAccept
   ^                                          ^
   |                                          |
   |                                          |
read() write()                             accept()
In addition, two NetProcessor functions deserve a mention: NetProcessor::connect_s() and NetProcessor::connect_re(). Both are connect interfaces provided to the upper layers; the difference is that one is synchronous and the other asynchronous: connect_s is the synchronous one, connect_re the asynchronous one.
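A rough usage sketch of the asynchronous variant, assuming a prototype along the lines of Action *connect_re(Continuation *cont, sockaddr const *addr, NetVCOptions *opt) and the NET_EVENT_OPEN / NET_EVENT_OPEN_FAILED callback events (check I_NetProcessor.h in your ATS version for the exact signature); the continuation's handler fires once the connection attempt completes:
struct ConnectDemo : public Continuation {
  ConnectDemo(ProxyMutex *m) : Continuation(m) {
    SET_HANDLER(&ConnectDemo::handle_connect);
  }
  int handle_connect(int event, void *data) {
    if (event == NET_EVENT_OPEN) {
      NetVConnection *vc = (NetVConnection *)data;
      // connected: issue do_io_read()/do_io_write() on vc from here
      (void)vc;
    } else if (event == NET_EVENT_OPEN_FAILED) {
      // connection failed; clean up and retry or report upstream
    }
    return EVENT_DONE;
  }
};
// hypothetical call site, on an ET_NET thread ('target' stands for an IpEndpoint of your choosing):
//   netProcessor.connect_re(new ConnectDemo(new_ProxyMutex()), &target.sa, NULL);
// connect_s(), by contrast, blocks until the connection is established or times out instead of delivering an event.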