This patch gets everything set up to do the work of gk_proc() inside coroutines, but no work is actually moved into coroutines. The following patches gradually move the work of gk_proc() into coroutines.
Commit cac633a (1 parent: 57b0f19). Showing 10 changed files with 839 additions and 90 deletions.
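For orientation, the sketch below shows how the pieces introduced by this patch are expected to fit together: tasks are assigned to their owner coroutines, the owner coroutines are linked into the working list, and control is transferred from the root coroutine to the first worker until the tasks drain. This is a minimal illustration based only on identifiers visible in this diff; the driver function launch_cos_sketch() is a hypothetical name, not code from this commit.

/*
 * Hypothetical driver (not part of this commit): schedule @n tasks and
 * run the working coroutines until they all become idle again.
 */
static void
launch_cos_sketch(struct gk_co_work *work, struct gk_co_task *tasks, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		/* Assign each task to the coroutine that owns its hash. */
		struct gk_co *owner = get_task_owner_co(work, &tasks[i]);
		__schedule_task(owner, &tasks[i]);
		if (list_poison(&owner->co_list))
			list_add_tail(&owner->co_list, &work->working_cos);
	}

	if (!list_empty(&work->working_cos)) {
		struct gk_co *first = list_first_entry(&work->working_cos,
			struct gk_co, co_list);
		/* The last worker transfers back to coro_root (see next_task()). */
		coro_transfer(&work->instance->coro_root, &first->coro);
	}
}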
@@ -0,0 +1,272 @@
/*
 * Gatekeeper - DoS protection system.
 * Copyright (C) 2016 Digirati LTDA.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "co.h"

static struct gk_co *
get_next_co(struct gk_co *this_co)
{
	/*
	 * This branch is unlikely because, as long as there is more than
	 * one working coroutine, there is at least a 50% chance that
	 * @this_co is not the last working coroutine.
	 */
	if (unlikely(this_co->co_list.next == &this_co->work->working_cos)) {
		/* @this_co is the last working coroutine; wrap around. */
		return list_first_entry(&this_co->work->working_cos,
			struct gk_co, co_list);
	}
	return list_next_entry(this_co, co_list);
}

static void
yield_next(struct gk_co *this_co)
{
	struct gk_co *next_co = get_next_co(this_co);
	if (unlikely(this_co == next_co))
		return;
	coro_transfer(&this_co->coro, &next_co->coro);
}

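/*
 * Prefetch-and-yield pattern: a coroutine issues a prefetch for the
 * memory it is about to touch (e.g. rte_prefetch_non_temporal() in
 * gk_co_scan_flow_table() below), calls yield_next() to let the other
 * working coroutines run, and touches the memory once control comes
 * back around, by which time the data is likely already in cache.
 */
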
/*
 * If @task is added to @this_co->work without a proper @task->task_hash,
 * @task must be rescheduled once the proper @task->task_hash becomes known
 * in order to avoid race conditions related to the proper @task->task_hash.
 *
 * NOTICE: while a task is running without a proper @task->task_hash,
 * the task must not use the leftover available to it because the task is
 * likely running under a task hash that is different from its proper
 * @task->task_hash.
 */
static void
reschedule_task(struct gk_co *this_co, struct gk_co_task *task)
{
	struct gk_co_work *work = this_co->work;
	struct gk_co *task_owner_co = get_task_owner_co(work, task);

	__schedule_task(task_owner_co, task);

	/* Add the owner coroutine to the working list if it is idle. */
	if (list_poison(&task_owner_co->co_list))
		list_add_tail(&task_owner_co->co_list, &work->working_cos);
}

static inline void
reset_leftover(struct gk_co_leftover *leftover)
{
	leftover->fe = NULL;
	leftover->fib = NULL;
}

static inline void
set_leftover_fe(struct gk_co_leftover *leftover, struct flow_entry *fe)
{
	leftover->fe = fe;
	leftover->fib = NULL;
}

static int
gk_del_flow_entry_from_hash(struct rte_hash *h, struct flow_entry *fe)
{
	int ret = rte_hash_del_key_with_hash(h, &fe->flow, fe->flow_hash_val);
	if (likely(ret >= 0)) {
		memset(fe, 0, sizeof(*fe));
	} else {
		GK_LOG(ERR,
			"The GK block failed to delete a key from the hash table at %s: %s\n",
			__func__, strerror(-ret));
	}

	return ret;
}

static void
gk_co_scan_flow_table_final(struct gk_co *this_co,
	struct gk_co_task *task, struct gk_co_leftover *leftover)
{
	struct flow_entry *fe = task->task_arg;
	struct gk_instance *instance = this_co->work->instance;

	rte_hash_prefetch_buckets_non_temporal(instance->ip_flow_hash_table,
		task->task_hash);
	yield_next(this_co);

	gk_del_flow_entry_from_hash(instance->ip_flow_hash_table, fe);
	if (leftover->fe == fe)
		reset_leftover(leftover);

	if (instance->num_scan_del > 0)
		instance->num_scan_del--;
}

/*
 * Callers must not pass zero to integer_log_base_2();
 * __builtin_clzl()/__builtin_clzll() are undefined for zero.
 */
static inline uint8_t
integer_log_base_2(uint64_t delta_time)
{
#if __WORDSIZE == 64
	return (8 * sizeof(uint64_t) - 1) - __builtin_clzl(delta_time);
#else
	return (8 * sizeof(uint64_t) - 1) - __builtin_clzll(delta_time);
#endif
}

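/*
 * Example: integer_log_base_2(1) == 0, integer_log_base_2(2) == 1, and
 * integer_log_base_2(1023) == 9; that is, the function returns
 * floor(log2(delta_time)).
 */
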
/*
 * Convert the difference between the arrival times of the current packet
 * and of the last seen packet into a priority.
 */
static uint8_t
priority_from_delta_time(uint64_t present, uint64_t past)
{
	uint64_t delta_time;

	if (unlikely(present < past)) {
		/*
		 * This should never happen, but we handle it gracefully here
		 * in order to keep going.
		 */
		GK_LOG(ERR, "The present time is smaller than the past time\n");
		return 0;
	}

	delta_time = (present - past) * picosec_per_cycle;
	if (unlikely(delta_time < 1))
		return 0;

	return integer_log_base_2(delta_time);
}

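/*
 * Worked example: on a CPU whose cycle counter runs at 2 GHz (an
 * illustrative figure; picosec_per_cycle depends on the actual
 * counter frequency), picosec_per_cycle is 500, so a gap of 4096
 * cycles becomes delta_time = 2,048,000 ps and maps to priority 20,
 * since 2^20 <= 2,048,000 < 2^21.
 */
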
static bool
is_flow_expired(struct flow_entry *fe, uint64_t now)
{
	switch (fe->state) {
	case GK_REQUEST:
		if (fe->u.request.last_packet_seen_at > now) {
			char err_msg[128];
			int ret = snprintf(err_msg, sizeof(err_msg),
				"gk: buggy condition at %s: wrong timestamp",
				__func__);
			RTE_VERIFY(ret > 0 && ret < (int)sizeof(err_msg));
			print_flow_err_msg(&fe->flow, err_msg);
			return true;
		}

		/*
		 * A request entry is considered expired if it is not
		 * doubling its waiting time. We use +2 instead of +1 in
		 * the test below to account for random delays in the network.
		 */
		return priority_from_delta_time(now,
			fe->u.request.last_packet_seen_at) >
			fe->u.request.last_priority + 2;
	case GK_GRANTED:
		return now >= fe->u.granted.cap_expire_at;
	case GK_DECLINED:
		return now >= fe->u.declined.expire_at;
	case GK_BPF:
		return now >= fe->u.bpf.expire_at;
	default:
		return true;
	}
}

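/*
 * Example for the GK_REQUEST rule above: if the last packet arrived
 * with last_priority 10, the entry only expires once the gap since
 * that packet maps to priority 13 or higher, i.e. the sender stopped
 * doubling its waiting time even after the two-priority allowance for
 * network jitter.
 */
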
void
gk_co_scan_flow_table(struct gk_co *this_co,
	struct gk_co_task *task,
	__attribute__((unused)) struct gk_co_leftover *leftover)
{
	struct flow_entry *fe = task->task_arg;

	/*
	 * Only one prefetch is needed here because only the beginning of
	 * a struct flow_entry is needed to check whether the entry
	 * has expired.
	 */
	rte_prefetch_non_temporal(fe);
	yield_next(this_co);

	if (!fe->in_use || !is_flow_expired(fe, rte_rdtsc()))
		return;

	/* Finish up the work with the correct hash value. */
	task->task_hash = fe->flow_hash_val;
	task->task_func = gk_co_scan_flow_table_final;
	reschedule_task(this_co, task);
}

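/*
 * The scan is therefore split in two steps: gk_co_scan_flow_table()
 * runs under an arbitrary task hash and only reads the entry, while
 * the actual deletion is rescheduled under fe->flow_hash_val so that
 * gk_co_scan_flow_table_final() runs on the coroutine that owns that
 * hash, avoiding the races described above reschedule_task().
 */
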
static struct gk_co_task *
next_task(struct gk_co *this_co, struct gk_co_leftover *leftover)
{
	while (true) {
		struct gk_co *next_co;

		/*
		 * This test is likely true: if @this_co has at least one
		 * assigned task, each task accounts for one call of this
		 * function that takes this branch, and only the final call
		 * finds the queue empty, so at least 50% of the calls
		 * take this branch.
		 */
		if (likely(!list_empty(&this_co->task_queue))) {
			/*
			 * @this_co has assigned tasks.
			 * Return the first assigned task.
			 */
			struct gk_co_task *task = list_first_entry(
				&this_co->task_queue, struct gk_co_task,
				task_list);
			list_del(&task->task_list);
			return task;
		}

		/* There are no more tasks assigned to @this_co. */

		next_co = get_next_co(this_co);

		/* Make @this_co idle. */
		reset_leftover(leftover);
		list_del(&this_co->co_list);

		/* Transfer control to another coroutine. */
		if (likely(this_co != next_co)) {
			/*
			 * @this_co is NOT the last working coroutine.
			 * Yield to the next coroutine.
			 */
			coro_transfer(&this_co->coro, &next_co->coro);
		} else {
			/*
			 * No more work and no more working coroutines;
			 * @this_co is the last working coroutine.
			 * Return to the main coroutine.
			 */
			coro_transfer(&this_co->coro,
				&this_co->work->instance->coro_root);
		}
	}
}

void
gk_co_main(void *arg)
{
	struct gk_co *this_co = arg;
	struct gk_co_leftover leftover = {};
	struct gk_co_task *task = next_task(this_co, &leftover);

	while (likely(task != NULL)) {
		task->task_func(this_co, task, &leftover);
		task = next_task(this_co, &leftover);
	}

	rte_panic("%s() terminated\n", __func__);
}