@@ -162,9 +162,8 @@ current_sandbox_start(void)
     assert(next_module);
     size_t next_module_pre_count = next_module[0]->pre_module_count;
     assert(next_module_pre_count);
-    if (next_module_idx > 1 || (next_module_idx == 1 && next_module_pre_count == 1))
+    if (next_module_idx == 1 && next_module_pre_count == 1)
     {
-        /* Generate a new request, copy the current sandbox's output to the next request's buffer, and put it to the global queue */
         ssize_t output_length = sandbox->request_response_data_length - sandbox->request_length;
         char * pre_func_output = (char *)malloc(output_length);
         if (!pre_func_output) {
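
Read together with the next hunk, this change replaces the old combined condition with explicit branches keyed on the shape of the module graph: a plain chain (exactly one successor whose module has a single predecessor), fan-out (several successor modules), and fan-in (one successor that waits on several predecessors). The minimal, self-contained sketch below shows only that dispatch structure; the handler names (handle_plain_chain, handle_fan_out, handle_fan_in) are hypothetical stand-ins for the branch bodies that the real code inlines.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-ins for the branch bodies inlined in current_sandbox_start(). */
static void handle_plain_chain(void)          { puts("one successor, one predecessor"); }
static void handle_fan_out(size_t successors) { printf("fan-out to %zu successors\n", successors); }
static void handle_fan_in(size_t predecessors){ printf("fan-in waiting on %zu predecessors\n", predecessors); }

static void dispatch(size_t next_module_idx, size_t next_module_pre_count)
{
        /* At least one successor module and a non-zero predecessor count (cf. the asserts in the diff). */
        assert(next_module_idx >= 1 && next_module_pre_count >= 1);

        if (next_module_idx == 1 && next_module_pre_count == 1) {
                handle_plain_chain();
        } else if (next_module_idx > 1 && next_module_pre_count == 1) {
                handle_fan_out(next_module_idx);
        } else if (next_module_idx == 1 && next_module_pre_count > 1) {
                handle_fan_in(next_module_pre_count);
        }
}

int main(void)
{
        dispatch(1, 1); /* plain chain */
        dispatch(3, 1); /* fan-out     */
        dispatch(1, 2); /* fan-in      */
        return 0;
}

Splitting the cases this way lets each branch manage the lifetime of the copied output differently, which is what the remaining hunks adjust.
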
@@ -173,20 +172,41 @@ current_sandbox_start(void)
         };

         memcpy(pre_func_output, sandbox->request_response_data + sandbox->request_length, output_length);
-        //mem_log("time %lu request id:%d executing, name:%s remaining slack %lu\n", current_rs,
-        // sandbox->id, sandbox->module->name, sandbox->remaining_slack);
+        uint64_t now = __getcycles();
+        current_sandbox_get_newlaxity(sandbox, now);
+        struct module *next_module_node = next_module[0];
+        struct sandbox_request *sandbox_request =
+            sandbox_request_allocate(next_module_node, false, sandbox->request_length,
+                next_module_node->name, sandbox->client_socket_descriptor,
+                (const struct sockaddr *)&sandbox->client_address,
+                sandbox->request_arrival_timestamp, now,
+                sandbox->remaining_slack, sandbox->laxity, true, pre_func_output, output_length);
+        /* TODO: All sandboxs in the chain share the same request id, but sandbox_request_allocate()
+         * will busy-wait to generate an unique id, should we optimize it here?
+         */
+        sandbox_request->id = sandbox->id;
+        if (sandbox->request_from_outside) {
+            sandbox_remove_from_epoll(sandbox);
+        }
+        global_request_scheduler_add(sandbox_request);
+        sandbox_set_as_returned(sandbox, SANDBOX_RUNNING);
+    } else if (next_module_idx > 1 && next_module_pre_count == 1)
+    {
+        assert(next_module_idx > 1);
         for (size_t i = 0; i < next_module_idx; i++)
         {
+            /* Generate a new request, copy the current sandbox's output to the next request's buffer, and put it to the global queue */
+            ssize_t output_length = sandbox->request_response_data_length - sandbox->request_length;
+            char * pre_func_output = (char *)malloc(output_length);
+            if (!pre_func_output) {
+                fprintf(stderr, "Failed to allocate memory for the previous output: %s\n", strerror(errno));
+                goto err;
+            };
+            memcpy(pre_func_output, sandbox->request_response_data + sandbox->request_length, output_length);
+            //mem_log("time %lu request id:%d executing, name:%s remaining slack %lu\n", current_rs,
+            // sandbox->id, sandbox->module->name, sandbox->remaining_slack);
             struct module * next_module_node = next_module[i];
             assert(next_module_node);
-            char * individual_pre_func_output = (char *)malloc(output_length);
-            if (!individual_pre_func_output) {
-                fprintf(stderr, "Failed to allocate memory for the individual previous output: %s\n", strerror(errno));
-                free(pre_func_output);
-                goto err;
-            }
-            memcpy(individual_pre_func_output, pre_func_output, output_length);
             uint64_t now = __getcycles();
             current_sandbox_get_newlaxity(sandbox, now);
 #ifdef LOG_DEEP_LEARN_SCHDUE
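
In the new first branch (plain chain), the predecessor's output is copied into a freshly allocated buffer and handed to the successor's request at allocation time; the request is then pushed to the global scheduler and the current sandbox is marked as returned. A simplified, self-contained sketch of that hand-off follows; struct next_request and next_request_create() are hypothetical stand-ins for struct sandbox_request and sandbox_request_allocate(), and the buffer arithmetic mirrors the request_response_data/request_length split used in the diff.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>

/* Hypothetical, simplified stand-in for struct sandbox_request: the request
 * owns the predecessor's output buffer from the moment it is created. */
struct next_request {
        char   *pre_func_output;
        ssize_t output_length;
};

/* Hypothetical stand-in for sandbox_request_allocate(): the output buffer is
 * passed in directly instead of being attached in a separate step. */
static struct next_request *next_request_create(char *output, ssize_t length)
{
        struct next_request *request = malloc(sizeof(*request));
        if (request == NULL) return NULL;
        request->pre_func_output = output;
        request->output_length   = length;
        return request;
}

int main(void)
{
        /* request_response_data holds the original request followed by the response. */
        const char *request_response_data = "GET /fn1 payload-produced-by-fn1";
        ssize_t     request_length        = 9; /* bytes belonging to the original request */
        ssize_t     output_length = (ssize_t)strlen(request_response_data) - request_length;

        char *pre_func_output = malloc((size_t)output_length + 1);
        if (pre_func_output == NULL) {
                fprintf(stderr, "Failed to allocate memory for the previous output: %s\n", strerror(errno));
                return 1;
        }
        /* Copy only the function's output, skipping the original request bytes. */
        memcpy(pre_func_output, request_response_data + request_length, (size_t)output_length);
        pre_func_output[output_length] = '\0';

        struct next_request *next = next_request_create(pre_func_output, output_length);
        if (next == NULL) {
                free(pre_func_output);
                return 1;
        }

        /* The successor's request now owns the buffer; enqueueing would happen here. */
        printf("next request carries %zd bytes: \"%s\"\n", next->output_length, next->pre_func_output);
        free(next->pre_func_output);
        free(next);
        return 0;
}

The key point is ownership: once the buffer is handed to the request, the producing sandbox no longer frees it.
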
@@ -205,12 +225,11 @@ current_sandbox_start(void)
                 next_module_node->name, sandbox->client_socket_descriptor,
                 (const struct sockaddr *)&sandbox->client_address,
                 sandbox->request_arrival_timestamp, now,
-                sandbox->remaining_slack, sandbox->laxity, true, NULL, 0);
+                sandbox->remaining_slack, sandbox->laxity, true, pre_func_output, output_length);
             /* TODO: All sandboxs in the chain share the same request id, but sandbox_request_allocate()
              * will busy-wait to generate an unique id, should we optimize it here?
              */
             sandbox_request->id = sandbox->id;
-            pre_functions_output_request_add(sandbox_request, individual_pre_func_output, output_length, sandbox->module->run_priority);
 #ifdef OPT_AVOID_GLOBAL_QUEUE
             /* TODO: The running time of the current sandbox contains the next sandbox's initialization time, does it matter? */
             if (sandbox->absolute_deadline == sandbox_request->absolute_deadline) {
@@ -236,10 +255,8 @@ current_sandbox_start(void)
         if (sandbox->request_from_outside) {
             sandbox_remove_from_epoll(sandbox);
         }
-        /*free memory of pre_func_out, Because it has been deeply copied its copy into requestbecause */
-        free(pre_func_output);
-        pre_func_output = NULL;
         sandbox_set_as_returned(sandbox, SANDBOX_RUNNING);
+        goto done;
     } else if (next_module_idx == 1 && next_module_pre_count > 1)
     {
         static bool lock_flag = true;
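
The two hunks above rework the fan-out branch. The old code built one shared pre_func_output, duplicated it into an individual_pre_func_output per successor, attached that copy with pre_functions_output_request_add(), and freed the shared buffer after the loop. The new code allocates and fills a fresh pre_func_output inside each loop iteration and passes it straight to sandbox_request_allocate(), so every request owns its own copy, the post-loop free()/NULL reset disappears, and the branch exits through the added goto done. Below is a self-contained sketch of that per-successor duplication pattern, using a hypothetical fan_out_request struct in place of struct sandbox_request.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical, simplified request: each successor's request keeps a private
 * copy of the predecessor output and frees it itself. */
struct fan_out_request {
        size_t successor;
        char  *pre_func_output;
        size_t output_length;
};

#define SUCCESSOR_COUNT 3

int main(void)
{
        const char *output        = "predecessor-output";
        size_t      output_length = strlen(output) + 1;

        struct fan_out_request *requests[SUCCESSOR_COUNT] = { 0 };

        /* One private copy per successor: ownership moves into each request,
         * so there is no shared buffer left to free after the loop. */
        for (size_t i = 0; i < SUCCESSOR_COUNT; i++) {
                char *copy = malloc(output_length);
                if (copy == NULL) goto err;
                memcpy(copy, output, output_length);

                struct fan_out_request *request = malloc(sizeof(*request));
                if (request == NULL) {
                        free(copy);
                        goto err;
                }
                request->successor       = i;
                request->pre_func_output = copy;
                request->output_length   = output_length;
                requests[i] = request;
                printf("successor %zu gets its own %zu-byte copy\n", i, output_length);
        }

        /* Normally each request would be enqueued here; clean up for the demo. */
        for (size_t i = 0; i < SUCCESSOR_COUNT; i++) {
                free(requests[i]->pre_func_output);
                free(requests[i]);
        }
        return 0;

err:
        for (size_t i = 0; i < SUCCESSOR_COUNT; i++) {
                if (requests[i] != NULL) {
                        free(requests[i]->pre_func_output);
                        free(requests[i]);
                }
        }
        return 1;
}
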
@@ -327,6 +344,7 @@ current_sandbox_start(void)
         {
             map_upsert(sandbox_request_id, cur_request_id, strlen(cur_request_id), &rest_pre_count, sizeof(uint32_t));
         }else{
+            concatenate_outputs(sandbox_request);
             uint64_t enqueue_timestamp = __getcycles();
             sandbox_request->enqueue_timestamp = enqueue_timestamp;
             global_request_scheduler_add(sandbox_request);
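
The final hunk touches the fan-in branch (one successor waiting on several predecessors). The surrounding condition lies outside the hunk, but the visible pattern is: while predecessor outputs are still outstanding, only the per-request counter is updated via map_upsert(); once the last one arrives, the newly added concatenate_outputs() merges the collected outputs and the request is timestamped and pushed to the global scheduler. A self-contained sketch of that decrement-and-fire bookkeeping, with a plain array standing in for the runtime's request-id map:

#include <stdint.h>
#include <stdio.h>

/* Plain array standing in for the runtime's request-id map: remaining
 * predecessor count per request id. */
#define MAX_REQUESTS 8
static uint32_t rest_pre_count[MAX_REQUESTS];

/* Called once per arriving predecessor output; returns 1 when the last
 * predecessor has arrived and the merged request may be enqueued. */
static int predecessor_arrived(uint32_t request_id)
{
        rest_pre_count[request_id]--;           /* map_upsert() plays this role in the diff */
        return rest_pre_count[request_id] == 0; /* fire only once every input is present   */
}

int main(void)
{
        uint32_t request_id   = 3;
        uint32_t predecessors = 3;

        rest_pre_count[request_id] = predecessors;

        for (uint32_t p = 0; p < predecessors; p++) {
                if (predecessor_arrived(request_id)) {
                        /* concatenate_outputs() and global_request_scheduler_add() would run here. */
                        printf("request %u: all %u predecessor outputs present, enqueue\n",
                               request_id, predecessors);
                } else {
                        printf("request %u: still waiting on %u predecessor(s)\n",
                               request_id, rest_pre_count[request_id]);
                }
        }
        return 0;
}

Only the final arrival enqueues the request, so the successor runs exactly once, with the concatenated inputs.
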