@@ -73,11 +73,21 @@ current_sandbox_start(void)
	sandbox_initialize_stdio(sandbox);

	int next_module_idx = sandbox->module->next_module_count;
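
	/* Bookkeeping for DAG fan-in: sandbox_req_map buffers the pending sandbox_request for a fan-in
	 * node (keyed by the next module's name plus the request id), and sandbox_request_id tracks how
	 * many predecessors of that node have not yet delivered their output. */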
	static struct hashmap *sandbox_req_map    = NULL;
	static struct hashmap *sandbox_request_id = NULL;

	if (sandbox_req_map == NULL || sandbox_request_id == NULL) {
		if (sandbox_req_map == NULL) {
			sandbox_req_map = malloc(sizeof(struct hashmap));
			assert(sandbox_req_map != NULL);
			map_init(sandbox_req_map);
		}

		if (sandbox_request_id == NULL) {
			sandbox_request_id = malloc(sizeof(struct hashmap));
			assert(sandbox_request_id != NULL);
			map_init(sandbox_request_id);
		}
	}
@@ -130,62 +140,163 @@ current_sandbox_start(void)
		 */
		goto err;
	} else if (next_module != NULL) {
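		/* Three cases follow: (1) fan-out or a plain chain (each successor has a single predecessor):
		 * enqueue one request per successor immediately; (2) fan-in (a single successor with several
		 * predecessors): merge this sandbox's output into the buffered request and enqueue it only
		 * when the last predecessor finishes; (3) any other DAG shape is rejected. */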
		size_t next_module_pre_count_flag = next_module[0]->pre_module_count;
		assert(next_module_pre_count_flag);

		if (next_module_idx > 1 || (next_module_idx == 1 && next_module_pre_count_flag == 1)) {
			/* Generate a new request, copy the current sandbox's output to the next request's buffer, and put it to the global queue */
			ssize_t output_length   = sandbox->request_response_data_length - sandbox->request_length;
			char *  pre_func_output = (char *)malloc(output_length);
			if (!pre_func_output) {
				fprintf(stderr, "Failed to allocate memory for the previous output: %s\n", strerror(errno));
				goto err;
			}
			memcpy(pre_func_output, sandbox->request_response_data + sandbox->request_length, output_length);

			uint64_t enqueue_timestamp = __getcycles();
			//uint64_t current_rs = enqueue_timestamp - system_start_timestamp;
			//mem_log("time %lu request id:%d executing, name:%s remaining slack %lu\n", current_rs,
			//        sandbox->id, sandbox->module->name, sandbox->remaining_slack);

			assert(next_module_idx);
			assert(next_module);
			for (size_t i = 0; i < next_module_idx; i++) {
				struct module *next_module_node = next_module[i];
				assert(next_module_node);

				struct sandbox_request *sandbox_request =
				  sandbox_request_allocate(next_module_node, false, sandbox->request_length,
				                           next_module_node->name, sandbox->client_socket_descriptor,
				                           (const struct sockaddr *)&sandbox->client_address,
				                           sandbox->request_arrival_timestamp, enqueue_timestamp,
				                           sandbox->remaining_slack, true, pre_func_output, output_length);
				/* TODO: All sandboxes in the chain share the same request id, but sandbox_request_allocate()
				 * will busy-wait to generate a unique id. Should we optimize this here?
				 */
				sandbox_request->id = sandbox->id;
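
				/* With OPT_AVOID_GLOBAL_QUEUE, a next request that inherits this sandbox's absolute
				 * deadline is turned into a sandbox right away and placed on this worker's local run
				 * queue, skipping the global request scheduler. */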
#ifdef OPT_AVOID_GLOBAL_QUEUE
				/* TODO: The running time of the current sandbox includes the next sandbox's initialization time. Does that matter? */
				if (sandbox->absolute_deadline == sandbox_request->absolute_deadline) {
					/* Put the next sandbox on the local run queue to reduce the overhead of the global queue */
					struct sandbox *next_sandbox = sandbox_allocate(sandbox_request);
					if (!next_sandbox) {
						free(sandbox_request);
						goto err;
					}

					assert(next_sandbox->state == SANDBOX_INITIALIZED);
					sandbox_set_as_runnable(next_sandbox, SANDBOX_INITIALIZED);
				} else {
					/* Add to the Global Sandbox Request Scheduler */
					global_request_scheduler_add(sandbox_request);
				}
#else
				/* Add to the Global Sandbox Request Scheduler */
				global_request_scheduler_add(sandbox_request);
#endif
			}

			/* Remove the client fd from epoll if it is the first sandbox in the chain */
			if (sandbox->request_from_outside) {
				sandbox_remove_from_epoll(sandbox);
			}
			sandbox_set_as_returned(sandbox, SANDBOX_RUNNING);
		} else if (next_module_idx == 1 && next_module_pre_count_flag > 1) {
			/* Fan-in node: build the hash-table key by prefixing the request id with the next module's
			 * name (the "module handle"), so that keys stay unique per module */
			struct module *next_module_node = next_module[0];
			assert(next_module_node);

			char *cur_request_id = NULL;
			int   key_len        = snprintf(NULL, 0, "%s%lu", next_module_node->name, sandbox->id) + 1;
			cur_request_id = (char *)malloc(key_len);
			assert(cur_request_id);
			snprintf(cur_request_id, key_len, "%s%lu", next_module_node->name, sandbox->id);

			uint32_t ret_value_len;
			/* The map stores the number of predecessors that are still outstanding for this key
			 * (written as a uint32_t below, so read it back as one) */
			uint32_t *request_id = (uint32_t *)map_get(sandbox_request_id, cur_request_id,
			                                           strlen(cur_request_id), &ret_value_len);
			if (!request_id) {
				/* No entry yet: this sandbox is the first predecessor to finish, so buffer its output in
				 * a new request and wait for the return values of the other predecessor sandboxes */
				ssize_t output_length   = sandbox->request_response_data_length - sandbox->request_length;
				char *  pre_func_output = (char *)malloc(output_length);
				if (!pre_func_output) {
					fprintf(stderr, "Failed to allocate memory for the previous output: %s\n", strerror(errno));
					goto err;
				}
				memcpy(pre_func_output, sandbox->request_response_data + sandbox->request_length, output_length);

				uint64_t enqueue_timestamp = __getcycles();
				//uint64_t current_rs = enqueue_timestamp - system_start_timestamp;
				//mem_log("time %lu request id:%d executing, name:%s remaining slack %lu\n", current_rs,
				//        sandbox->id, sandbox->module->name, sandbox->remaining_slack);

				struct sandbox_request *sandbox_request =
				  sandbox_request_allocate(next_module_node, false, sandbox->request_length,
				                           next_module_node->name, sandbox->client_socket_descriptor,
				                           (const struct sockaddr *)&sandbox->client_address,
				                           sandbox->request_arrival_timestamp, enqueue_timestamp,
				                           sandbox->remaining_slack, true, pre_func_output, output_length);
				/* TODO: All sandboxes in the chain share the same request id, but sandbox_request_allocate()
				 * will busy-wait to generate a unique id. Should we optimize this here?
				 */
				sandbox_request->id = sandbox->id;
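
				/* Counting protocol: with pre_module_count == 3, for example, the first predecessor to
				 * finish stores 2 below; each later arrival decrements the stored count, and the merged
				 * request is enqueued once it reaches 0 (see the else branch). */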
				uint32_t module_pre_count = next_module[0]->pre_module_count;
				module_pre_count--;
				assert(module_pre_count);
				map_set(sandbox_request_id, cur_request_id, strlen(cur_request_id), &module_pre_count, sizeof(uint32_t), true);
				map_set(sandbox_req_map, cur_request_id, strlen(cur_request_id), sandbox_request, sizeof(struct sandbox_request *), false);
			} else {
				uint32_t rest_pre_count = *request_id;
				assert(rest_pre_count >= 1);

				struct sandbox_request *sandbox_request = map_get(sandbox_req_map, cur_request_id,
				                                                   strlen(cur_request_id), &ret_value_len);
				assert(sandbox_request);

				/* Copy this sandbox's output into pre_func_output, NUL-terminated so it can be appended
				 * with %s below */
				ssize_t output_length   = sandbox->request_response_data_length - sandbox->request_length;
				char *  pre_func_output = (char *)malloc(output_length + 1);
				if (!pre_func_output) {
					fprintf(stderr, "Failed to allocate memory for the previous output: %s\n", strerror(errno));
					goto err;
				}
				memcpy(pre_func_output, sandbox->request_response_data + sandbox->request_length, output_length);
				pre_func_output[output_length] = '\0';

				uint64_t enqueue_timestamp = __getcycles();
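
				/* Append this sandbox's output to what the pending request has already accumulated:
				 * predecessor outputs are joined into previous_function_output with '+' as the separator,
				 * and output_length tracks the combined payload size */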
				const char *previous_output = sandbox_request->previous_function_output
				                                ? sandbox_request->previous_function_output
				                                : "";
				int   new_output_length = strlen(previous_output) + output_length + 2;
				char *new_output        = (char *)malloc(new_output_length);
				if (!new_output) {
					fprintf(stderr, "Failed to allocate memory for the new output: %s\n", strerror(errno));
					free(pre_func_output);
					goto err;
				}
				snprintf(new_output, new_output_length, "%s+%s", previous_output, pre_func_output);
				free(sandbox_request->previous_function_output);
				sandbox_request->previous_function_output = new_output;
				free(pre_func_output);
				sandbox_request->output_length += output_length;

				rest_pre_count--;
				if (rest_pre_count != 0) {
					/* Still waiting on other predecessors: persist the updated count */
					map_upsert(sandbox_request_id, cur_request_id, strlen(cur_request_id), &rest_pre_count, sizeof(uint32_t));
				} else {
					/* This was the last predecessor: enqueue the merged request and drop the bookkeeping entries */
					global_request_scheduler_add(sandbox_request);
					map_delete(sandbox_req_map, cur_request_id, strlen(cur_request_id));
					map_delete(sandbox_request_id, cur_request_id, strlen(cur_request_id));
				}
			}

			if (sandbox->request_from_outside) {
				sandbox_remove_from_epoll(sandbox);
			}
			sandbox_set_as_returned(sandbox, SANDBOX_RUNNING);
		} else {
			error_message = "the structure of the DAG is not supported\n";
			goto err;
		}
	} else {
		/* Retrieve the result, construct the HTTP response, and send to client */
		if (sandbox_send_response(sandbox) < 0) {