Add LLF scheduling; refine SRSF scheduling details

sledge_graph
hwwang 3 weeks ago
parent 87ba8af8a0
commit 87672b74c4

.gitignore vendored (3 additions)

@@ -58,6 +58,9 @@ runtime/tests/**/*.txt
runtime/tests/**/*.xlsx
runtime/tests/test_data
runtime/tests/*.log
data/*.txt
data/*.log
data/*.json
# Swap Files
*.swp

@@ -0,0 +1,30 @@
import sys

def calculate_average(input_file, column_index):
    total = 0
    count = 0
    with open(input_file, 'r') as f:
        for line in f:
            columns = line.strip().split(',')
            if len(columns) > column_index:
                try:
                    value = float(columns[column_index])
                    total += value
                    count += 1
                except ValueError:
                    print(f"invalid value: {columns[column_index]}")
    if count > 0:
        average = total / count
        print(f"column {column_index + 1} average: {average}")
    else:
        print("no valid values")

if __name__ == "__main__":
    if len(sys.argv) != 3:
        print("Usage: python calculate_average.py input_file column_index")
    else:
        input_file = sys.argv[1]
        column_index = int(sys.argv[2]) - 1
        calculate_average(input_file, column_index)
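Usage note (inferred from the script itself): the column index on the command line is 1-based and is converted to a 0-based index internally, e.g. "python calculate_average.py resize1.txt 2" averages the second comma-separated field; the file name here is only illustrative.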

@@ -0,0 +1,24 @@
# split_logs.py
def split_logs(input_file):
    modules = {
        "resize1": [],
        "png2bmp1": [],
        "lpd_wasm1": [],
        "cifar10_1": [],
        "work1": []
    }
    with open(input_file, 'r') as f:
        for line in f:
            for module in modules.keys():
                if module in line:
                    modules[module].append(line.strip())
                    break
    for module, entries in modules.items():
        with open(f"{module}.txt", 'w') as outfile:
            outfile.write("\n".join(entries) + "\n")

if __name__ == "__main__":
    split_logs("sledge.log")

@@ -0,0 +1,20 @@
import sys

def split_columns(input_file):
    columns = []
    with open(input_file, 'r') as f:
        for line in f:
            parts = line.strip().split(',')
            for i, part in enumerate(parts):
                if len(columns) <= i:
                    columns.append([])
                columns[i].append(part)
    for i, column in enumerate(columns):
        with open(f"{input_file[:-4]}_column_{i + 1}.txt", 'w') as outfile:
            outfile.write("\n".join(column) + "\n")

if __name__ == "__main__":
    for input_file in sys.argv[1:]:
        split_columns(input_file)

@@ -1,46 +0,0 @@
Summary:
Total: 30.0061 secs
Slowest: 0.1721 secs
Fastest: 0.0035 secs
Average: 0.0088 secs
Requests/sec: 227.3541
Total data: 327456 bytes
Size/request: 48 bytes
Response time histogram:
0.004 [1] |
0.020 [6695] |■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■
0.037 [114] |■
0.054 [4] |
0.071 [0] |
0.088 [5] |
0.105 [1] |
0.122 [1] |
0.138 [0] |
0.155 [0] |
0.172 [1] |
Latency distribution:
10% in 0.0059 secs
25% in 0.0068 secs
50% in 0.0079 secs
75% in 0.0095 secs
90% in 0.0119 secs
95% in 0.0143 secs
99% in 0.0237 secs
Details (average, fastest, slowest):
DNS+dialup: 0.0004 secs, 0.0035 secs, 0.1721 secs
DNS-lookup: 0.0000 secs, 0.0000 secs, 0.0000 secs
req write: 0.0001 secs, 0.0000 secs, 0.0067 secs
resp wait: 0.0081 secs, 0.0031 secs, 0.1715 secs
resp read: 0.0002 secs, 0.0000 secs, 0.0049 secs
Status code distribution:
[200] 6822 responses

@@ -44,7 +44,7 @@ extern uint32_t runtime_worker_threads_count;
extern int runtime_worker_threads_argument[RUNTIME_WORKER_THREAD_CORE_COUNT];
extern uint64_t runtime_worker_threads_deadline[RUNTIME_WORKER_THREAD_CORE_COUNT];
extern uint64_t runtime_worker_threads_remaining_slack[RUNTIME_WORKER_THREAD_CORE_COUNT];
extern uint64_t runtime_worker_threads_laxity[RUNTIME_WORKER_THREAD_CORE_COUNT];
extern void runtime_initialize(void);
extern void runtime_set_pthread_prio(pthread_t thread, unsigned int nice);
extern void runtime_set_resource_limits_to_max(void);

@@ -124,6 +124,15 @@ sandbox_get_mdl_priority(void *element)
return remaining_slack_mdl;
};
static inline uint64_t
sandbox_get_llf_priority(void *element)
{
struct sandbox *sandbox = (struct sandbox *)element;
uint64_t now = __getcycles();
uint64_t Laxity_llf = sandbox->laxity - (now - sandbox->last_update_timestamp);
return Laxity_llf;
};
/**
* Maps a sandbox fd to an underlying host fd
* Returns error condition if the file_descriptor to set does not contain sandbox preopen magic
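For context, the new sandbox_get_llf_priority() above keys the runqueue on the sandbox's remaining laxity minus the cycles elapsed since its last bookkeeping update, so the key shrinks while a sandbox waits. A minimal standalone sketch of that ordering, with illustrative names that are not part of the commit:

/* Illustrative sketch only: how an LLF key of this shape orders two tasks. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct task { uint64_t laxity; uint64_t last_update; };

static uint64_t llf_key(const struct task *t, uint64_t now)
{
    /* Same shape as sandbox_get_llf_priority(): laxity minus cycles elapsed
     * since the last update; a min-heap runs the smallest key first. */
    return t->laxity - (now - t->last_update);
}

int main(void)
{
    struct task a = { 1000000, 0 }, b = { 800000, 0 };
    uint64_t now = 300000;
    /* a -> 700000, b -> 500000, so b would be scheduled first. */
    printf("a=%" PRIu64 " b=%" PRIu64 "\n", llf_key(&a, now), llf_key(&b, now));
    return 0;
}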

@@ -32,6 +32,7 @@ struct sandbox_request {
uint64_t absolute_deadline; /* cycles */
uint64_t last_update_timestamp; /* cycles */
uint64_t remaining_slack; /* cycles */
uint64_t laxity; /* cycles */
struct sandbox_pre_functions_output *pre_functions_output;
pthread_spinlock_t lock;
char * previous_function_output;
@@ -82,7 +83,7 @@ sandbox_request_log_allocation(struct sandbox_request *sandbox_request)
static inline struct sandbox_request *
sandbox_request_allocate(struct module *module, bool request_from_outside, ssize_t request_length,
char *arguments, int socket_descriptor, const struct sockaddr *socket_address,
-uint64_t request_arrival_timestamp, uint64_t enqueue_timestamp, uint64_t remaining_slack,
+uint64_t request_arrival_timestamp, uint64_t enqueue_timestamp, uint64_t remaining_slack, uint64_t laxity,
uint64_t admissions_estimate, char *previous_function_output, ssize_t output_length)
{
struct sandbox_request *sandbox_request = (struct sandbox_request *)malloc(sizeof(struct sandbox_request));
@@ -104,6 +105,7 @@ sandbox_request_allocate(struct module *module, bool request_from_outside, ssize
sandbox_request->previous_request_length = request_length;
sandbox_request->last_update_timestamp = enqueue_timestamp;
sandbox_request->remaining_slack = remaining_slack;
sandbox_request->laxity = laxity;
/*Avoid pointer suspension*/
sandbox_request->pre_functions_output = NULL;
@@ -229,4 +231,3 @@ concatenate_outputs(struct sandbox_request *request) {
request->output_length = total_length;
request->previous_function_output = concatenated_output;
}

@@ -53,6 +53,7 @@ sandbox_set_as_initialized(struct sandbox *sandbox, struct sandbox_request *sand
/* Copy the socket descriptor, address, and arguments of the client invocation */
sandbox->absolute_deadline = sandbox_request->absolute_deadline;
sandbox->remaining_slack = sandbox_request->remaining_slack;
sandbox->laxity = sandbox_request->laxity;
sandbox->last_update_timestamp = sandbox_request->last_update_timestamp;
sandbox->arguments = (void *)sandbox_request->arguments;
sandbox->client_socket_descriptor = sandbox_request->socket_descriptor;

@@ -25,11 +25,13 @@ sandbox_set_as_running(struct sandbox *sandbox, sandbox_state_t last_state)
//uint64_t last = sandbox->last_update_timestamp;
//uint64_t last_rs = sandbox->remaining_slack;
sandbox->remaining_slack -= (now - sandbox->last_update_timestamp);
sandbox->laxity -= (now - sandbox->last_update_timestamp);
sandbox->last_update_timestamp = now;
sandbox->runnable_duration += duration_of_last_state;
current_sandbox_set(sandbox);
runtime_worker_threads_deadline[worker_thread_idx] = sandbox->absolute_deadline;
runtime_worker_threads_remaining_slack[worker_thread_idx] = sandbox->remaining_slack;
runtime_worker_threads_laxity[worker_thread_idx] = sandbox->laxity;
//mem_log("time %lu sandbox starts running, request id:%d name %s obj=%p remaining slack %lu, last_rs %lu now %lu last %lu \n", start_execution, //mem_log("time %lu sandbox starts running, request id:%d name %s obj=%p remaining slack %lu, last_rs %lu now %lu last %lu \n", start_execution,
// sandbox->id, sandbox->module->name, sandbox, sandbox->remaining_slack, last_rs, now, last); // sandbox->id, sandbox->module->name, sandbox, sandbox->remaining_slack, last_rs, now, last);
/* Does not handle context switch because the caller knows if we need to use fast or slow switched */ /* Does not handle context switch because the caller knows if we need to use fast or slow switched */
@ -49,3 +51,5 @@ sandbox_set_as_running(struct sandbox *sandbox, sandbox_state_t last_state)
sandbox->last_state_change_timestamp = now; sandbox->last_state_change_timestamp = now;
sandbox->state = SANDBOX_RUNNING; sandbox->state = SANDBOX_RUNNING;
} }

@@ -54,8 +54,9 @@ struct sandbox {
uint64_t response_timestamp; /* Timestamp when response is sent */
uint64_t completion_timestamp; /* Timestamp when sandbox runs to completion */
uint64_t last_state_change_timestamp; /* Used for bookkeeping of actual execution time */
-uint64_t last_update_timestamp; /* Used for bookkeeping timestamp for SRSF */
+uint64_t last_update_timestamp; /* Used for bookkeeping timestamp for SRSF && LLF */
uint64_t remaining_slack; /* Cycles */
uint64_t laxity; /* Cycles */
#ifdef LOG_SANDBOX_MEMORY_PROFILE
uint32_t page_allocation_timestamps[SANDBOX_PAGE_ALLOCATION_TIMESTAMP_COUNT];
size_t page_allocation_timestamps_size;

@@ -29,7 +29,8 @@ enum SCHEDULER
SCHEDULER_FIFO = 0,
SCHEDULER_EDF = 1,
SCHEDULER_SRSF = 2,
-SCHEDULER_MDL = 3
+SCHEDULER_MDL = 3,
SCHEDULER_LLF = 4,
};
extern enum SCHEDULER scheduler;
@@ -154,6 +155,45 @@ err_allocate:
goto done;
}
static inline struct sandbox *
scheduler_LLF_get_next()
{
/* Get the deadline of the sandbox at the head of the local request queue */
struct sandbox * local = local_runqueue_get_next();
uint64_t local_Laxity = local == NULL ? UINT64_MAX : local->remaining_slack;
struct sandbox_request *request = NULL;
uint64_t global_local_Laxity = global_request_scheduler_peek();
/* Try to pull and allocate from the global queue if earlier
* This will be placed at the head of the local runqueue */
if (global_local_Laxity < local_Laxity && (local_workload_count <=2 || local_runqueue_count == 0)) {
if (global_request_scheduler_remove_if_earlier(&request, local_Laxity) == 0) {
//uint64_t pop_time = __getcycles() - system_start_timestamp;
//mem_log("time %lu remove from GQ, request id:%d name %s remaining slack %lu\n", pop_time,
// request->id, request->module->name, request->remaining_slack);
assert(request != NULL);
struct sandbox *global = sandbox_allocate(request);
if (!global) goto err_allocate;
assert(global->state == SANDBOX_INITIALIZED);
sandbox_set_as_runnable(global, SANDBOX_INITIALIZED);
}
}
/* Return what is at the head of the local runqueue or NULL if empty */
done:
return local_runqueue_get_next();
err_allocate:
client_socket_send(request->socket_descriptor, 503);
client_socket_close(request->socket_descriptor, &request->socket_address);
free(request);
request = NULL;
goto done;
}
static inline struct sandbox *
scheduler_fifo_get_next()
{
@@ -200,6 +240,8 @@ scheduler_get_next()
return scheduler_fifo_get_next();
case SCHEDULER_MDL:
return scheduler_MDL_get_next();
case SCHEDULER_LLF:
return scheduler_LLF_get_next();
default:
panic("Unimplemented\n");
}
@@ -218,6 +260,9 @@ scheduler_initialize()
case SCHEDULER_MDL:
global_request_scheduler_minheap_initialize(SCHEDULER_MDL);
break;
case SCHEDULER_LLF:
global_request_scheduler_minheap_initialize(SCHEDULER_LLF);
break;
case SCHEDULER_FIFO:
global_request_scheduler_deque_initialize();
break;
@@ -238,6 +283,10 @@ scheduler_runqueue_initialize()
break;
case SCHEDULER_MDL:
local_runqueue_minheap_initialize(SCHEDULER_MDL);
break;
case SCHEDULER_LLF:
local_runqueue_minheap_initialize(SCHEDULER_LLF);
break;
case SCHEDULER_FIFO:
local_runqueue_list_initialize();
break;
@@ -263,8 +312,8 @@ scheduler_preempt(ucontext_t *user_context)
struct sandbox *current = current_sandbox_get();
assert(current != NULL);
assert(current->state == SANDBOX_RUNNING);
uint64_t RR_least_time = 5000 * runtime_processor_speed_MHz;
-if (current-> remaining_slack <= 5000 * runtime_processor_speed_MHz) {
+if (current-> remaining_slack <= RR_least_time || current->laxity <= RR_least_time) {
return;
}
/* This is for better state-change bookkeeping */
@@ -340,10 +389,12 @@ scheduler_print(enum SCHEDULER variant)
return "FIFO";
case SCHEDULER_EDF:
return "EDF";
-case SCHEDULER_MDL:
-return "MDL";
case SCHEDULER_SRSF:
return "SRSF";
+case SCHEDULER_MDL:
+return "MDL";
+case SCHEDULER_LLF:
+return "LLF";
}
}
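One note on the scheduler_preempt() change above: runtime_processor_speed_MHz is cycles per microsecond, so RR_least_time = 5000 * runtime_processor_speed_MHz is roughly a 5 ms window, and preemption is skipped once either the remaining slack or the laxity falls inside it. A hedged sketch of that guard as plain arithmetic (names are illustrative, not the runtime's API):

/* Illustrative sketch of the preemption guard; not the runtime's code. */
#include <stdbool.h>
#include <stdint.h>

static bool skip_preemption(uint64_t remaining_slack, uint64_t laxity, uint64_t cpu_mhz)
{
    uint64_t rr_least_time = 5000 * cpu_mhz; /* cycle count of a ~5000 us window */
    return remaining_slack <= rr_least_time || laxity <= rr_least_time;
}

/* Example: with SLEDGE_CPU_SPEED=2400 the threshold is 12,000,000 cycles, about 5 ms. */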

@@ -58,6 +58,13 @@ current_sandbox_disable_preemption(struct sandbox *sandbox)
}
}
static inline void
current_sandbox_get_newlaxity(struct sandbox *sandbox, uint64_t now)
{
assert(sandbox);
sandbox->remaining_slack -= (now - sandbox->last_update_timestamp);
}
/**
* Sandbox execution logic
* Handles setup, request parsing, WebAssembly initialization, function execution, response building and
@@ -170,13 +177,14 @@ current_sandbox_start(void)
goto err;
}
memcpy(individual_pre_func_output, pre_func_output, output_length);
-uint64_t enqueue_timestamp = __getcycles();
+uint64_t now = __getcycles();
current_sandbox_get_newlaxity(sandbox, now);
struct sandbox_request *sandbox_request =
sandbox_request_allocate(next_module_node, false, sandbox->request_length,
next_module_node->name, sandbox->client_socket_descriptor,
(const struct sockaddr *)&sandbox->client_address,
-sandbox->request_arrival_timestamp, enqueue_timestamp,
-sandbox->remaining_slack, true, NULL, 0);
+sandbox->request_arrival_timestamp, now,
+sandbox->remaining_slack, sandbox->laxity, true, NULL, 0);
/* TODO: All sandboxs in the chain share the same request id, but sandbox_request_allocate()
* will busy-wait to generate an unique id, should we optimize it here?
*/
@@ -242,15 +250,16 @@ current_sandbox_start(void)
uint64_t *request_id = (uint64_t *)map_get(sandbox_request_id, cur_request_id, strlen(cur_request_id), &ret_value_len);
bool mapflag = false;
if (!request_id) {
-uint64_t enqueue_timestamp = __getcycles();
+uint64_t now = __getcycles();
current_sandbox_get_newlaxity(sandbox, now);
//mem_log("time %lu request id:%d executing, name:%s remaining slack %lu\n", current_rs, //mem_log("time %lu request id:%d executing, name:%s remaining slack %lu\n", current_rs,
// sandbox->id, sandbox->module->name, sandbox->remaining_slack); // sandbox->id, sandbox->module->name, sandbox->remaining_slack);
struct sandbox_request *sandbox_request = struct sandbox_request *sandbox_request =
sandbox_request_allocate(next_module_node, false, sandbox->request_length, sandbox_request_allocate(next_module_node, false, sandbox->request_length,
next_module_node->name, sandbox->client_socket_descriptor, next_module_node->name, sandbox->client_socket_descriptor,
(const struct sockaddr *)&sandbox->client_address, (const struct sockaddr *)&sandbox->client_address,
sandbox->request_arrival_timestamp, enqueue_timestamp, sandbox->request_arrival_timestamp, now,
sandbox->remaining_slack, true, NULL, 0); sandbox->remaining_slack,sandbox->laxity, true, NULL, 0);
/* TODO: All sandboxs in the chain share the same request id, but sandbox_request_allocate() /* TODO: All sandboxs in the chain share the same request id, but sandbox_request_allocate()
* will busy-wait to generate an unique id, should we optimize it here? * will busy-wait to generate an unique id, should we optimize it here?
*/ */

@@ -89,6 +89,16 @@ sandbox_request_get_priority_mdl_fn(void *element)
return remaining_slack_mdl;
};
uint64_t
sandbox_request_get_priority_llf_fn(void *element)
{
struct sandbox_request *sandbox_request = (struct sandbox_request *)element;
uint64_t now = __getcycles();
uint64_t Laxity_llf = sandbox_request->laxity - (now - sandbox_request->last_update_timestamp);
return Laxity_llf;
};
/**
* Initializes the variant and registers against the polymorphic interface
*/
@@ -101,6 +111,8 @@ global_request_scheduler_minheap_initialize(enum SCHEDULER scheduler)
global_request_scheduler_minheap = priority_queue_initialize(40960, true, sandbox_request_get_priority_srsf_fn);
} else if (scheduler == SCHEDULER_MDL) {
global_request_scheduler_minheap = priority_queue_initialize(40960, true, sandbox_request_get_priority_mdl_fn);
} else if (scheduler == SCHEDULER_LLF) {
global_request_scheduler_minheap = priority_queue_initialize(40960, true, sandbox_request_get_priority_llf_fn);
}
struct global_request_scheduler_config config = {

@@ -186,10 +186,10 @@ listener_thread_main(void *dummy)
while (rear != front)
{
struct module *current_module = queue[front++];
-if (scheduler == SCHEDULER_SRSF || scheduler == SCHEDULER_EDF)
+if (scheduler == SCHEDULER_SRSF || scheduler == SCHEDULER_EDF || scheduler == SCHEDULER_LLF)
{
estimated_execution_time += admission_info_get_percentile(&current_module->admissions_info);
-}else if (scheduler == SCHEDULER_MDL){
+}else if (scheduler == SCHEDULER_MDL ){
/* Set a baseline model, and if the predicted parameters of the model are more than 1.2 times the parameters of the baseline, then select the baseline parameters */
uint64_t estimated_execution_base_percentile = 0, estimated_execution_model = 0;
estimated_execution_base_percentile = admission_info_get_percentile(&current_module->admissions_info);
@@ -227,7 +227,7 @@ listener_thread_main(void *dummy)
struct sandbox_request *sandbox_request =
sandbox_request_allocate(module, true, 0, module->name, client_socket,
(const struct sockaddr *)&client_address,
-request_arrival_timestamp, request_arrival_timestamp,remaining_slack,
+request_arrival_timestamp, request_arrival_timestamp,remaining_slack, remaining_slack,
work_admitted, NULL, 0);
/* Add to the Global Sandbox Request Scheduler */
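The baseline comment in the MDL branch above describes a clamp: if the model's predicted execution estimate exceeds 1.2 times the baseline percentile, the baseline value is used instead. A small sketch of that rule under those assumptions (the helper name is hypothetical, not part of the commit):

/* Hypothetical helper illustrating the 1.2x clamp described above. */
#include <stdint.h>

static uint64_t clamp_estimate(uint64_t model_estimate, uint64_t baseline_percentile)
{
    /* Fall back to the baseline when the model predicts more than 1.2x the baseline
     * (integer form: model * 5 > baseline * 6). */
    if (model_estimate * 5 > baseline_percentile * 6) return baseline_percentile;
    return model_estimate;
}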

@@ -84,7 +84,9 @@ local_runqueue_minheap_initialize(enum SCHEDULER scheduler)
local_runqueue_minheap = priority_queue_initialize(10240, false, sandbox_get_srsf_priority);
} else if (scheduler == SCHEDULER_MDL) {
local_runqueue_minheap = priority_queue_initialize(10240, false, sandbox_get_mdl_priority);
-} else {
+} else if (scheduler == SCHEDULER_LLF) {
+local_runqueue_minheap = priority_queue_initialize(10240, false, sandbox_get_llf_priority);
+} else{
panic("Invalid scheduler type %d\n", scheduler);
}

@@ -188,6 +188,9 @@ runtime_configure()
scheduler = SCHEDULER_SRSF;
} else if (strcmp(scheduler_policy, "MDL") == 0) {
scheduler = SCHEDULER_MDL;
} else if (strcmp(scheduler_policy, "LLF") == 0)
{
scheduler = SCHEDULER_LLF;
} else {
panic("Invalid scheduler policy: %s. Must be {EDF|FIFO}\n", scheduler_policy);
}
@@ -199,7 +202,7 @@ runtime_configure()
if (strcmp(sigalrm_policy, "BROADCAST") == 0) {
runtime_sigalrm_handler = RUNTIME_SIGALRM_HANDLER_BROADCAST;
} else if (strcmp(sigalrm_policy, "TRIAGED") == 0) {
-if (unlikely(scheduler != SCHEDULER_EDF && scheduler != SCHEDULER_SRSF && scheduler != SCHEDULER_MDL)) panic("triaged sigalrm handlers are only valid with EDF and SRSF\n");
+if (unlikely(scheduler != SCHEDULER_EDF && scheduler != SCHEDULER_SRSF && scheduler != SCHEDULER_MDL && scheduler != SCHEDULER_LLF)) panic("triaged sigalrm handlers are only valid with EDF and SRSF\n");
runtime_sigalrm_handler = RUNTIME_SIGALRM_HANDLER_TRIAGED;
} else {
panic("Invalid sigalrm policy: %s. Must be {BROADCAST|TRIAGED}\n", sigalrm_policy);

@@ -33,6 +33,7 @@ int runtime_worker_threads_argument[RUNTIME_WORKER_THREAD_CORE_COUNT] = {
/* The active deadline of the sandbox running on each worker thread */
uint64_t runtime_worker_threads_deadline[RUNTIME_WORKER_THREAD_CORE_COUNT] = { UINT64_MAX };
uint64_t runtime_worker_threads_remaining_slack[RUNTIME_WORKER_THREAD_CORE_COUNT] = { UINT64_MAX };
uint64_t runtime_worker_threads_laxity[RUNTIME_WORKER_THREAD_CORE_COUNT] = { UINT64_MAX };
/******************************************
* Shared Process / Listener Thread Logic *

@@ -99,6 +99,11 @@ sigalrm_propagate_workers(siginfo_t *signal_info)
uint64_t global_slack = global_request_scheduler_peek();
if (global_slack < local_remaining_slack) pthread_kill(runtime_worker_threads[i], SIGALRM);
continue;
} else if (scheduler == SCHEDULER_LLF) {
uint64_t local_Laxity = runtime_worker_threads_laxity[i];
uint64_t global_Laxity = global_request_scheduler_peek();
if (global_Laxity < local_Laxity) pthread_kill(runtime_worker_threads[i], SIGALRM);
continue;
}
}
case RUNTIME_SIGALRM_HANDLER_BROADCAST: {

@@ -20,10 +20,10 @@ declare project_path="$(
echo $project_path
path=`pwd`
#export SLEDGE_DISABLE_PREEMPTION=true
-export SLEDGE_CPU_SPEED=3300
-#export SLEDGE_SCHEDULER=SRSF
-export SLEDGE_SIGALRM_HANDLER=BROADCAST
-#export SLEDGE_SIGALRM_HANDLER=TRIAGED
+#export SLEDGE_CPU_SPEED=3300
+export SLEDGE_SCHEDULER=EDF
+#export SLEDGE_SIGALRM_HANDLER=BROADCAST
+export SLEDGE_SIGALRM_HANDLER=TRIAGED
#export SLEDGE_NWORKERS=1
export SLEDGE_SCHEDULER=EDF
export SLEDGE_SANDBOX_PERF_LOG=$path/$output
@@ -37,7 +37,8 @@ cd $project_path/runtime/bin
#LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/mulitple_linear_chain.json
#LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_multiple_image_processing.json
#LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_multiple_image_processing3.json
LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_multiple_image_processing4.json
#LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/my_fibonacci.json
#LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_sodresize.json
#LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/my_sodresize.json
#LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_multiple_image_processing4.json
LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_multiple_image_processing_graph.json

@@ -21,7 +21,7 @@ echo $project_path
path=`pwd`
#export SLEDGE_DISABLE_PREEMPTION=true
export SLEDGE_CPU_SPEED=2400
-export SLEDGE_SCHEDULER=MDL
+export SLEDGE_SCHEDULER=LLF
export SLEDGE_SIGALRM_HANDLER=TRIAGED
#export SLEDGE_SIGALRM_HANDLER=TRIAGED
#export SLEDGE_NWORKERS=1

@@ -22,11 +22,11 @@ path=`pwd`
#export SLEDGE_DISABLE_PREEMPTION=true
export SLEDGE_CPU_SPEED=2500
export SLEDGE_SCHEDULER=FIFO
-export SLEDGE_SIGALRM_HANDLER=BROADCAST
+#xport SLEDGE_SIGALRM_HANDLER=BROADCAST
#export SLEDGE_SIGALRM_HANDLER=TRIAGED
#export SLEDGE_NWORKERS=16
#export SLEDGE_SCHEDULER=EDF
export SLEDGE_SANDBOX_PERF_LOG=$path/$output
echo $SLEDGE_SANDBOX_PERF_LOG
cd $project_path/runtime/bin
-LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_noop1.json
+#LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_noop1.json

@@ -0,0 +1,50 @@
#!/bin/bash

function usage {
    echo "Please enter the right parameters: current_rps(*5) add_step(*5) duratime"
    exit 1
}

if [ $# -ne 3 ]; then
    usage
fi

echo "current_rps(*5) add_step(*5) duratime"
#path="/home/njl/sledge/runtime/tests"
path="/home/hai/sledge/sledge/runtime/tests"
current_rps=$1
step=$2
duratime=$3
max_rps=0
max_latency=0
output="hey_test_max_rps.log"
server_log_file="test_rps.log"
loop=1
for loop in {1..5}; do
    $path/start-edf.sh $server_log_file >/dev/null 2>&1 &
    echo "sledge is running loop $loop"
    ./test_rps.sh $output $duratime $current_rps 5k.jpg 10000 2>&1 &
    pid1=$!
    wait $pid1
    $path/kill_sledge.sh
    # "Requests/sec" reported by hey for this round
    latency=$(grep "Requests" $output | awk -F ': ' '{print $2}')
    # Stop raising the offered load once throughput stops improving
    if (( $(echo "$latency < $max_rps" | bc -l) )); then
        break
    fi
    echo "loop_$loop RPS: $latency"
    max_rps=$latency
    current_rps=$((current_rps + step))
done
echo "Maximum RPS: $max_rps"

@@ -16,6 +16,6 @@ port=$5
echo "hey test"
-hey -disable-compression -disable-keepalive -disable-redirects -c 5 -q $rps -z $duration\s -t 0 -m GET -D "$image" "http://10.16.109.192:$port" > $output
+hey -disable-compression -disable-keepalive -disable-redirects -c 5 -q $rps -z $duration\s -t 0 -m GET -D "$image" "http://127.0.0.1:$port" > $output
#loadtest -c 5 --rps $rps -t $duration --method GET --data @$image "http://10.16.109.192:$port" > $output
#hey -disable-compression -disable-keepalive -disable-redirects -c 8 -q 50 -z $duration\s -t 0 -m GET -D "$image" "http://10.10.1.1:$port" > $output