edited readme

anubhav
Nadesh Seen 2 years ago
parent 6632d78875
commit 02accd6fd6

28
.gitignore vendored

@ -49,5 +49,33 @@ redis-input.json
flask_test.py
data1/
data2/
data3/
data4/
data5/
test.py
internet.sh
img_to_text.py
iluvatar-faas/
save_hg*
minio
.env
minio_test.py
controlplane/__pycache__/trigger_gateway.cpython-38.pyc
controlplane/.env
controlplane/tests3.py
function_modules/testaction/test.py

Binary file not shown.

@ -1,3 +0,0 @@
AWS_ACCESS_KEY_ID="AKIAYFB773UVZSOAVZN4"
AWS_SECRET_ACCESS_KEY="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"
AWS_REGION="ap-south-1"

@ -0,0 +1 @@
from . import *

@ -1,12 +0,0 @@
#!/bin/bash
# Build and push a function's Docker image from its source directory.
# Usage: ./register.sh <function_dir> <docker_image_name>
# e.g.:  ./register.sh /decode-function /decode decode-action [SAMPLE USE]
function_dir_name=$1
docker_image_name=$2
# Quote expansions and bail out if the directory is missing; otherwise
# chmod/buildAndPush.sh would run in whatever directory we started in.
cd "$function_dir_name" || exit 1
chmod -R 777 ./
./buildAndPush.sh "$docker_image_name"

@ -1,550 +0,0 @@
#!/usr/bin/env python3
import sys
import requests
import uuid
import re
import subprocess
import threading
import queue
import redis
import pickle
import json
import os
import time
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from flask import Flask, request,jsonify,send_file
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
import pymongo
import trigger_gateway
app = Flask(__name__)
action_url_mappings = {} #Store action->url mappings
action_properties_mapping = {} #Stores the action name and its corresponding properties
# NOTE(review): the three lists below are module-level mutable state shared
# by every request — results accumulate across invocations and access is
# not synchronized, so concurrent/repeated requests interfere.
responses = []
queue = []  # NOTE(review): shadows the imported ``queue`` module above
list_of_func_ids = []
def hello():
    """Print a fixed greeting (simple smoke-test helper)."""
    greeting = "Hello"
    print(greeting)
def preprocess(filename):
    """Populate the global ``action_url_mappings`` from *filename*.

    Each line is expected to look like ``/guest/<action-name> <url>``;
    the ``/guest/`` prefix is stripped and the pair is stored in the
    module-level mapping.
    """
    with open(filename) as handle:
        raw_lines = handle.readlines()
    cleaned = [entry.replace("\n", "").replace("/guest/", "") for entry in raw_lines]
    for record in cleaned:
        fields = record.split(' ')
        action_url_mappings[fields[0]] = fields[1]
def execute_thread(action,redis,url,json):
    """Thread target: POST *json* to the action's *url* and record the reply.

    Side effects on shared state (no locking — relies on list.append being
    atomic under the CPython GIL):
      - appends the reply's activation id to the global ``list_of_func_ids``
      - caches the pickled reply in redis under key ``<action>-output``
      - appends the reply to the global ``responses`` list
    NOTE(review): the parameters ``redis`` and ``json`` shadow the imported
    modules of the same names inside this function body.
    """
    reply = requests.post(url = url,json=json,verify=False)
    list_of_func_ids.append(reply.json()["activation_id"])
    redis.set(action+"-output",pickle.dumps(reply.json()))
    responses.append(reply.json())
def handle_parallel(queue,redis,action_properties_mapping,parallel_action_list):
    """Run a fan-out set of DAG actions concurrently and collect replies.

    For each action in *parallel_action_list*: enqueue its ``next`` node
    (deduplicated), wire its ``arguments`` from the pickled outputs of the
    actions named in ``outputs_from`` (a single dict when one upstream, a
    list when several), then fire all actions on threads via
    execute_thread and wait for them.

    Returns the global ``responses`` list accumulated by execute_thread.

    NOTE(review): after the loop, ``next_action`` holds the value from the
    LAST iteration — this raises NameError if *parallel_action_list* is
    empty and KeyError if that last ``next`` was "".  Also, ``output_list``
    is shared across iterations, so a second multi-input action sees the
    first one's outputs prepended.
    """
    thread_list = []
    output_list = [] # List to store the output of actions whose outputs are required by downstream operations
    for action in parallel_action_list:
        action_names = action_properties_mapping[action]["outputs_from"]
        next_action = action_properties_mapping[action]["next"]
        if(next_action!=""):
            if next_action not in queue:
                queue.append(next_action)
        if(len(action_names)==1): # if only output of one action is required
            key = action_names[0]+"-output"
            output = pickle.loads(redis.get(key))
            action_properties_mapping[action]["arguments"] = output
        else:
            for item in action_names:
                key = item+"-output"
                output = pickle.loads(redis.get(key))
                output_list.append(output)
            action_properties_mapping[action]["arguments"] = output_list
        url = action_url_mappings[action]
        thread_list.append(threading.Thread(target=execute_thread, args=[action,redis,url,action_properties_mapping[action]["arguments"]]))
    for thread in thread_list:
        thread.start()
    for thread in thread_list:
        thread.join()
    action_properties_mapping[next_action]["arguments"] = responses
    return responses
def create_redis_instance():
    """Connect to the shared Redis store (db 2) used for passing pickled
    intermediate action outputs between DAG nodes."""
    return redis.Redis(host="10.129.28.219", port=6379, db=2)
def get_redis_contents(r):
    """Pretty-print every pickled key/value pair stored in redis client *r*.

    Values are stored pickled (see execute_thread).  Keys whose value has
    expired or been deleted between ``r.keys()`` and ``r.get()`` are
    skipped — previously ``pickle.loads(None)`` raised TypeError here.
    """
    for key in r.keys():
        raw = r.get(key)
        if raw is None:  # key vanished after keys(); don't crash pickle.loads
            continue
        value = pickle.loads(raw)
        if value is not None:
            print(f"{key.decode('utf-8')}: {json.dumps(value, indent=4)}")
def connect_mongo():
    """Return the ``dag_store.dags`` collection from the local MongoDB.

    fix: a MongoDB URI separates host and port with ':'.  The previous
    "mongodb://127.0.0.1/27017" parsed "27017" as the default database
    name and only reached port 27017 because it is MongoDB's default.
    """
    myclient = pymongo.MongoClient("mongodb://127.0.0.1:27017")
    mydb = myclient["dag_store"]
    mycol = mydb["dags"]
    return mycol
def get_dag_json(dag_name):
    """Fetch the stored DAG spec(s) named *dag_name* from MongoDB.

    Returns a list of documents shaped ``{"name": ..., "dag": [...]}``;
    the list is empty when no DAG with that name is registered.
    (URI fixed to use ':' between host and port — see connect_mongo.)
    """
    client = pymongo.MongoClient("mongodb://127.0.0.1:27017")
    collection = client["dag_store"]["dags"]
    projection = {"_id": 0, "name": 1, "dag": 1}
    return list(collection.find({"name": dag_name}, projection))
def submit_dag_metadata(dag_metadata):
    """Persist one DAG run's metadata (dag_id, dag_name, activation ids)
    into ``dag_store.dag_metadata``.

    Returns a JSON string {"message": "success"} or {"message": "failed",
    "reason": "<error text>"}.
    """
    client = pymongo.MongoClient("mongodb://127.0.0.1:27017")
    collection = client["dag_store"]["dag_metadata"]
    try:
        collection.insert_one(dag_metadata)
        return json.dumps({"message": "success"})
    except Exception as err:
        # fix: str(err) — exception objects are not JSON serializable,
        # so json.dumps itself raised inside the old handler.
        return json.dumps({"message": "failed", "reason": str(err)})
@app.route("/")
def home():
    """Landing endpoint: static greeting plus author credit."""
    payload = {
        "message": "Hello,welcome to create and manage serverless workflows.",
        "author": "Anubhav Jana",
    }
    return jsonify(payload)
@app.route('/view/functions', methods=['GET'])
def list_actions():
    """List registered OpenWhisk actions by shelling out to the wsk CLI.

    Parses the textual output of ``wsk -i action list``: tokens are split
    on single spaces, the header words 'private'/'blackbox' and empty
    tokens are dropped, and the action name is taken as the third
    '/'-separated path segment (``/<ns>/<name>``).
    NOTE(review): this parsing assumes a specific wsk output layout
    (single-space separators, exactly two leading path segments) — confirm
    against the installed wsk version; a format change breaks it silently.
    """
    list_of_actions = []
    stream = os.popen(' wsk -i action list')
    actions = stream.read().strip().split(' ')
    for action in actions:
        if action=='' or action=='private' or action=='blackbox':
            continue
        else:
            list_of_actions.append(action.split('/')[2])
    data = {"list of available actions":list_of_actions}
    return jsonify(data)
@app.route('/register/trigger/',methods=['POST'])
def register_trigger():
    """Store a trigger document (JSON body) in ``trigger_store.triggers``.

    The body must contain "type" ("dag" or "function") and the matching
    "dags"/"functions" target list, plus "trigger_name" and "trigger".
    Returns a JSON string describing the outcome.
    """
    trigger_json = request.json
    myclient = pymongo.MongoClient("mongodb://127.0.0.1:27017")
    mycol = myclient["trigger_store"]["triggers"]
    try:
        cursor = mycol.insert_one(trigger_json)
        print("OBJECT ID GENERATED",cursor.inserted_id)
        if(trigger_json["type"]=="dag"):
            targets = trigger_json["dags"]
        elif(trigger_json["type"]=="function"):
            targets = trigger_json["functions"]
        else:
            # fix: an unknown type previously hit UnboundLocalError below
            targets = []
        data = {"message":"success","trigger_name":trigger_json["trigger_name"],"trigger":trigger_json["trigger"],"trigger_type":trigger_json["type"],"trigger_target":targets}
        return json.dumps(data)
    except Exception as e:
        print("Error--->",e)
        # fix: str(e) — exception objects are not JSON serializable
        data = {"message":"fail","reason":str(e)}
        return json.dumps(data)
@app.route('/register/function/<function_name>',methods=['POST'])
def register_function(function_name):
    """Register a new serverless function end-to-end.

    Saves the uploaded source files into the function library, builds and
    pushes the function's Docker image via a privileged helper script,
    creates the OpenWhisk action and API route, refreshes the
    action->url mapping, and records the function document in MongoDB.

    Expects a multipart upload whose file parts are, in order: the python
    script, the Dockerfile, and requirements.txt (an optional
    'description' form field is ignored).
    """
    import shutil  # fix: shutil was used below but never imported, raising NameError
    list_of_file_keys = []
    document = {}
    function_dir = '/home/faasapp/Desktop/anubhav/function_modules' # Library of functions
    destination = os.path.join(function_dir, function_name)
    # Create the per-function directory (idempotent)
    os.makedirs(destination, exist_ok=True)
    files = request.files
    for filekey in files:
        if filekey!='description':
            list_of_file_keys.append(filekey)
    for key in list_of_file_keys:
        file = request.files[key]
        filename = file.filename
        # Save to CWD, copy into the library, then remove the temp copy.
        file.save(file.filename)
        shutil.copy(filename, destination)
        os.remove(filename)
    image_build_script = 'buildAndPush.sh'
    shutil.copy(image_build_script, destination)
    # Function metadata persisted in MongoDB below.
    document["function_name"] = function_name
    document["image_build_script"] = 'buildAndPush.sh'
    document["python_script"] = (request.files[list_of_file_keys[0]]).filename
    document["dockerfile"] = (request.files[list_of_file_keys[1]]).filename
    document["requirements.txt"] =(request.files[list_of_file_keys[2]]).filename
    docker_image_name = "10.129.28.219:5000/"+function_name+"-image"
    api_name = "/"+function_name+"-api"
    path_name = "/"+function_name+"-path"
    # NOTE(review): hard-coded sudo password in source is a security risk —
    # move it to configuration / secret management.
    password = '1234'
    # Build the docker image via the privileged helper script.
    cmd = ["sudo", "-S", "/home/faasapp/Desktop/anubhav/controlplane/build_image.sh",destination,docker_image_name]
    process = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
    # Feed the sudo password on stdin, then wait for the build to finish.
    process.stdin.write(password + "\n")
    process.stdin.flush()
    output, errors = process.communicate()
    print("OUTPUT---------",output)
    print("ERRORS---------",errors)
    # Create the action, register it with the API gateway, and refresh the
    # action->url mapping file consumed by preprocess().
    subprocess.call(['./create_action.sh',destination,docker_image_name,function_name])
    subprocess.call(['./register.sh',api_name,path_name,function_name])
    subprocess.call(['bash', './actions.sh'])
    myclient = pymongo.MongoClient("mongodb://127.0.0.1:27017")
    mycol = myclient["function_store"]["functions"]
    try:
        cursor = mycol.insert_one(document)
        print("OBJECT ID GENERATED",cursor.inserted_id)
        return json.dumps({"message":"success"})
    except Exception as e:
        print("Error--->",e)
        # fix: str(e) — exception objects are not JSON serializable
        return json.dumps({"message":"fail","reason":str(e)})
@app.route('/register/dag/',methods=['POST'])
def register_dag():
    """Store a DAG specification (JSON body) in ``dag_store.dags``."""
    dag_json = request.json
    myclient = pymongo.MongoClient("mongodb://127.0.0.1:27017")
    mycol = myclient["dag_store"]["dags"]
    try:
        cursor = mycol.insert_one(dag_json)
        print("OBJECT ID GENERATED",cursor.inserted_id)
        return json.dumps({"message":"success"})
    except Exception as e:
        print("Error--->",e)
        # fix: str(e) — exception objects are not JSON serializable
        return json.dumps({"message":"fail","reason":str(e)})
@app.route('/view/dag/<dag_name>',methods=['GET'])
def view_dag(dag_name):
    """Return a human-readable JSON summary of the DAG named *dag_name*:
    name, node count, starting node, and per-node routing info
    (outputs_from, primitive type, and next/branch node ids).
    """
    dag_info_map = {}
    myclient = pymongo.MongoClient("mongodb://127.0.0.1/27017")
    mydb = myclient["dag_store"]
    mycol = mydb["dags"]
    document = mycol.find({"name":dag_name})
    data = list(document)
    dag_info_list = []
    # If several documents share the name, the last one wins.
    for items in data:
        dag_info_list = items["dag"]
        dag_info_map["DAG_Name--->>"] = items["name"]
        dag_info_map["Number_of_nodes-->"] = len(dag_info_list)
        # NOTE(review): raises IndexError if a matching DAG has an empty node list
        dag_info_map["Starting_Node-->"] = dag_info_list[0]["node_id"]
    for dag_items in dag_info_list:
        node_info_map = {}
        if(len(dag_items["properties"]["outputs_from"])==0):
            node_info_map["get_outputs_from-->"] = "Starting action->No outputs consumed"
        else:
            node_info_map["get_outputs_from-->"] = dag_items["properties"]["outputs_from"]
        node_info_map["primitive_type"] = dag_items["properties"]["primitive"]
        # Condition nodes route to two branches; all others to "next".
        if(dag_items["properties"]["primitive"]=="condition"):
            node_info_map["next_node_id_if_condition_true"] = dag_items["properties"]["branch_1"]
            node_info_map["next_node_id_if_condition_false"] = dag_items["properties"]["branch_2"]
        else:
            if(dag_items["properties"]["next"]!=""):
                node_info_map["next_node_id-->"] = dag_items["properties"]["next"]
            else:
                node_info_map["next_node_id-->"] = "Ending node_id of a path"
        dag_info_map[dag_items["node_id"]] = node_info_map
    response = {"dag_data":dag_info_map}
    formatted_json = json.dumps(response, indent=20)
    return formatted_json
@app.route('/view/dags',methods=['GET'])
def view_dags():
    """Return every registered DAG document, pretty-printed as JSON."""
    myclient = pymongo.MongoClient("mongodb://127.0.0.1:27017")
    mycol = myclient["dag_store"]["dags"]
    data = list(mycol.find())
    # fix: build the response as a dict instead of string concatenation
    # followed by a re-parse; default=str covers ObjectId/BSON types.
    return json.dumps({"dag": data}, indent=4, default=str)
@app.route('/view/triggers',methods=['GET'])
def view_triggers():
    """Return every registered trigger document, pretty-printed as JSON."""
    myclient = pymongo.MongoClient("mongodb://127.0.0.1:27017")
    mycol = myclient["trigger_store"]["triggers"]
    data = list(mycol.find())
    # fix: build the response as a dict instead of string concatenation
    # followed by a re-parse; default=str covers ObjectId/BSON types.
    return json.dumps({"trigger": data}, indent=4, default=str)
@app.route('/view/trigger/<trigger_name>',methods=['GET'])
def view_trigger(trigger_name):
    """Return the stored trigger document named *trigger_name*."""
    print(request.url)
    myclient = pymongo.MongoClient("mongodb://127.0.0.1:27017")
    mycol = myclient["trigger_store"]["triggers"]
    query = {"trigger_name":trigger_name}
    projection = {"_id": 0,"trigger_name":1,"type":1,"trigger":1,"dags":1,"functions":1}
    data = list(mycol.find(query,projection))
    # fix: direct dict dump instead of dump -> concat -> parse -> dump.
    return json.dumps({"trigger": data}, indent=4, default=str)
# EXAMPLE URL: http://10.129.28.219:5001/view/activation/8d7df93e8f2940b8bdf93e8f2910b80f
@app.route('/view/activation/<activation_id>', methods=['GET', 'POST'])
def list_activations(activation_id):
    """Look up an OpenWhisk activation record via the wsk CLI and return
    its name, duration, status, and result.
    """
    cmd = ['wsk', '-i', 'activation', 'get', activation_id]
    result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # First line of wsk output is a human-readable header; JSON follows.
    json_res = result.stdout.decode().split('\n')[1:]
    res = json.loads('\n'.join(json_res))
    # fix: dropped the dead local dict that duplicated the return value.
    return({"action_name": res["name"],
            "duration": res["duration"],
            "status": res["response"]["status"],
            "result": res["response"]["result"]
            })
# EXAMPLE URL: http://10.129.28.219:5001/view/76cc8a53-0a63-47bb-a5b5-9e6744f67c61
@app.route('/view/<dag_id>',methods=['GET'])
def view_dag_metadata(dag_id):
    """Return the stored run metadata (dag name + activation ids) for one
    DAG execution identified by *dag_id*."""
    client = pymongo.MongoClient("mongodb://127.0.0.1/27017")
    collection = client["dag_store"]["dag_metadata"]
    query = {"dag_id": dag_id}
    projection = {"_id": 0, "dag_id": 1, "dag_name": 1, "function_activation_ids": 1}
    records = [doc for doc in collection.find(query, projection)]
    return json.dumps({"dag_metadata": records})
# EXAMPLE URL: http://10.129.28.219:5001/run/action/odd-even-action
# http://10.129.28.219:5001/run/action/decode-function
@app.route('/run/action/<action_name>/', methods=['POST'])
def execute_action(action_name):
    """Invoke a single registered action and return its JSON reply.

    Refreshes the action->url mapping file (via actions.sh +
    preprocess) on every call, then forwards the request body as the
    action's JSON arguments.
    """
    script_file = './actions.sh'
    subprocess.call(['bash', script_file])
    preprocess("action_url.txt")
    url = action_url_mappings[action_name]
    # json_data = json.loads(request.json)
    reply = requests.post(url = url,json = request.json,verify=False)
    return reply.json()
# EXAMPLE URL: http://10.129.28.219:5001/run/dag/odd-even-test/{"number":16}
@app.route('/run/dag/<dag_name>/', methods=['GET', 'POST'])
def execute_dag(dag_name):
    """Execute the registered DAG *dag_name*, seeding the first action's
    arguments with the request body.

    Walks the DAG via the module-level ``queue`` list: string entries are
    single actions (serial/condition primitives handled inline), list
    entries are fan-out sets delegated to handle_parallel().  Each
    action's reply is pickled into redis under "<action>-output" so
    downstream nodes can consume it; the redis db is flushed at the end
    and per-run metadata (activation ids) is persisted via
    submit_dag_metadata().

    NOTE(review): ``queue`` shadows the imported ``queue`` module, and
    ``queue``/``list_of_func_ids``/``responses`` are shared globals that
    accumulate across requests — concurrent or repeated runs interfere.
    ``reply`` is also unbound below if the DAG spec is empty.
    """
    print("------------------------------------DAG START-----------------------------------------------")
    unique_id = uuid.uuid4()
    print("DAG UNIQUE ID----------",unique_id)
    dag_metadata={}
    dag_metadata["dag_id"] = str(unique_id)
    dag_metadata["dag_name"] = dag_name
    # list_of_func_ids = []
    ######### Updates the list of action->url mapping ###################
    script_file = './actions.sh'
    subprocess.call(['bash', script_file])
    #####################################################################
    preprocess("action_url.txt")
    ### Create in-memory redis storage ###
    redis_instace = create_redis_instance()  # NOTE(review): "instace" typo kept as-is
    #######################################
    action_properties_mapping = {} #Stores the action name and its corresponding properties
    dag_res = json.loads(json.dumps(get_dag_json(dag_name)))
    dag_data = dag_res[0]["dag"]
    for dag_item in dag_data:
        action_properties_mapping[dag_item["node_id"]] = dag_item["properties"]
    flag = 0
    for dag_item in dag_data:
        if(flag==0): # To indicate the first action in the DAG
            queue.append(dag_item["node_id"])
            action_properties_mapping[dag_item["node_id"]]["arguments"] = request.json
        while(len(queue)!=0):
            flag=flag+1
            action = queue.pop(0)
            print("ACTION DEQUEUED FROM QUEUE : --->",action)
            ##########################################################
            #              HANDLE THE ACTION                         #
            ##########################################################
            if isinstance(action, str):
                # if(isinstance(action_properties_mapping[action]['arguments'],list)):
                #     pass
                json_data = action_properties_mapping[action]["arguments"]
                url = action_url_mappings[action]
                reply = requests.post(url = url,json=json_data,verify=False)
                list_of_func_ids.append(reply.json()["activation_id"])
                # print("Line 292------------",reply.json()["activation_id"])
                # Cache this action's output for downstream consumers.
                redis_instace.set(action+"-output",pickle.dumps(reply.json()))
                action_type = action_properties_mapping[action]["primitive"]
                if(action_type=="condition"):
                    branching_action = action_properties_mapping[action]["branch_1"]
                    alternate_action = action_properties_mapping[action]["branch_2"]
                    result=reply.json()["result"]
                    condition_op = action_properties_mapping[action]["condition"]["operator"]
                    if(condition_op=="equals"):
                        # Comparison target stays a string if declared as one,
                        # otherwise it is coerced to int.
                        if(isinstance(action_properties_mapping[action]["condition"]["target"], str)):
                            target = action_properties_mapping[action]["condition"]["target"]
                        else:
                            target=int(action_properties_mapping[action]["condition"]["target"])
                        if(result==target):
                            # True branch: enqueue branch_1 and wire its inputs.
                            output_list = [] # List to store the output of actions whose outputs are required by downstream operations
                            queue.append(branching_action)
                            action_names = action_properties_mapping[branching_action]["outputs_from"] # Get the list of actions whose output will be used
                            if(len(action_names)==1): # if only output of one action is required
                                key = action_names[0]+"-output"
                                output = pickle.loads(redis_instace.get(key))
                                action_properties_mapping[branching_action]["arguments"] = output
                            else:
                                for item in action_names:
                                    key = item+"-output"
                                    output = pickle.loads(redis_instace.get(key))
                                    output_list.append(output)
                                action_properties_mapping[branching_action]["arguments"] = output_list
                        else:
                            # False branch: enqueue branch_2 and wire its inputs.
                            output_list = [] # List to store the output of actions whose outputs are required by downstream operations
                            queue.append(alternate_action)
                            action_names = action_properties_mapping[alternate_action]["outputs_from"] # Get the list of actions whose output will be used
                            if(len(action_names)==1): # if only output of one action is required
                                key = action_names[0]+"-output"
                                output = pickle.loads(redis_instace.get(key))
                                action_properties_mapping[alternate_action]["arguments"] = output
                            else:
                                for item in action_names:
                                    key = item+"-output"
                                    output = pickle.loads(redis_instace.get(key))
                                    output_list.append(output)
                                action_properties_mapping[alternate_action]["arguments"] = output_list
                    # NOTE(review): the remaining comparison operators are unimplemented.
                    if(condition_op=="greater_than"):
                        pass
                    if(condition_op=="greater_than_equals"):
                        pass
                    if(condition_op=="less_than"):
                        pass
                    if(condition_op=="less_than_equals"):
                        pass
                elif(action_type=="serial"):
                    next_action = action_properties_mapping[action]["next"]
                    if(next_action!=""):
                        output_list = [] # List to store the output of actions whose outputs are required by downstream operations
                        queue.append(next_action)
                        action_names = action_properties_mapping[next_action]["outputs_from"] # Get the list of actions whose output will be used
                        if(len(action_names)==1): # if only output of one action is required
                            key = action_names[0]+"-output"
                            output = pickle.loads(redis_instace.get(key))
                            action_properties_mapping[next_action]["arguments"] = output
                        else:
                            for item in action_names:
                                key = item+"-output"
                                output = pickle.loads(redis_instace.get(key))
                                output_list.append(output)
                            action_properties_mapping[next_action]["arguments"] = output_list
                elif(action_type=="parallel"):
                    # Fan-out: enqueue the whole list; consumed by the else branch below.
                    parallel_action_list = action_properties_mapping[action]["next"]
                    queue.append(parallel_action_list)
            else:
                # A list was dequeued: run the fan-out set concurrently.
                reply = handle_parallel(queue,redis_instace,action_properties_mapping,action)
    dag_metadata["function_activation_ids"] = list_of_func_ids
    # print("DAG SPEC AFTER WORKFLOW EXECUTION--------\n")
    # print(action_properties_mapping)
    # print('\n')
    submit_dag_metadata(dag_metadata)
    # print("DAG ID---->FUNC IDS",dag_metadata)
    print('\n')
    # print('INTERMEDIATE OUTPUTS FROM ALL ACTIONS-----\n')
    # get_redis_contents(redis_instace)
    # print('\n')
    redis_instace.flushdb()
    print("Cleaned up in-memory intermediate outputs successfully\n")
    if(isinstance(reply,list)):
        return({"dag_id": dag_metadata["dag_id"],
        "result": reply
        })
    else:
        return({
        "dag_id": dag_metadata["dag_id"],
        "result": reply.json()
        })
    # return({
    #     "result": "success"
    # })

Binary file not shown.

After

Width:  |  Height:  |  Size: 19 KiB

@ -1,51 +0,0 @@
import os
import boto3
from botocore.exceptions import ClientError
# Read AWS credentials/region from the environment (populated from .env).
aws_access_key_id = os.environ.get('AWS_ACCESS_KEY_ID')
aws_secret_access_key = os.getenv('AWS_SECRET_ACCESS_KEY')
aws_region = os.getenv('AWS_REGION')
# NOTE(review): printing the secret access key leaks credentials into
# logs/terminal history — remove before committing.
print(aws_access_key_id,aws_secret_access_key)
# s3 = boto3.client('s3', aws_access_key_id=aws_access_key_id,aws_secret_access_key=aws_secret_access_key,region_name=aws_region)
# upload_file_path = "dag_register.py"
# bucket_name = 'dagit-store'
# key_name = upload_file_path
# folder_path = 'images'
# folder_name = "images"
# try:
# s3.upload_file(upload_file_path,bucket_name,key_name)
# s3.put_object_acl(Bucket=bucket_name, Key=key_name, ACL='public-read')
# object_url = "https://dagit-store.s3.ap-south-1.amazonaws.com/"+key_name
# print("Uploaded....\n")
# print(object_url)
# except ClientError as e:
# print(e)
# loop through files in folder
# for subdir, dirs, files in os.walk(folder_path):
# for file in files:
# # get full path of file
# file_path = os.path.join(subdir, file)
# # get S3 object key
# object_key = os.path.relpath(file_path, folder_path)
# # upload file to S3
# # s3.Object(bucket_name, object_key).upload_file(file_path)
# # s3.upload_file(file_path,bucket_name,object_key)
# s3.upload_file(file_path, bucket_name, f'{folder_name}/{file_path.split("/")[-1]}')
# s3.put_object_acl(Bucket=bucket_name, Key=f'{folder_name}/{file_path.split("/")[-1]}', ACL='public-read')
# print("Uploaded....\n")
# try:
# response = s3.generate_presigned_url('get_object',
# Params={'Bucket': bucket_name,
# 'Key': key_name},
# ExpiresIn=3600)
# print(response)
# except ClientError as e:
# print(e)

@ -19,12 +19,16 @@ def main():
blurred_result = []
try:
decode_activation_id = params["activation_id"]
# face_detect_activation_id = params["activation_id"]
parts = params["parts"]
faces = params["faces"]
for i in range(0,parts):
if os.path.exists(images_dir+'/blurred_image_'+str(i)+'.jpg'):
os.remove(images_dir+'/blurred_image_'+str(i)+'.jpg')
for i in range(0,parts):
decode_output = "decode-output-image"+decode_activation_id+"-"+str(i)
# face_detect_output = "face-detected-image"+face_detect_activation_id+"-"+str(i)
# load_image = pickle.loads(r.get(face_detect_output))
load_image = pickle.loads(r.get(decode_output))
image_name = 'Image'+str(i)+'.jpg'
with open(image_name, 'wb') as f:
@ -82,7 +86,7 @@ def main():
}))
return({"output_image_url_links":url_list,
return({"blurred_image_url_links":url_list,
"activation_id": str(activation_id),
"parts": parts

@ -1,3 +1,5 @@
sudo ./buildAndPush.sh 10.129.28.219:5000/image-blur-image
wsk -i action create image-blur --docker 10.129.28.219:5000/image-blur-image --web=true --timeout=300000
wsk -i action update image-blur --docker 10.129.28.219:5000/image-blur-image blur.py --web=true --timeout=300000
./register.sh /image-blur-api /image-blur-path image-blur --response-type=json

@ -1,3 +1,3 @@
sudo ./buildAndPush.sh 10.129.28.219:5000/image-denoise-image
./register.sh /image-denoise-api /image-denoise-path image-denoise --response-type=j
./register.sh /image-denoise-api /image-denoise-path image-denoise --response-type=json
wsk -i action create image-denoise --docker 10.129.28.219:5000/image-denoise-image --web=true --timeout=300000

@ -1,3 +1,5 @@
sudo ./buildAndPush.sh 10.129.28.219:5000/image-resize-image
wsk -i action create image-resize --docker 10.129.28.219:5000/image-resize-image --web=true --timeout=300000
./register.sh /image-resize-api /image-resize-path image-blur --response-type=json
./register.sh /image-resize-api /image-resize-path image-resize --response-type=json
wsk -i action update image-resize --docker 10.129.28.219:5000/image-resize-image resize.py --web=true --timeout=300000

@ -0,0 +1,111 @@
# # Dockerfile for Python whisk docker action
# FROM openwhisk/dockerskeleton
# ENV FLASK_PROXY_PORT 8080
# ## Install our action's Python dependencies
# ADD requirements.txt /action/requirements.txt
# RUN apk --update add python py-pip openssl ca-certificates py-openssl wget git
# RUN apk --update add --virtual build-dependencies libffi-dev openssl-dev python-dev py-pip build-base && apk add jpeg-dev zlib-dev libjpeg && pip install --upgrade pip
# RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
# ENV PATH="/root/.cargo/bin:${PATH}"
# # RUN pip install tokenizers
# # RUN apk --update add world build-essential
# RUN cd /action; pip install -r requirements.txt
# # RUN pip install torch===1.4.0 torchvision===0.5.0 -f https://download.pytorch.org/whl/torch_stable.html
# # RUN pip install torch===1.4.0 -f https://download.pytorch.org/whl/torch_stable.html
# ENV USE_OPENMP=0
# ENV OMP_NUM_THREADS=1
# RUN apk --update add gcompat libc6-compat musl musl-dev
# RUN apk add --no-cache libgcc ncurses-libs libstdc++
# # ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib/python3.6/site-packages/torch/lib/:/usr/local/lib/python3.6/site-packages/torch/lib//
# RUN pip install torch==1.10.2+cpu -f https://download.pytorch.org/whl/torch_stable.html
# # ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/lib/ld-linux-x86-64.so.2:/usr/local/lib/python3.6/site-packages/torch/lib/
# # ENV LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libgomp.so.1://usr/local/lib/python3.6/site-packages/torch/lib//libgomp-a34b3233.so.1
# # ENV LD_LIBRARY_PATH=/usr/local/lib/python3.6/site-packages/torch/lib/:/usr/local/lib/python3.6/site-packages/torch/lib//libgomp-a34b3233.so.1
# # ENV LD_LIBRARY_PATH=/usr/local/lib/python3.6/site-packages/torch/lib:$LD_LIBRARY_PATH
# # RUN pip install torch===2.0.0 torchvision===0.15.1
# # Ensure source assets are not drawn from the cache after this date
# ENV REFRESHED_AT 2016-09-05T13:59:39Z
# # Add all source assets
# ADD . /action
# # Rename our executable Python action
# ADD img_text.py /action/exec
# # Leave CMD as is for Openwhisk
# CMD ["/bin/bash", "-c", "cd actionProxy && python3 -u actionproxy.py"]
# Base image: IBM Functions Python 3.9 action runtime (ships actionproxy).
FROM ibmfunctions/action-python-v3.9
#download pip packages needed by the BLIP image-captioning action
RUN pip install --upgrade pip
RUN pip install transformers requests redis pilgram Pillow==6.2.2 zlib-state torch
# OpenWhisk convention: the action's entry script is installed as /action/exec
ADD img_text.py /action/exec
# RUN python3 -c "from huggingface_hub import snapshot_download,hf_hub_download;REPO_ID = 'Salesforce/blip-image-captioning-base';snapshot_download(repo_id='Salesforce/blip-image-captioning-base',local_dir='/action/models/transformers')"
# Pre-downloaded model weights baked into the image
ADD models /action/models
CMD ["/bin/bash", "-c", "cd actionProxy && python3 -u actionproxy.py"]
#download pre-trained model from hugging face to docker image
# RUN pip install sentence_transformers
# RUN python3 -c "from transformers import BlipProcessor, BlipForConditionalGeneration;model = BlipForConditionalGeneration.from_pretrained('Salesforce/blip-image-captioning-base');model.save('models')"
# COPY models .
# ADD img_text.py .
# CMD ["python3","img_text.py"]
# RUN ls /action/1/bin/models/transformers
# CMD ['echo','/action/1/bin/models/transformers']
# CMD ['cat','/action/1/bin/models/transformers/exec']
# CMD ['ls','/action/1/bin/models/transformers']
# FROM python:3.9-slim-buster
# RUN apt-get update
# #download pip packages
# RUN pip install --upgrade pip
# RUN pip install transformers
# RUN pip install requests
# RUN pip install redis
# RUN pip install pilgram
# # RUN apt-get install jpeg-dev zlib-dev
# RUN pip install Pillow
# RUN pip install zlib-state
# RUN pip install torch
# RUN python3 -c "from huggingface_hub import snapshot_download,hf_hub_download;REPO_ID = 'Salesforce/blip-image-captioning-base';snapshot_download(repo_id='Salesforce/blip-image-captioning-base',local_dir='/models/transformers')"
# RUN ls /
# FROM openwhisk/python3action
# RUN apk update
# # RUN apk add build-dependencies libffi-dev openssl-dev python-dev py-pip build-base
# RUN pip3 install --upgrade pip
# RUN pip3 install transformers redis requests boto3 Pillow torch pilgram
# RUN python3 -c "from huggingface_hub import snapshot_download,hf_hub_download;REPO_ID = 'Salesforce/blip-image-captioning-base';snapshot_download(repo_id='Salesforce/blip-image-captioning-base',local_dir='/models/transformers')"
# RUN ls /

@ -0,0 +1,9 @@
sudo ./buildAndPush.sh 10.129.28.219:5000/img-text
wsk -i action create img-to-text --docker 10.129.28.219:5000/img-text img_text.py --web=true --timeout=300000
./register.sh /image-text-api /image-text-path img-to-text --response-type=json
wsk -i action update img-to-text --docker 10.129.28.219:5000/img-text img_text.py --timeout 300000

@ -0,0 +1,91 @@
import requests
import threading
import os
import json
import sys
import requests
import torch
from PIL import Image
from transformers import BlipProcessor, BlipForConditionalGeneration
thread_list = []
# Captions accumulated by process() across worker threads.
results = []
def process(image_url):
    """Caption the image at *image_url* with BLIP and append the caption
    to the global ``results`` list (intended as a thread target).

    NOTE(review): loads the processor and model from the Hugging Face hub
    on every call — expensive; hoist to module level if this path is used.
    """
    processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
    model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
    # img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
    raw_image = Image.open(requests.get(image_url, stream=True).raw).convert('RGB')
    inputs = processor(raw_image, return_tensors="pt")
    out = model.generate(**inputs)
    result = processor.decode(out[0], skip_special_tokens=True)
    results.append(result)
def main(params):
    """OpenWhisk action entry point: caption an image with BLIP.

    params: dict containing "image_url_links" (list of image URLs).
    Returns the activation id, the input links, and the generated caption.

    fix: the final print/return previously referenced an undefined name
    ``res`` (the caption is bound to ``result``), raising NameError.
    NOTE(review): only the hard-coded demo image is captioned; the
    per-URL threaded path (see process()) is currently disabled, and the
    baked-in /action/models/transformers weights are not used.
    """
    activation_id = os.environ.get('__OW_ACTIVATION_ID')
    # Debug output: verify the container layout at runtime.
    print("Current directory----",os.getcwd())
    print("root----",os.listdir('/'))
    print("inside action----",os.listdir('/action/models/transformers'))
    print("current directory contents------",os.listdir(os.getcwd()))
    img_urls = params["image_url_links"]
    # Weights are pulled from the Hugging Face hub on each invocation.
    processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
    model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
    img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB')
    inputs = processor(raw_image, return_tensors="pt")
    out = model.generate(**inputs)
    result = processor.decode(out[0], skip_special_tokens=True)
    print(result)
    print(json.dumps({"activation_id": str(activation_id),
                      "image_url_links": img_urls,
                      "result": result
                      }))
    return({"activation_id": str(activation_id),
            "image_url_links": img_urls,
            "result": result
            })
if __name__ == "__main__":
    # fix: ``params`` was referenced but never defined at module scope;
    # when run as a script the action parameters arrive as a JSON string
    # in argv[1] (matching the commented-out original intent).
    params = json.loads(sys.argv[1])
    main(params)

@ -0,0 +1,34 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text

@ -0,0 +1,152 @@
---
pipeline_tag: image-to-text
tags:
- image-captioning
language:
- en
license: bsd-3-clause
---
# BLIP: Bootstrapping Language-Image Pre-training for Unified Vision-Language Understanding and Generation
Model card for image captioning pretrained on COCO dataset - base architecture (with ViT base backbone).
| ![BLIP.gif](https://s3.amazonaws.com/moonup/production/uploads/1670928184033-62441d1d9fdefb55a0b7d12c.gif) |
|:--:|
| <b> Pull figure from BLIP official repo | Image source: https://github.com/salesforce/BLIP </b>|
## TL;DR
Authors from the [paper](https://arxiv.org/abs/2201.12086) write in the abstract:
*Vision-Language Pre-training (VLP) has advanced the performance for many vision-language tasks. However, most existing pre-trained models only excel in either understanding-based tasks or generation-based tasks. Furthermore, performance improvement has been largely achieved by scaling up the dataset with noisy image-text pairs collected from the web, which is a suboptimal source of supervision. In this paper, we propose BLIP, a new VLP framework which transfers flexibly to both vision-language understanding and generation tasks. BLIP effectively utilizes the noisy web data by bootstrapping the captions, where a captioner generates synthetic captions and a filter removes the noisy ones. We achieve state-of-the-art results on a wide range of vision-language tasks, such as image-text retrieval (+2.7% in average recall@1), image captioning (+2.8% in CIDEr), and VQA (+1.6% in VQA score). BLIP also demonstrates strong generalization ability when directly transferred to video-language tasks in a zero-shot manner. Code, models, and datasets are released.*
## Usage
You can use this model for conditional and un-conditional image captioning
### Using the Pytorch model
#### Running the model on CPU
<details>
<summary> Click to expand </summary>
```python
import requests
from PIL import Image
from transformers import BlipProcessor, BlipForConditionalGeneration
processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB')
# conditional image captioning
text = "a photography of"
inputs = processor(raw_image, text, return_tensors="pt")
out = model.generate(**inputs)
print(processor.decode(out[0], skip_special_tokens=True))
# >>> a photography of a woman and her dog
# unconditional image captioning
inputs = processor(raw_image, return_tensors="pt")
out = model.generate(**inputs)
print(processor.decode(out[0], skip_special_tokens=True))
# >>> a woman sitting on the beach with her dog
```
</details>
#### Running the model on GPU
##### In full precision
<details>
<summary> Click to expand </summary>
```python
import requests
from PIL import Image
from transformers import BlipProcessor, BlipForConditionalGeneration
processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base").to("cuda")
img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB')
# conditional image captioning
text = "a photography of"
inputs = processor(raw_image, text, return_tensors="pt").to("cuda")
out = model.generate(**inputs)
print(processor.decode(out[0], skip_special_tokens=True))
# >>> a photography of a woman and her dog
# unconditional image captioning
inputs = processor(raw_image, return_tensors="pt").to("cuda")
out = model.generate(**inputs)
print(processor.decode(out[0], skip_special_tokens=True))
# >>> a woman sitting on the beach with her dog
```
</details>
##### In half precision (`float16`)
<details>
<summary> Click to expand </summary>
```python
import torch
import requests
from PIL import Image
from transformers import BlipProcessor, BlipForConditionalGeneration
processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base", torch_dtype=torch.float16).to("cuda")
img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB')
# conditional image captioning
text = "a photography of"
inputs = processor(raw_image, text, return_tensors="pt").to("cuda", torch.float16)
out = model.generate(**inputs)
print(processor.decode(out[0], skip_special_tokens=True))
# >>> a photography of a woman and her dog
# unconditional image captioning
inputs = processor(raw_image, return_tensors="pt").to("cuda", torch.float16)
out = model.generate(**inputs)
print(processor.decode(out[0], skip_special_tokens=True))
# >>> a woman sitting on the beach with her dog
```
</details>
## BibTex and citation info
```
@misc{https://doi.org/10.48550/arxiv.2201.12086,
doi = {10.48550/ARXIV.2201.12086},
url = {https://arxiv.org/abs/2201.12086},
author = {Li, Junnan and Li, Dongxu and Xiong, Caiming and Hoi, Steven},
keywords = {Computer Vision and Pattern Recognition (cs.CV), FOS: Computer and information sciences, FOS: Computer and information sciences},
title = {BLIP: Bootstrapping Language-Image Pre-training for Unified Vision-Language Understanding and Generation},
publisher = {arXiv},
year = {2022},
copyright = {Creative Commons Attribution 4.0 International}
}
```

@ -0,0 +1,169 @@
{
"_commit_hash": null,
"architectures": [
"BlipForConditionalGeneration"
],
"image_text_hidden_size": 256,
"initializer_factor": 1.0,
"logit_scale_init_value": 2.6592,
"model_type": "blip",
"projection_dim": 512,
"text_config": {
"_name_or_path": "",
"add_cross_attention": false,
"architectures": null,
"attention_probs_dropout_prob": 0.0,
"bad_words_ids": null,
"begin_suppress_tokens": null,
"bos_token_id": 30522,
"chunk_size_feed_forward": 0,
"cross_attention_hidden_size": null,
"decoder_start_token_id": null,
"diversity_penalty": 0.0,
"do_sample": false,
"early_stopping": false,
"encoder_no_repeat_ngram_size": 0,
"eos_token_id": 2,
"exponential_decay_length_penalty": null,
"finetuning_task": null,
"forced_bos_token_id": null,
"forced_eos_token_id": null,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.0,
"hidden_size": 768,
"id2label": {
"0": "LABEL_0",
"1": "LABEL_1"
},
"initializer_factor": 1.0,
"initializer_range": 0.02,
"intermediate_size": 3072,
"is_decoder": true,
"is_encoder_decoder": false,
"label2id": {
"LABEL_0": 0,
"LABEL_1": 1
},
"layer_norm_eps": 1e-12,
"length_penalty": 1.0,
"max_length": 20,
"max_position_embeddings": 512,
"min_length": 0,
"model_type": "blip_text_model",
"no_repeat_ngram_size": 0,
"num_attention_heads": 12,
"num_beam_groups": 1,
"num_beams": 1,
"num_hidden_layers": 12,
"num_return_sequences": 1,
"output_attentions": false,
"output_hidden_states": false,
"output_scores": false,
"pad_token_id": 0,
"prefix": null,
"problem_type": null,
"projection_dim": 768,
"pruned_heads": {},
"remove_invalid_values": false,
"repetition_penalty": 1.0,
"return_dict": true,
"return_dict_in_generate": false,
"sep_token_id": 102,
"suppress_tokens": null,
"task_specific_params": null,
"temperature": 1.0,
"tf_legacy_loss": false,
"tie_encoder_decoder": false,
"tie_word_embeddings": true,
"tokenizer_class": null,
"top_k": 50,
"top_p": 1.0,
"torch_dtype": null,
"torchscript": false,
"transformers_version": "4.26.0.dev0",
"typical_p": 1.0,
"use_bfloat16": false,
"use_cache": true,
"vocab_size": 30524
},
"torch_dtype": "float32",
"transformers_version": null,
"vision_config": {
"_name_or_path": "",
"add_cross_attention": false,
"architectures": null,
"attention_dropout": 0.0,
"bad_words_ids": null,
"begin_suppress_tokens": null,
"bos_token_id": null,
"chunk_size_feed_forward": 0,
"cross_attention_hidden_size": null,
"decoder_start_token_id": null,
"diversity_penalty": 0.0,
"do_sample": false,
"dropout": 0.0,
"early_stopping": false,
"encoder_no_repeat_ngram_size": 0,
"eos_token_id": null,
"exponential_decay_length_penalty": null,
"finetuning_task": null,
"forced_bos_token_id": null,
"forced_eos_token_id": null,
"hidden_act": "gelu",
"hidden_size": 768,
"id2label": {
"0": "LABEL_0",
"1": "LABEL_1"
},
"image_size": 384,
"initializer_factor": 1.0,
"initializer_range": 0.02,
"intermediate_size": 3072,
"is_decoder": false,
"is_encoder_decoder": false,
"label2id": {
"LABEL_0": 0,
"LABEL_1": 1
},
"layer_norm_eps": 1e-05,
"length_penalty": 1.0,
"max_length": 20,
"min_length": 0,
"model_type": "blip_vision_model",
"no_repeat_ngram_size": 0,
"num_attention_heads": 12,
"num_beam_groups": 1,
"num_beams": 1,
"num_channels": 3,
"num_hidden_layers": 12,
"num_return_sequences": 1,
"output_attentions": false,
"output_hidden_states": false,
"output_scores": false,
"pad_token_id": null,
"patch_size": 16,
"prefix": null,
"problem_type": null,
"projection_dim": 512,
"pruned_heads": {},
"remove_invalid_values": false,
"repetition_penalty": 1.0,
"return_dict": true,
"return_dict_in_generate": false,
"sep_token_id": null,
"suppress_tokens": null,
"task_specific_params": null,
"temperature": 1.0,
"tf_legacy_loss": false,
"tie_encoder_decoder": false,
"tie_word_embeddings": true,
"tokenizer_class": null,
"top_k": 50,
"top_p": 1.0,
"torch_dtype": null,
"torchscript": false,
"transformers_version": "4.26.0.dev0",
"typical_p": 1.0,
"use_bfloat16": false
}
}

@ -0,0 +1,17 @@
{
"do_normalize": true,
"do_resize": true,
"image_mean": [
0.48145466,
0.4578275,
0.40821073
],
"image_processor_type": "BlipImageProcessor",
"image_std": [
0.26862954,
0.26130258,
0.27577711
],
"processor_class": "BlipProcessor",
"size": 384
}

@ -0,0 +1 @@
../../../../../../.cache/huggingface/hub/models--Salesforce--blip-image-captioning-base/blobs/d6638651a5526cc2ede56f2b5104d6851b0755816d220e5e046870430180c767

@ -0,0 +1,7 @@
{
"cls_token": "[CLS]",
"mask_token": "[MASK]",
"pad_token": "[PAD]",
"sep_token": "[SEP]",
"unk_token": "[UNK]"
}

@ -0,0 +1 @@
../../../../../../.cache/huggingface/hub/models--Salesforce--blip-image-captioning-base/blobs/d0aaa4c0e003f599d8baa53a9dee85af14eef20554cf2f8113a2673e25a59f8c

@ -0,0 +1,17 @@
{
"cls_token": "[CLS]",
"do_basic_tokenize": true,
"do_lower_case": true,
"mask_token": "[MASK]",
"model_max_length": 512,
"name_or_path": "bert-base-uncased",
"never_split": null,
"pad_token": "[PAD]",
"processor_class": "BlipProcessor",
"sep_token": "[SEP]",
"special_tokens_map_file": null,
"strip_accents": null,
"tokenize_chinese_chars": true,
"tokenizer_class": "BertTokenizer",
"unk_token": "[UNK]"
}

File diff suppressed because it is too large Load Diff

@ -0,0 +1,7 @@
{
"image_url_links": [
"https://dagit-store.s3.ap-south-1.amazonaws.com/image_text_classification/flight.jpg",
"https://dagit-store.s3.ap-south-1.amazonaws.com/image_text_classification/puppy.jpg",
"https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
]
}

@ -0,0 +1,7 @@
requests
redis
torch
pilgram
Pillow==6.2.2
zlib-state
transformers

@ -0,0 +1,25 @@
# Dockerfile for Python whisk docker action
FROM openwhisk/dockerskeleton
# Port the OpenWhisk action proxy listens on inside the container.
ENV FLASK_PROXY_PORT 8080
## Install our action's Python dependencies
ADD requirements.txt /action/requirements.txt
# SECURITY(review): live-looking AWS credentials are hard-coded here and baked
# into every layer of the image (and into git history). These keys must be
# rotated immediately and supplied at deploy/run time instead (action
# parameters, --env-file, or an injected secret) — never via ENV in a
# committed Dockerfile.
ENV AWS_ACCESS_KEY_ID="AKIAYFB773UVZSOAVZN4"
ENV AWS_SECRET_ACCESS_KEY="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"
ENV AWS_REGION="ap-south-1"
# Base Alpine image needs Python, pip, TLS certs, and image libs (jpeg/zlib)
# before the pip requirements can build.
RUN apk --update add python py-pip openssl ca-certificates py-openssl wget
RUN apk --update add --virtual build-dependencies libffi-dev openssl-dev python-dev py-pip build-base \
    && apk add jpeg-dev zlib-dev libjpeg \
    && pip install --upgrade pip
RUN cd /action; pip install --no-cache-dir -r requirements.txt
RUN pip install opencv-python
# Ensure source assets are not drawn from the cache after this date
ENV REFRESHED_AT 2016-09-05T13:59:39Z
# Add all source assets
ADD . /action
# Rename our executable Python action
# NOTE(review): the action proxy executes /action/exec, hence the rename.
ADD rotate.py /action/exec
# Leave CMD as is for Openwhisk
CMD ["/bin/bash", "-c", "cd actionProxy && python3 -u actionproxy.py"]

@ -0,0 +1,3 @@
sudo ./buildAndPush.sh 10.129.28.219:5000/image-rotate-image
wsk -i action create image-rotate --docker 10.129.28.219:5000/image-rotate-image --web=true --timeout=300000
./register.sh /image-rotate-api /image-rotate-path image-rotate --response-type=json

@ -0,0 +1,24 @@
#!/bin/bash
#
# This script will build the docker image and push it to dockerhub.
#
# Usage: buildAndPush.sh imageName
#
# Dockerhub image names look like "username/appname" and must be all lower case.
# For example, "janesmith/calculator"

IMAGE_NAME=$1

# Refuse to run without an image name instead of building an unnamed image.
if [ -z "$IMAGE_NAME" ]; then
    echo "Usage: $0 imageName"
    exit 1
fi

echo "Using $IMAGE_NAME as the image name"

# Make the docker image.
docker build -t "$IMAGE_NAME" .
if [ $? -ne 0 ]; then
    echo "Docker build failed"
    # Exit non-zero so callers (CI, register scripts) see the failure;
    # a bare `exit` would return the status of the preceding echo (0).
    exit 1
fi

docker push "$IMAGE_NAME"
if [ $? -ne 0 ]; then
    echo "Docker push failed"
    exit 1
fi

@ -0,0 +1,5 @@
requests
boto3
redis
opencv-python

@ -0,0 +1,97 @@
#!/usr/bin/env python3
import requests
import os
import boto3
import redis
import pickle
import json
import cv2
import sys
# Rotate an image by 90 degree clockwise
def main():
    """Rotate a batch of images 90 degrees clockwise and publish them to S3.

    Runs as an OpenWhisk docker action. Input images come from one of two
    places:
      * DAG-workflow path: ``params`` carries the upstream decode action's
        ``activation_id`` and ``parts``; each frame is fetched from Redis.
      * Standalone path: ``params`` carries ``image_url_links`` and each
        image is downloaded over HTTP.
    Rotated files are written to ``rotated-images/``, uploaded to the public
    ``dagit-store`` bucket, and their URLs are printed (action log) and
    returned as the action result.
    """
    images_dir = "rotated-images"
    if not os.path.isdir(images_dir):
        os.mkdir(images_dir)

    # Redis instance shared by the workflow actions; holds intermediate images.
    r = redis.Redis(host="10.129.28.219", port=6379, db=2)
    activation_id = os.environ.get('__OW_ACTIVATION_ID')
    params = json.loads(sys.argv[1])

    rotated_result = []
    try:
        # DAG-workflow path: pull the decoded frames out of Redis.
        decode_activation_id = params["activation_id"]
        parts = params["parts"]
        _clear_previous_outputs(images_dir, parts)
        for i in range(parts):
            decode_output = "decode-output-image" + decode_activation_id + "-" + str(i)
            # NOTE(review): pickle.loads on Redis data is only acceptable
            # because the writer is a trusted action in the same deployment.
            load_image = pickle.loads(r.get(decode_output))
            image_name = 'Image' + str(i) + '.jpg'
            with open(image_name, 'wb') as f:
                f.write(load_image)
            rotated_result.append(_rotate_and_save(image_name, images_dir, i))
    except Exception:
        # Standalone fallback (deliberately broad, as in the workflow design):
        # any failure above means we were not invoked as part of a DAG, so
        # download the images directly from the supplied URLs instead.
        image_url_list = params["image_url_links"]
        parts = len(image_url_list)
        _clear_previous_outputs(images_dir, parts)
        for i in range(parts):
            response = requests.get(image_url_list[i])
            image_name = 'Image' + str(i) + '.jpg'
            with open(image_name, "wb") as f:
                f.write(response.content)
            rotated_result.append(_rotate_and_save(image_name, images_dir, i))

    _upload_dir_to_s3(images_dir)

    url_list = ["https://dagit-store.s3.ap-south-1.amazonaws.com/" + images_dir + "/" + image
                for image in rotated_result]
    result = {"rotated_image_url_links": url_list,
              "activation_id": str(activation_id),
              "parts": parts}
    print(json.dumps(result))
    return result


def _clear_previous_outputs(images_dir, parts):
    """Remove stale rotated_image_<i>.jpg files left over from a prior run."""
    for i in range(parts):
        path = images_dir + '/rotated_image_' + str(i) + '.jpg'
        if os.path.exists(path):
            os.remove(path)


def _rotate_and_save(image_name, images_dir, index):
    """Rotate one image file 90° clockwise into images_dir.

    Returns the output file name (without the directory prefix)."""
    img = cv2.imread(image_name)
    rotated = cv2.rotate(img, cv2.ROTATE_90_CLOCKWISE)
    out_name = 'rotated_image_' + str(index) + '.jpg'
    cv2.imwrite(images_dir + '/' + out_name, rotated)
    return out_name


def _upload_dir_to_s3(folder_path):
    """Upload every file under folder_path to dagit-store, world-readable."""
    s3 = boto3.client('s3',
                      aws_access_key_id=os.getenv('AWS_ACCESS_KEY_ID'),
                      aws_secret_access_key=os.getenv('AWS_SECRET_ACCESS_KEY'),
                      region_name=os.getenv('AWS_REGION'))
    bucket_name = 'dagit-store'
    for subdir, dirs, files in os.walk(folder_path):
        for file in files:
            file_path = os.path.join(subdir, file)
            key = f'{folder_path}/{file_path.split("/")[-1]}'
            s3.upload_file(file_path, bucket_name, key)
            s3.put_object_acl(Bucket=bucket_name, Key=key, ACL='public-read')


if __name__ == "__main__":
    main()

@ -13,12 +13,8 @@ RUN apk --update add --virtual build-dependencies libffi-dev openssl-dev python-
&& apk add jpeg-dev zlib-dev libjpeg \
&& pip install --upgrade pip
RUN apk add ffmpeg
RUN pip install opencv-python
RUN cd /action; pip install -r requirements.txt
# Ensure source assets are not drawn from the cache

@ -1,3 +1,7 @@
sudo ./buildAndPush.sh 10.129.28.219:5000/encode-image-1
wsk -i action create encode-action --docker 10.129.28.219:5000/encode-image-1
wsk -i action update encode-action --docker 10.129.28.219:5000/encode-image-1 encode.py --timeout 400000
sudo ./buildAndPush.sh 10.129.28.219:5000/image-processing
wsk -i action create encode-function --docker 10.129.28.219:5000/image-processing encode.py --web=true --timeout=420000 -m 4096
wsk -i action update encode-function --docker 10.129.28.219:5000/image-processing encode.py --web=true --timeout=420000 -m 4096
./register.sh /encode-function /encode encode-function

@ -1,75 +1,62 @@
#!/usr/bin/env python3
import ffmpeg
import cv2
import time
from io import BytesIO
import os
import sys
import redis
import pickle
import json
from PIL import Image
import pysftp
import logging
import boto3
import requests
logging.basicConfig(level=logging.INFO)
def main():
print("Inside encode\n")
import time as time1
start = time1.time()
cnopts = pysftp.CnOpts()
cnopts.hostkeys = None
try:
sftp = pysftp.Connection(
host="10.129.28.219",
username="faasapp",
password="1234",
cnopts=cnopts
)
logging.info("connection established successfully")
except:
logging.info('failed to establish connection to targeted server')
filtered_dir = "filtered-images"
is_images_dir = os.path.isdir(filtered_dir)
if(is_images_dir == False):
os.mkdir(filtered_dir)
remote_path = "/home/faasapp/Desktop/anubhav/sprocket-filter/"+filtered_dir
remote_upload_path = "/home/faasapp/Desktop/anubhav/sprocket-encode/"+filtered_dir
try:
sftp.chdir(remote_path) # Test if remote_path exists
except IOError:
sftp.mkdir(remote_path) # Create remote_path
sftp.chdir(remote_path)
try:
sftp.chdir(remote_upload_path) # Test if remote_path exists
except IOError:
sftp.mkdir(remote_upload_path) # Create remote_path
sftp.chdir(remote_upload_path)
current_path = os.getcwd()
sftp.get_d(remote_path,preserve_mtime=True,localdir=filtered_dir)
sftp.put_d(current_path+"/"+filtered_dir,preserve_mtime=True,remotepath=remote_upload_path)
# print("Current Path",current_path)
path = current_path+"/"+filtered_dir+"/"
output_path="output.avi"
# filtered_dir = "filtered-images"
# is_images_dir = os.path.isdir(filtered_dir)
# if(is_images_dir == False):
# os.mkdir(filtered_dir)
# output_path="output.avi"
r = redis.Redis(host="10.129.28.219", port=6379, db=2)
activation_id = os.environ.get('__OW_ACTIVATION_ID')
params = json.loads(sys.argv[1])
images = []
input_images = os.listdir(path)
for i in input_images:
i=path+i
images.append(i)
try:
bilateral_activation_id = params["activation_id"]
parts = params["parts"]
# for i in range(0,parts):
# if os.path.exists(images_dir+'/resized_image_'+str(i)+'.jpg'):
# os.remove(images_dir+'/resized_image_'+str(i)+'.jpg')
for i in range(0,parts):
bilateral_output = "bilateral-output-image"+bilateral_activation_id+"-"+str(i)
load_image = pickle.loads(r.get(bilateral_output))
image_name = 'Image'+str(i)+'.jpg'
with open(image_name, 'wb') as f:
f.write(load_image)
images.append(image_name)
# img = cv2.imread(image_name)
# resized_result.append('resized_image_'+str(i)+'.jpg')
except Exception as e:
image_url_list = params["image_url_links"]
parts = len(image_url_list)
for i in range(0,parts):
response = requests.get(image_url_list[i])
image_name = 'Image'+str(i)+'.jpg'
with open(image_name, "wb") as f:
f.write(response.content)
images.append(image_name)
# input_images = os.listdir(path)
# for i in input_images:
# i=path+i
# images.append(i)
images.sort()
@ -89,36 +76,24 @@ def main():
print('frame',i+1,'of',len(images))
video.release()
output_video_size = os.stat(output_path).st_size
upload_path = "/home/faasapp/Desktop/anubhav/sprocket-decode/output.avi"
current_files = os.listdir('.')
sftp.put(output_path,preserve_mtime=True,remotepath=upload_path)
# r = redis.Redis(host="10.129.28.219", port=6379, db=2)
activation_id = os.environ.get('__OW_ACTIVATION_ID')
params = json.loads(sys.argv[1])
decode_execution_time = params["exec_time_decode"]
#print(decode_execution_time)
filter_execution_time = params["exec_time_filter"]
# print(filter_execution_time)
parts = params["parts"]
aws_access_key_id = os.getenv('AWS_ACCESS_KEY_ID')
aws_secret_access_key = os.getenv('AWS_SECRET_ACCESS_KEY')
aws_region = os.getenv('AWS_REGION')
end = time1.time()
exec_time = end-start
total_time = decode_execution_time + filter_execution_time + exec_time
print(json.dumps({ "encode_output": output_path,
"number_of_images_processed": parts,
"activation_id": str(activation_id),
"exec_time_filter": filter_execution_time,
"exec_time_decode": decode_execution_time,
"exec_time_encode": exec_time,
"workflow_execution_time": total_time,
"output_video_size_in_bytes": output_video_size
#"params":params
bucket_name = 'dagit-store'
s3 = boto3.client('s3', aws_access_key_id=aws_access_key_id,aws_secret_access_key=aws_secret_access_key,region_name=aws_region)
s3.upload_file('output.avi', bucket_name, 'output.avi')
s3.put_object_acl(Bucket=bucket_name, Key='output.avi', ACL='public-read')
url = "https://dagit-store.s3.ap-south-1.amazonaws.com/output.avi"
print(json.dumps({"encode_output": url,
"activation_id": activation_id,
"number_of_images_processed": parts,
}))

@ -1,11 +1,3 @@
opencv-python
requests
redis
ffmpeg-python
zlib-state
pilgram
Pillow==6.2.2
paramiko==2.11.0
pycparser==2.21
PyNaCl==1.5.0
pysftp==0.2.9
boto3

@ -7,8 +7,8 @@ ENV FLASK_PROXY_PORT 8080
ADD requirements.txt /action/requirements.txt
RUN apk --update add python py-pip openssl ca-certificates py-openssl wget
RUN apk --update add python py-pip openssl ca-certificates py-openssl wget
RUN apk --update add --virtual build-dependencies libffi-dev openssl-dev python-dev py-pip build-base \
&& apk add jpeg-dev zlib-dev libjpeg \
&& pip install --upgrade pip
@ -23,6 +23,9 @@ ENV REFRESHED_AT 2016-09-05T13:59:39Z
ADD . /action
# Rename our executable Python action
ADD test.py /action/exec
ENV AWS_ACCESS_KEY_ID="AKIAYFB773UVZSOAVZN4"
ENV AWS_SECRET_ACCESS_KEY="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"
ENV AWS_REGION="ap-south-1"
# Leave CMD as is for Openwhisk
CMD ["/bin/bash", "-c", "cd actionProxy && python3 -u actionproxy.py"]

@ -0,0 +1,11 @@
sudo ./buildAndPush.sh 10.129.28.219:5000/test-s3
wsk -i action create test-s3-trigger --docker 10.129.28.219:5000/test-s3 --web=true --timeout=300000
wsk -i action update test-s3-trigger --docker 10.129.28.219:5000/test-s3 test.py --timeout 300000
./register.sh /image-rotate-api /image-rotate-path image-rotate --response-type=json
wsk -i trigger create myTrigger
wsk -i rule create myRule1 s3-trigger test-s3-trigger --param bucket dagit-store --param suffix .txt
wsk -i trigger create s3-trigger1 --feed /whisk.system/s3-trigger-feed/changes --param bucket dagit-store --param suffix .txt

@ -1,4 +1,2 @@
paramiko==2.11.0
pycparser==2.21
PyNaCl==1.5.0
pysftp==0.2.9
requests
boto3

@ -3,21 +3,36 @@
import os
import json
import sys
import boto3
def main():
    """Add 2 to the ``number`` parameter and echo the result.

    Minimal OpenWhisk action used to exercise the S3 trigger plumbing.
    Reads the JSON argument OpenWhisk passes in ``sys.argv[1]``, computes
    ``number + 2``, and both prints (for the activation log) and returns the
    response payload.
    """
    activation_id = os.environ.get('__OW_ACTIVATION_ID')
    params = json.loads(sys.argv[1])
    number = params["number"]
    res = number + 2
    # Build the payload once so the printed log line and the returned value
    # cannot drift apart.
    response = {
        "activation_id": str(activation_id),
        "number": number,
        "result": res,
        "provider_ns": "test",
        "message": "Hello yayy",
    }
    print(json.dumps(response))
    return response

@ -397,8 +397,4 @@ op_2 = params["__ow_body"][1]["key_action_2"]
Use these op_1 and op_2 to process
<<<<<<< HEAD
##############################################
=======
##############################################
>>>>>>> 544c0a4dc690739a0fe08a2b7a830d804bb9f647

Loading…
Cancel
Save