parent
0b3d51c30b
commit
21e5405260
@ -0,0 +1,27 @@
# Dockerfile for Python whisk docker action
FROM openwhisk/dockerskeleton

ENV FLASK_PROXY_PORT 8080

# Install our action's Python dependencies
ADD requirements.txt /action/requirements.txt
ENV AWS_ACCESS_KEY_ID="AKIAYFB773UVZSOAVZN4"
ENV AWS_SECRET_ACCESS_KEY="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"
ENV AWS_REGION="ap-south-1"
RUN apk --update add python py-pip openssl ca-certificates py-openssl wget
RUN apk add ffmpeg
RUN apk --update add --virtual build-dependencies libffi-dev openssl-dev python-dev py-pip build-base \
    && apk add jpeg-dev zlib-dev libjpeg \
    && pip install --upgrade pip
RUN cd /action; pip install --no-cache-dir -r requirements.txt

# Ensure source assets are not drawn from the cache
# after this date
ENV REFRESHED_AT 2016-09-05T13:59:39Z
# Add all source assets
ADD . /action
# Rename our executable Python action
ADD decode.py /action/exec

# Leave CMD as is for Openwhisk
CMD ["/bin/bash", "-c", "cd actionProxy && python3 -u actionproxy.py"]
@ -0,0 +1,2 @@
sudo ./buildAndPush.sh 10.129.28.219:5000/decode-function-image
wsk -i action create decode --docker 10.129.28.219:5000/decode-function-image --web=true --timeout=300000
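# Hypothetical smoke test, assuming the sample params file in this commit is saved as params.json:
wsk -i action invoke decode --param-file params.json --result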
@ -0,0 +1,24 @@
#!/bin/bash
#
# This script will build the docker image and push it to dockerhub.
#
# Usage: buildAndPush.sh imageName
#
# Dockerhub image names look like "username/appname" and must be all lower case.
# For example, "janesmith/calculator"

IMAGE_NAME=$1
echo "Using $IMAGE_NAME as the image name"

# Make the docker image
docker build -t "$IMAGE_NAME" .
if [ $? -ne 0 ]; then
    echo "Docker build failed"
    exit 1
fi
docker push "$IMAGE_NAME"
if [ $? -ne 0 ]; then
    echo "Docker push failed"
    exit 1
fi

@ -0,0 +1,130 @@
#!/usr/bin/env python3
import os
import redis
import pickle
import logging
import json
import sys
import ffmpeg
import boto3

from urllib.request import urlretrieve

logging.basicConfig(level=logging.INFO)


def main():
    images_dir = "decoded-images"
    r = redis.Redis(host="10.129.28.219", port=6379, db=2)
    r.flushdb()  # flush previous content, if any
    activation_id = os.environ.get('__OW_ACTIVATION_ID')

    # dwn_link = 'http://commondatastorage.googleapis.com/gtv-videos-bucket/sample/ElephantsDream.mp4'
    params = json.loads(sys.argv[1])
    dwn_link = params["filename"]
    # Set how many frames you want to extract from the video.
    parts = params["parts"]
    file_name = 'decode_video.mp4'
    urlretrieve(dwn_link, file_name)
    if not os.path.isdir(images_dir):
        os.mkdir(images_dir)
    probe = ffmpeg.probe(file_name)
    video_stream = next((stream for stream in probe['streams'] if stream['codec_type'] == 'video'), None)
    # Only the first half of the video is sampled.
    duration = float(probe['streams'][0]['duration']) // 2
    width = int(video_stream['width'])

    # Split the sampled duration into `parts` equal intervals,
    # e.g. duration 60s and parts 3 gives (0, 20), (20, 40), (40, 60).
    interval = int(duration // parts)
    interval_list = [(i * interval, (i + 1) * interval) for i in range(parts)]

    result = []

    for i in range(parts):
        if os.path.exists(images_dir + '/Image' + str(i) + '.jpg'):
            os.remove(images_dir + '/Image' + str(i) + '.jpg')

    # Extract one frame at the end of each interval, scaled to the source width.
    for i, item in enumerate(interval_list):
        (
            ffmpeg
            .input(file_name, ss=item[1])
            .filter('scale', width, -1)
            .output(images_dir + '/Image' + str(i) + '.jpg', vframes=1)
            .run(capture_stdout=False)
        )

        # Stash each frame in Redis so downstream DAG stages can fetch it by key.
        with open(images_dir + '/Image' + str(i) + '.jpg', "rb") as f:
            img = f.read()
        pickled_object = pickle.dumps(img)
        decode_output = "decode-output-image" + activation_id + "-" + str(i)
        r.set(decode_output, pickled_object)
        result.append('Image' + str(i) + '.jpg')

    aws_access_key_id = os.getenv('AWS_ACCESS_KEY_ID')
    aws_secret_access_key = os.getenv('AWS_SECRET_ACCESS_KEY')
    aws_region = os.getenv('AWS_REGION')

    s3 = boto3.client('s3', aws_access_key_id=aws_access_key_id,
                      aws_secret_access_key=aws_secret_access_key,
                      region_name=aws_region)

    # Upload the extracted frames and make them publicly readable.
    bucket_name = 'dagit-store'
    folder_path = images_dir
    folder_name = images_dir
    for subdir, dirs, files in os.walk(folder_path):
        for file in files:
            file_path = os.path.join(subdir, file)
            s3.upload_file(file_path, bucket_name, f'{folder_name}/{file_path.split("/")[-1]}')
            s3.put_object_acl(Bucket=bucket_name, Key=f'{folder_name}/{file_path.split("/")[-1]}', ACL='public-read')
    url_list = []
    for image in result:
        url = "https://dagit-store.s3.ap-south-1.amazonaws.com/" + images_dir + "/" + image
        url_list.append(url)

    # height/width params are optional; fall back to a reduced payload if absent.
    try:
        image_height = int(params["height"])
        image_width = int(params["width"])

        print(json.dumps({"image_url_links": url_list,
                          "activation_id": str(activation_id),
                          "parts": parts,
                          "height": image_height,
                          "width": image_width,
                          "file_link": dwn_link
                          }))

        return ({"image_url_links": url_list,
                 "activation_id": str(activation_id),
                 "parts": parts,
                 "height": image_height,
                 "width": image_width,
                 "file_link": dwn_link
                 })
    except Exception as e:
        print(json.dumps({"image_url_links": url_list,
                          "activation_id": str(activation_id),
                          "parts": parts,
                          "file_link": dwn_link
                          }))

        return ({"image_url_links": url_list,
                 "activation_id": str(activation_id),
                 "parts": parts,
                 "file_link": dwn_link
                 })


if __name__ == "__main__":
    main()
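
# A quick way to verify what decode wrote to Redis (hypothetical check, run from
# a shell that can reach the Redis host above):
#   redis-cli -h 10.129.28.219 -p 6379 -n 2 keys 'decode-output-image*'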
@ -0,0 +1,4 @@
{
    "filename": "http://commondatastorage.googleapis.com/gtv-videos-bucket/sample/ElephantsDream.mp4",
    "parts": 10
}
@ -0,0 +1,3 @@
boto3
redis
ffmpeg-python
@ -0,0 +1,25 @@
# Dockerfile for Python whisk docker action
FROM openwhisk/dockerskeleton

ENV FLASK_PROXY_PORT 8080

# Install our action's Python dependencies
ADD requirements.txt /action/requirements.txt
ENV AWS_ACCESS_KEY_ID="AKIAYFB773UVZSOAVZN4"
ENV AWS_SECRET_ACCESS_KEY="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"
ENV AWS_REGION="ap-south-1"
RUN apk --update add python py-pip openssl ca-certificates py-openssl wget
RUN apk --update add --virtual build-dependencies libffi-dev openssl-dev python-dev py-pip build-base \
    && apk add jpeg-dev zlib-dev libjpeg \
    && pip install --upgrade pip
RUN cd /action; pip install --no-cache-dir -r requirements.txt
RUN pip install opencv-python
# Ensure source assets are not drawn from the cache after this date
ENV REFRESHED_AT 2016-09-05T13:59:39Z
# Add all source assets
ADD . /action
# Rename our executable Python action
ADD blur.py /action/exec

# Leave CMD as is for Openwhisk
CMD ["/bin/bash", "-c", "cd actionProxy && python3 -u actionproxy.py"]
@ -0,0 +1,92 @@
#!/usr/bin/env python3
import requests
import os
import boto3
import redis
import pickle
import json
import cv2
import sys


def main():
    images_dir = "blurred-images"
    if not os.path.isdir(images_dir):
        os.mkdir(images_dir)
    r = redis.Redis(host="10.129.28.219", port=6379, db=2)
    activation_id = os.environ.get('__OW_ACTIVATION_ID')
    params = json.loads(sys.argv[1])
    blurred_result = []
    try:
        # DAG mode: read the frames that the decode stage stashed in Redis.
        decode_activation_id = params["activation_id"]
        parts = params["parts"]
        for i in range(parts):
            if os.path.exists(images_dir + '/blurred_image_' + str(i) + '.jpg'):
                os.remove(images_dir + '/blurred_image_' + str(i) + '.jpg')
        for i in range(parts):
            decode_output = "decode-output-image" + decode_activation_id + "-" + str(i)
            load_image = pickle.loads(r.get(decode_output))
            image_name = 'Image' + str(i) + '.jpg'
            with open(image_name, 'wb') as f:
                f.write(load_image)

            img = cv2.imread(image_name)
            blurred_image = cv2.GaussianBlur(img, (15, 15), 0)
            output_image = images_dir + '/blurred_image_' + str(i) + '.jpg'
            cv2.imwrite(output_image, blurred_image)
            blurred_result.append('blurred_image_' + str(i) + '.jpg')
    except Exception as e:  # If not running as part of a DAG workflow, i.e. invoked as a single standalone function
        image_url_list = params["image_url_links"]
        parts = len(image_url_list)
        for i in range(parts):
            if os.path.exists(images_dir + '/blurred_image_' + str(i) + '.jpg'):
                os.remove(images_dir + '/blurred_image_' + str(i) + '.jpg')
        for i in range(parts):
            response = requests.get(image_url_list[i])
            image_name = 'Image' + str(i) + '.jpg'
            with open(image_name, "wb") as f:
                f.write(response.content)
            img = cv2.imread(image_name)
            blurred_image = cv2.GaussianBlur(img, (15, 15), 0)
            output_image = images_dir + '/blurred_image_' + str(i) + '.jpg'
            cv2.imwrite(output_image, blurred_image)
            blurred_result.append('blurred_image_' + str(i) + '.jpg')

    aws_access_key_id = os.getenv('AWS_ACCESS_KEY_ID')
    aws_secret_access_key = os.getenv('AWS_SECRET_ACCESS_KEY')
    aws_region = os.getenv('AWS_REGION')

    s3 = boto3.client('s3', aws_access_key_id=aws_access_key_id,
                      aws_secret_access_key=aws_secret_access_key,
                      region_name=aws_region)

    # Upload the blurred images and make them publicly readable.
    bucket_name = 'dagit-store'
    folder_path = images_dir
    folder_name = images_dir
    for subdir, dirs, files in os.walk(folder_path):
        for file in files:
            file_path = os.path.join(subdir, file)
            s3.upload_file(file_path, bucket_name, f'{folder_name}/{file_path.split("/")[-1]}')
            s3.put_object_acl(Bucket=bucket_name, Key=f'{folder_name}/{file_path.split("/")[-1]}', ACL='public-read')
    url_list = []
    for image in blurred_result:
        url = "https://dagit-store.s3.ap-south-1.amazonaws.com/" + images_dir + "/" + image
        url_list.append(url)

    # Use the same key in the printed result and the return value.
    print(json.dumps({"blurred_image_url_links": url_list,
                      "activation_id": str(activation_id),
                      "parts": parts
                      }))

    return ({"blurred_image_url_links": url_list,
             "activation_id": str(activation_id),
             "parts": parts
             })


if __name__ == "__main__":
    main()
@ -0,0 +1,3 @@
sudo ./buildAndPush.sh 10.129.28.219:5000/image-blur-image
wsk -i action create image-blur --docker 10.129.28.219:5000/image-blur-image --web=true --timeout=300000
./register.sh /image-blur-api /image-blur-path image-blur --response-type=json
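# Hypothetical standalone invocation (no decode stage): the action falls back to
# fetching the listed URLs directly, so the example URL below is an assumption.
wsk -i action invoke image-blur --param image_url_links '["https://dagit-store.s3.ap-south-1.amazonaws.com/decoded-images/Image0.jpg"]' --result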
@ -0,0 +1,24 @@
#!/bin/bash
#
# This script will build the docker image and push it to dockerhub.
#
# Usage: buildAndPush.sh imageName
#
# Dockerhub image names look like "username/appname" and must be all lower case.
# For example, "janesmith/calculator"

IMAGE_NAME=$1
echo "Using $IMAGE_NAME as the image name"

# Make the docker image
docker build -t "$IMAGE_NAME" .
if [ $? -ne 0 ]; then
    echo "Docker build failed"
    exit 1
fi
docker push "$IMAGE_NAME"
if [ $? -ne 0 ]; then
    echo "Docker push failed"
    exit 1
fi

@ -0,0 +1,4 @@
requests
boto3
redis
opencv-python
@ -0,0 +1,25 @@
# Dockerfile for Python whisk docker action
FROM openwhisk/dockerskeleton

ENV FLASK_PROXY_PORT 8080

# Install our action's Python dependencies
ADD requirements.txt /action/requirements.txt
ENV AWS_ACCESS_KEY_ID="AKIAYFB773UVZSOAVZN4"
ENV AWS_SECRET_ACCESS_KEY="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"
ENV AWS_REGION="ap-south-1"
RUN apk --update add python py-pip openssl ca-certificates py-openssl wget
RUN apk --update add --virtual build-dependencies libffi-dev openssl-dev python-dev py-pip build-base \
    && apk add jpeg-dev zlib-dev libjpeg \
    && pip install --upgrade pip
RUN cd /action; pip install --no-cache-dir -r requirements.txt
RUN pip install opencv-python
# Ensure source assets are not drawn from the cache after this date
ENV REFRESHED_AT 2016-09-05T13:59:39Z
# Add all source assets
ADD . /action
# Rename our executable Python action
ADD denoise.py /action/exec

# Leave CMD as is for Openwhisk
CMD ["/bin/bash", "-c", "cd actionProxy && python3 -u actionproxy.py"]
@ -0,0 +1,3 @@
sudo ./buildAndPush.sh 10.129.28.219:5000/image-denoise-image
./register.sh /image-denoise-api /image-denoise-path image-denoise --response-type=json
wsk -i action create image-denoise --docker 10.129.28.219:5000/image-denoise-image --web=true --timeout=300000
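# Hypothetical DAG-mode invocation: pass the decode stage's activation id and part
# count so the frames are read back from Redis (placeholder id below).
wsk -i action invoke image-denoise --param activation_id <decode-activation-id> --param parts 10 --result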
@ -0,0 +1,24 @@
#!/bin/bash
#
# This script will build the docker image and push it to dockerhub.
#
# Usage: buildAndPush.sh imageName
#
# Dockerhub image names look like "username/appname" and must be all lower case.
# For example, "janesmith/calculator"

IMAGE_NAME=$1
echo "Using $IMAGE_NAME as the image name"

# Make the docker image
docker build -t "$IMAGE_NAME" .
if [ $? -ne 0 ]; then
    echo "Docker build failed"
    exit 1
fi
docker push "$IMAGE_NAME"
if [ $? -ne 0 ]; then
    echo "Docker push failed"
    exit 1
fi

@ -0,0 +1,96 @@
#!/usr/bin/env python3
import requests
import os
import boto3
import redis
import pickle
import json
import cv2
import sys


def main():
    images_dir = "denoised-images"
    if not os.path.isdir(images_dir):
        os.mkdir(images_dir)
    r = redis.Redis(host="10.129.28.219", port=6379, db=2)
    activation_id = os.environ.get('__OW_ACTIVATION_ID')
    params = json.loads(sys.argv[1])
    denoised_result = []
    try:
        # DAG mode: read the frames that the decode stage stashed in Redis.
        decode_activation_id = params["activation_id"]
        parts = params["parts"]
        for i in range(parts):
            if os.path.exists(images_dir + '/denoised_image_' + str(i) + '.jpg'):
                os.remove(images_dir + '/denoised_image_' + str(i) + '.jpg')
        for i in range(parts):
            decode_output = "decode-output-image" + decode_activation_id + "-" + str(i)
            load_image = pickle.loads(r.get(decode_output))
            image_name = 'Image' + str(i) + '.jpg'
            with open(image_name, 'wb') as f:
                f.write(load_image)

            img = cv2.imread(image_name)
            # Edge-preserving denoise: bilateral filter, diameter 20,
            # sigmaColor/sigmaSpace 100.
            denoised_image = cv2.bilateralFilter(img, 20, 100, 100)
            output_image = images_dir + '/denoised_image_' + str(i) + '.jpg'
            cv2.imwrite(output_image, denoised_image)
            denoised_result.append('denoised_image_' + str(i) + '.jpg')
    except Exception as e:  # If not running as part of a DAG workflow, i.e. invoked as a single standalone function
        image_url_list = params["image_url_links"]
        parts = len(image_url_list)
        for i in range(parts):
            if os.path.exists(images_dir + '/denoised_image_' + str(i) + '.jpg'):
                os.remove(images_dir + '/denoised_image_' + str(i) + '.jpg')
        for i in range(parts):
            response = requests.get(image_url_list[i])
            image_name = 'Image' + str(i) + '.jpg'
            with open(image_name, "wb") as f:
                f.write(response.content)
            img = cv2.imread(image_name)
            denoised_image = cv2.bilateralFilter(img, 20, 100, 100)
            output_image = images_dir + '/denoised_image_' + str(i) + '.jpg'
            cv2.imwrite(output_image, denoised_image)
            denoised_result.append('denoised_image_' + str(i) + '.jpg')

    aws_access_key_id = os.getenv('AWS_ACCESS_KEY_ID')
    aws_secret_access_key = os.getenv('AWS_SECRET_ACCESS_KEY')
    aws_region = os.getenv('AWS_REGION')

    s3 = boto3.client('s3', aws_access_key_id=aws_access_key_id,
                      aws_secret_access_key=aws_secret_access_key,
                      region_name=aws_region)

    # Upload the denoised images and make them publicly readable.
    bucket_name = 'dagit-store'
    folder_path = images_dir
    folder_name = images_dir
    for subdir, dirs, files in os.walk(folder_path):
        for file in files:
            file_path = os.path.join(subdir, file)
            s3.upload_file(file_path, bucket_name, f'{folder_name}/{file_path.split("/")[-1]}')
            s3.put_object_acl(Bucket=bucket_name, Key=f'{folder_name}/{file_path.split("/")[-1]}', ACL='public-read')
    url_list = []
    for image in denoised_result:
        url = "https://dagit-store.s3.ap-south-1.amazonaws.com/" + images_dir + "/" + image
        url_list.append(url)

    print(json.dumps({"denoised_image_url_links": url_list,
                      "activation_id": str(activation_id),
                      "parts": parts
                      }))

    return ({"denoised_image_url_links": url_list,
             "activation_id": str(activation_id),
             "parts": parts
             })


if __name__ == "__main__":
    main()
@ -0,0 +1,5 @@
requests
boto3
redis
opencv-python
@ -0,0 +1,25 @@
# Dockerfile for Python whisk docker action
FROM openwhisk/dockerskeleton

ENV FLASK_PROXY_PORT 8080

# Install our action's Python dependencies
ADD requirements.txt /action/requirements.txt
ENV AWS_ACCESS_KEY_ID="AKIAYFB773UVZSOAVZN4"
ENV AWS_SECRET_ACCESS_KEY="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"
ENV AWS_REGION="ap-south-1"
RUN apk --update add python py-pip openssl ca-certificates py-openssl wget
RUN apk --update add --virtual build-dependencies libffi-dev openssl-dev python-dev py-pip build-base \
    && apk add jpeg-dev zlib-dev libjpeg \
    && pip install --upgrade pip
RUN cd /action; pip install --no-cache-dir -r requirements.txt
RUN pip install opencv-python
# Ensure source assets are not drawn from the cache after this date
ENV REFRESHED_AT 2016-09-05T13:59:39Z
# Add all source assets
ADD . /action
# Rename our executable Python action
ADD resize.py /action/exec

# Leave CMD as is for Openwhisk
CMD ["/bin/bash", "-c", "cd actionProxy && python3 -u actionproxy.py"]
@ -0,0 +1,3 @@
sudo ./buildAndPush.sh 10.129.28.219:5000/image-resize-image
wsk -i action create image-resize --docker 10.129.28.219:5000/image-resize-image --web=true --timeout=300000
./register.sh /image-resize-api /image-resize-path image-resize --response-type=json
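# Hypothetical standalone invocation; resize additionally requires height and width
# params, which are read before the DAG/standalone branch (example values below).
wsk -i action invoke image-resize --param image_url_links '["https://dagit-store.s3.ap-south-1.amazonaws.com/decoded-images/Image0.jpg"]' --param height 224 --param width 224 --result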
@ -0,0 +1,24 @@
#!/bin/bash
#
# This script will build the docker image and push it to dockerhub.
#
# Usage: buildAndPush.sh imageName
#
# Dockerhub image names look like "username/appname" and must be all lower case.
# For example, "janesmith/calculator"

IMAGE_NAME=$1
echo "Using $IMAGE_NAME as the image name"

# Make the docker image
docker build -t "$IMAGE_NAME" .
if [ $? -ne 0 ]; then
    echo "Docker build failed"
    exit 1
fi
docker push "$IMAGE_NAME"
if [ $? -ne 0 ]; then
    echo "Docker push failed"
    exit 1
fi

@ -0,0 +1,5 @@
requests
boto3
redis
opencv-python
@ -0,0 +1,98 @@
#!/usr/bin/env python3
import requests
import os
import boto3
import redis
import pickle
import json
import cv2
import sys


def main():
    images_dir = "resized-images"
    if not os.path.isdir(images_dir):
        os.mkdir(images_dir)
    r = redis.Redis(host="10.129.28.219", port=6379, db=2)
    activation_id = os.environ.get('__OW_ACTIVATION_ID')
    params = json.loads(sys.argv[1])
    resized_result = []
    image_resize_height = params["height"]
    image_resize_width = params["width"]
    try:
        # DAG mode: read the frames that the decode stage stashed in Redis.
        decode_activation_id = params["activation_id"]
        parts = params["parts"]
        for i in range(parts):
            if os.path.exists(images_dir + '/resized_image_' + str(i) + '.jpg'):
                os.remove(images_dir + '/resized_image_' + str(i) + '.jpg')
        for i in range(parts):
            decode_output = "decode-output-image" + decode_activation_id + "-" + str(i)
            load_image = pickle.loads(r.get(decode_output))
            image_name = 'Image' + str(i) + '.jpg'
            with open(image_name, 'wb') as f:
                f.write(load_image)

            img = cv2.imread(image_name)
            # cv2.resize expects dsize as (width, height)
            resized_image = cv2.resize(img, (image_resize_width, image_resize_height))
            output_image = images_dir + '/resized_image_' + str(i) + '.jpg'
            # Save the output image
            cv2.imwrite(output_image, resized_image)
            resized_result.append('resized_image_' + str(i) + '.jpg')
    except Exception as e:  # If not running as part of a DAG workflow, i.e. invoked as a single standalone function
        image_url_list = params["image_url_links"]
        parts = len(image_url_list)
        for i in range(parts):
            if os.path.exists(images_dir + '/resized_image_' + str(i) + '.jpg'):
                os.remove(images_dir + '/resized_image_' + str(i) + '.jpg')
        for i in range(parts):
            response = requests.get(image_url_list[i])
            image_name = 'Image' + str(i) + '.jpg'
            with open(image_name, "wb") as f:
                f.write(response.content)
            img = cv2.imread(image_name)
            resized_image = cv2.resize(img, (image_resize_width, image_resize_height))
            output_image = images_dir + '/resized_image_' + str(i) + '.jpg'
            cv2.imwrite(output_image, resized_image)
            resized_result.append('resized_image_' + str(i) + '.jpg')

    aws_access_key_id = os.getenv('AWS_ACCESS_KEY_ID')
    aws_secret_access_key = os.getenv('AWS_SECRET_ACCESS_KEY')
    aws_region = os.getenv('AWS_REGION')

    s3 = boto3.client('s3', aws_access_key_id=aws_access_key_id,
                      aws_secret_access_key=aws_secret_access_key,
                      region_name=aws_region)

    # Upload the resized images and make them publicly readable.
    bucket_name = 'dagit-store'
    folder_path = images_dir
    folder_name = images_dir
    for subdir, dirs, files in os.walk(folder_path):
        for file in files:
            file_path = os.path.join(subdir, file)
            s3.upload_file(file_path, bucket_name, f'{folder_name}/{file_path.split("/")[-1]}')
            s3.put_object_acl(Bucket=bucket_name, Key=f'{folder_name}/{file_path.split("/")[-1]}', ACL='public-read')
    url_list = []
    for image in resized_result:
        url = "https://dagit-store.s3.ap-south-1.amazonaws.com/" + images_dir + "/" + image
        url_list.append(url)

    print(json.dumps({"resized_image_url_links": url_list,
                      "activation_id": str(activation_id),
                      "parts": parts
                      }))

    return ({"resized_image_url_links": url_list,
             "activation_id": str(activation_id),
             "parts": parts
             })


if __name__ == "__main__":
    main()
@ -0,0 +1,25 @@
# Dockerfile for Python whisk docker action
FROM openwhisk/dockerskeleton

ENV FLASK_PROXY_PORT 8080

# Install our action's Python dependencies
ADD requirements.txt /action/requirements.txt
ENV AWS_ACCESS_KEY_ID="AKIAYFB773UVZSOAVZN4"
ENV AWS_SECRET_ACCESS_KEY="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"
ENV AWS_REGION="ap-south-1"
RUN apk --update add python py-pip openssl ca-certificates py-openssl wget
RUN apk --update add --virtual build-dependencies libffi-dev openssl-dev python-dev py-pip build-base \
    && apk add jpeg-dev zlib-dev libjpeg \
    && pip install --upgrade pip
RUN cd /action; pip install --no-cache-dir -r requirements.txt
RUN pip install opencv-python
# Ensure source assets are not drawn from the cache after this date
ENV REFRESHED_AT 2016-09-05T13:59:39Z
# Add all source assets
ADD . /action
# Rename our executable Python action
ADD rotate.py /action/exec

# Leave CMD as is for Openwhisk
CMD ["/bin/bash", "-c", "cd actionProxy && python3 -u actionproxy.py"]
@ -0,0 +1,3 @@
sudo ./buildAndPush.sh 10.129.28.219:5000/image-rotate-image
wsk -i action create image-rotate --docker 10.129.28.219:5000/image-rotate-image --web=true --timeout=300000
./register.sh /image-rotate-api /image-rotate-path image-rotate --response-type=json
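# To print the web action's URL after creation (standard wsk CLI flag; verify
# against your wsk version):
wsk -i action get image-rotate --url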
@ -0,0 +1,24 @@
#!/bin/bash
#
# This script will build the docker image and push it to dockerhub.
#
# Usage: buildAndPush.sh imageName
#
# Dockerhub image names look like "username/appname" and must be all lower case.
# For example, "janesmith/calculator"

IMAGE_NAME=$1
echo "Using $IMAGE_NAME as the image name"

# Make the docker image
docker build -t "$IMAGE_NAME" .
if [ $? -ne 0 ]; then
    echo "Docker build failed"
    exit 1
fi
docker push "$IMAGE_NAME"
if [ $? -ne 0 ]; then
    echo "Docker push failed"
    exit 1
fi

@ -0,0 +1,5 @@
requests
boto3
redis
opencv-python
@ -0,0 +1,97 @@
#!/usr/bin/env python3
import requests
import os
import boto3
import redis
import pickle
import json
import cv2
import sys


# Rotate an image by 90 degrees clockwise
def main():
    images_dir = "rotated-images"
    if not os.path.isdir(images_dir):
        os.mkdir(images_dir)
    r = redis.Redis(host="10.129.28.219", port=6379, db=2)
    activation_id = os.environ.get('__OW_ACTIVATION_ID')
    params = json.loads(sys.argv[1])
    rotated_result = []
    try:
        # DAG mode: read the frames that the decode stage stashed in Redis.
        decode_activation_id = params["activation_id"]
        parts = params["parts"]
        for i in range(parts):
            if os.path.exists(images_dir + '/rotated_image_' + str(i) + '.jpg'):
                os.remove(images_dir + '/rotated_image_' + str(i) + '.jpg')
        for i in range(parts):
            decode_output = "decode-output-image" + decode_activation_id + "-" + str(i)
            load_image = pickle.loads(r.get(decode_output))
            image_name = 'Image' + str(i) + '.jpg'
            with open(image_name, 'wb') as f:
                f.write(load_image)

            img = cv2.imread(image_name)
            # Rotate the image by 90 degrees
            rotated = cv2.rotate(img, cv2.ROTATE_90_CLOCKWISE)
            output_image = images_dir + '/rotated_image_' + str(i) + '.jpg'
            cv2.imwrite(output_image, rotated)
            rotated_result.append('rotated_image_' + str(i) + '.jpg')
    except Exception as e:  # If not running as part of a DAG workflow, i.e. invoked as a single standalone function
        image_url_list = params["image_url_links"]
        parts = len(image_url_list)
        for i in range(parts):
            if os.path.exists(images_dir + '/rotated_image_' + str(i) + '.jpg'):
                os.remove(images_dir + '/rotated_image_' + str(i) + '.jpg')
        for i in range(parts):
            response = requests.get(image_url_list[i])
            image_name = 'Image' + str(i) + '.jpg'
            with open(image_name, "wb") as f:
                f.write(response.content)
            img = cv2.imread(image_name)
            # Rotate the image by 90 degrees
            rotated = cv2.rotate(img, cv2.ROTATE_90_CLOCKWISE)
            output_image = images_dir + '/rotated_image_' + str(i) + '.jpg'
            cv2.imwrite(output_image, rotated)
            rotated_result.append('rotated_image_' + str(i) + '.jpg')

    aws_access_key_id = os.getenv('AWS_ACCESS_KEY_ID')
    aws_secret_access_key = os.getenv('AWS_SECRET_ACCESS_KEY')
    aws_region = os.getenv('AWS_REGION')

    s3 = boto3.client('s3', aws_access_key_id=aws_access_key_id,
                      aws_secret_access_key=aws_secret_access_key,
                      region_name=aws_region)

    # Upload the rotated images and make them publicly readable.
    bucket_name = 'dagit-store'
    folder_path = images_dir
    folder_name = images_dir
    for subdir, dirs, files in os.walk(folder_path):
        for file in files:
            file_path = os.path.join(subdir, file)
            s3.upload_file(file_path, bucket_name, f'{folder_name}/{file_path.split("/")[-1]}')
            s3.put_object_acl(Bucket=bucket_name, Key=f'{folder_name}/{file_path.split("/")[-1]}', ACL='public-read')
    url_list = []
    for image in rotated_result:
        url = "https://dagit-store.s3.ap-south-1.amazonaws.com/" + images_dir + "/" + image
        url_list.append(url)

    print(json.dumps({"rotated_image_url_links": url_list,
                      "activation_id": str(activation_id),
                      "parts": parts
                      }))

    return ({"rotated_image_url_links": url_list,
             "activation_id": str(activation_id),
             "parts": parts
             })


if __name__ == "__main__":
    main()
@ -0,0 +1,24 @@
# Dockerfile for Python whisk docker action
FROM openwhisk/dockerskeleton

ENV FLASK_PROXY_PORT 8080

# Install our action's Python dependencies
ADD requirements.txt /action/requirements.txt
ENV AWS_ACCESS_KEY_ID="AKIAYFB773UVZSOAVZN4"
ENV AWS_SECRET_ACCESS_KEY="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"
ENV AWS_REGION="ap-south-1"
RUN apk --update add python py-pip openssl ca-certificates py-openssl wget
RUN apk --update add --virtual build-dependencies libffi-dev openssl-dev python-dev py-pip build-base \
    && apk add jpeg-dev zlib-dev libjpeg \
    && pip install --upgrade pip
RUN cd /action; pip install -r requirements.txt
RUN pip install opencv-python
# Ensure source assets are not drawn from the cache after this date
ENV REFRESHED_AT 2016-09-05T13:59:39Z
# Add all source assets
ADD . /action
# Rename our executable Python action
ADD threshold.py /action/exec

# Leave CMD as is for Openwhisk
CMD ["/bin/bash", "-c", "cd actionProxy && python3 -u actionproxy.py"]
@ -0,0 +1,3 @@
sudo ./buildAndPush.sh 10.129.28.219:5000/image-threshold-image
wsk -i action create image-thresholding --docker 10.129.28.219:5000/image-threshold-image --web=true --timeout=300000
./register.sh /image-thresholding-api /image-thresholding-path image-thresholding --response-type=json
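# To inspect a finished run's JSON result, fetch it by activation id
# (placeholder below):
wsk -i activation result <activation-id>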
@ -0,0 +1,24 @@
#!/bin/bash
#
# This script will build the docker image and push it to dockerhub.
#
# Usage: buildAndPush.sh imageName
#
# Dockerhub image names look like "username/appname" and must be all lower case.
# For example, "janesmith/calculator"

IMAGE_NAME=$1
echo "Using $IMAGE_NAME as the image name"

# Make the docker image
docker build -t "$IMAGE_NAME" .
if [ $? -ne 0 ]; then
    echo "Docker build failed"
    exit 1
fi
docker push "$IMAGE_NAME"
if [ $? -ne 0 ]; then
    echo "Docker push failed"
    exit 1
fi

@ -0,0 +1,5 @@
requests
boto3
redis
opencv-python
@ -0,0 +1,97 @@
#!/usr/bin/env python3
import os
import requests
import boto3
import redis
import pickle
import json
import cv2
import sys

# Image thresholding is a simple yet effective way of partitioning an image
# into a foreground and a background. This image analysis technique is a type
# of image segmentation that isolates objects by converting grayscale images
# into binary images.
def main():
    images_dir = "thresholded-images"
    if not os.path.isdir(images_dir):
        os.mkdir(images_dir)
    r = redis.Redis(host="10.129.28.219", port=6379, db=2)
    activation_id = os.environ.get('__OW_ACTIVATION_ID')
    params = json.loads(sys.argv[1])
    thresholded_result = []
    try:
        # DAG mode: read the frames that the decode stage stashed in Redis.
        decode_activation_id = params["activation_id"]
        parts = params["parts"]
        for i in range(parts):
            if os.path.exists(images_dir + '/thresholded_image_' + str(i) + '.jpg'):
                os.remove(images_dir + '/thresholded_image_' + str(i) + '.jpg')
        for i in range(parts):
            decode_output = "decode-output-image" + decode_activation_id + "-" + str(i)
            load_image = pickle.loads(r.get(decode_output))
            image_name = 'Image' + str(i) + '.jpg'
            with open(image_name, 'wb') as f:
                f.write(load_image)

            img = cv2.imread(image_name)
            # Convert to grayscale, then binarize at the midpoint value 127.
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            _, thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
            output_image = images_dir + '/thresholded_image_' + str(i) + '.jpg'
            cv2.imwrite(output_image, thresh)
            thresholded_result.append('thresholded_image_' + str(i) + '.jpg')
    except Exception as e:  # If not running as part of a DAG workflow, i.e. invoked as a single standalone function
        image_url_list = params["image_url_links"]
        parts = len(image_url_list)
        for i in range(parts):
            if os.path.exists(images_dir + '/thresholded_image_' + str(i) + '.jpg'):
                os.remove(images_dir + '/thresholded_image_' + str(i) + '.jpg')
        for i in range(parts):
            response = requests.get(image_url_list[i])
            image_name = 'Image' + str(i) + '.jpg'
            with open(image_name, "wb") as f:
                f.write(response.content)
            img = cv2.imread(image_name)
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            _, thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
            output_image = images_dir + '/thresholded_image_' + str(i) + '.jpg'
            cv2.imwrite(output_image, thresh)
            thresholded_result.append('thresholded_image_' + str(i) + '.jpg')

    aws_access_key_id = os.getenv('AWS_ACCESS_KEY_ID')
    aws_secret_access_key = os.getenv('AWS_SECRET_ACCESS_KEY')
    aws_region = os.getenv('AWS_REGION')

    s3 = boto3.client('s3', aws_access_key_id=aws_access_key_id,
                      aws_secret_access_key=aws_secret_access_key,
                      region_name=aws_region)

    # Upload the thresholded images and make them publicly readable.
    bucket_name = 'dagit-store'
    folder_path = images_dir
    folder_name = images_dir
    for subdir, dirs, files in os.walk(folder_path):
        for file in files:
            file_path = os.path.join(subdir, file)
            s3.upload_file(file_path, bucket_name, f'{folder_name}/{file_path.split("/")[-1]}')
            s3.put_object_acl(Bucket=bucket_name, Key=f'{folder_name}/{file_path.split("/")[-1]}', ACL='public-read')
    url_list = []
    for image in thresholded_result:
        url = "https://dagit-store.s3.ap-south-1.amazonaws.com/" + images_dir + "/" + image
        url_list.append(url)

    print(json.dumps({"thresholded_image_url_links": url_list,
                      "activation_id": str(activation_id),
                      "parts": parts
                      }))

    return ({"thresholded_image_url_links": url_list,
             "activation_id": str(activation_id),
             "parts": parts
             })


if __name__ == "__main__":
    main()