"""Tello drone demo: fly with keyboard control and send captured frames
to a local Custom Vision (IoT Edge) classifier for object detection."""
from djitellopy import tello
import KeyPressModule as kp
import cv2
import requests
import json
from PIL import Image, ImageDraw

# --- one-time setup: keyboard listener, drone connection, video stream ---
kp.init()
me = tello.Tello()
me.connect()
print(me.get_battery())  # log charge up front so a low battery is caught before takeoff
me.streamon()

# Latest camera frame: written by main(), read by getKeyboardInput().
# FIX: the original had a bare module-level `global img`, which is a no-op
# statement at module scope; the name must actually be bound here so it
# exists before the first 'z' keypress.
img = None

# Endpoint of the local Custom Vision image-classifier container.
# TODO: replace IP:Port with the real host/port of the inference server.
Local_Custom_Vision_ENDPOINT = "http://IP:Port/image"
# Send an image to the image classifying server
# Return the JSON response from the server with the prediction result
def sendFrameForProcessing(imagePath, imageProcessingEndpoint):
    """POST the image file at *imagePath* to *imageProcessingEndpoint*.

    Returns the decoded JSON prediction result on success, or None when
    the request fails (no response / network error).
    """
    headers = {'Content-Type': 'application/octet-stream'}
    with open(imagePath, mode="rb") as test_image:
        try:
            response = requests.post(imageProcessingEndpoint,
                                     headers=headers, data=test_image)
            print("Response from custom vision service: ("
                  + str(response.status_code) + ") "
                  + json.dumps(response.json()) + "\n")
        except Exception as e:
            # Best-effort boundary: log and signal failure instead of
            # crashing the flight-control loop.
            print(e)
            print("No response from custom vision service")
            return None
    # FIX: the success path never returned the parsed payload, although the
    # header comment promises it and callers index results['predictions'].
    return response.json()
def getKeyboardInput():
    """Map the currently pressed keys to a Tello RC command.

    Returns [lr, fb, ud, yv] velocity components, each in
    {-speed, 0, speed}.  Side effects: 'q' lands, 'e' takes off, and 'z'
    snapshots the current frame and runs it through the Custom Vision
    endpoint, drawing any confident detections.
    """
    lr, fb, ud, yv = 0, 0, 0, 0
    speed = 50

    if kp.getKey("LEFT"):
        lr = -speed
    elif kp.getKey("RIGHT"):
        lr = speed

    if kp.getKey("UP"):
        fb = speed
    elif kp.getKey("DOWN"):
        fb = -speed

    if kp.getKey("w"):
        ud = speed
    elif kp.getKey("s"):
        ud = -speed

    if kp.getKey("a"):
        yv = -speed
    elif kp.getKey("d"):
        yv = speed

    if kp.getKey("q"):
        me.land()
    if kp.getKey("e"):
        me.takeoff()

    if kp.getKey("z"):
        # Snapshot the live frame, then ask the classifier what it sees.
        cv2.imwrite('Resources/Images/capture.jpg', img)
        # open and detect the captured image
        results = sendFrameForProcessing('Resources/Images/capture.jpg',
                                         Local_Custom_Vision_ENDPOINT)
        # FIX: sendFrameForProcessing returns None on failure; the original
        # indexed it unconditionally, which would crash the control loop.
        if results:
            for prediction in results['predictions']:
                # Only report detections the model is reasonably sure about.
                if prediction['probability'] > 0.5:
                    print("\t" + prediction['tagName']
                          + ": {0:.2f}%".format(prediction['probability'] * 100))
                    # Bounding box coords are normalized [0, 1]; scale to
                    # the 1280x720 stream resolution before drawing.
                    bbox = prediction['boundingBox']
                    im = Image.open('Resources/Images/capture.jpg')
                    draw = ImageDraw.Draw(im)
                    draw.rectangle(
                        [int(bbox['left'] * 1280),
                         int(bbox['top'] * 720),
                         int((bbox['left'] + bbox['width']) * 1280),
                         int((bbox['top'] + bbox['height']) * 720)],
                        outline='red', width=5)
                    im.save('Resources/Images/results.jpg')
                    annotated = cv2.imread('Resources/Images/results.jpg',
                                           cv2.IMREAD_COLOR)
                    # Show the annotated capture briefly, then dismiss it.
                    cv2.imshow("results", annotated)
                    cv2.waitKey(1000)
                    cv2.destroyWindow("results")
        # NOTE(review): the mangled original contained residue of an unused
        # `target` flag and a "capture" window name here; both were dead —
        # confirm against the original source if that behavior mattered.

    return [lr, fb, ud, yv]
# Send an image to the custom vision server
# NOTE(review): the two statements below are orphaned fragments — bare
# expressions with no effect, and `response` is not defined at module
# scope, so the second would raise NameError if ever executed.  They look
# like residue of a second, "local" variant of sendFrameForProcessing lost
# when this file was mangled; confirm against the original source and
# either restore that function or delete these lines.
"Response from local custom vision service: ("
response.json()
def main():
    """Run the keyboard-controlled flight loop and display the video feed."""
    # FIX: `img` is shared with getKeyboardInput(); without this declaration
    # the assignments below would bind a function-local and the 'z' snapshot
    # handler would never see a fresh frame.
    global img
    print("Tello Custom Vision IoT Edge Demo")
    while True:
        vals = getKeyboardInput()
        me.send_rc_control(vals[0], vals[1], vals[2], vals[3])
        img = me.get_frame_read().frame
        # NOTE(review): resize target inferred from the 1280x720 scaling used
        # when drawing bounding boxes — confirm against the original source.
        img = cv2.resize(img, (1280, 720))
        # Overlay the drone telemetry; color/thickness were lost in the
        # mangled source — TODO confirm (255, 0, 255) / 1 against the original.
        cv2.putText(img, str(me.get_current_state()), (10, 60),
                    cv2.FONT_HERSHEY_PLAIN, 0.9, (255, 0, 255), 1)
        cv2.imshow("image", img)
        cv2.waitKey(1)


if __name__ == "__main__":
    main()