import cv2
import math
import io
import os
import base64
import zipfile

import numpy as np
from tqdm import tqdm
from pwn import *

def Img_Outline(original_img):
    gray_img = cv2.cvtColor(original_img, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray_img, (9, 9), 0)
    _, RedThresh = cv2.threshold(blurred, 220, 255, cv2.THRESH_BINARY)
    h, w = original_img.shape[:2]
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
    # Morphological reconstruction: seed a marker on the image border and
    # repeatedly dilate it, clipped to the thresholded image, until it converges.
    marker = np.zeros_like(gray_img)
    marker[0, :] = 255
    marker[-1, :] = 255
    marker[:, 0] = 255
    marker[:, -1] = 255
    while True:
        marker_pre = marker
        dilation = cv2.dilate(marker, kernel=kernel)
        marker = np.min((dilation, RedThresh), axis=0)
        if (marker_pre == marker).all():
            break
    # Clean up small specks and force a solid white frame around the result
    marker = cv2.morphologyEx(marker, cv2.MORPH_OPEN, kernel)
    marker[0:5, :] = 255
    marker[-5:, :] = 255
    marker[:, 0:5] = 255
    marker[:, -5:] = 255
    return original_img, gray_img, marker

def findContours_img(original_img, marker):
    contours, hierarchy = cv2.findContours(marker, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    # The largest contour is the forced white frame; the target region is the next largest
    c = sorted(contours, key=cv2.contourArea, reverse=True)[1]
    rect = cv2.minAreaRect(c)
    box = np.int0(cv2.boxPoints(rect))
    draw_img = cv2.drawContours(original_img.copy(), [box], -1, (0, 0, 255), 3)
    return box, draw_img

def Perspective_transform(box, original_img):
    # Target width/height from the side lengths of the detected box
    orignal_W = math.ceil(np.sqrt((box[3][1] - box[2][1])**2 + (box[3][0] - box[2][0])**2))
    orignal_H = math.ceil(np.sqrt((box[3][1] - box[0][1])**2 + (box[3][0] - box[0][0])**2))

    pts1 = np.float32([box[0], box[1], box[2], box[3]])
    pts2 = np.float32([[int(orignal_W + 1), int(orignal_H + 1)],
                       [0, int(orignal_H + 1)],
                       [0, 0],
                       [int(orignal_W + 1), 0]])

    M = cv2.getPerspectiveTransform(pts1, pts2)
    result_img = cv2.warpPerspective(original_img, M, (int(orignal_W + 3), int(orignal_H + 1)))
    # If the warp came out landscape, rotate it 90 degrees (transpose + horizontal flip)
    if orignal_H < orignal_W:
        result_img = cv2.flip(cv2.transpose(result_img), 1)
    return result_img

def img_similarity(img1, img2):
    try:
        # ORB keypoints, brute-force Hamming matching, then Lowe's 0.75 ratio test;
        # the score is the fraction of matches that survive the test.
        orb = cv2.ORB_create()
        kp1, des1 = orb.detectAndCompute(img1, None)
        kp2, des2 = orb.detectAndCompute(img2, None)
        bf = cv2.BFMatcher(cv2.NORM_HAMMING)
        matches = bf.knnMatch(des1, trainDescriptors=des2, k=2)
        good = [m for (m, n) in matches if m.distance < 0.75 * n.distance]
        similary = len(good) / len(matches)
        return similary
    except Exception:
        print('Unable to compute the similarity of the two images')
        exit()

if __name__ == "__main__":
    if not os.path.exists('./result'):
        os.makedirs('result')

    final = ''

    # Template images used to score each rectified frame
    temp1 = cv2.imread('./temp1.webp', cv2.IMREAD_GRAYSCALE)
    temp2 = cv2.imread('./temp2.webp', cv2.IMREAD_GRAYSCALE)

    # Fetch the base64-encoded zip of frames from the remote service
    r = remote('week-3.hgame.lwsec.cn', 30802)
    r.recvuntil('...')
    r.send('\n')
    b64 = r.recvline()[:-1]
    with open('row.zip', 'wb') as out:
        base64.decode(io.BytesIO(b64), out)

    zipfile_path = './row.zip'
    with zipfile.ZipFile(zipfile_path, mode='r') as zfile:
        nWaitTime = 1
        for name in tqdm(zfile.namelist()):
            if '.webp' not in name:
                continue
            with zfile.open(name, mode='r') as image_file:
                content = image_file.read()
            image = np.asarray(bytearray(content), dtype='uint8')
            image = cv2.imdecode(image, cv2.IMREAD_ANYCOLOR)

            # Outline -> contour box -> perspective rectification, then white out
            # a fixed region of the rectified crop before scoring
            original_img, gray_img, RedThresh = Img_Outline(image)
            box, draw_img = findContours_img(original_img, RedThresh)
            result_img = Perspective_transform(box, original_img)
            result_img[50:355, 22:280] = 255.0
            cv2.imwrite(f"./result/{name}", result_img)

            # Score against both templates; a combined score above 0.04 is read as a '1' bit
            result_img = cv2.cvtColor(result_img, cv2.COLOR_BGR2GRAY)
            similarity1 = img_similarity(temp1, result_img)
            similarity2 = img_similarity(temp2, result_img)
            if (similarity1 + similarity2) > 0.04:
                final += '1'
            else:
                final += '0'

    print(final)
    r.sendline(final)
    r.interactive()
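
To sanity-check the rectification and matching steps offline before talking to the remote, the same chain can be run on a single frame pulled out of row.zip by hand. The sketch below assumes the script above is saved as solve.py and one extracted frame was saved as sample.webp; both filenames are placeholders, not part of the original exploit.

import cv2
# Reuse the helpers defined above (solve.py is a placeholder module name)
from solve import Img_Outline, findContours_img, Perspective_transform, img_similarity

# One locally saved challenge frame (placeholder filename)
frame = cv2.imread('./sample.webp', cv2.IMREAD_ANYCOLOR)

# Same chain as the main loop: outline -> contour box -> rectify -> blank the fixed region
original_img, gray_img, marker = Img_Outline(frame)
box, _ = findContours_img(original_img, marker)
rectified = Perspective_transform(box, original_img)
rectified[50:355, 22:280] = 255
rectified = cv2.cvtColor(rectified, cv2.COLOR_BGR2GRAY)

# Score against both templates, mirroring the 0.04 decision threshold used above
t1 = cv2.imread('./temp1.webp', cv2.IMREAD_GRAYSCALE)
t2 = cv2.imread('./temp2.webp', cv2.IMREAD_GRAYSCALE)
score = img_similarity(t1, rectified) + img_similarity(t2, rectified)
print(score, '1' if score > 0.04 else '0')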