Commit 1381b300 authored by Mario Chirinos

notebook

parent 2543425c
...@@ -99,7 +99,7 @@
"referenceImage = cv2.imread('../resources/img/patron.png', 0)\n",
"referenceImage_rgb = cv2.imread('../resources/img/patron.png', 1)\n",
"\n",
"# Show image\n",
"plt.imshow(cv2.cvtColor(referenceImage_rgb, cv2.COLOR_BGR2RGB))\n",
"plt.show()"
]
...@@ -134,7 +134,7 @@
"sourceImage = cv2.imread('../resources/img/escena0.jpg', 0)\n",
"sourceImage_rgb = cv2.imread('../resources/img/escena0.jpg', 1)\n",
"\n",
"# Show image\n",
"plt.imshow(cv2.cvtColor(sourceImage_rgb, cv2.COLOR_BGR2RGB))\n",
"plt.show()"
]
...@@ -243,26 +243,20 @@
"source": [
"MIN_MATCHES = 30\n",
"\n",
"# Create brute-force matcher object\n",
"bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)\n",
"\n",
"# Compute reference image keypoints and their descriptors\n",
"referenceImagePts, referenceImageDsc = orb.detectAndCompute(referenceImage, None)\n",
"\n",
"# Compute scene keypoints and their descriptors\n",
"sourceImagePts, sourceImageDsc = orb.detectAndCompute(sourceImage, None)\n",
"\n",
"# Match reference descriptors against scene descriptors\n",
"matches = bf.match(referenceImageDsc, sourceImageDsc)\n",
"\n",
"# Sort the matches by distance (best first)\n",
"matches = sorted(matches, key=lambda x: x.distance)\n",
"\n",
"if len(matches) > MIN_MATCHES:\n",
"    # Draw the first MIN_MATCHES matches\n",
"    idxPairs = cv2.drawMatches(referenceImage_rgb, referenceImagePts, sourceImage_rgb, sourceImagePts,\n",
"                               matches[:MIN_MATCHES], 0, flags=2)\n",
"    # Show result\n",
"    plt.figure(figsize=(12, 6))\n",
"    plt.axis('off')\n",
"    plt.imshow(cv2.cvtColor(idxPairs, cv2.COLOR_BGR2RGB))\n",
...@@ -335,36 +329,34 @@
}
],
"source": [
"# Apply the homography transformation if we have enough good matches\n",
"if len(matches) > MIN_MATCHES:\n",
"    # Get the matched keypoint positions\n",
"    sourcePoints = np.float32([referenceImagePts[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)\n",
"    destinationPoints = np.float32([sourceImagePts[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)\n",
"\n",
"    # Obtain the homography matrix\n",
"    homography, mask = cv2.findHomography(sourcePoints, destinationPoints, cv2.RANSAC, 5.0)\n",
"    matchesMask = mask.ravel().tolist()\n",
"\n",
"    # Apply the perspective transformation to the reference image corners\n",
"    h, w = referenceImage.shape\n",
"    corners = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)\n",
"    transformedCorners = cv2.perspectiveTransform(corners, homography)\n",
"\n",
"    # Draw a polygon on the scene image joining the transformed corners\n",
"    sourceImageMarker = sourceImage_rgb.copy()\n",
"    sourceImageMarker = cv2.polylines(sourceImageMarker, [np.int32(transformedCorners)], True,\n",
"                                      (255, 0, 0), 5, cv2.LINE_AA)\n",
"\n",
"else:\n",
"    print(\"Not enough matches are found - %d/%d\" % (len(matches), MIN_MATCHES))\n",
"    matchesMask = None\n",
"\n",
"# Draw the matches\n",
"drawParameters = dict(matchColor=(0, 255, 0), singlePointColor=None, matchesMask=matchesMask, flags=2)\n",
"result = cv2.drawMatches(referenceImage_rgb, referenceImagePts, sourceImageMarker, sourceImagePts, matches, None, **drawParameters)\n",
"print(type(result))\n",
"# Show image\n",
"plt.figure(figsize=(12, 6))\n",
"plt.imshow(cv2.cvtColor(result, cv2.COLOR_BGR2RGB))\n",
"plt.show()"
...@@ -414,16 +406,10 @@
"source": [
"from objloader_simple import *\n",
"import math\n",
"# Camera intrinsic parameters (calibration matrix)\n",
"camera_parameters = np.array([[852,   0, 387],\n",
"                              [  0, 852, 294],\n",
"                              [  0,   0,   1]])\n",
"# Obtain the 3D projection matrix from the homography matrix and the camera parameters\n",
"# projection = projection_matrix(camera_parameters, homography)\n",
"# Load the 3D model from an OBJ file\n",
"obj1 = OBJ('../models/chair.obj', swapyz=True)\n",
"#obj = OBJ('../models/knight.obj', swapyz=True)\n",
"\n"
]
},
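The commented-out `projection_matrix(camera_parameters, homography)` call refers to a helper that is not defined in this excerpt. A common way to build a 3x4 projection from the planar homography and the intrinsic matrix is sketched below; this is a generic formulation, not necessarily the notebook's own implementation:

```python
import numpy as np

def projection_matrix(camera_parameters, homography):
    """Sketch: derive a 3x4 projection matrix from the intrinsics and a planar homography."""
    # Remove the intrinsics from the homography to get an approximate [r1 r2 t]
    homography = homography * (-1)
    rot_and_transl = np.dot(np.linalg.inv(camera_parameters), homography)
    col_1, col_2, col_3 = rot_and_transl[:, 0], rot_and_transl[:, 1], rot_and_transl[:, 2]

    # Normalise so the two rotation columns have (roughly) unit length
    l = np.sqrt(np.linalg.norm(col_1, 2) * np.linalg.norm(col_2, 2))
    rot_1, rot_2, translation = col_1 / l, col_2 / l, col_3 / l

    # Orthogonalise the first two axes, take their cross product as the third
    c = rot_1 + rot_2
    p = np.cross(rot_1, rot_2)
    d = np.cross(c, p)
    rot_1 = (c / np.linalg.norm(c, 2) + d / np.linalg.norm(d, 2)) / np.sqrt(2)
    rot_2 = (c / np.linalg.norm(c, 2) - d / np.linalg.norm(d, 2)) / np.sqrt(2)
    rot_3 = np.cross(rot_1, rot_2)

    # Stack [R|t] and bring it back to pixel coordinates with the intrinsics
    projection = np.stack((rot_1, rot_2, rot_3, translation)).T
    return np.dot(camera_parameters, projection)
```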
{
...@@ -433,7 +419,6 @@
"metadata": {},
"outputs": [],
"source": [
"# Project the cube or 3D model onto the scene image\n",
"def render(img, obj, projection, model, color=False):\n",
"\n",
"    vertices = obj.vertices\n",
...@@ -579,39 +564,6 @@
"    plt.imshow(cv2.cvtColor(augmented, cv2.COLOR_BGR2RGB))\n",
"    plt.axis('off')\n"
]
},
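The body of `render()` is mostly elided by this diff. A minimal sketch of how such a function typically projects the OBJ faces with the 3x4 projection matrix; the scale factor and fill colour below are illustrative assumptions, and the notebook's version may handle per-face colours via the `color` flag:

```python
import numpy as np
import cv2

def render_sketch(img, obj, projection, referenceImage, scale=6):
    """Sketch: project each OBJ face into the image and fill it."""
    h, w = referenceImage.shape[:2]
    for face in obj.faces:
        face_vertices = face[0]
        points = np.array([obj.vertices[v - 1] for v in face_vertices], dtype=np.float64) * scale
        # Centre the model on the reference marker before projecting
        points = np.array([[p[0] + w / 2, p[1] + h / 2, p[2]] for p in points])
        dst = cv2.perspectiveTransform(points.reshape(-1, 1, 3), projection)
        cv2.fillConvexPoly(img, np.int32(dst), (80, 27, 211))
    return img
```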
{
"cell_type": "raw",
"id": "258fea4c-8ef8-4d65-8a8d-c04188ab7e11",
"metadata": {},
"source": [
"# Defining the dimensions of checkerboard\n",
"CHECKERBOARD = (6,9)\n",
"criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)\n",
"\n",
"# Creating vector to store vectors of 3D points for each checkerboard image\n",
"objpoints = []\n",
"# Creating vector to store vectors of 2D points for each checkerboard image\n",
"imgpoints = [] "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a062d89a-e2d1-4e6f-8360-8806ad65d423",
"metadata": {},
"outputs": [],
"source": [
"# Extracting path of individual image stored in a given directory\n",
"images = glob.glob('../images/*.JPG')\n",
"for fname in images:\n",
" img = cv2.imread(fname)\n",
" gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n",
" # Find the chess board corners\n",
" # If desired number of corners are found in the image then ret = true\n",
" ret, corners = cv2.findChessboardCorners(gray, CHECKERBOARD, cv2.CALIB_CB_ADAPTIVE_THRESH + cv2.CALIB_CB_FAST_CHECK + cv2.CALIB_CB_NORMALIZE_IMAGE)\n",
" print(fname, ret)\n"
]
}
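The removed checkerboard cells stop at corner detection. For reference, a sketch of how an intrinsic matrix such as `camera_parameters` could be estimated from those images with standard OpenCV calibration (the directory and detection flags mirror the cell above):

```python
import glob
import cv2
import numpy as np

CHECKERBOARD = (6, 9)
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

# 3D coordinates of the checkerboard corners in the board plane (z = 0)
objp = np.zeros((CHECKERBOARD[0] * CHECKERBOARD[1], 3), np.float32)
objp[:, :2] = np.mgrid[0:CHECKERBOARD[0], 0:CHECKERBOARD[1]].T.reshape(-1, 2)

objpoints, imgpoints = [], []
for fname in glob.glob('../images/*.JPG'):
    img = cv2.imread(fname)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    ret, corners = cv2.findChessboardCorners(
        gray, CHECKERBOARD,
        cv2.CALIB_CB_ADAPTIVE_THRESH + cv2.CALIB_CB_FAST_CHECK + cv2.CALIB_CB_NORMALIZE_IMAGE)
    if ret:
        # Refine corner locations to sub-pixel accuracy and keep the correspondence
        corners = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
        objpoints.append(objp)
        imgpoints.append(corners)

# Estimate the intrinsic matrix and distortion coefficients from all views
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
print(mtx)
```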
],
"metadata": {