diff --git a/README.md b/README.md
index a87224e88..615143670 100644
--- a/README.md
+++ b/README.md
@@ -21,3 +21,90 @@ Checkout this [blog post with more demos](http://blog.mapillary.com/update/2014/
[Building the library]: https://docs.opensfm.org/building.html (OpenSfM building instructions)
[Running a reconstruction]: https://docs.opensfm.org/using.html (OpenSfM usage)
[Documentation]: https://docs.opensfm.org (OpenSfM documentation)
+
+## SIFT_GPU Update
+
+### Changes
+* added SIFT_GPU parameters to the config file.
+* added a SiftGpu class.
+* added a feature detection function to features.py.
+* added matching functions to matching.py.
+* updated the commands to support the SIFT_GPU option.
+* added functions to save and load SIFT_GPU keypoints in dataset.py.
+
+### Requirements
+This update depends on the silx Python package, which in turn requires pyopencl.
+
+Please see the silx installation guide [here](http://www.silx.org/doc/silx/dev/install.html).
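+
+Assuming a working OpenCL driver is already present, a minimal install sketch (exact steps depend on your platform; see the guide above):
+
+```
+pip install pyopencl
+pip install silx
+```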
+
+### Usage
+To verify that all the packages were installed correctly, try running the test script, which matches two of the images in the Berlin dataset. The script lives in the repository root as `test_sift_gpu.py`.
+
+To run it:
+```
+cd OpenSfm_sift_gpu
+python test_sift_gpu.py
+```
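+
+The test script exercises the same `SiftGpu` API added in `opensfm/sift_gpu.py`; a minimal sketch of that API, mirroring what the test does:
+
+```
+import cv2
+from opensfm.sift_gpu import SiftGpu
+
+img1 = cv2.imread('data/berlin/images/01.jpg')
+img2 = cv2.imread('data/berlin/images/02.jpg')
+
+sift = SiftGpu(img1)                   # builds the OpenCL plans from a template image
+kp1 = sift.detect_image(img1)          # recarray with x, y, scale, angle, desc fields
+kp2 = sift.detect_image(img2)
+matches = sift.match_images(kp1, kp2)  # (n, 2) index array into kp1/kp2
+```
+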
+### Run OpenSfM
+
+Once all the dependencies are installed, run the OpenSfM pipeline with:
+```
+cd OpenSfm_sift_gpu
+./bin/opensfm_run_all data/berlin_gpu
+```
+
+To inspect the resulting reconstruction, start a local web server:
+
+`python3 -m http.server`
+
+and open this [link](http://localhost:8000/viewer/reconstruction.html#file=/data/berlin_gpu/reconstruction.meshed.json) to view the reconstruction.
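+
+To enable SIFT_GPU on your own dataset, override the feature options in that dataset's config.yaml. A minimal sketch, based on the data/berlin_gpu/config.yaml added in this update (matcher_type: SIFT_GPU is optional and enables GPU matching as well):
+
+```
+feature_type: SIFT_GPU    # GPU feature detection
+matcher_type: SIFT_GPU    # optional: GPU matching instead of FLANN
+processes: 1              # SIFT_GPU does not work in parallel
+feature_process_size: -1  # use the original image size
+```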
+
+### Benchmark
+Below are results of the SIFT_GPU implementation.
+
+#### Feature detection
+
+![SIFT_GPU features on image 1](misc/img1_features.png)
+
+![SIFT_GPU features on image 1, zoomed](misc/img1_features_zoom.png)
+
+![SIFT_GPU features on image 2](misc/img2_features.png)
+
+![SIFT_GPU features on image 2, zoomed](misc/img2_features_zoom.png)
+#### Feature Matching
+
+![SIFT_GPU matching](misc/sift_gpu_matching.png)
+
+![SIFT matching, for comparison](misc/sift_matching_2.png)
+#### SFM Creation
+
+![Berlin reconstruction with SIFT_GPU](misc/sfm_berlin_gpu.png)
+
+![Berlin reconstruction, for comparison](misc/sfm_berlin_2.png)
+#### Speed
+Feature detection on a 3264x2448 image on a GTX 1080 Ti took 0.28 sec.
+
+Feature matching took 0.025 sec.
+
+### Contact Me
+
+For more information, you can contact me via [email](mailto:cojosef@gmail.com).
+
diff --git a/data/berlin_gpu/config.yaml b/data/berlin_gpu/config.yaml
new file mode 100644
index 000000000..34466c955
--- /dev/null
+++ b/data/berlin_gpu/config.yaml
@@ -0,0 +1,11 @@
+
+# OpenSfM will use the default parameters from opensfm/config.py
+# Set here any parameter that you want to override for this dataset
+# For example:
+processes: 1 # Number of threads to use; SIFT_GPU does not work in parallel
+depthmap_min_consistent_views: 2 # Min number of views that should reconstruct a point for it to be valid
+depthmap_save_debug_files: yes # Save debug files with partial reconstruction results
+feature_process_size: -1 # Use the original image size
+bundle_use_gcp: yes
+feature_type: SIFT_GPU # Feature type (AKAZE, SURF, SIFT, HAHOG, ORB, SIFT_GPU)
+
diff --git a/data/berlin_gpu/ground_control_points.json b/data/berlin_gpu/ground_control_points.json
new file mode 100644
index 000000000..11b375ec1
--- /dev/null
+++ b/data/berlin_gpu/ground_control_points.json
@@ -0,0 +1,64 @@
+{
+ "points": [
+ {
+ "position": {
+ "latitude": 52.51926834404209,
+ "altitude": 14.946108041331172,
+ "longitude": 13.400703631118825
+ },
+ "id": "0",
+ "observations": [
+ {
+ "projection": [
+ -0.015689,
+ -0.0625362
+ ],
+ "shot_id": "02.jpg"
+ },
+ {
+ "projection": [
+ -0.0624397,
+ 0.0440716
+ ],
+ "shot_id": "03.jpg"
+ }
+ ]
+ },
+ {
+ "id": "1",
+ "observations": [
+ {
+ "projection": [
+ 0.049848,
+ 0.144291
+ ],
+ "shot_id": "02.jpg"
+ },
+ {
+ "projection": [
+ 0.0105317,
+ 0.244958
+ ],
+ "shot_id": "01.jpg"
+ }
+ ]
+ },
+ {
+ "position": {
+ "latitude": 52.5192651808067,
+ "altitude": 12.859175567515194,
+ "longitude": 13.400764257288497
+ },
+ "id": "3",
+ "observations": [
+ {
+ "projection": [
+ 0.0608681,
+ -0.00730461
+ ],
+ "shot_id": "02.jpg"
+ }
+ ]
+ }
+ ]
+}
\ No newline at end of file
diff --git a/data/berlin_gpu/images/01.jpg b/data/berlin_gpu/images/01.jpg
new file mode 100644
index 000000000..7b3f37b66
Binary files /dev/null and b/data/berlin_gpu/images/01.jpg differ
diff --git a/data/berlin_gpu/images/02.jpg b/data/berlin_gpu/images/02.jpg
new file mode 100644
index 000000000..73b1af915
Binary files /dev/null and b/data/berlin_gpu/images/02.jpg differ
diff --git a/data/berlin_gpu/images/03.jpg b/data/berlin_gpu/images/03.jpg
new file mode 100644
index 000000000..d2d37db62
Binary files /dev/null and b/data/berlin_gpu/images/03.jpg differ
diff --git a/data/berlin_gpu/masks/01.jpg.png b/data/berlin_gpu/masks/01.jpg.png
new file mode 100644
index 000000000..59cee8b11
Binary files /dev/null and b/data/berlin_gpu/masks/01.jpg.png differ
diff --git a/data/berlin_gpu/masks/02.jpg.png b/data/berlin_gpu/masks/02.jpg.png
new file mode 100644
index 000000000..587354dc5
Binary files /dev/null and b/data/berlin_gpu/masks/02.jpg.png differ
diff --git a/data/berlin_gpu/masks/03.jpg.png b/data/berlin_gpu/masks/03.jpg.png
new file mode 100644
index 000000000..4c27e47ea
Binary files /dev/null and b/data/berlin_gpu/masks/03.jpg.png differ
diff --git a/misc/img1_features.png b/misc/img1_features.png
new file mode 100644
index 000000000..7feb1a4ed
Binary files /dev/null and b/misc/img1_features.png differ
diff --git a/misc/img1_features_zoom.png b/misc/img1_features_zoom.png
new file mode 100644
index 000000000..c71e3c3b3
Binary files /dev/null and b/misc/img1_features_zoom.png differ
diff --git a/misc/img2_features.png b/misc/img2_features.png
new file mode 100644
index 000000000..d68891b29
Binary files /dev/null and b/misc/img2_features.png differ
diff --git a/misc/img2_features_zoom.png b/misc/img2_features_zoom.png
new file mode 100644
index 000000000..12bfaf8b6
Binary files /dev/null and b/misc/img2_features_zoom.png differ
diff --git a/misc/sfm_berlin_2.png b/misc/sfm_berlin_2.png
new file mode 100644
index 000000000..cf76cbfad
Binary files /dev/null and b/misc/sfm_berlin_2.png differ
diff --git a/misc/sfm_berlin_gpu.png b/misc/sfm_berlin_gpu.png
new file mode 100644
index 000000000..0ebd89fb8
Binary files /dev/null and b/misc/sfm_berlin_gpu.png differ
diff --git a/misc/sift_gpu_matching.png b/misc/sift_gpu_matching.png
new file mode 100644
index 000000000..e68ed0bdc
Binary files /dev/null and b/misc/sift_gpu_matching.png differ
diff --git a/misc/sift_matching_2.png b/misc/sift_matching_2.png
new file mode 100644
index 000000000..7057e7c1c
Binary files /dev/null and b/misc/sift_matching_2.png differ
diff --git a/opensfm/commands/detect_features.py b/opensfm/commands/detect_features.py
index 271af4b9e..479bbb825 100644
--- a/opensfm/commands/detect_features.py
+++ b/opensfm/commands/detect_features.py
@@ -69,12 +69,17 @@ def detect(args):
data.feature_type().upper(), image))
start = timer()
-
- p_unmasked, f_unmasked, c_unmasked = features.extract_features(
- data.load_image(image), data.config)
+    if data.config["feature_type"] == "SIFT_GPU":
+        # SIFT_GPU also returns the raw GPU keypoints so they can be
+        # saved below and reused for GPU matching.
+        unmasked, keypoints = features.extract_features(
+            data.load_image(image), data.config)
+        p_unmasked, f_unmasked, c_unmasked = unmasked
+    else:
+        p_unmasked, f_unmasked, c_unmasked = features.extract_features(
+            data.load_image(image), data.config)
+ else:
+ p_unmasked, f_unmasked, c_unmasked = features.extract_features(
+ data.load_image(image), data.config)
fmask = data.load_features_mask(image, p_unmasked)
-
+    if data.config["feature_type"] == "SIFT_GPU":
+        # Keep the GPU keypoints aligned with the masked features.
+        keypoints = keypoints[fmask]
p_unsorted = p_unmasked[fmask]
f_unsorted = f_unmasked[fmask]
c_unsorted = c_unmasked[fmask]
@@ -88,6 +93,9 @@ def detect(args):
p_sorted = p_unsorted[order, :]
f_sorted = f_unsorted[order, :]
c_sorted = c_unsorted[order, :]
+ if data.config["feature_type"] == "SIFT_GPU":
+ keypoints_sorted = keypoints[order]
+ data.save_gpu_features(image, keypoints_sorted)
data.save_features(image, p_sorted, f_sorted, c_sorted)
if need_words:
diff --git a/opensfm/config.py b/opensfm/config.py
index 66a09acf5..35ec484b3 100644
--- a/opensfm/config.py
+++ b/opensfm/config.py
@@ -7,7 +7,7 @@
default_focal_prior: 0.85
# Params for features
-feature_type: HAHOG # Feature type (AKAZE, SURF, SIFT, HAHOG, ORB)
+feature_type: HAHOG # Feature type (AKAZE, SURF, SIFT, HAHOG, ORB, SIFT_GPU)
feature_root: 1 # If 1, apply square root mapping to features
feature_min_frames: 4000 # If fewer frames are detected, sift_peak_threshold/surf_hessian_threshold is reduced.
feature_process_size: 2048 # Resize the image if its size is larger than specified. Set to -1 for original size
@@ -17,6 +17,13 @@
sift_peak_threshold: 0.1 # Smaller value -> more features
sift_edge_threshold: 10 # See OpenCV doc
+# Params for SIFT_GPU
+# More information on Feature detection http://www.silx.org/doc/silx/dev/_modules/silx/opencl/sift/plan.html#SiftPlan
+# More information on Feature Matching http://www.silx.org/doc/silx/dev/_modules/silx/opencl/sift/match.html
+sift_gpu_init_sigma: 1.6 # blurring width, you should have good reasons to modify the 1.6 default value
+pix_per_keypoints: 10 # Number of keypoints pre-allocated: 1 per 10 pixels
+sift_gpu_device_type: GPU # Can be 'CPU' or 'GPU'
+
# Params for SURF
surf_hessian_threshold: 3000 # Smaller value -> more features
surf_n_octaves: 4 # See OpenCV doc
@@ -39,7 +46,7 @@
# Params for general matching
lowes_ratio: 0.8 # Ratio test for matches
-matcher_type: FLANN # FLANN, BRUTEFORCE, or WORDS
+matcher_type: FLANN # FLANN, BRUTEFORCE, SIFT_GPU or WORDS
symmetric_matching: yes # Match symmetricly or one-way
# Params for FLANN matching
diff --git a/opensfm/dataset.py b/opensfm/dataset.py
index e0c8e3b77..28e373985 100644
--- a/opensfm/dataset.py
+++ b/opensfm/dataset.py
@@ -33,6 +33,7 @@ class DataSet(object):
It is possible to store data remotely or in different formats
by subclassing this class and overloading its methods.
"""
+
def __init__(self, data_path):
"""Init dataset associated to a folder."""
self.data_path = data_path
@@ -316,8 +317,8 @@ def _save_features(self, filepath, points, descriptors, colors=None):
features.save_features(filepath, points, descriptors, colors, self.config)
def features_exist(self, image):
- return os.path.isfile(self._feature_file(image)) or\
- os.path.isfile(self._feature_file_legacy(image))
+ return os.path.isfile(self._feature_file(image)) or \
+ os.path.isfile(self._feature_file_legacy(image))
def load_features(self, image):
if os.path.isfile(self._feature_file_legacy(image)):
@@ -327,9 +328,27 @@ def load_features(self, image):
def save_features(self, image, points, descriptors, colors):
self._save_features(self._feature_file(image), points, descriptors, colors)
+ def save_gpu_features(self, image, keypoints):
+ io.mkdir_p(self._feature_path())
+        path = self._gpu_feature_file(image)
+ # Store data (serialize)
+ with open(path, 'wb+') as handle:
+ pickle.dump(keypoints, handle, protocol=pickle.HIGHEST_PROTOCOL)
+
+ def load_gpu_features(self, image):
+ path = self._gpu_feature_file(image)
+ if os.path.isfile(path):
+ with open(path, 'rb') as handle:
+ keypoints = pickle.load(handle)
+ return keypoints
+ return None
+
def _words_file(self, image):
return os.path.join(self._feature_path(), image + '.words.npz')
+ def _gpu_feature_file(self, image):
+        return os.path.join(self._feature_path(), image.rsplit(".", 1)[0] + '_gpu.pkl')
+
def words_exist(self, image):
return os.path.isfile(self._words_file(image))
diff --git a/opensfm/feature_loading.py b/opensfm/feature_loading.py
index 5fec6272d..16b575780 100644
--- a/opensfm/feature_loading.py
+++ b/opensfm/feature_loading.py
@@ -10,7 +10,6 @@
from opensfm import features as ft
-
logger = logging.getLogger(__name__)
@@ -103,3 +102,24 @@ def _load_features_nocache(self, data, image):
else:
points = np.array(points[:, :3], dtype=float)
return points, features, colors
+
+    def create_gpu_keypoints_from_features(self, p1, f1):
+        """Rebuild silx-style GPU keypoint records from saved points and descriptors."""
+        if f1.dtype != np.uint8:
+            # Descriptors are stored as root-mapped floats; rescale them
+            # back to the uint8 range expected by the GPU matcher.
+            f1 = np.uint8(f1 * 512)
+ total_size = len(p1)
+ dtype_kp = np.dtype([('x', np.float32),
+ ('y', np.float32),
+ ('scale', np.float32),
+ ('angle', np.float32),
+ ('desc', (np.uint8, 128))
+ ])
+ output = np.recarray(shape=(total_size,), dtype=dtype_kp)
+ output[:total_size].x = p1[:, 0]
+ output[:total_size].y = p1[:, 1]
+ output[:total_size].scale = p1[:, 2]
+        # angle is not present in the loaded points (only x, y, scale are
+        # kept), and it is not needed by the GPU matcher, so it is left unset.
+        # output[:total_size].angle = p1[:, 3]
+ output[:total_size].desc = f1
+ return output
diff --git a/opensfm/features.py b/opensfm/features.py
index 80153e3ca..eb0f0a181 100644
--- a/opensfm/features.py
+++ b/opensfm/features.py
@@ -7,11 +7,21 @@
import cv2
from opensfm import context
+from opensfm.sift_gpu import SiftGpu
from opensfm import pyfeatures
logger = logging.getLogger(__name__)
+def check_gpu_initialization(config, image, data=None):
+    """Create the global SiftGpu instance once; building the OpenCL plan is expensive."""
+    if 'sift_gpu' not in globals():
+        global sift_gpu
+        if data is not None:
+            sift_gpu = SiftGpu.sift_gpu_from_config(config, data.load_image(image))
+        else:
+            sift_gpu = SiftGpu.sift_gpu_from_config(config, image)
+
+
def resized_image(image, config):
"""Resize image to feature_process_size."""
max_size = config['feature_process_size']
@@ -33,6 +43,19 @@ def root_feature(desc, l2_normalization=False):
return desc
+def root_feature_sift_gpu(desc, l2_normalization=False):
+    """Square-root mapping of SIFT_GPU descriptors (max-normalized variant of root_feature)."""
+ if l2_normalization:
+ s2 = np.linalg.norm(desc, axis=1)
+ idx = np.where(s2 == 0)
+ s2[idx] = 1
+ desc = (desc.T / s2).T
+ s = np.max(desc, 1)
+ idx = np.where(s == 0)
+ s[idx] = 1
+ desc = np.sqrt(desc.T / s).T
+ return desc
+
+
def root_feature_surf(desc, l2_normalization=False, partial=False):
"""
Experimental square root mapping of surf-like feature, only work for 64-dim surf now
@@ -40,7 +63,7 @@ def root_feature_surf(desc, l2_normalization=False, partial=False):
if desc.shape[1] == 64:
if l2_normalization:
s2 = np.linalg.norm(desc, axis=1)
- desc = (desc.T/s2).T
+ desc = (desc.T / s2).T
if partial:
ii = np.array([i for i in range(64) if (i % 4 == 2 or i % 4 == 3)])
else:
@@ -50,7 +73,7 @@ def root_feature_surf(desc, l2_normalization=False, partial=False):
# s_sub = np.sum(desc_sub, 1) # This partial normalization gives slightly better results for AKAZE surf
s_sub = np.sum(np.abs(desc), 1)
desc_sub = np.sqrt(desc_sub.T / s_sub).T
- desc[:, ii] = desc_sub*desc_sub_sign
+ desc[:, ii] = desc_sub * desc_sub_sign
return desc
@@ -125,6 +148,22 @@ def extract_features_sift(image, config):
return points, desc
+def extract_features_sift_gpu(image, config):
+    check_gpu_initialization(config, image)
+    keypoints = sift_gpu.detect_image(image)
+    # Drop keypoints whose descriptors are all zeros.
+    idx = np.where(np.sum(keypoints.desc, 1) != 0)
+    keypoints = keypoints[idx]
+
+ points = np.concatenate([np.expand_dims(keypoints[:].x, axis=1),
+ np.expand_dims(keypoints[:].y, axis=1),
+ np.expand_dims(keypoints[:].scale, axis=1),
+ np.expand_dims(keypoints[:].angle, axis=1)], axis=1)
+ desc = np.array(keypoints[:].desc, dtype=np.float32)
+ if config['feature_root']:
+ desc = root_feature_sift_gpu(desc)
+ return points, desc, keypoints
+
+
def extract_features_surf(image, config):
surf_hessian_threshold = config['surf_hessian_threshold']
if context.OPENCV3:
@@ -209,10 +248,10 @@ def extract_features_akaze(image, config):
def extract_features_hahog(image, config):
t = time.time()
points, desc = pyfeatures.hahog(image.astype(np.float32) / 255, # VlFeat expects pixel values between 0, 1
- peak_threshold=config['hahog_peak_threshold'],
- edge_threshold=config['hahog_edge_threshold'],
- target_num_features=config['feature_min_frames'],
- use_adaptive_suppression=config['feature_use_adaptive_suppression'])
+ peak_threshold=config['hahog_peak_threshold'],
+ edge_threshold=config['hahog_edge_threshold'],
+ target_num_features=config['feature_min_frames'],
+ use_adaptive_suppression=config['feature_use_adaptive_suppression'])
if config['feature_root']:
desc = np.sqrt(desc)
@@ -265,7 +304,7 @@ def extract_features(color_image, config):
assert len(color_image.shape) == 3
color_image = resized_image(color_image, config)
image = cv2.cvtColor(color_image, cv2.COLOR_RGB2GRAY)
-
+ keypoints = None
feature_type = config['feature_type'].upper()
if feature_type == 'SIFT':
points, desc = extract_features_sift(image, config)
@@ -277,26 +316,30 @@ def extract_features(color_image, config):
points, desc = extract_features_hahog(image, config)
elif feature_type == 'ORB':
points, desc = extract_features_orb(image, config)
+ elif feature_type == 'SIFT_GPU':
+ points, desc, keypoints = extract_features_sift_gpu(image, config)
else:
raise ValueError('Unknown feature type '
- '(must be SURF, SIFT, AKAZE, HAHOG or ORB)')
+ '(must be SURF, SIFT, AKAZE, HAHOG, SIFT_GPU or ORB)')
xs = points[:, 0].round().astype(int)
ys = points[:, 1].round().astype(int)
colors = color_image[ys, xs]
-
+ if keypoints is not None:
+ return normalize_features(points, desc, colors,
+ image.shape[1], image.shape[0]), keypoints
return normalize_features(points, desc, colors,
image.shape[1], image.shape[0])
def build_flann_index(features, config):
- FLANN_INDEX_LINEAR = 0
- FLANN_INDEX_KDTREE = 1
- FLANN_INDEX_KMEANS = 2
- FLANN_INDEX_COMPOSITE = 3
- FLANN_INDEX_KDTREE_SINGLE = 4
- FLANN_INDEX_HIERARCHICAL = 5
- FLANN_INDEX_LSH = 6
+ FLANN_INDEX_LINEAR = 0
+ FLANN_INDEX_KDTREE = 1
+ FLANN_INDEX_KMEANS = 2
+ FLANN_INDEX_COMPOSITE = 3
+ FLANN_INDEX_KDTREE_SINGLE = 4
+ FLANN_INDEX_HIERARCHICAL = 5
+ FLANN_INDEX_LSH = 6
if features.dtype.type is np.float32:
algorithm_type = config['flann_algorithm'].upper()
diff --git a/opensfm/matching.py b/opensfm/matching.py
index 62e8be0b9..b362388ac 100644
--- a/opensfm/matching.py
+++ b/opensfm/matching.py
@@ -12,11 +12,20 @@
from opensfm import multiview
from opensfm import pairs_selection
from opensfm import feature_loader
-
+from opensfm.sift_gpu import SiftGpu
logger = logging.getLogger(__name__)
+def check_gpu_initialization(config, image, data=None):
+    """Create the global SiftGpu instance once; building the OpenCL plan is expensive."""
+    if 'sift_gpu' not in globals():
+        global sift_gpu
+        if data is not None:
+            sift_gpu = SiftGpu.sift_gpu_from_config(config, data.load_image(image))
+        else:
+            sift_gpu = SiftGpu.sift_gpu_from_config(config, image)
+
+
def clear_cache():
feature_loader.instance.clear_cache()
@@ -30,9 +39,9 @@ def match_images(data, ref_images, cand_images):
non-symmetric matching options like WORDS. Data will be
stored in i matching only.
"""
-
+ config = data.config
# Get EXIFs data
- all_images = list(set(ref_images+cand_images))
+ all_images = list(set(ref_images + cand_images))
exifs = {im: data.load_exif(im) for im in all_images}
# Generate pairs for matching
@@ -45,7 +54,6 @@ def match_images(data, ref_images, cand_images):
def match_images_with_pairs(data, exifs, ref_images, pairs):
""" Perform pair matchings given pairs. """
-
# Store per each image in ref for processing
per_image = {im: [] for im in ref_images}
for im1, im2 in pairs:
@@ -113,7 +121,6 @@ def save_matches(data, images_ref, matched_pairs):
""" Given pairwise matches (image 1, image 2) - > matches,
save them such as only {image E images_ref} will store the matches.
"""
-
matches_per_im1 = {im: {} for im in images_ref}
for (im1, im2), m in matched_pairs.items():
matches_per_im1[im1][im2] = m
@@ -142,11 +149,9 @@ def match_unwrap_args(args):
im1, candidates, ctx = args
im1_matches = {}
- p1, f1, _ = feature_loader.instance.load_points_features_colors(ctx.data, im1)
camera1 = ctx.cameras[ctx.exifs[im1]['camera']]
for im2 in candidates:
- p2, f2, _ = feature_loader.instance.load_points_features_colors(ctx.data, im2)
camera2 = ctx.cameras[ctx.exifs[im2]['camera']]
im1_matches[im2] = match(im1, im2, camera1, camera2, ctx.data)
@@ -196,6 +201,14 @@ def match(im1, im2, camera1, camera2, data):
matches = match_brute_force_symmetric(f1, f2, config)
else:
matches = match_brute_force(f1, f2, config)
+    elif matcher_type == 'SIFT_GPU':
+        check_gpu_initialization(config, im1, data)
+        k1 = data.load_gpu_features(im1)
+        k2 = data.load_gpu_features(im2)
+        # Fall back to rebuilding GPU keypoints from the saved features
+        # when the pickled keypoints are missing.
+        if k1 is None:
+            k1 = feature_loader.instance.create_gpu_keypoints_from_features(p1, f1)
+        if k2 is None:
+            k2 = feature_loader.instance.create_gpu_keypoints_from_features(p2, f2)
+        matches = sift_gpu.match_images(k1, k2)
else:
raise ValueError("Invalid matcher_type: {}".format(matcher_type))
@@ -292,7 +305,7 @@ def match_flann(index, f2, config):
"""
search_params = dict(checks=config['flann_checks'])
results, dists = index.knnSearch(f2, 2, params=search_params)
- squared_ratio = config['lowes_ratio']**2 # Flann returns squared L2 distances
+ squared_ratio = config['lowes_ratio'] ** 2 # Flann returns squared L2 distances
good = dists[:, 0] < squared_ratio * dists[:, 1]
return list(zip(results[good, 0], good.nonzero()[0]))
@@ -321,7 +334,7 @@ def match_brute_force(f1, f2, config):
f2: feature descriptors of the second image
config: config parameters
"""
- assert(f1.dtype.type == f2.dtype.type)
+ assert (f1.dtype.type == f2.dtype.type)
if (f1.dtype.type == np.uint8):
matcher_type = 'BruteForce-Hamming'
else:
@@ -471,7 +484,7 @@ def _non_static_matches(p1, p2, matches, config):
res = []
for match in matches:
d = p1[match[0]] - p2[match[1]]
- if d[0]**2 + d[1]**2 >= threshold**2:
+ if d[0] ** 2 + d[1] ** 2 >= threshold ** 2:
res.append(match)
static_ratio_threshold = 0.85
diff --git a/opensfm/sift_gpu.py b/opensfm/sift_gpu.py
new file mode 100644
index 000000000..5292e81a3
--- /dev/null
+++ b/opensfm/sift_gpu.py
@@ -0,0 +1,87 @@
+import logging
+
+try:
+ from silx.image import sift
+except ImportError:
+    logging.info("Can't import the silx library needed for the SIFT_GPU feature extractor and matcher; "
+                 "please change the config file or install silx via pip.")
+
+logger = logging.getLogger(__name__)
+
+
+class SiftGpu:
+    """
+    This class implements SIFT feature detection and matching on the GPU.
+    The implementation is based on OpenCL through the pyopencl Python library,
+    and the SIFT code comes from the silx library.
+    For more information see: http://www.silx.org/doc/silx/dev/index.html.
+    For details about detection and matching, see the links below:
+    detection - http://www.silx.org/doc/silx/dev/_modules/silx/opencl/sift/plan.html#SiftPlan
+    matching - http://www.silx.org/doc/silx/dev/_modules/silx/opencl/sift/match.html
+
+    Note for developers: initializing the feature extractor takes approximately
+    0.2 sec, so to avoid re-initializing it for every image this class is used
+    as a global object.
+    """
+
+    def __init__(self, image, sigma=1.6, pix_per_kp=10, device='GPU'):
+        """
+        Initialize the feature detector and matcher.
+        :param image: template image
+        :param sigma: blurring width for feature detection
+        :param pix_per_kp: keypoint pre-allocation, one keypoint per this many pixels
+        :param device: 'CPU' or 'GPU'
+        """
+        self.sigma = sigma
+        self.pix_per_kp = pix_per_kp
+        self.device = device
+        self.gpu_matching = sift.MatchPlan()
+        self.init_feature_extractor(image)
+
+ @classmethod
+ def sift_gpu_from_config(cls, config, image):
+ """
+ create class from image and config file
+ :param config:
+ :param image:
+ :return:
+ """
+ sigma = config["sift_gpu_init_sigma"]
+ pix_per_kp = config["pix_per_keypoints"]
+ device = config["sift_gpu_device_type"]
+ return cls(image, sigma, pix_per_kp, device)
+
+    def detect_image(self, image):
+        """
+        Detect SIFT keypoints in an image, re-initializing the plan if the
+        image size differs from the current template.
+
+        :param image: the image to run detection on
+        :return: keypoints of the detection
+        """
+        # image.shape is (rows, cols, ...), i.e. (height, width)
+        height, width = image.shape[:2]
+        if width != self.width or height != self.height:
+            self.init_feature_extractor(image)
+        return self.gpu_sift(image)
+
+    def init_feature_extractor(self, image):
+        """
+        Initialize the feature extractor with a new template image.
+        :param image: the new template image
+        :return: nothing
+        """
+        self.height, self.width = image.shape[:2]
+        self.gpu_sift = sift.SiftPlan(template=image,
+                                      init_sigma=self.sigma,
+                                      PIX_PER_KP=self.pix_per_kp,
+                                      devicetype=self.device)
+
+    def match_images(self, kp1, kp2):
+        """
+        Return the matches between two sets of keypoints (from two images).
+        :param kp1: keypoints of image 1 (output of detect_image())
+        :param kp2: keypoints of image 2 (output of detect_image())
+        :return: index array of shape (n, 2)
+        """
+ return self.gpu_matching(kp1, kp2, raw_results=True)
diff --git a/requirements.txt b/requirements.txt
index 1bceb4553..b2815d18f 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -14,3 +14,4 @@ repoze.lru==0.7
scipy
six
xmltodict==0.10.2
+silx>=0.12.0
diff --git a/test_sift_gpu.py b/test_sift_gpu.py
new file mode 100644
index 000000000..c1924f317
--- /dev/null
+++ b/test_sift_gpu.py
@@ -0,0 +1,96 @@
+import cv2
+import matplotlib.pyplot as plt
+from opensfm.sift_gpu import SiftGpu
+import numpy as np
+import time
+
+
+def draw_matches(img1, kp1, img2, kp2, matches, color=None):
+ """Draws lines between matching keypoints of two images.
+ Keypoints not in a matching pair are not drawn.
+ Places the images side by side in a new image and draws circles
+ around each keypoint, with line segments connecting matching pairs.
+ You can tweak the r, thickness, and figsize values as needed.
+ Args:
+ img1: An openCV image ndarray in a grayscale or color format.
+ kp1: A list of cv2.KeyPoint objects for img1.
+ img2: An openCV image ndarray of the same format and with the same
+ element type as img1.
+ kp2: A list of cv2.KeyPoint objects for img2.
+    matches: An iterable of (index1, index2) pairs, where index1 indexes
+        kp1 and index2 indexes kp2 (e.g. the raw results of SiftGpu.match_images).
+ color: The color of the circles and connecting lines drawn on the images.
+ A 3-tuple for color images, a scalar for grayscale images. If None, these
+ values are randomly generated.
+ """
+ # We're drawing them side by side. Get dimensions accordingly.
+ # Handle both color and grayscale images.
+ if len(img1.shape) == 3:
+ new_shape = (max(img1.shape[0], img2.shape[0]), img1.shape[1] + img2.shape[1], img1.shape[2])
+ elif len(img1.shape) == 2:
+ new_shape = (max(img1.shape[0], img2.shape[0]), img1.shape[1] + img2.shape[1])
+ new_img = np.zeros(new_shape, type(img1.flat[0]))
+ # Place images onto the new image.
+ new_img[0:img1.shape[0], 0:img1.shape[1]] = img1
+ new_img[0:img2.shape[0], img1.shape[1]:img1.shape[1] + img2.shape[1]] = img2
+
+ # Draw lines between matches. Make sure to offset kp coords in second image appropriately.
+ r = 15
+ thickness = 2
+ if color:
+ c = color
+ for m in matches:
+ # Generate random color for RGB/BGR and grayscale images as needed.
+ if not color:
+ c = np.random.randint(0, 256, 3) if len(img1.shape) == 3 else np.random.randint(0, 256)
+ # So the keypoint locs are stored as a tuple of floats. cv2.line(), like most other things,
+ # wants locs as a tuple of ints.
+ end1 = tuple(np.round(kp1[m[0]].pt).astype(int))
+ end2 = tuple(np.round(kp2[m[1]].pt).astype(int) + np.array([img1.shape[1], 0]))
+ cv2.line(new_img, end1, end2, c, thickness)
+ cv2.circle(new_img, end1, r, c, thickness)
+ cv2.circle(new_img, end2, r, c, thickness)
+
+ plt.figure(figsize=(15, 15))
+ plt.imshow(new_img)
+ plt.show()
+
+
+if __name__ == '__main__':
+ img1_path = 'data/berlin/images/01.jpg'
+ img2_path = 'data/berlin/images/02.jpg'
+ # read images
+ img1 = cv2.imread(img1_path) # load img1
+    img2 = cv2.imread(img2_path)  # load img2
+ img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB) # change to rgb
+ img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2RGB) # change to rgb
+ # sift
+ sift = SiftGpu(img1) # initialize the sift_gpu class
+
+ s_time = time.time()
+ keypoints_1 = sift.detect_image(img1) # detect features
+ print("Feature Extraction took {} sec".format(time.time() - s_time))
+ idx = np.where(np.sum(keypoints_1.desc, 1) != 0) # remove zero descriptors from keypoints
+ keypoints_1 = keypoints_1[idx]
+
+ keypoints_2 = sift.detect_image(img2) # detect features
+ idx = np.where(np.sum(keypoints_2.desc, 1) != 0) # remove zero descriptors from keypoints
+ keypoints_2 = keypoints_2[idx]
+
+ # feature matching
+ s_time = time.time()
+ matches = sift.match_images(keypoints_1, keypoints_2) # matching between 2 keypoints
+    print("Feature Matching took {} sec".format(time.time() - s_time))
+
+ # transform into cv2 keypoints
+ cv_kp1 = [cv2.KeyPoint(x=keypoints_1.x[i], y=keypoints_1.y[i], _size=20) for i in range(len(keypoints_1.x))]
+ cv_kp2 = [cv2.KeyPoint(x=keypoints_2.x[i], y=keypoints_2.y[i], _size=20) for i in range(len(keypoints_2.x))]
+
+ # draw the N first matches
+ N = 50
+ draw_matches(img1, cv_kp1, img2, cv_kp2, matches[:N])
+
+ # draw the features on img 1
+ # img3 = np.array([])
+ # img3 = cv2.drawKeypoints(img2, cv_kp2, img3, color=(0, 0, 255))
+ # plt.imshow(img3), plt.show()