diff --git a/code/eval/queries/cypher/memgraph/memgraph_queries.py b/code/eval/queries/cypher/memgraph/memgraph_queries.py
index 0ebc5629792cf0a4a219dced66f23405f13f2528..51868ec5e3f058f98e84249b2ba374c88e06e649 100644
--- a/code/eval/queries/cypher/memgraph/memgraph_queries.py
+++ b/code/eval/queries/cypher/memgraph/memgraph_queries.py
@@ -63,51 +63,52 @@ def load_queries_from_file(file_path):
 #         logging.info(f"DBMS answered in: {execution_time} ms\n")
 
 def execute_and_log_query(query_key, query):
-    start_time = time.time()
-    with driver.session() as session:
-        result = session.run(query)
-        # end_time = time.time()
-        row_count = 0
-        if query_key == '2-hop':
-                for record in result:   
-                    nodes = record.get('nodes')
-                    if nodes is not None:  # Check if 'nodes' is not None
-                        row_count += len(nodes)
+    try:
+        start_time = time.time()
+        with driver.session() as session:
+            result = session.run(query)
+            # end_time = time.time()
+            row_count = 0
+            if query_key == '2-hop':
+                for record in result:
+                    nodes = record.get('nodes')
+                    if nodes is not None:  # Check if 'nodes' is not None
+                        row_count += len(nodes)
+            else:
+                row_count = len(result.data())
+            summary = result.consume()
+            end_time = time.time()
+
+        # Check if the attributes are None and set them to 0 if they are
+        result_available_after = summary.result_available_after if summary.result_available_after is not None else 0
+        result_consumed_after = summary.result_consumed_after if summary.result_consumed_after is not None else 0
+        row_count = row_count if row_count is not None else 0 
+        execution_time = result_available_after
+        total_time2 = result_available_after + result_consumed_after
+        total_time = end_time - start_time
+        logging.info(f"Results for Query {query_key}:")
+        logging.info(f"Python executed in: {total_time}s")
+        
+        if row_count == 0:
+            logging.info(f"Number of rows not available")
         else:
-            row_count= len(result.data())
-        summary = result.consume()
-        end_time = time.time()
-
-    # Check if the attributes are None and set them to 0 if they are
-    result_available_after = summary.result_available_after if summary.result_available_after is not None else 0
-    result_consumed_after = summary.result_consumed_after if summary.result_consumed_after is not None else 0
-    row_count = row_count if row_count is not None else 0 
-    execution_time = result_available_after
-    total_time2 = result_available_after + result_consumed_after
-    total_time = end_time - start_time
-    logging.info(f"Results for Query {query_key}:")
-    logging.info(f"Python executed in: {total_time}s")
-    
-    if row_count == 0:
-        logging.info(f"Number of rows not available")
-    else:
-        logging.info(f"Number of rows {row_count}")
-    if execution_time == 0:
-        logging.warning(f"No internal DBMS metric available")
-    else:
-        logging.info(f"DBMS answered in: {execution_time} ms\n")
-        logging.info(f"App python answered in: {total_time} ms\n")
-        logging.info(f"App db answered in: {total_time2} ms\n")
-
-
-
-    # Open the CSV file in append mode and write the log information
-    writer = csv.writer(csv_log_file)
-    # Write a header if the file is newly created or empty, else append the data
-    csv_log_file.seek(0, 2)  # Move the cursor to the end of the file
-    if csv_log_file.tell() == 0:  # If file is empty, write a header
-        writer.writerow(['Query Key', 'Start Time', 'End Time','Fetched nodes', 'Execution Time (ms)', 'Total Time (s)'])
-    writer.writerow([query_key, start_time, end_time, row_count, execution_time, total_time])
+            logging.info(f"Number of rows {row_count}")
+        if execution_time == 0:
+            logging.warning(f"No internal DBMS metric available")
+        else:
+            logging.info(f"DBMS answered in: {execution_time} ms\n")
+            logging.info(f"App python answered in: {total_time} s\n")
+            logging.info(f"App db answered in: {total_time2} ms\n")
+        # Open the CSV file in append mode and write the log information
+        writer = csv.writer(csv_log_file)
+        # Write a header if the file is newly created or empty, else append the data
+        csv_log_file.seek(0, 2)  # Move the cursor to the end of the file
+        if csv_log_file.tell() == 0:  # If file is empty, write a header
+            writer.writerow(['Query Key', 'Start Time', 'End Time','Fetched nodes', 'Execution Time (ms)', 'Total Time (s)'])
+        writer.writerow([query_key, start_time, end_time, row_count, execution_time, total_time])
+    except Exception as e:
+        logging.error(f"Failed to execute query {query_key}: {e}")
+        # Continue with the next query even if the current one fails
 
 # Function to schedule and execute all queries
 def schedule_and_execute_queries():
diff --git a/code/eval/queries/cypher/neo4j/neo4j_queries.py b/code/eval/queries/cypher/neo4j/neo4j_queries.py
index 046936d0f7d5d43e4959bc9a71c3bd918ad2aa8a..7b8e3796b4f372cdde2744626d12d2ded5fe9c0d 100644
--- a/code/eval/queries/cypher/neo4j/neo4j_queries.py
+++ b/code/eval/queries/cypher/neo4j/neo4j_queries.py
@@ -64,44 +64,48 @@ def load_queries_from_file(file_path):
 #         logging.info(f"DBMS answered in: {execution_time} ms\n")
 
 def execute_and_log_query(query_key, query):
-    start_time = time.time()
-    with driver.session() as session:
-        result = session.run(query)
-        # end_time = time.time()
-        row_count= len(result.data())
-        summary = result.consume()
-        end_time = time.time()
-
-    # Check if the attributes are None and set them to 0 if they are
-    result_available_after = summary.result_available_after if summary.result_available_after is not None else 0
-    result_consumed_after = summary.result_consumed_after if summary.result_consumed_after is not None else 0
-    row_count = row_count if row_count is not None else 0 
-    execution_time = result_available_after
-    total_time2 = result_available_after + result_consumed_after
-    total_time = end_time - start_time
-    logging.info(f"Results for Query {query_key}:")
-    logging.info(f"Python executed in: {total_time}s")
-    
-    if row_count == 0:
-        logging.info(f"Number of rows not available")
-    else:
-        logging.info(f"Number of rows {row_count}")
-    if execution_time == 0:
-        logging.warning(f"No internal DBMS metric available")
-    else:
-        logging.info(f"DBMS answered in: {execution_time} ms\n")
-        logging.info(f"App python answered in: {total_time} ms\n")
-        logging.info(f"App db answered in: {total_time2} ms\n")
-
-
-
-    # Open the CSV file in append mode and write the log information
-    writer = csv.writer(csv_log_file)
-    # Write a header if the file is newly created or empty, else append the data
-    csv_log_file.seek(0, 2)  # Move the cursor to the end of the file
-    if csv_log_file.tell() == 0:  # If file is empty, write a header
-        writer.writerow(['Query Key', 'Start Time', 'End Time','Fetched nodes', 'Execution Time (ms)', 'Total Time (s)'])
-    writer.writerow([query_key, start_time, end_time, row_count, execution_time, total_time])
+    try:
+        start_time = time.time()
+        with driver.session() as session:
+            result = session.run(query)
+            # end_time = time.time()
+            row_count = len(result.data())
+            summary = result.consume()
+            end_time = time.time()
+
+        # Check if the attributes are None and set them to 0 if they are
+        result_available_after = summary.result_available_after if summary.result_available_after is not None else 0
+        result_consumed_after = summary.result_consumed_after if summary.result_consumed_after is not None else 0
+        row_count = row_count if row_count is not None else 0 
+        execution_time = result_available_after
+        total_time2 = result_available_after + result_consumed_after
+        total_time = end_time - start_time
+        logging.info(f"Results for Query {query_key}:")
+        logging.info(f"Python executed in: {total_time}s")
+        
+        if row_count == 0:
+            logging.info(f"Number of rows not available")
+        else:
+            logging.info(f"Number of rows {row_count}")
+        if execution_time == 0:
+            logging.warning(f"No internal DBMS metric available")
+        else:
+            logging.info(f"DBMS answered in: {execution_time} ms\n")
+            logging.info(f"App python answered in: {total_time} s\n")
+            logging.info(f"App db answered in: {total_time2} ms\n")
+
+
+
+        # Open the CSV file in append mode and write the log information
+        writer = csv.writer(csv_log_file)
+        # Write a header if the file is newly created or empty, else append the data
+        csv_log_file.seek(0, 2)  # Move the cursor to the end of the file
+        if csv_log_file.tell() == 0:  # If file is empty, write a header
+            writer.writerow(['Query Key', 'Start Time', 'End Time','Fetched nodes', 'Execution Time (ms)', 'Total Time (s)'])
+        writer.writerow([query_key, start_time, end_time, row_count, execution_time, total_time])
+    except Exception as e:
+        logging.error(f"Failed to execute query {query_key}: {e}")
+        # Continue with the next query even if the current one fails
 
 # Function to schedule and execute all queries
 def schedule_and_execute_queries():
diff --git a/code/eval/queries/cypher/ongdb/cypher_queries.py b/code/eval/queries/cypher/ongdb/cypher_queries.py
index 59fe81184faa1058aa5f5933d123d8c71ba4be72..e03df8bfe582c152f3dcb003cf4a865017fc310e 100644
--- a/code/eval/queries/cypher/ongdb/cypher_queries.py
+++ b/code/eval/queries/cypher/ongdb/cypher_queries.py
@@ -64,43 +64,47 @@ def load_queries_from_file(file_path):
 #         logging.info(f"DBMS answered in: {execution_time} ms\n")
 
 def execute_and_log_query(query_key, query):
-    start_time = time.time()
-    with driver.session() as session:
-        result = session.run(query)
-        row_count= len(result.data())
-        summary = result.consume()
-        end_time = time.time()
-
-
-    # Check if the attributes are None and set them to 0 if they are
-    result_available_after = summary.result_available_after if summary.result_available_after is not None else 0
-    result_consumed_after = summary.result_consumed_after if summary.result_consumed_after is not None else 0
-    row_count = row_count if row_count is not None else 0 
-    execution_time = result_available_after
-    total_time2 = result_available_after + result_consumed_after
-    total_time = end_time - start_time
-
-    logging.info(f"Results for Query {query_key}:")
-    logging.info(f"Python executed in: {end_time - start_time}s")
-    
-    if row_count == 0:
-        logging.info(f"Number of rows not available")
-    else:
-        logging.info(f"Number of rows {row_count}")
-    if execution_time == 0:
-        logging.warning(f"No internal DBMS metric available")
-    else:
-        logging.info(f"DBMS answered in: {execution_time} ms\n")
-        logging.info(f"App python answered in: {total_time} ms\n")
-        logging.info(f"App db answered in: {total_time2} ms\n")
-
-    # Open the CSV file in append mode and write the log information
-    writer = csv.writer(csv_log_file)
-    # Write a header if the file is newly created or empty, else append the data
-    csv_log_file.seek(0, 2)  # Move the cursor to the end of the file
-    if csv_log_file.tell() == 0:  # If file is empty, write a header
-        writer.writerow(['Query Key', 'Start Time', 'End Time','Fetched nodes', 'Execution Time (ms)', 'Total Time (s)'])
-    writer.writerow([query_key, start_time, end_time, row_count, execution_time, total_time])
+    try:
+        start_time = time.time()
+        with driver.session() as session:
+            result = session.run(query)
+            row_count = len(result.data())
+            summary = result.consume()
+            end_time = time.time()
+
+
+        # Check if the attributes are None and set them to 0 if they are
+        result_available_after = summary.result_available_after if summary.result_available_after is not None else 0
+        result_consumed_after = summary.result_consumed_after if summary.result_consumed_after is not None else 0
+        row_count = row_count if row_count is not None else 0 
+        execution_time = result_available_after
+        total_time2 = result_available_after + result_consumed_after
+        total_time = end_time - start_time
+
+        logging.info(f"Results for Query {query_key}:")
+        logging.info(f"Python executed in: {total_time}s")
+        
+        if row_count == 0:
+            logging.info(f"Number of rows not available")
+        else:
+            logging.info(f"Number of rows {row_count}")
+        if execution_time == 0:
+            logging.warning(f"No internal DBMS metric available")
+        else:
+            logging.info(f"DBMS answered in: {execution_time} ms\n")
+            logging.info(f"App python answered in: {total_time} s\n")
+            logging.info(f"App db answered in: {total_time2} ms\n")
+
+        # Open the CSV file in append mode and write the log information
+        writer = csv.writer(csv_log_file)
+        # Write a header if the file is newly created or empty, else append the data
+        csv_log_file.seek(0, 2)  # Move the cursor to the end of the file
+        if csv_log_file.tell() == 0:  # If file is empty, write a header
+            writer.writerow(['Query Key', 'Start Time', 'End Time','Fetched nodes', 'Execution Time (ms)', 'Total Time (s)'])
+        writer.writerow([query_key, start_time, end_time, row_count, execution_time, total_time])
+    except Exception as e:
+        logging.error(f"Failed to execute query {query_key}: {e}")
+        # Continue with the next query even if the current one fails
 
 # Function to schedule and execute all queries
 def schedule_and_execute_queries():