diff --git a/distCalc.ipynb b/distCalc.ipynb
index f151d0c97d9d86e583c3836593262e1e16462403..1eed85cf7a6e705bc3df70089d678a80f4fabca1 100644
--- a/distCalc.ipynb
+++ b/distCalc.ipynb
@@ -1,8 +1,26 @@
 {
  "cells": [
+  {
+   "cell_type": "markdown",
+   "id": "ef5cb5f9-4c96-4c02-b8f9-6e233af4437b",
+   "metadata": {},
+   "source": [
+    "# Schema Evolution analysis\n",
+    "\n",
+    "The goal of this notebook is to analyze the results of the statistical methods used on the PDE Dataset columns and verify if it is possible to detect Schema Evolution from them. Three situations will be analyzed: columns that received data (matched columns), columns that didn't receive data (empty columns), and new information added (new columns)."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "655a12e4-babc-4a42-a3d3-a93d84269684",
+   "metadata": {},
+   "source": [
+    "## 1. Define the DistCalc class to help store the analyzed data"
+   ]
+  },
   {
    "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": 1,
    "id": "2c81bc78-04e0-4bad-83ef-380cf3be1610",
    "metadata": {
     "tags": []
@@ -15,7 +33,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 3,
+   "execution_count": 134,
    "id": "af419e44-d6ef-41f7-970c-78c316aeb712",
    "metadata": {
     "tags": []
@@ -40,7 +58,6 @@
     "        self.stat_ks_empty = []\n",
     "        self.stat_t_empty = []\n",
     "        self.stat_cohend_empty = []\n",
-    "        \n",
     "        self.stat_f_top3 = []\n",
     "        self.stat_ks_top3 = []\n",
     "        self.stat_t_top3 = []\n",
@@ -57,7 +74,6 @@
     "        self.stat_ks_empty_top3 = []\n",
     "        self.stat_t_empty_top3 = []\n",
     "        self.stat_cohend_empty_top3 = []\n",
-    "        \n",
     "        self.years = []\n",
     "    \n",
     "    @property\n",
@@ -95,45 +111,51 @@
     "    @property\n",
     "    def get_years(self):\n",
     "        return self.years\n",
-    "    \n",
-    "    def calc(self, df, stat_column, threshold):\n",
+    "\n",
+    "    # Calculates the most likely new column (Top1) to match each existing column in PDE Dataset along the years\n",
+    "    def calc(self, df, stat_column, stat_method, threshold, threshGreater=True):\n",
     "        anos = df.ano_coluna1.unique()\n",
     "        self.years = np.union1d(self.years, anos)\n",
     "\n",
-    "        # Itera sobre todos os anos\n",
+    "        # Iterate over the years\n",
     "        for ano in anos:\n",
-    "            # Constroi dataframe do ano\n",
+    "            # Build the year dataframe\n",
     "            ano_df = df[df.ano_coluna1 == ano]\n",
     "\n",
-    "            # Estruturas\n",
-    "            base_columns = ano_df.coluna1.unique()                        # Colunas que ja existiam na base\n",
-    "            new_columns = ano_df.coluna2.unique()                         # Colunas do próximo ano\n",
-    "            true_new_columns = np.setdiff1d(new_columns, base_columns)    # Colunas que nao existiam na base\n",
-    "            base_empty_columns = np.setdiff1d(base_columns, new_columns)  # Colunas da base que nao receberam dados\n",
-    "            all_columns = np.union1d(base_columns, new_columns)           # Todas as colunas possiveis\n",
-    "            # Alterar para um dicionario\n",
-    "            prev_col = []                                                 # Colunas da base para match\n",
-    "            next_col = []                                                 # Colunas do proximo ano para match\n",
+    "            # Structure\n",
+    "            base_columns = ano_df.coluna1.unique()                        # Existing PDE columns (base columns) \n",
+    "            new_columns = ano_df.coluna2.unique()                         # New columns information\n",
+    "            true_new_columns = np.setdiff1d(new_columns, base_columns)    # Columns that didn't exist in PDE\n",
+    "            base_empty_columns = np.setdiff1d(base_columns, new_columns)  # Columns that didn't receive data\n",
+    "            all_columns = np.union1d(base_columns, new_columns)           # All the columns\n",
+    "            prev_col = []                                                 # Support array for columns match (base columns)\n",
+    "            next_col = []                                                 # Support array for columns match (new columns)\n",
     "\n",
-    "            # Itera sobre o dataframe\n",
+    "            # Iterate over the year dataframe\n",
     "            for index, row in ano_df.iterrows():\n",
-    "                # Ignora colunas ja selecionadas\n",
+    "                # Ignore already selected columns\n",
     "                if row['coluna1'] in prev_col or row['coluna2'] in next_col:\n",
     "                    continue\n",
-    "                # Testa treshold\n",
-    "                if row[stat_column] > threshold:\n",
-    "                    break\n",
+    "                # Test threshold\n",
+    "                if(threshGreater):\n",
+    "                    if row[stat_column] < threshold:\n",
+    "                        break\n",
+    "                else:\n",
+    "                    if row[stat_column] > threshold:\n",
+    "                        break\n",
     "\n",
-    "                # Adiciona nas listas\n",
+    "                # Append the matched columns in the support arrays\n",
     "                prev_col.append(row['coluna1'])\n",
     "                next_col.append(row['coluna2'])\n",
     "\n",
-    "            all_match_columns = np.union1d(prev_col, next_col)\n",
-    "            not_match_columns = np.setdiff1d(all_columns, all_match_columns)\n",
-    "            found_new_columns = np.setdiff1d(new_columns, next_col)             # Colunas novas encontradas pelo algoritmo\n",
-    "            no_data_columns = np.setdiff1d(base_columns, prev_col)              # Colunas que não receram dados encontradas pelo algoritmo\n",
+    "            # Define other important structure subsets\n",
+    "            all_match_columns = np.union1d(prev_col, next_col)                  # All the matched columns the algorithm found\n",
+    "            not_match_columns = np.setdiff1d(all_columns, all_match_columns)    # Columns for which the algorithm found no match\n",
+    "            found_new_columns = np.setdiff1d(new_columns, next_col)             # All the new columns the algorithm found\n",
+    "            no_data_columns = np.setdiff1d(base_columns, prev_col)              # All the empty columns the algorithm found\n",
     "\n",
-    "            # ========== CALCULA ACURACIAS ========== \n",
+    "            # ========== ACCURACY ========== \n",
+    "            # Match columns accuracy\n",
     "            acertos_p = 0\n",
     "            acertos = 0\n",
     "            for i in range(len(prev_col)):\n",
@@ -141,7 +163,8 @@
     "                    acertos_p += 1\n",
     "            acuracia_matches = acertos_p / len(prev_col)\n",
     "            acertos += acertos_p\n",
-    "            \n",
+    "\n",
+    "            # New columns accuracy\n",
     "            acertos_p = 0\n",
     "            unionNewColumns = np.union1d(found_new_columns, true_new_columns)\n",
     "            for col in unionNewColumns:\n",
@@ -152,7 +175,8 @@
     "            else:\n",
     "                acuracia_new_columns = 1.0\n",
     "            acertos += acertos_p \n",
-    "                \n",
+    "\n",
+    "            # Empty columns accuracy\n",
     "            acertos_p = 0\n",
     "            unionEmptyColumns = np.union1d(no_data_columns, base_empty_columns)\n",
     "            for col in unionEmptyColumns:\n",
@@ -163,77 +187,81 @@
     "            else:\n",
     "                acuracia_empty_columns = 1.0\n",
     "            acertos += acertos_p\n",
-    "                \n",
-    "            soma_acuracia = acuracia_matches * len(prev_col) + acuracia_new_columns * len(unionNewColumns) + acuracia_empty_columns * len(unionEmptyColumns)\n",
-    "            # acuracia_total = soma_acuracia / (len(prev_col) + len(unionNewColumns) + len(unionEmptyColumns))\n",
+    "\n",
+    "            # Total accuracy\n",
     "            acuracia_total = acertos / len(all_columns)\n",
     "        \n",
-    "            # ========== ADICIONA ACURACIAS ==========\n",
-    "            if(stat_column == 'estatistica_f'):\n",
+    "            # ========== ADDING RESULTS ON CLASS VARIABLES ==========\n",
+    "            if(stat_method == 'estatistica_f'):\n",
     "                self.stat_f.append([ano, acuracia_total])\n",
     "                self.stat_f_matches.append([ano, acuracia_matches])\n",
     "                self.stat_f_new.append([ano, acuracia_new_columns])\n",
     "                self.stat_f_empty.append([ano, acuracia_empty_columns])\n",
-    "            elif(stat_column == 'estatistica_t'):\n",
+    "            elif(stat_method == 'estatistica_t'):\n",
     "                self.stat_t.append([ano, acuracia_total])\n",
     "                self.stat_t_matches.append([ano, acuracia_matches])\n",
     "                self.stat_t_new.append([ano, acuracia_new_columns])\n",
     "                self.stat_t_empty.append([ano, acuracia_empty_columns])\n",
-    "            elif(stat_column == 'estatistica_ks'):\n",
+    "            elif(stat_method == 'estatistica_ks'):\n",
     "                self.stat_ks.append([ano, acuracia_total])\n",
     "                self.stat_ks_matches.append([ano, acuracia_matches])\n",
     "                self.stat_ks_new.append([ano, acuracia_new_columns])\n",
     "                self.stat_ks_empty.append([ano, acuracia_empty_columns])\n",
-    "            elif(stat_column == 'estatistica_cohend'):\n",
+    "            elif(stat_method == 'estatistica_cohend'):\n",
     "                self.stat_cohend.append([ano, acuracia_total])\n",
     "                self.stat_cohend_matches.append([ano, acuracia_matches])\n",
     "                self.stat_cohend_new.append([ano, acuracia_new_columns])\n",
     "                self.stat_cohend_empty.append([ano, acuracia_empty_columns])\n",
     "\n",
-    "        \n",
-    "    def calcTop3(self, df, stat_column, threshold):\n",
+    "\n",
+    "    # Calculates the three most likely new columns (Top3) to match each existing column in PDE Dataset along the years\n",
+    "    def calcTop3(self, df, stat_column, stat_method, threshold, threshGreater=True):\n",
     "        anos = df.ano_coluna1.unique()\n",
     "        \n",
-    "        # Itera sobre todos os anos\n",
+    "        # Iterate over the years\n",
     "        for ano in anos:\n",
-    "            # Constroi dataframe do ano\n",
+    "            # Build the year dataframe\n",
     "            ano_df = df[df.ano_coluna1 == ano]\n",
     "\n",
-    "            # Estruturas\n",
-    "            base_columns = ano_df.coluna1.unique()                            # Colunas que ja existiam na base\n",
-    "            new_columns = ano_df.coluna2.unique()                             # Colunas do próximo ano\n",
-    "            intersection_columns = np.intersect1d(base_columns, new_columns)  # Colunas que possuem match\n",
-    "            true_new_columns = np.setdiff1d(new_columns, base_columns)        # Colunas que nao existiam na base\n",
-    "            true_empty_columns = np.setdiff1d(base_columns, new_columns)      # Colunas da base que nao receberam dados\n",
-    "            all_columns = np.union1d(base_columns, new_columns)               # Todas as colunas possiveis\n",
-    "            resultados = []                                                   # Resultados dos matches\n",
-    "            prev_col = []                                                     # Colunas da base que tiveram match\n",
-    "            next_col = []                                                     # Colunas do proximo ano que tiveram match\n",
+    "            # Structure\n",
+    "            base_columns = ano_df.coluna1.unique()                            # Existing PDE columns (base columns) \n",
+    "            new_columns = ano_df.coluna2.unique()                             # New columns information\n",
+    "            intersection_columns = np.intersect1d(base_columns, new_columns)  # Columns that received data\n",
+    "            true_new_columns = np.setdiff1d(new_columns, base_columns)        # Columns that didn't exist in PDE\n",
+    "            true_empty_columns = np.setdiff1d(base_columns, new_columns)      # Columns that didn't receive data\n",
+    "            all_columns = np.union1d(base_columns, new_columns)               # All the columns\n",
+    "            resultados = []                                                   # Matches results\n",
+    "            prev_col = []                                                     # Support array for columns match (base columns)\n",
+    "            next_col = []                                                     # Support array for columns match (new columns)\n",
     "\n",
-    "            # Encontra as top3 novas colunas que mais se encaixam com as colunas base\n",
+    "            # Find the Top3 columns for each base column\n",
     "            for col in base_columns:\n",
-    "                top3 = ano_df[(ano_df.coluna1 == col) & (ano_df[stat_column] < threshold)].iloc[:3,:]\n",
+    "                if(threshGreater):\n",
+    "                    top3 = ano_df[(ano_df.coluna1 == col) & (ano_df[stat_column] > threshold)].iloc[:3,:]\n",
+    "                else:\n",
+    "                    top3 = ano_df[(ano_df.coluna1 == col) & (ano_df[stat_column] < threshold)].iloc[:3,:]\n",
     "                resultados.append(top3.values)\n",
     "\n",
-    "            # Preenche prev_col e next_col\n",
+    "            # Fill the support arrays\n",
     "            for res in resultados:\n",
+    "                if(len(res) == 0):\n",
+    "                    continue\n",
     "                for i in res:\n",
     "                    prev_col = np.union1d(prev_col, i[0])\n",
     "                    next_col = np.union1d(next_col, i[2])\n",
+    "                    \n",
+    "            # Define other important structure subsets\n",
+    "            all_match_columns = np.union1d(prev_col, next_col)                  # All the matched columns the algorithm found\n",
+    "            not_match_columns = np.setdiff1d(all_columns, all_match_columns)    # Columns for which the algorithm found no match\n",
+    "            found_new_columns = np.setdiff1d(new_columns, next_col)             # All the new columns the algorithm found\n",
+    "            no_data_columns = np.setdiff1d(base_columns, prev_col)              # All the empty columns the algorithm found\n",
     "\n",
-    "            # Determina alguns c\n",
-    "            all_match_columns = np.union1d(next_col, prev_col)                  # Colunas que tiveram algum match\n",
-    "            not_match_columns = np.setdiff1d(all_columns, all_match_columns)    # Colunas que não tiveram nenhum match\n",
-    "            found_new_columns = np.setdiff1d(new_columns, next_col)             # Colunas novas encontradas pelo algoritmo\n",
-    "            no_data_columns = np.setdiff1d(base_columns, prev_col)              # Colunas que não receram dados encontradas pelo algoritmo\n",
-    "\n",
-    "            # Calcula acurácia\n",
+    "            # ========== ACCURACY ========== \n",
     "            acuracia_matches = 0\n",
     "            acuracia_novas_colunas = 0\n",
     "            acuracia_colunas_vazias = 0\n",
     "            \n",
-    "            # ========== CALCULA ACURACIA TOTAL ==========\n",
-    "            # Acurácia matches\n",
+    "            # Total accuracy\n",
     "            acertos = 0\n",
     "            for res in resultados:\n",
     "                if(len(res) == 0):\n",
@@ -243,21 +271,18 @@
     "                        acertos += 1\n",
     "                        break\n",
     "                        \n",
-    "            # Acurácia novas colunas\n",
     "            for new in found_new_columns:\n",
     "                if new in true_new_columns:\n",
     "                    acertos += 1\n",
     "\n",
-    "            # Acurácia colunas vazias\n",
     "            for no_data in no_data_columns:\n",
     "                if no_data in true_empty_columns:\n",
     "                    acertos += 1\n",
     "\n",
-    "            # Acurácia total\n",
     "            acuracia_total = acertos / len(all_columns)\n",
     "            \n",
     "            \n",
-    "            # ========== CALCULA ACURACIA PARCIAL ==========\n",
+    "            # New columns accuracy\n",
     "            acertos_p = 0\n",
     "            unionNewColumns = np.union1d(found_new_columns, true_new_columns)\n",
     "            if len(unionNewColumns) > 0:\n",
@@ -268,6 +293,7 @@
     "            else:\n",
     "                acuracia_new_columns = 1.0\n",
     "\n",
+    "            # Empty columns accuracy\n",
     "            acertos_p = 0\n",
     "            unionEmptyColumns = np.union1d(no_data_columns, true_empty_columns)\n",
     "            if len(unionEmptyColumns) > 0:\n",
@@ -277,7 +303,8 @@
     "                acuracia_empty_columns = acertos_p / len(unionEmptyColumns)            \n",
     "            else:\n",
     "                acuracia_empty_columns = 1.0\n",
-    "            \n",
+    "\n",
+    "            # Match columns accuracy\n",
     "            acertos_p = 0\n",
     "            results_len = 0\n",
     "            for res in resultados:\n",
@@ -289,42 +316,24 @@
     "                        acertos_p += 1\n",
     "                        break\n",
     "                        \n",
-    "            acuracia_matches = acertos_p / len(prev_col)\n",
-    "            # soma_acuracia = acuracia_matches * results_len + acuracia_new_columns * len(unionNewColumns) + acuracia_empty_columns * len(unionEmptyColumns)\n",
-    "            # acuracia_total = soma_acuracia / (results_len + len(unionNewColumns) + len(unionEmptyColumns))\n",
-    "            \n",
-    "            # print(ano)\n",
-    "            # print(f'{acuracia_matches} matches')\n",
-    "            # print(f'{acuracia_new_columns} new')\n",
-    "            # print(f'{acuracia_empty_columns} empty')\n",
-    "            # print(f'{acuracia_total} total')\n",
-    "            \n",
-    "            # =========================\n",
-    "            \n",
-    "            \n",
-    "            \n",
-    "            \n",
-    "            \n",
-    "            \n",
-    "            \n",
-    "                        \n",
-    "            # Adiciona acuracia\n",
-    "            if(stat_column == 'estatistica_f'):\n",
+    "            acuracia_matches = acertos_p / results_len      \n",
+    "            # ========== ADDING RESULTS ON CLASS VARIABLES ==========\n",
+    "            if(stat_method == 'estatistica_f'):\n",
     "                self.stat_f_top3.append([ano, acuracia_total])\n",
     "                self.stat_f_matches_top3.append([ano, acuracia_matches])\n",
     "                self.stat_f_new_top3.append([ano, acuracia_new_columns])\n",
     "                self.stat_f_empty_top3.append([ano, acuracia_empty_columns])\n",
-    "            elif(stat_column == 'estatistica_t'):\n",
+    "            elif(stat_method == 'estatistica_t'):\n",
     "                self.stat_t_top3.append([ano, acuracia_total])\n",
     "                self.stat_t_matches_top3.append([ano, acuracia_matches])\n",
     "                self.stat_t_new_top3.append([ano, acuracia_new_columns])\n",
     "                self.stat_t_empty_top3.append([ano, acuracia_empty_columns])\n",
-    "            elif(stat_column == 'estatistica_ks'):\n",
+    "            elif(stat_method == 'estatistica_ks'):\n",
     "                self.stat_ks_top3.append([ano, acuracia_total])\n",
     "                self.stat_ks_matches_top3.append([ano, acuracia_matches])\n",
     "                self.stat_ks_new_top3.append([ano, acuracia_new_columns])\n",
     "                self.stat_ks_empty_top3.append([ano, acuracia_empty_columns])\n",
-    "            elif(stat_column == 'estatistica_cohend'):\n",
+    "            elif(stat_method == 'estatistica_cohend'):\n",
     "                self.stat_cohend_top3.append([ano, acuracia_total])\n",
     "                self.stat_cohend_matches_top3.append([ano, acuracia_matches])\n",
     "                self.stat_cohend_new_top3.append([ano, acuracia_new_columns])\n",
@@ -336,12 +345,12 @@
    "id": "9eaff904-7ee7-45a0-9768-0f21989c65bd",
    "metadata": {},
    "source": [
-    "## Import the results for each statistical method"
+    "## 2. Import the R results for each statistical method and prepare the data"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 4,
+   "execution_count": 135,
    "id": "26287a6f-5537-4509-a09d-52dd59b3a76d",
    "metadata": {
     "tags": []
@@ -350,16 +359,16 @@
    "source": [
     "# Import F results\n",
     "df_f = pd.read_csv('Testes_hist/Result_F/F_subsequente.csv', sep=',')\n",
-    "stat_column = 'estatistica_f'\n",
-    "df_f[stat_column] = (df_f[stat_column] - 1).abs()\n",
-    "df_f = df_f.sort_values(by=['ano_coluna1', stat_column])\n",
+    "stat_column = 'p_valor'\n",
+    "df_f[stat_column] = df_f[stat_column].abs()\n",
+    "df_f = df_f.sort_values(by=['ano_coluna1', stat_column], ascending=[True, False])\n",
     "df_f = df_f[~df_f['coluna1'].str.contains('ANO_CENSO') & ~df_f['coluna2'].str.contains('ANO_CENSO')]\n",
     "\n",
     "# Import T results\n",
     "df_t = pd.read_csv('Testes_hist/Result_T/T_subsequente.csv', sep=',')\n",
-    "stat_column = 'estatistica_t'\n",
+    "stat_column = 'p_valor'\n",
     "df_t[stat_column] = df_t[stat_column].abs()\n",
-    "df_t = df_t.sort_values(by=['ano_coluna1', stat_column])\n",
+    "df_t = df_t.sort_values(by=['ano_coluna1', stat_column], ascending=[True, False])\n",
     "df_t = df_t[~df_t['coluna1'].str.contains('ANO_CENSO') & ~df_t['coluna2'].str.contains('ANO_CENSO')]\n",
     "\n",
     "# Import COHEND results\n",
@@ -371,9 +380,9 @@
     "\n",
     "# Import KS results\n",
     "df_ks = pd.read_csv('Testes_hist/Result_KS/KS_subsequente.csv', sep=',')\n",
-    "stat_column = 'estatistica_ks'\n",
+    "stat_column = 'p_valor'\n",
     "df_ks[stat_column] = (df_ks[stat_column]).abs()\n",
-    "df_ks = df_ks.sort_values(by=['ano_coluna1', stat_column])\n",
+    "df_ks = df_ks.sort_values(by=['ano_coluna1', stat_column], ascending=[True, False])\n",
     "df_ks = df_ks[~df_ks['coluna1'].str.contains('ANO_CENSO') & ~df_ks['coluna2'].str.contains('ANO_CENSO')]"
    ]
   },
@@ -382,12 +391,12 @@
    "id": "e25f4f2d-3fb9-4cfc-8a92-c2e8b887262c",
    "metadata": {},
    "source": [
-    "## Calcule the columns matches"
+    "## 3. Calculate the matches"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 5,
+   "execution_count": 136,
    "id": "f9541a11-c1bf-4318-847a-100917e13204",
    "metadata": {
     "tags": []
@@ -395,15 +404,15 @@
    "outputs": [],
    "source": [
     "dist = DistCalc()\n",
-    "dist.calc(df_f, 'estatistica_f', 0.7)\n",
-    "dist.calc(df_t, 'estatistica_t', 40)\n",
-    "dist.calc(df_c, 'estatistica_cohend', 0.15)\n",
-    "dist.calc(df_ks, 'estatistica_ks', 0.10)\n",
+    "dist.calc(df_f, 'p_valor', 'estatistica_f', 0.05)\n",
+    "dist.calc(df_t, 'p_valor', 'estatistica_t', 0.05)\n",
+    "dist.calc(df_c, 'estatistica_cohend', 'estatistica_cohend', 0.15, threshGreater=False)\n",
+    "dist.calc(df_ks, 'p_valor', 'estatistica_ks', 0.05)\n",
     "\n",
-    "dist.calcTop3(df_f, 'estatistica_f', 0.7)\n",
-    "dist.calcTop3(df_t, 'estatistica_t', 40)\n",
-    "dist.calcTop3(df_c, 'estatistica_cohend', 0.15)\n",
-    "dist.calcTop3(df_ks, 'estatistica_ks', 0.10)"
+    "dist.calcTop3(df_f, 'p_valor', 'estatistica_f', 0.05)\n",
+    "dist.calcTop3(df_t, 'p_valor', 'estatistica_t', 0.05)\n",
+    "dist.calcTop3(df_c, 'estatistica_cohend', 'estatistica_cohend', 0.15, threshGreater=False)\n",
+    "dist.calcTop3(df_ks, 'p_valor', 'estatistica_ks', 0.05)"
    ]
   },
   {
@@ -411,12 +420,12 @@
    "id": "47bcb19b-6aba-4d4a-9de0-4633bfa0eb20",
    "metadata": {},
    "source": [
-    "## Create the result dataframes"
+    "## 4. Create the result dataframes and store all the data on it"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 6,
+   "execution_count": 137,
    "id": "527ff27d-f321-4749-a94d-dd7d824ef682",
    "metadata": {
     "tags": []
@@ -488,9 +497,17 @@
     "resultTop3_t = resultTop3_t.round(3)"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "id": "5c9338be-a29e-4fd4-9aa2-905eadf59cf7",
+   "metadata": {},
+   "source": [
+    "## 5. Export the results into CSVs"
+   ]
+  },
   {
    "cell_type": "code",
-   "execution_count": 7,
+   "execution_count": 138,
    "id": "4cb4afc8-6149-40a7-8f77-af06183d4d23",
    "metadata": {
     "tags": []
@@ -527,7 +544,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.12.3"
+   "version": "3.11.9"
   }
  },
  "nbformat": 4,