diff --git a/PGPE_equality_test.ipynb b/PGPE_equality_test.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..c8c18b0dd3825a7e70ac2ba3c6b62315d0e87b5d
--- /dev/null
+++ b/PGPE_equality_test.ipynb
@@ -0,0 +1,519 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "id": "aboriginal-clarity",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from matplotlib import cm\n",
+    "import math\n",
+    "import numpy as np\n",
+    "from benchmarks.functions import rastrigin\n",
+    "from black_box_optimizers.pgpe_spielplatz import SyS_PGPE_neweps, PGPE, RandomSearch, Learner, SyS_PGPE_MS_neweps\n",
+    "from black_box_optimizers.hillclimber import HillClimber, SmartHillClimber\n",
+    "from optimization.optimizer import OptimizeableParameterDict as OP\n",
+    "from scipy import stats\n",
+    "import pickle as cp\n",
+    "import datetime\n",
+    "from scipy.stats import levene"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "00b3ddd1-0389-4682-a9b6-a63e2d6f50e4",
+   "metadata": {},
+   "source": [
+    "# Fragen\n",
+    "\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "a3fc4808-a040-44d6-8a33-cd058ae3a684",
+   "metadata": {},
+   "source": [
+    "# PGPE Parameter initialization"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "id": "88d5bfcb-0a75-4486-a535-6f53c3bbc891",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# shared parameters\n",
+    "\n",
+    "# Path to witch the results are saved\n",
+    "res_path = \"./benchmarks/bench_results/\"\n",
+    "# Path to witch the result-plots are saved\n",
+    "fig_path = \"./benchmarks/bench_plots/\"\n",
+    "# Experiment name (becomes part of the save file names)\n",
+    "exp_name = \"_fast\"\n",
+    "\n",
+    "# List of benchmark problems that will be conducted \n",
+    "problem_list = [\"rastrigin\"]\n",
+    "\n",
+    "# problem dimension to probe\n",
+    "dim_list = [2, 10, 100]\n",
+    "\n",
+    "# Number of evaluations per algorithm per problem allowed to be used\n",
+    "opt_iter = [3000, 6000, 16000] \n",
+    "\n",
+    "trainings = 100\n",
+    "\n",
+    "# standard pgpe parameters\n",
+    "mue_alpha_s = 0.1 \n",
+    "sigma_alpha_s = 0.05\n",
+    "\n",
+    "# norm pgpe parameters\n",
+    "mue_alpha_n = mue_alpha_s \n",
+    "sigma_alpha_n = sigma_alpha_s \n",
+    "\n",
+    "# new pgpge parameters\n",
+    "mue_alpha_ms = mue_alpha_s\n",
+    "sigma_alpha_ms = sigma_alpha_s\n",
+    "\n",
+    "# combined version parameters\n",
+    "mue_alpha_c = 0.1\n",
+    "sigma_alpha_c = 0.05\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "b2361909-20f2-4863-80b6-7d24c936bc8f",
+   "metadata": {},
+   "source": [
+    "## Functions"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "id": "boring-stage",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Creating the Optimizable Parameter Dict\n",
+    "def get_para_dict(dimension):\n",
+    "    para_dict = OP()\n",
+    "    for i in range(dimension):\n",
+    "        para_dict.add_parameter(\n",
+    "                key=f\"var_{i}\",\n",
+    "                min=-10.0,\n",
+    "                max=10.0,\n",
+    "                type=\"f\",       # f=float, i=int, b=1-float\n",
+    "                scale=\"linear\", # linear, exp=exponential, ixp=inverse exponential\n",
+    "                init=8.0,       # if not provided uses center of search intervall\n",
+    "                id=i,           # if not provided uses internal counter\n",
+    "            )\n",
+    "    return para_dict"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "id": "imperial-stadium",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def get_algo(algo_kind, para_dict, mue_alpha, sigma_alpha):\n",
+    "    seed = np.array([para_dict.get_init_parameters_array()])\n",
+    "    para_dict = para_dict.parameter_dict\n",
+    "\n",
+    "    if algo_kind == \"sys_pgpe\" or algo_kind == \"sys_pgpe_norm\":\n",
+    "        algorithm = SyS_PGPE_neweps(2,\n",
+    "                                   paras=para_dict,\n",
+    "                                   plot_paras = True,\n",
+    "                                   seed=seed,\n",
+    "                                   mue_alpha=mue_alpha,\n",
+    "                                   sigma_alpha=sigma_alpha\n",
+    "                                   )\n",
+    "    elif algo_kind == \"sys_pgpe_ms\" or algo_kind == 'sys_pgpe_ms_norm':\n",
+    "        algorithm = SyS_PGPE_MS_neweps(2,\n",
+    "                                   paras=para_dict,\n",
+    "                                   plot_paras = True,\n",
+    "                                   seed=seed, \n",
+    "                                   mue_alpha = mue_alpha,\n",
+    "                                   sigma_alpha = sigma_alpha\n",
+    "                                  )\n",
+    "    \n",
+    "    return algorithm"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "id": "fluid-netherlands",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def evaluate_problem(pop, problem):\n",
+    "    if problem == 'rastrigin':\n",
+    "        return - rastrigin(pop).reshape(-1,)"
+   ]
+  },
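+  {
+   "cell_type": "markdown",
+   "id": "evaluate-problem-check-md",
+   "metadata": {},
+   "source": [
+    "A quick sanity check of `evaluate_problem` on a hypothetical population (assumes `rastrigin` accepts a `(pop_size, dim)` array and returns one value per individual; for the standard Rastrigin function the optimum at the origin evaluates to 0):"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "evaluate-problem-check",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# hypothetical 3-individual population in 2 dimensions\n",
+    "test_pop = np.zeros((3, 2))\n",
+    "test_pop[1] = [1.0, 1.0]\n",
+    "test_pop[2] = [8.0, 8.0]\n",
+    "# expected: shape (3,); first entry 0.0 for the standard Rastrigin, the rest negative (rewards are negated function values)\n",
+    "print(evaluate_problem(pop=test_pop, problem='rastrigin'))"
+   ]
+  },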
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "id": "7a3519fa-8725-4e32-8f75-8dc31593095c",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def create_sample(optimizer, dim, trainings, opt_iter, mue_alpha, sigma_alpha, problem='rastrigin'): \n",
+    "    #print(\"dim: \", dim)\n",
+    "    samples = []\n",
+    "    for n in range(trainings):\n",
+    "        sample = [] # samples of specific dimension\n",
+    "        para_dict = get_para_dict(dim) #initialize the parameters for the current problem dimension\n",
+    "        # get the algorithm object\n",
+    "        algorithm = get_algo(algo_kind=optimizer,      \n",
+    "                             para_dict=para_dict,\n",
+    "                             mue_alpha = mue_alpha,\n",
+    "                             sigma_alpha = sigma_alpha\n",
+    "                            )\n",
+    "        #print(\"algorithm: \", algorithm)\n",
+    "        for i in range(opt_iter):\n",
+    "            #äprint(f\"training: {n}   iteration: {i}\")\n",
+    "            #print(algorithm.baseline)\n",
+    "            population = algorithm.ask() # gibt mir das Sample\n",
+    "            #print(\"sample: \", population)\n",
+    "            rewards = evaluate_problem(pop=population, \n",
+    "                                       problem=problem\n",
+    "                                      )\n",
+    "            #print(\"rewards: \", rewards)\n",
+    "            if \"norm\" in optimizer:\n",
+    "                # tell with reward normalization \n",
+    "                algorithm.tell_norm(pop=population, \n",
+    "                                    rewards=rewards, \n",
+    "                                    cfg=None,\n",
+    "                                    logger=None\n",
+    "                                   )\n",
+    "                baseline_norm = algorithm.baseline_act\n",
+    "                baseline = baseline_norm*(algorithm.max_tell-algorithm.min_tell)+algorithm.min_tell\n",
+    "                sample.append(baseline)\n",
+    "            else:    \n",
+    "                algorithm.tell(pop=population, \n",
+    "                               rewards=rewards, \n",
+    "                               cfg=None,\n",
+    "                               logger=None\n",
+    "                              )\n",
+    "                sample.append(algorithm.baseline_act)\n",
+    "        if \"norm\" in optimizer:   \n",
+    "            x = algorithm.baseline_act*(algorithm.max_tell-algorithm.min_tell)+algorithm.min_tell\n",
+    "        else: \n",
+    "            x = algorithm.baseline_act\n",
+    "        samples.append(x)\n",
+    "        \"\"\"\n",
+    "        n = len(samples)\n",
+    "        if n>10:\n",
+    "            std = np.std(samples)\n",
+    "            con = (2.576*std)/np.sqrt(n)\n",
+    "            print(\"con: \", con)\n",
+    "            if con <= 0.01*(sum(samples)/n):\n",
+    "                break\n",
+    "        \"\"\"\n",
+    "    return np.array(samples)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "id": "70469bb7-caeb-40fc-8a77-c4ea5c75ff89",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# T-Test oder Welch-Test for equality\n",
+    "def test_equality(sample1, sample2):\n",
+    "    \n",
+    "    # Levene Test for variance equality\n",
+    "    stat, p = levene(sample1, sample2, center = 'mean')\n",
+    "    if p>0.05:\n",
+    "        result = stats.ttest_ind(sample1, \n",
+    "                      sample2,  \n",
+    "                      equal_var=True, # t-Test\n",
+    "                      nan_policy='raise'\n",
+    "                     )\n",
+    "    elif p<=0.05:\n",
+    "        result = stats.ttest_ind(sample1, \n",
+    "                      sample2,  \n",
+    "                      equal_var=False, # Welch Test\n",
+    "                      nan_policy='raise'\n",
+    "                     )\n",
+    "    else:\n",
+    "        print(\"bei levene stimmt was nicht\")\n",
+    "        print(\"levene p: \", p)\n",
+    "\n",
+    "    return result"
+   ]
+  },
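+  {
+   "cell_type": "markdown",
+   "id": "test-equality-check-md",
+   "metadata": {},
+   "source": [
+    "A minimal check of `test_equality` on synthetic data (hypothetical samples, not PGPE results): samples with equal variances should go through the Student's t-test branch, samples with clearly different variances through the Welch branch."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "test-equality-check",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# hypothetical synthetic samples, not PGPE results\n",
+    "rng = np.random.default_rng(42)\n",
+    "same_var = rng.normal(0.0, 1.0, size=100), rng.normal(0.1, 1.0, size=100)\n",
+    "diff_var = rng.normal(0.0, 1.0, size=100), rng.normal(0.1, 3.0, size=100)\n",
+    "print(\"equal variances:   \", test_equality(*same_var))\n",
+    "print(\"unequal variances: \", test_equality(*diff_var))"
+   ]
+  },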
+  {
+   "cell_type": "code",
+   "execution_count": 8,
+   "id": "102e7342-7f46-436a-8797-8487d1236261",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def save_sample(sample, name):\n",
+    "    # save samples\n",
+    "    # Generate a timestamp\n",
+    "    timestamp = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')\n",
+    "\n",
+    "    # Define the filename with the timestamp\n",
+    "    filename = f'samples/{name}_samples_{timestamp}.npy'\n",
+    "\n",
+    "    # Save the array to a file with the timestamped filename\n",
+    "    np.save(filename, sample)\n",
+    "    return filename"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "d55505f2-9564-4851-90a2-850ae09af7bd",
+   "metadata": {},
+   "source": [
+    "## Create samples"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 9,
+   "id": "62f08f9f-a7d8-42de-93ef-3624d35d9382",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "all_samples = []"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 10,
+   "id": "0cb5a501-4c69-44f6-b1a5-78411317f60f",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "/home/student3/.conda/envs/ml2025/repositories/optimization/black_box_optimizers/pgpe_spielplatz.py:554: RuntimeWarning: invalid value encountered in double_scalars\n",
+      "  mue_gradient = r_tilde / (self.best - self.rewards.mean() + eps)\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "dim:  2\n"
+     ]
+    },
+    {
+     "ename": "ValueError",
+     "evalue": "scale < 0",
+     "output_type": "error",
+     "traceback": [
+      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
+      "\u001b[0;31mValueError\u001b[0m                                Traceback (most recent call last)",
+      "Cell \u001b[0;32mIn[10], line 4\u001b[0m\n\u001b[1;32m      2\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m dim \u001b[38;5;129;01min\u001b[39;00m dim_list:\n\u001b[1;32m      3\u001b[0m     i \u001b[38;5;241m=\u001b[39m \u001b[38;5;241m1\u001b[39m\n\u001b[0;32m----> 4\u001b[0m     sys_pgpe_sample \u001b[38;5;241m=\u001b[39m \u001b[43mcreate_sample\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43msys_pgpe\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdim\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mtrainings\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mopt_iter\u001b[49m\u001b[43m[\u001b[49m\u001b[43mi\u001b[49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mmue_alpha_s\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43msigma_alpha_s\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m      5\u001b[0m     \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mdim: \u001b[39m\u001b[38;5;124m\"\u001b[39m, dim)\n\u001b[1;32m      6\u001b[0m     \u001b[38;5;66;03m#print(\"sys pgpe: \", sys_pgpe_sample)\u001b[39;00m\n",
+      "Cell \u001b[0;32mIn[6], line 17\u001b[0m, in \u001b[0;36mcreate_sample\u001b[0;34m(optimizer, dim, trainings, opt_iter, mue_alpha, sigma_alpha, problem)\u001b[0m\n\u001b[1;32m     13\u001b[0m \u001b[38;5;66;03m#print(\"algorithm: \", algorithm)\u001b[39;00m\n\u001b[1;32m     14\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m i \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mrange\u001b[39m(opt_iter):\n\u001b[1;32m     15\u001b[0m     \u001b[38;5;66;03m#äprint(f\"training: {n}   iteration: {i}\")\u001b[39;00m\n\u001b[1;32m     16\u001b[0m     \u001b[38;5;66;03m#print(algorithm.baseline)\u001b[39;00m\n\u001b[0;32m---> 17\u001b[0m     population \u001b[38;5;241m=\u001b[39m \u001b[43malgorithm\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mask\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m \u001b[38;5;66;03m# gibt mir das Sample\u001b[39;00m\n\u001b[1;32m     18\u001b[0m     \u001b[38;5;66;03m#print(\"sample: \", population)\u001b[39;00m\n\u001b[1;32m     19\u001b[0m     rewards \u001b[38;5;241m=\u001b[39m evaluate_problem(pop\u001b[38;5;241m=\u001b[39mpopulation, \n\u001b[1;32m     20\u001b[0m                                problem\u001b[38;5;241m=\u001b[39mproblem\n\u001b[1;32m     21\u001b[0m                               )\n",
+      "File \u001b[0;32m~/.conda/envs/ml2025/repositories/optimization/black_box_optimizers/pgpe_spielplatz.py:265\u001b[0m, in \u001b[0;36mLearner.ask\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m    257\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mask\u001b[39m(\u001b[38;5;28mself\u001b[39m):\n\u001b[1;32m    258\u001b[0m     \u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[1;32m    259\u001b[0m \u001b[38;5;124;03m    Ask for a set of individuals to be evaluated.\u001b[39;00m\n\u001b[1;32m    260\u001b[0m \n\u001b[1;32m    261\u001b[0m \u001b[38;5;124;03m    Returns:\u001b[39;00m\n\u001b[1;32m    262\u001b[0m \u001b[38;5;124;03m    - numpy.ndarray: List of individuals, ready to be evaluated.\u001b[39;00m\n\u001b[1;32m    263\u001b[0m \u001b[38;5;124;03m    \"\"\"\u001b[39;00m\n\u001b[0;32m--> 265\u001b[0m     \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mpop \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_sample\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m    266\u001b[0m     \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mpop \u001b[38;5;241m=\u001b[39m np\u001b[38;5;241m.\u001b[39mclip(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mpop, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39msearch_min, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39msearch_max)\n\u001b[1;32m    267\u001b[0m     \u001b[38;5;66;03m# print(\"pop: \", self.pop)\u001b[39;00m\n",
+      "File \u001b[0;32m~/.conda/envs/ml2025/repositories/optimization/black_box_optimizers/pgpe_spielplatz.py:543\u001b[0m, in \u001b[0;36mSyS_PGPE_neweps._sample\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m    541\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m_sample\u001b[39m(\u001b[38;5;28mself\u001b[39m):\n\u001b[1;32m    542\u001b[0m     \u001b[38;5;66;03m# generates a difference vector with the given standard deviations\u001b[39;00m\n\u001b[0;32m--> 543\u001b[0m     \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mdeltas \u001b[38;5;241m=\u001b[39m \u001b[43mnp\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrandom\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mnormal\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m0.0\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43msigma\u001b[49m\u001b[43m)\u001b[49m  \u001b[38;5;66;03m# drawing the perturbation from a normal distribution with corresponding sigmas\u001b[39;00m\n\u001b[1;32m    544\u001b[0m     \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mpop \u001b[38;5;241m=\u001b[39m np\u001b[38;5;241m.\u001b[39mzeros((\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mpop_size, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mmue\u001b[38;5;241m.\u001b[39msize), \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mfloat32\u001b[39m\u001b[38;5;124m'\u001b[39m)\n\u001b[1;32m    545\u001b[0m     \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mpop[\u001b[38;5;241m0\u001b[39m] \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mmue \u001b[38;5;241m+\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mdeltas  \u001b[38;5;66;03m# Adding perturbation to cur. hypo. for first sample\u001b[39;00m\n",
+      "File \u001b[0;32mmtrand.pyx:1510\u001b[0m, in \u001b[0;36mnumpy.random.mtrand.RandomState.normal\u001b[0;34m()\u001b[0m\n",
+      "File \u001b[0;32m_common.pyx:600\u001b[0m, in \u001b[0;36mnumpy.random._common.cont\u001b[0;34m()\u001b[0m\n",
+      "File \u001b[0;32m_common.pyx:505\u001b[0m, in \u001b[0;36mnumpy.random._common.cont_broadcast_2\u001b[0;34m()\u001b[0m\n",
+      "File \u001b[0;32m_common.pyx:384\u001b[0m, in \u001b[0;36mnumpy.random._common.check_array_constraint\u001b[0;34m()\u001b[0m\n",
+      "\u001b[0;31mValueError\u001b[0m: scale < 0"
+     ]
+    }
+   ],
+   "source": [
+    "sys_samples = []\n",
+    "for dim in dim_list:\n",
+    "    i = 1\n",
+    "    sys_pgpe_sample = create_sample('sys_pgpe', dim, trainings, opt_iter[i], mue_alpha_s, sigma_alpha_s)\n",
+    "    print(\"dim: \", dim)\n",
+    "    #print(\"sys pgpe: \", sys_pgpe_sample)\n",
+    "    sys_samples.append(sys_pgpe_sample)\n",
+    "    i += 1\n",
+    "sys_filename = save_sample(sys_samples, \"sys_pgpe_1000_trainings_3000_iterations_2_10_100_dim\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "3233ea5d-2268-46b3-8b98-4f41ea91c336",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "sys_norm_samples = []\n",
+    "for dim in dim_list:\n",
+    "    i = 1\n",
+    "    sys_pgpe_norm_sample = create_sample('sys_pgpe_norm', dim, trainings, opt_iter[i], mue_alpha_n, sigma_alpha_n)\n",
+    "    print(\"dim: \", dim)\n",
+    "    #print(\"pgpe normalized rewards: \", sys_pgpe_norm_sample)\n",
+    "    sys_norm_samples.append(sys_pgpe_norm_sample)\n",
+    "    i += 1\n",
+    "sys_norm_filename = save_sample(sys_norm_samples, \"sys_pgpe_norm_1000_trainings_3000_iterations_2_10_100_dim\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "6771ea52-fd6e-4718-ae54-a8861f54cd2a",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "sys_ms_samples = []\n",
+    "for dim in dim_list:\n",
+    "    i = 1\n",
+    "    sys_pgpe_ms_sample = create_sample('sys_pgpe_ms', dim, trainings, opt_iter[i], mue_alpha_ms, sigma_alpha_ms)\n",
+    "    print(\"dim: \", dim)\n",
+    "    #print(\"pgpe new update rule: \", sys_pgpe_ms_sample)\n",
+    "    sys_ms_samples.append(sys_pgpe_ms_sample)\n",
+    "    i += 1\n",
+    "sys_ms_filename = save_sample(sys_ms_samples, \"sys_ms_pgpe_1000_trainings_3000_iterations_2_10_100_dim\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "f39f1029-a2a5-4eb5-b958-84c358f92cd8",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "sys_ms_norm_samples = []\n",
+    "for dim in dim_list:\n",
+    "    i = 1\n",
+    "    sys_pgpe_ms_norm_sample = create_sample('sys_pgpe_ms_norm', dim, trainings, opt_iter[i], mue_alpha_ms, sigma_alpha_ms)\n",
+    "    print(\"dim: \", dim)\n",
+    "    #print(\"pgpe new update rule: \", sys_pgpe_ms_sample)\n",
+    "    sys_ms_norm_samples.append(sys_pgpe_ms_norm_sample)\n",
+    "    i += 1\n",
+    "sys_ms_norm_filename = save_sample(sys_ms_norm_samples, \"sys_ms_norm_pgpe_1000_trainings_3000_iterations_2_10_100_dim\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "fc068c17-4187-4e22-ba2c-9f1334ac2e5b",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Load the saved array\n",
+    "sys_samples = np.load(sys_filename, allow_pickle=True)\n",
+    "sys_norm_samples = np.load(sys_norm_filename, allow_pickle=True)\n",
+    "sys_ms_samples = np.load(sys_ms_filename, allow_pickle=True)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "5b08c41b-5bc1-4685-ab11-3bcbdf3ea108",
+   "metadata": {},
+   "source": [
+    "## Test samples for equality"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "a9ae75e4-eef1-44c1-85c4-969ecd0dc388",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# test equality to norm for 2 dimensions\n",
+    "sys_norm_result_2d = test_equality(sys_samples[0], sys_norm_samples[0])\n",
+    "print(\"sys norm result 2d: \", sys_norm_result_2d)\n",
+    "# test equality to norm for 10 dimensions\n",
+    "sys_norm_result_10d = test_equality(sys_samples[1], sys_norm_samples[1])\n",
+    "print(\"sys norm result 10d: \", sys_norm_result_10d)\n",
+    "# test equality to norm for 100 dimensions\n",
+    "sys_norm_result_100d = test_equality(sys_samples[2], sys_norm_samples[2])\n",
+    "print(\"sys norm result 100d: \", sys_norm_result_100d)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "0c49cffe-4bf1-4222-8ac9-24849128b54f",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# test equality to ms for 2 dimensions\n",
+    "sys_ms_result_2d = test_equality(sys_samples[0], sys_ms_samples[0])\n",
+    "print(\"sys ms result 2d: \", sys_ms_result_2d)\n",
+    "# test equality to ms for 10 dimensions\n",
+    "sys_ms_result_10d = test_equality(sys_samples[1], sys_ms_samples[1])\n",
+    "print(\"sys ms result 10d: \", sys_ms_result_10d)\n",
+    "# test equality to ms for 10 dimensions\n",
+    "sys_ms_result_100d = test_equality(sys_samples[2], sys_ms_samples[2])\n",
+    "print(\"sys ms result 100d: \", sys_ms_result_100d)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "a0e9c3ca-ac3b-4467-b977-b2a6d53c8125",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "f069f342-1e4b-4b78-9d62-75adf7b6bab3",
+   "metadata": {},
+   "source": [
+    "# T-Tests\n",
+    "## Paratmeter\n",
+    "- 100 Trainings\n",
+    "- 2D a 3000 Iterationen\n",
+    "- 10D a 6000 Iterationen\n",
+    "- 100D a 16.000 Iterationen\n",
+    "- mue_alpha = 0.1 \n",
+    "- sigma_alpha = 0.05\n",
+    "\n",
+    "## Ergebnisse\n",
+    "- result_norm_2d:  Ttest_indResult(statistic=1.000015377088226, pvalue=0.3197410700120563)\n",
+    "- result_norm_10d:  Ttest_indResult(statistic=-0.13399941843316795, pvalue=0.8935391374922611)\n",
+    "- result_norm_100d:  Ttest_indResult(statistic=1.1873941552147524, pvalue=0.23649471653035284)\n",
+    "\n",
+    "- result_new_2d:  Ttest_indResult(statistic=-0.9692005435684301, pvalue=0.3336275551349407)\n",
+    "- result_new_10d:  Ttest_indResult(statistic=0.7816646887048703, pvalue=0.4353453428317401)\n",
+    "- result_new_100d:  Ttest_indResult(statistic=-0.8944711470444833, pvalue=0.3721558659216999)\n",
+    "\n",
+    "\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "f23b2ed4-0477-4a2e-9e01-772096c1b596",
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.9.15"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}