diff --git a/main.tex b/main.tex
index a636888ede7c74e40623bbbf465d65e1faa86b97..9dbddad363240121706701aad2442dd51093882b 100644
--- a/main.tex
+++ b/main.tex
@@ -331,7 +331,7 @@ We solve this problem by decoupling the gradient averaging from the weight avera
 
      
      % To regenerate the figure, from directory results/scaling
-% python ../../../learn-topology/tools/plot_convergence.py 10/mnist/clique-ring/all/ ../mnist/clique-ring/all/ 1000/mnist/clique-ring/all/ --labels '10 nodes bsz=1280' '100 nodes bsz=128' '1000 nodes bsz=13' --legend 'lower right' --yaxis test-accuracy --save-figure ../../figures/d-cliques-mnist-scaling-clique-ring-cst-updates.png --ymin 80 --add-min-max
+% python ../../../learn-topology/tools/plot_convergence.py 10/mnist/clique-ring/all/2021-03-13-18:22:01-CET ../mnist/clique-ring/all/2021-03-10-18:14:35-CET 1000/mnist/clique-ring/all/2021-03-13-18:22:36-CET --labels '10 nodes bsz=1280' '100 nodes bsz=128' '1000 nodes bsz=13' --legend 'lower right' --yaxis test-accuracy --save-figure ../../figures/d-cliques-mnist-scaling-clique-ring-cst-updates.png --ymin 80 --add-min-max
          \begin{subfigure}[b]{0.7\textwidth}
          \centering
          \includegraphics[width=\textwidth]{figures/d-cliques-mnist-scaling-clique-ring-cst-updates}
diff --git a/results/cifar10/random-10-diverse-unbiased-gradient-uniform-init/experiments.sh b/results/cifar10/random-10-diverse-unbiased-gradient-uniform-init/experiments.sh
new file mode 100755
index 0000000000000000000000000000000000000000..5f0ba427898615df2ad16c6ccf8e47b3734fe180
--- /dev/null
+++ b/results/cifar10/random-10-diverse-unbiased-gradient-uniform-init/experiments.sh
@@ -0,0 +1,19 @@
+#!/usr/bin/env bash
+# Run the CIFAR-10 greedy-diverse-10 experiment (unbiased gradient + uniform
+# initial averaging) over the batch-size/learning-rate grid below.
+set -e
+TOOLS=../../../../learn-topology/tools
+CWD="$(pwd)"
+cd "$TOOLS"
+# NOTE: BSZS and LRS are whitespace-separated value lists; they are
+# deliberately left unquoted in the for-loops so the shell word-splits them.
+BSZS='
+    20
+    '
+LRS='
+    0.002
+    '
+for BSZ in $BSZS; do
+    for LR in $LRS; do
+        python sgp-mnist.py --nb-nodes 100 --nb-epochs 100 --local-classes 1 --seed 1 --nodes-per-class 10 10 10 10 10 10 10 10 10 10 --global-train-ratios 1 1 1 1 1 1 1 1 1 1 --dist-optimization d-psgd --topology greedy-diverse-10 --metric dissimilarity --learning-momentum 0.9 --sync-per-mini-batch 1 --results-directory "$CWD/all" --learning-rate "$LR" --batch-size "$BSZ" "$@" --single-process --nb-logging-processes 10 --dataset cifar10 --model gn-lenet --accuracy-logging-interval 10 --validation-set-ratio 0.5 --unbiased-gradient --initial-averaging
+    done
+done
diff --git a/results/cifar10/random-10-diverse-unbiased-gradient/experiments.sh b/results/cifar10/random-10-diverse-unbiased-gradient/experiments.sh
new file mode 100755
index 0000000000000000000000000000000000000000..c8480cbef17721621d526c47d291b5b0b8a39601
--- /dev/null
+++ b/results/cifar10/random-10-diverse-unbiased-gradient/experiments.sh
@@ -0,0 +1,19 @@
+#!/usr/bin/env bash
+# Run the CIFAR-10 greedy-diverse-10 experiment (unbiased gradient) over the
+# batch-size/learning-rate grid below.
+set -e
+TOOLS=../../../../learn-topology/tools
+CWD="$(pwd)"
+cd "$TOOLS"
+# NOTE: BSZS and LRS are whitespace-separated value lists; they are
+# deliberately left unquoted in the for-loops so the shell word-splits them.
+BSZS='
+    20
+    '
+LRS='
+    0.002
+    '
+for BSZ in $BSZS; do
+    for LR in $LRS; do
+        python sgp-mnist.py --nb-nodes 100 --nb-epochs 100 --local-classes 1 --seed 1 --nodes-per-class 10 10 10 10 10 10 10 10 10 10 --global-train-ratios 1 1 1 1 1 1 1 1 1 1 --dist-optimization d-psgd --topology greedy-diverse-10 --metric dissimilarity --learning-momentum 0.9 --sync-per-mini-batch 1 --results-directory "$CWD/all" --learning-rate "$LR" --batch-size "$BSZ" "$@" --single-process --nb-logging-processes 10 --dataset cifar10 --model gn-lenet --accuracy-logging-interval 10 --validation-set-ratio 0.5 --unbiased-gradient
+    done
+done
diff --git a/results/cifar10/random-10-diverse/experiments.sh b/results/cifar10/random-10-diverse/experiments.sh
new file mode 100755
index 0000000000000000000000000000000000000000..deff5d63c3a3c896b991fb4b4da41c1828dedcb5
--- /dev/null
+++ b/results/cifar10/random-10-diverse/experiments.sh
@@ -0,0 +1,19 @@
+#!/usr/bin/env bash
+# Run the CIFAR-10 greedy-diverse-10 experiment over the
+# batch-size/learning-rate grid below.
+set -e
+TOOLS=../../../../learn-topology/tools
+CWD="$(pwd)"
+cd "$TOOLS"
+# NOTE: BSZS and LRS are whitespace-separated value lists; they are
+# deliberately left unquoted in the for-loops so the shell word-splits them.
+BSZS='
+    20
+    '
+LRS='
+    0.002
+    '
+for BSZ in $BSZS; do
+    for LR in $LRS; do
+        python sgp-mnist.py --nb-nodes 100 --nb-epochs 100 --local-classes 1 --seed 1 --nodes-per-class 10 10 10 10 10 10 10 10 10 10 --global-train-ratios 1 1 1 1 1 1 1 1 1 1 --dist-optimization d-psgd --topology greedy-diverse-10 --metric dissimilarity --learning-momentum 0.9 --sync-per-mini-batch 1 --results-directory "$CWD/all" --learning-rate "$LR" --batch-size "$BSZ" "$@" --single-process --nb-logging-processes 10 --dataset cifar10 --model gn-lenet --accuracy-logging-interval 10 --validation-set-ratio 0.5
+    done
+done
diff --git a/results/cifar10/random-10/experiments.sh b/results/cifar10/random-10/experiments.sh
new file mode 100755
index 0000000000000000000000000000000000000000..dc2eeb99061ff42500ffbf66909c4c701e229c51
--- /dev/null
+++ b/results/cifar10/random-10/experiments.sh
@@ -0,0 +1,19 @@
+#!/usr/bin/env bash
+# Run the CIFAR-10 random-10 topology experiment over the
+# batch-size/learning-rate grid below.
+set -e
+TOOLS=../../../../learn-topology/tools
+CWD="$(pwd)"
+cd "$TOOLS"
+# NOTE: BSZS and LRS are whitespace-separated value lists; they are
+# deliberately left unquoted in the for-loops so the shell word-splits them.
+BSZS='
+    20
+    '
+LRS='
+    0.002
+    '
+for BSZ in $BSZS; do
+    for LR in $LRS; do
+        python sgp-mnist.py --nb-nodes 100 --nb-epochs 100 --local-classes 1 --seed 1 --nodes-per-class 10 10 10 10 10 10 10 10 10 10 --global-train-ratios 1 1 1 1 1 1 1 1 1 1 --dist-optimization d-psgd --topology random-10 --metric dissimilarity --learning-momentum 0.9 --sync-per-mini-batch 1 --results-directory "$CWD/all" --learning-rate "$LR" --batch-size "$BSZ" "$@" --single-process --nb-logging-processes 10 --dataset cifar10 --model gn-lenet --accuracy-logging-interval 10 --validation-set-ratio 0.5
+    done
+done
diff --git a/results/mnist/random-10-diverse-unbiased-grad-uniform-init/experiments.sh b/results/mnist/random-10-diverse-unbiased-grad-uniform-init/experiments.sh
new file mode 100755
index 0000000000000000000000000000000000000000..238be57998387663b2f15f5959b5c505c633085f
--- /dev/null
+++ b/results/mnist/random-10-diverse-unbiased-grad-uniform-init/experiments.sh
@@ -0,0 +1,19 @@
+#!/usr/bin/env bash
+# Run the MNIST greedy-diverse-10 experiment (unbiased gradient + uniform
+# initial averaging) over the batch-size/learning-rate grid below.
+set -e
+TOOLS=../../../../learn-topology/tools
+CWD="$(pwd)"
+cd "$TOOLS"
+# NOTE: BSZS and LRS are whitespace-separated value lists; they are
+# deliberately left unquoted in the for-loops so the shell word-splits them.
+BSZS='
+    128
+    '
+LRS='
+    0.1
+    '
+for BSZ in $BSZS; do
+    for LR in $LRS; do
+        python sgp-mnist.py --nb-nodes 100 --nb-epochs 100 --local-classes 1 --seed 1 --nodes-per-class 10 10 10 10 10 10 10 10 10 10 --global-train-ratios 0.802568 0.802568 0.802568 0.802568 0.802568 0.802568 0.802568 0.802568 0.802568 0.802568 --dist-optimization d-psgd --topology greedy-diverse-10 --metric dissimilarity --learning-momentum 0. --sync-per-mini-batch 1 --results-directory "$CWD/all" --learning-rate "$LR" --batch-size "$BSZ" "$@" --parallel-training --nb-workers 10 --dataset mnist --model linear --unbiased-gradient --initial-averaging
+    done
+done
diff --git a/results/mnist/random-10-diverse-unbiased-grad/experiments.sh b/results/mnist/random-10-diverse-unbiased-grad/experiments.sh
new file mode 100755
index 0000000000000000000000000000000000000000..b461259aa3698395ba1e41710dc859821d47fa8f
--- /dev/null
+++ b/results/mnist/random-10-diverse-unbiased-grad/experiments.sh
@@ -0,0 +1,19 @@
+#!/usr/bin/env bash
+# Run the MNIST greedy-diverse-10 experiment (unbiased gradient) over the
+# batch-size/learning-rate grid below.
+set -e
+TOOLS=../../../../learn-topology/tools
+CWD="$(pwd)"
+cd "$TOOLS"
+# NOTE: BSZS and LRS are whitespace-separated value lists; they are
+# deliberately left unquoted in the for-loops so the shell word-splits them.
+BSZS='
+    128
+    '
+LRS='
+    0.1
+    '
+for BSZ in $BSZS; do
+    for LR in $LRS; do
+        python sgp-mnist.py --nb-nodes 100 --nb-epochs 100 --local-classes 1 --seed 1 --nodes-per-class 10 10 10 10 10 10 10 10 10 10 --global-train-ratios 0.802568 0.802568 0.802568 0.802568 0.802568 0.802568 0.802568 0.802568 0.802568 0.802568 --dist-optimization d-psgd --topology greedy-diverse-10 --metric dissimilarity --learning-momentum 0. --sync-per-mini-batch 1 --results-directory "$CWD/all" --learning-rate "$LR" --batch-size "$BSZ" "$@" --parallel-training --nb-workers 10 --dataset mnist --model linear --unbiased-gradient
+    done
+done
diff --git a/results/mnist/random-10-diverse/experiments.sh b/results/mnist/random-10-diverse/experiments.sh
new file mode 100755
index 0000000000000000000000000000000000000000..80e6b7154b383e898dde07f10526b0ad12b28ff5
--- /dev/null
+++ b/results/mnist/random-10-diverse/experiments.sh
@@ -0,0 +1,19 @@
+#!/usr/bin/env bash
+# Run the MNIST greedy-diverse-10 experiment over the
+# batch-size/learning-rate grid below.
+set -e
+TOOLS=../../../../learn-topology/tools
+CWD="$(pwd)"
+cd "$TOOLS"
+# NOTE: BSZS and LRS are whitespace-separated value lists; they are
+# deliberately left unquoted in the for-loops so the shell word-splits them.
+BSZS='
+    128
+    '
+LRS='
+    0.1
+    '
+for BSZ in $BSZS; do
+    for LR in $LRS; do
+        python sgp-mnist.py --nb-nodes 100 --nb-epochs 100 --local-classes 1 --seed 1 --nodes-per-class 10 10 10 10 10 10 10 10 10 10 --global-train-ratios 0.802568 0.802568 0.802568 0.802568 0.802568 0.802568 0.802568 0.802568 0.802568 0.802568 --dist-optimization d-psgd --topology greedy-diverse-10 --metric dissimilarity --learning-momentum 0. --sync-per-mini-batch 1 --results-directory "$CWD/all" --learning-rate "$LR" --batch-size "$BSZ" "$@" --parallel-training --nb-workers 10 --dataset mnist --model linear
+    done
+done
diff --git a/results/mnist/random-10/experiments.sh b/results/mnist/random-10/experiments.sh
new file mode 100755
index 0000000000000000000000000000000000000000..ba5dc2ab6e3f8d6a0a258f95b5134224313e785d
--- /dev/null
+++ b/results/mnist/random-10/experiments.sh
@@ -0,0 +1,19 @@
+#!/usr/bin/env bash
+# Run the MNIST random-10 topology experiment over the
+# batch-size/learning-rate grid below.
+set -e
+TOOLS=../../../../learn-topology/tools
+CWD="$(pwd)"
+cd "$TOOLS"
+# NOTE: BSZS and LRS are whitespace-separated value lists; they are
+# deliberately left unquoted in the for-loops so the shell word-splits them.
+BSZS='
+    128
+    '
+LRS='
+    0.1
+    '
+for BSZ in $BSZS; do
+    for LR in $LRS; do
+        python sgp-mnist.py --nb-nodes 100 --nb-epochs 100 --local-classes 1 --seed 1 --nodes-per-class 10 10 10 10 10 10 10 10 10 10 --global-train-ratios 0.802568 0.802568 0.802568 0.802568 0.802568 0.802568 0.802568 0.802568 0.802568 0.802568 --dist-optimization d-psgd --topology random-10 --metric dissimilarity --learning-momentum 0. --sync-per-mini-batch 1 --results-directory "$CWD/all" --learning-rate "$LR" --batch-size "$BSZ" "$@" --parallel-training --nb-workers 10 --dataset mnist --model linear
+    done
+done