From 533a52e567ee7084190ef9ea7c48135767dee1cb Mon Sep 17 00:00:00 2001
From: Erick Lavoie <erick.lavoie@epfl.ch>
Date: Wed, 17 Mar 2021 20:25:14 +0100
Subject: [PATCH] Added random experiments

---
 main.tex                                           |  2 +-
 .../experiments.sh                                 | 14 ++++++++++++++
 .../experiments.sh                                 | 14 ++++++++++++++
 results/cifar10/random-10-diverse/experiments.sh   | 14 ++++++++++++++
 results/cifar10/random-10/experiments.sh           | 14 ++++++++++++++
 .../experiments.sh                                 | 14 ++++++++++++++
 .../random-10-diverse-unbiased-grad/experiments.sh | 14 ++++++++++++++
 results/mnist/random-10-diverse/experiments.sh     | 14 ++++++++++++++
 results/mnist/random-10/experiments.sh             | 14 ++++++++++++++
 9 files changed, 113 insertions(+), 1 deletion(-)
 create mode 100755 results/cifar10/random-10-diverse-unbiased-gradient-uniform-init/experiments.sh
 create mode 100755 results/cifar10/random-10-diverse-unbiased-gradient/experiments.sh
 create mode 100755 results/cifar10/random-10-diverse/experiments.sh
 create mode 100755 results/cifar10/random-10/experiments.sh
 create mode 100755 results/mnist/random-10-diverse-unbiased-grad-uniform-init/experiments.sh
 create mode 100755 results/mnist/random-10-diverse-unbiased-grad/experiments.sh
 create mode 100755 results/mnist/random-10-diverse/experiments.sh
 create mode 100755 results/mnist/random-10/experiments.sh

diff --git a/main.tex b/main.tex
index a636888..9dbddad 100644
--- a/main.tex
+++ b/main.tex
@@ -331,7 +331,7 @@ We solve this problem by decoupling the gradient averaging from the weight avera
 
      
      % To regenerate the figure, from directory results/scaling
-% python ../../../learn-topology/tools/plot_convergence.py 10/mnist/clique-ring/all/ ../mnist/clique-ring/all/ 1000/mnist/clique-ring/all/ --labels '10 nodes bsz=1280' '100 nodes bsz=128' '1000 nodes bsz=13' --legend 'lower right' --yaxis test-accuracy --save-figure ../../figures/d-cliques-mnist-scaling-clique-ring-cst-updates.png --ymin 80 --add-min-max
+% python ../../../learn-topology/tools/plot_convergence.py 10/mnist/clique-ring/all/2021-03-13-18:22:01-CET ../mnist/clique-ring/all/2021-03-10-18:14:35-CET 1000/mnist/clique-ring/all/2021-03-13-18:22:36-CET --labels '10 nodes bsz=1280' '100 nodes bsz=128' '1000 nodes bsz=13' --legend 'lower right' --yaxis test-accuracy --save-figure ../../figures/d-cliques-mnist-scaling-clique-ring-cst-updates.png --ymin 80 --add-min-max
          \begin{subfigure}[b]{0.7\textwidth}
          \centering
          \includegraphics[width=\textwidth]{figures/d-cliques-mnist-scaling-clique-ring-cst-updates}
diff --git a/results/cifar10/random-10-diverse-unbiased-gradient-uniform-init/experiments.sh b/results/cifar10/random-10-diverse-unbiased-gradient-uniform-init/experiments.sh
new file mode 100755
index 0000000..5f0ba42
--- /dev/null
+++ b/results/cifar10/random-10-diverse-unbiased-gradient-uniform-init/experiments.sh
@@ -0,0 +1,15 @@
+#!/usr/bin/env bash
+# Sweep (BSZ, LR) for D-PSGD on CIFAR-10, greedy-diverse-10 topology, unbiased gradient + initial averaging; results go to ./all.
+TOOLS=../../../../learn-topology/tools; CWD="$(pwd)"; cd "$TOOLS" || exit 1
+BSZS='
+    20
+    '
+LRS='
+    0.002
+    '
+for BSZ in $BSZS;
+    do for LR in $LRS;
+        do python sgp-mnist.py --nb-nodes 100 --nb-epochs 100 --local-classes 1 --seed 1 --nodes-per-class 10 10 10 10 10 10 10 10 10 10 --global-train-ratios 1 1 1 1 1 1 1 1 1 1 --dist-optimization d-psgd --topology greedy-diverse-10 --metric dissimilarity --learning-momentum 0.9 --sync-per-mini-batch 1 --results-directory "$CWD/all" --learning-rate "$LR" --batch-size "$BSZ" "$@" --single-process --nb-logging-processes 10 --dataset cifar10 --model gn-lenet --accuracy-logging-interval 10 --validation-set-ratio 0.5 --unbiased-gradient --initial-averaging
+    done;
+done;
+
diff --git a/results/cifar10/random-10-diverse-unbiased-gradient/experiments.sh b/results/cifar10/random-10-diverse-unbiased-gradient/experiments.sh
new file mode 100755
index 0000000..c8480cb
--- /dev/null
+++ b/results/cifar10/random-10-diverse-unbiased-gradient/experiments.sh
@@ -0,0 +1,15 @@
+#!/usr/bin/env bash
+# Sweep (BSZ, LR) for D-PSGD on CIFAR-10, greedy-diverse-10 topology with unbiased gradient; results go to ./all.
+TOOLS=../../../../learn-topology/tools; CWD="$(pwd)"; cd "$TOOLS" || exit 1
+BSZS='
+    20
+    '
+LRS='
+    0.002
+    '
+for BSZ in $BSZS;
+    do for LR in $LRS;
+        do python sgp-mnist.py --nb-nodes 100 --nb-epochs 100 --local-classes 1 --seed 1 --nodes-per-class 10 10 10 10 10 10 10 10 10 10 --global-train-ratios 1 1 1 1 1 1 1 1 1 1 --dist-optimization d-psgd --topology greedy-diverse-10 --metric dissimilarity --learning-momentum 0.9 --sync-per-mini-batch 1 --results-directory "$CWD/all" --learning-rate "$LR" --batch-size "$BSZ" "$@" --single-process --nb-logging-processes 10 --dataset cifar10 --model gn-lenet --accuracy-logging-interval 10 --validation-set-ratio 0.5 --unbiased-gradient
+    done;
+done;
+
diff --git a/results/cifar10/random-10-diverse/experiments.sh b/results/cifar10/random-10-diverse/experiments.sh
new file mode 100755
index 0000000..deff5d6
--- /dev/null
+++ b/results/cifar10/random-10-diverse/experiments.sh
@@ -0,0 +1,15 @@
+#!/usr/bin/env bash
+# Sweep (BSZ, LR) for D-PSGD on CIFAR-10, greedy-diverse-10 topology (biased gradient); results go to ./all.
+TOOLS=../../../../learn-topology/tools; CWD="$(pwd)"; cd "$TOOLS" || exit 1
+BSZS='
+    20
+    '
+LRS='
+    0.002
+    '
+for BSZ in $BSZS;
+    do for LR in $LRS;
+        do python sgp-mnist.py --nb-nodes 100 --nb-epochs 100 --local-classes 1 --seed 1 --nodes-per-class 10 10 10 10 10 10 10 10 10 10 --global-train-ratios 1 1 1 1 1 1 1 1 1 1 --dist-optimization d-psgd --topology greedy-diverse-10 --metric dissimilarity --learning-momentum 0.9 --sync-per-mini-batch 1 --results-directory "$CWD/all" --learning-rate "$LR" --batch-size "$BSZ" "$@" --single-process --nb-logging-processes 10 --dataset cifar10 --model gn-lenet --accuracy-logging-interval 10 --validation-set-ratio 0.5
+    done;
+done;
+
diff --git a/results/cifar10/random-10/experiments.sh b/results/cifar10/random-10/experiments.sh
new file mode 100755
index 0000000..dc2eeb9
--- /dev/null
+++ b/results/cifar10/random-10/experiments.sh
@@ -0,0 +1,15 @@
+#!/usr/bin/env bash
+# Sweep (BSZ, LR) for D-PSGD on CIFAR-10, random-10 topology; results go to ./all.
+TOOLS=../../../../learn-topology/tools; CWD="$(pwd)"; cd "$TOOLS" || exit 1
+BSZS='
+    20
+    '
+LRS='
+    0.002
+    '
+for BSZ in $BSZS;
+    do for LR in $LRS;
+        do python sgp-mnist.py --nb-nodes 100 --nb-epochs 100 --local-classes 1 --seed 1 --nodes-per-class 10 10 10 10 10 10 10 10 10 10 --global-train-ratios 1 1 1 1 1 1 1 1 1 1 --dist-optimization d-psgd --topology random-10 --metric dissimilarity --learning-momentum 0.9 --sync-per-mini-batch 1 --results-directory "$CWD/all" --learning-rate "$LR" --batch-size "$BSZ" "$@" --single-process --nb-logging-processes 10 --dataset cifar10 --model gn-lenet --accuracy-logging-interval 10 --validation-set-ratio 0.5
+    done;
+done;
+
diff --git a/results/mnist/random-10-diverse-unbiased-grad-uniform-init/experiments.sh b/results/mnist/random-10-diverse-unbiased-grad-uniform-init/experiments.sh
new file mode 100755
index 0000000..238be57
--- /dev/null
+++ b/results/mnist/random-10-diverse-unbiased-grad-uniform-init/experiments.sh
@@ -0,0 +1,15 @@
+#!/usr/bin/env bash
+# Sweep (BSZ, LR) for D-PSGD on MNIST, greedy-diverse-10 topology, unbiased gradient + initial averaging; results go to ./all.
+TOOLS=../../../../learn-topology/tools; CWD="$(pwd)"; cd "$TOOLS" || exit 1
+BSZS='
+    128
+    '
+LRS='
+    0.1
+    '
+for BSZ in $BSZS;
+    do for LR in $LRS;
+        do python sgp-mnist.py --nb-nodes 100 --nb-epochs 100 --local-classes 1 --seed 1 --nodes-per-class 10 10 10 10 10 10 10 10 10 10 --global-train-ratios 0.802568 0.802568 0.802568 0.802568 0.802568 0.802568 0.802568 0.802568 0.802568 0.802568 --dist-optimization d-psgd --topology greedy-diverse-10 --metric dissimilarity --learning-momentum 0. --sync-per-mini-batch 1 --results-directory "$CWD/all" --learning-rate "$LR" --batch-size "$BSZ" "$@" --parallel-training --nb-workers 10 --dataset mnist --model linear --unbiased-gradient --initial-averaging
+    done;
+done;
+
diff --git a/results/mnist/random-10-diverse-unbiased-grad/experiments.sh b/results/mnist/random-10-diverse-unbiased-grad/experiments.sh
new file mode 100755
index 0000000..b461259
--- /dev/null
+++ b/results/mnist/random-10-diverse-unbiased-grad/experiments.sh
@@ -0,0 +1,15 @@
+#!/usr/bin/env bash
+# Sweep (BSZ, LR) for D-PSGD on MNIST, greedy-diverse-10 topology with unbiased gradient; results go to ./all.
+TOOLS=../../../../learn-topology/tools; CWD="$(pwd)"; cd "$TOOLS" || exit 1
+BSZS='
+    128
+    '
+LRS='
+    0.1
+    '
+for BSZ in $BSZS;
+    do for LR in $LRS;
+        do python sgp-mnist.py --nb-nodes 100 --nb-epochs 100 --local-classes 1 --seed 1 --nodes-per-class 10 10 10 10 10 10 10 10 10 10 --global-train-ratios 0.802568 0.802568 0.802568 0.802568 0.802568 0.802568 0.802568 0.802568 0.802568 0.802568 --dist-optimization d-psgd --topology greedy-diverse-10 --metric dissimilarity --learning-momentum 0. --sync-per-mini-batch 1 --results-directory "$CWD/all" --learning-rate "$LR" --batch-size "$BSZ" "$@" --parallel-training --nb-workers 10 --dataset mnist --model linear --unbiased-gradient
+    done;
+done;
+
diff --git a/results/mnist/random-10-diverse/experiments.sh b/results/mnist/random-10-diverse/experiments.sh
new file mode 100755
index 0000000..80e6b71
--- /dev/null
+++ b/results/mnist/random-10-diverse/experiments.sh
@@ -0,0 +1,15 @@
+#!/usr/bin/env bash
+# Sweep (BSZ, LR) for D-PSGD on MNIST, greedy-diverse-10 topology (biased gradient); results go to ./all.
+TOOLS=../../../../learn-topology/tools; CWD="$(pwd)"; cd "$TOOLS" || exit 1
+BSZS='
+    128
+    '
+LRS='
+    0.1
+    '
+for BSZ in $BSZS;
+    do for LR in $LRS;
+        do python sgp-mnist.py --nb-nodes 100 --nb-epochs 100 --local-classes 1 --seed 1 --nodes-per-class 10 10 10 10 10 10 10 10 10 10 --global-train-ratios 0.802568 0.802568 0.802568 0.802568 0.802568 0.802568 0.802568 0.802568 0.802568 0.802568 --dist-optimization d-psgd --topology greedy-diverse-10 --metric dissimilarity --learning-momentum 0. --sync-per-mini-batch 1 --results-directory "$CWD/all" --learning-rate "$LR" --batch-size "$BSZ" "$@" --parallel-training --nb-workers 10 --dataset mnist --model linear
+    done;
+done;
+
diff --git a/results/mnist/random-10/experiments.sh b/results/mnist/random-10/experiments.sh
new file mode 100755
index 0000000..ba5dc2a
--- /dev/null
+++ b/results/mnist/random-10/experiments.sh
@@ -0,0 +1,15 @@
+#!/usr/bin/env bash
+# Sweep (BSZ, LR) for D-PSGD on MNIST, random-10 topology; results go to ./all.
+TOOLS=../../../../learn-topology/tools; CWD="$(pwd)"; cd "$TOOLS" || exit 1
+BSZS='
+    128
+    '
+LRS='
+    0.1
+    '
+for BSZ in $BSZS;
+    do for LR in $LRS;
+        do python sgp-mnist.py --nb-nodes 100 --nb-epochs 100 --local-classes 1 --seed 1 --nodes-per-class 10 10 10 10 10 10 10 10 10 10 --global-train-ratios 0.802568 0.802568 0.802568 0.802568 0.802568 0.802568 0.802568 0.802568 0.802568 0.802568 --dist-optimization d-psgd --topology random-10 --metric dissimilarity --learning-momentum 0. --sync-per-mini-batch 1 --results-directory "$CWD/all" --learning-rate "$LR" --batch-size "$BSZ" "$@" --parallel-training --nb-workers 10 --dataset mnist --model linear
+    done;
+done;
+
-- 
GitLab