From 56f3ad4941cd8a217958df4ba666923c5ca2c16b Mon Sep 17 00:00:00 2001
From: Jeffrey Wigger <jeffrey.wigger@epfl.ch>
Date: Sat, 11 Jun 2022 16:43:56 +0200
Subject: [PATCH] only local

---
 eval/run_xtimes_reddit_rws.sh                 |  2 +-
 .../config_reddit_sharing_dpsgdrwasync0.ini   | 36 +++++++++++++++++++
 2 files changed, 37 insertions(+), 1 deletion(-)
 create mode 100644 eval/step_configs/config_reddit_sharing_dpsgdrwasync0.ini

diff --git a/eval/run_xtimes_reddit_rws.sh b/eval/run_xtimes_reddit_rws.sh
index dc67069..3726501 100755
--- a/eval/run_xtimes_reddit_rws.sh
+++ b/eval/run_xtimes_reddit_rws.sh
@@ -54,7 +54,7 @@ export PYTHONFAULTHANDLER=1
 # Base configs for which the grid search is done
 # tests=("step_configs/config_reddit_sharing_topKdynamicGraph.ini")
 # tests=("step_configs/config_reddit_sharing_topKsharingasyncrw.ini" "step_configs/config_reddit_sharing_topKdpsgdrwasync.ini" "step_configs/config_reddit_sharing_topKdpsgdrw.ini")
-tests=("step_configs/config_reddit_sharing_dpsgdrwasync4.ini")
+tests=("step_configs/config_reddit_sharing_dpsgdrwasync0.ini")
 # tests=("step_configs/config_reddit_sharing_dpsgdrw.ini" "step_configs/config_reddit_sharing_dpsgdrwasync.ini" "step_configs/config_reddit_sharing_sharingasyncrw.ini" "step_configs/config_reddit_sharing_sharingrw.ini")
 # Learning rates
 lr="1"
diff --git a/eval/step_configs/config_reddit_sharing_dpsgdrwasync0.ini b/eval/step_configs/config_reddit_sharing_dpsgdrwasync0.ini
new file mode 100644
index 0000000..94fbc94
--- /dev/null
+++ b/eval/step_configs/config_reddit_sharing_dpsgdrwasync0.ini
@@ -0,0 +1,36 @@
+[DATASET]
+dataset_package = decentralizepy.datasets.Reddit
+dataset_class = Reddit
+random_seed = 97
+model_class = RNN
+train_dir = /mnt/nfs/shared/leaf/data/reddit_new/per_user_data/train
+test_dir = /mnt/nfs/shared/leaf/data/reddit_new/new_small_data/test
+; python list of fractions below
+sizes =
+
+[OPTIMIZER_PARAMS]
+optimizer_package = torch.optim
+optimizer_class = SGD
+lr = 0.001
+
+[TRAIN_PARAMS]
+training_package = decentralizepy.training.Training
+training_class = Training
+rounds = 47
+full_epochs = False
+batch_size = 16
+shuffle = True
+loss_package = torch.nn
+loss_class = CrossEntropyLoss
+
+[COMMUNICATION]
+comm_package = decentralizepy.communication.TCPRandomWalk
+comm_class = TCPRandomWalk
+addresses_filepath = ip_addr_6Machines.json
+sampler = equi_check_history
+
+[SHARING]
+sharing_package = decentralizepy.sharing.DPSGDRWAsync
+sharing_class = DPSGDRWAsync
+rw_chance = 0
+comm_interval = 0.01
-- 
GitLab