Commit 2ee342af authored by Jeffrey Wigger

removing the n_procs config option and adding the code that replaces it

parent 64dc0f63
Showing with 35 additions and 41 deletions
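
For orientation, a minimal sketch (not part of the commit) of where the process count now comes from: instead of an n_procs entry in each dataset config section, the datasets ask the Mapping object, which already knows how many processes exist globally. Using the base Mapping class directly, and its constructor taking the global process count, are assumptions made for illustration; a concrete mapping subclass would normally be passed.

from decentralizepy.mappings.Mapping import Mapping

# 16 stands for, e.g., 2 machines x 8 processes each; the base constructor
# taking the global process count is inferred from the Mapping hunk below.
mapping = Mapping(16)
assert mapping.get_n_procs() == 16  # the value Dataset.__init__ now reads
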
@@ -2,7 +2,6 @@
 dataset_package = decentralizepy.datasets.Celeba
 dataset_class = Celeba
 model_class = CNN
-n_procs = 96
 images_dir = /home/risharma/leaf/data/celeba/data/raw/img_align_celeba
 train_dir = /home/risharma/leaf/data/celeba/per_user_data/train
 test_dir = /home/risharma/leaf/data/celeba/data/test

@@ -2,7 +2,6 @@
 dataset_package = decentralizepy.datasets.Celeba
 dataset_class = Celeba
 model_class = CNN
-n_procs = 96
 images_dir = /home/risharma/leaf/data/celeba/data/raw/img_align_celeba
 train_dir = /home/risharma/leaf/data/celeba/per_user_data/train
 test_dir = /home/risharma/leaf/data/celeba/data/test

@@ -2,7 +2,6 @@
 dataset_package = decentralizepy.datasets.Celeba
 dataset_class = Celeba
 model_class = CNN
-n_procs = 96
 images_dir = /home/risharma/leaf/data/celeba/data/raw/img_align_celeba
 train_dir = /home/risharma/leaf/data/celeba/per_user_data/train
 test_dir = /home/risharma/leaf/data/celeba/data/test

@@ -2,7 +2,6 @@
 dataset_package = decentralizepy.datasets.Femnist
 dataset_class = Femnist
 model_class = CNN
-n_procs = 16
 train_dir = /home/risharma/leaf/data/femnist/per_user_data/train
 test_dir = /home/risharma/leaf/data/femnist/data/test
 ; python list of fractions below

@@ -2,7 +2,6 @@
 dataset_package = decentralizepy.datasets.Femnist
 dataset_class = Femnist
 model_class = CNN
-n_procs = 16
 train_dir = /home/risharma/leaf/data/femnist/per_user_data/train
 test_dir = /home/risharma/leaf/data/femnist/data/test
 ; python list of fractions below

@@ -2,7 +2,6 @@
 dataset_package = decentralizepy.datasets.Femnist
 dataset_class = Femnist
 model_class = CNN
-n_procs = 16
 train_dir = /home/risharma/leaf/data/femnist/per_user_data/train
 test_dir = /home/risharma/leaf/data/femnist/data/test
 ; python list of fractions below

@@ -2,7 +2,6 @@
 dataset_package = decentralizepy.datasets.Celeba
 dataset_class = Celeba
 model_class = CNN
-n_procs = 96
 images_dir = /home/risharma/leaf/data/celeba/data/raw/img_align_celeba
 train_dir = /home/risharma/leaf/data/celeba/per_user_data/train
 test_dir = /home/risharma/leaf/data/celeba/data/test

@@ -2,7 +2,6 @@
 dataset_package = decentralizepy.datasets.Celeba
 dataset_class = Celeba
 model_class = CNN
-n_procs = 96
 images_dir = /home/risharma/leaf/data/celeba/data/raw/img_align_celeba
 train_dir = /home/risharma/leaf/data/celeba/per_user_data/train
 test_dir = /home/risharma/leaf/data/celeba/data/test

@@ -2,7 +2,6 @@
 dataset_package = decentralizepy.datasets.Celeba
 dataset_class = Celeba
 model_class = CNN
-n_procs = 96
 images_dir = /home/risharma/leaf/data/celeba/data/raw/img_align_celeba
 train_dir = /home/risharma/leaf/data/celeba/per_user_data/train
 test_dir = /home/risharma/leaf/data/celeba/data/test

@@ -2,7 +2,6 @@
 dataset_package = decentralizepy.datasets.Femnist
 dataset_class = Femnist
 model_class = CNN
-n_procs = 16
 train_dir = /home/risharma/leaf/data/femnist/per_user_data/train
 test_dir = /home/risharma/leaf/data/femnist/data/test
 ; python list of fractions below

@@ -2,7 +2,6 @@
 dataset_package = decentralizepy.datasets.Femnist
 dataset_class = Femnist
 model_class = CNN
-n_procs = 16
 train_dir = /home/risharma/leaf/data/femnist/per_user_data/train
 test_dir = /home/risharma/leaf/data/femnist/data/test
 ; python list of fractions below

@@ -2,7 +2,6 @@
 dataset_package = decentralizepy.datasets.Femnist
 dataset_class = Femnist
 model_class = CNN
-n_procs = 16
 train_dir = /home/risharma/leaf/data/femnist/per_user_data/train
 test_dir = /home/risharma/leaf/data/femnist/data/test
 ; python list of fractions below

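The dataset config sections above simply lose their n_procs key; the remaining options are unchanged. A hedged sketch of loading such a trimmed section with the standard-library configparser, where the file name and the [DATASET] section name are assumptions based on the keys shown above:

import configparser

config = configparser.ConfigParser()
config.read("config_femnist.ini")      # hypothetical file name
dataset_cfg = dict(config["DATASET"])  # assumed section name
assert "n_procs" not in dataset_cfg    # the option removed by this commit
print(dataset_cfg["dataset_class"])    # e.g. "Femnist"
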
@@ -15,6 +15,7 @@ import decentralizepy.utils as utils
 from decentralizepy.datasets.Data import Data
 from decentralizepy.datasets.Dataset import Dataset
 from decentralizepy.datasets.Partitioner import DataPartitioner
+from decentralizepy.mappings.Mapping import Mapping
 from decentralizepy.models.Model import Model

 IMAGE_DIM = 84

@@ -185,10 +186,9 @@ class Celeba(Dataset):
     def __init__(
         self,
-        rank,
-        machine_id,
-        mapping,
-        n_procs="",
+        rank: int,
+        machine_id: int,
+        mapping: Mapping,
         train_dir="",
         test_dir="",
         images_dir="",

@@ -205,12 +205,11 @@ class Celeba(Dataset):
         machine_id : int
             Machine ID
         mapping : decentralizepy.mappings.Mapping
-            Mapping to conver rank, machine_id -> uid for data partitioning
-        n_procs : int, optional
-            The number of processes among which to divide the data. Default value is assigned 1
+            Mapping to convert rank, machine_id -> uid for data partitioning
+            It also provides the total number of global processes
         train_dir : str, optional
             Path to the training data files. Required to instantiate the training set
-            The training set is partitioned according to n_procs and sizes
+            The training set is partitioned according to the number of global processes and sizes
         test_dir : str. optional
             Path to the testing data files Required to instantiate the testing set
         sizes : list(int), optional

@@ -224,7 +223,6 @@ class Celeba(Dataset):
             rank,
             machine_id,
             mapping,
-            n_procs,
             train_dir,
             test_dir,
             sizes,

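With the Celeba changes above, the dataset is constructed without n_procs. A hedged instantiation sketch: the paths are placeholders, the base Mapping stands in for a concrete mapping subclass, and the remaining keyword parameters are left at their defaults.

from decentralizepy.datasets.Celeba import Celeba
from decentralizepy.mappings.Mapping import Mapping

mapping = Mapping(96)  # 96 global processes, matching the value dropped from the configs
dataset = Celeba(
    rank=0,
    machine_id=0,
    mapping=mapping,  # n_procs is no longer passed here
    images_dir="/path/to/img_align_celeba",           # placeholder
    train_dir="/path/to/celeba/per_user_data/train",  # placeholder
    test_dir="/path/to/celeba/data/test",             # placeholder
)
# With real LEAF data behind these paths, the training set is split across
# mapping.get_n_procs() shards instead of the removed n_procs value.
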
 from decentralizepy import utils
+from decentralizepy.mappings.Mapping import Mapping

 class Dataset:
     """

@@ -10,10 +10,9 @@ class Dataset:
     def __init__(
         self,
-        rank,
-        machine_id,
-        mapping,
-        n_procs="",
+        rank: int,
+        machine_id: int,
+        mapping: Mapping,
         train_dir="",
         test_dir="",
         sizes="",

@@ -29,12 +28,11 @@ class Dataset:
         machine_id : int
             Machine ID
         mapping : decentralizepy.mappings.Mapping
-            Mapping to conver rank, machine_id -> uid for data partitioning
-        n_procs : int, optional
-            The number of processes among which to divide the data. Default value is assigned 1
+            Mapping to convert rank, machine_id -> uid for data partitioning
+            It also provides the total number of global processes
         train_dir : str, optional
             Path to the training data files. Required to instantiate the training set
-            The training set is partitioned according to n_procs and sizes
+            The training set is partitioned according to the number of global processes and sizes
         test_dir : str. optional
             Path to the testing data files Required to instantiate the testing set
         sizes : list(int), optional

@@ -47,7 +45,8 @@ class Dataset:
         self.rank = rank
         self.machine_id = machine_id
         self.mapping = mapping
-        self.n_procs = utils.conditional_value(n_procs, "", 1)
+        # the number of global processes, needed to split-up the dataset
+        self.n_procs = mapping.get_n_procs()
         self.train_dir = utils.conditional_value(train_dir, "", None)
         self.test_dir = utils.conditional_value(test_dir, "", None)
         self.sizes = utils.conditional_value(sizes, "", None)

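The base-class change above is the heart of the commit: self.n_procs now always equals mapping.get_n_procs(). The following is an illustration only (not the library's own partitioning code) of how that count can drive an equal split with the DataPartitioner the dataset classes import; the partitioner's exact arguments are an assumption.

from decentralizepy.datasets.Partitioner import DataPartitioner
from decentralizepy.mappings.Mapping import Mapping

mapping = Mapping(4)
n = mapping.get_n_procs()
toy_data = list(range(100))
sizes = [1.0 / n] * n                            # equal fraction per global process
shard = DataPartitioner(toy_data, sizes).use(0)  # shard for uid 0 (assumed API)
print(len(shard))                                # roughly 100 / 4 = 25 samples
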
@@ -13,6 +13,7 @@ from torch.utils.data import DataLoader
 from decentralizepy.datasets.Data import Data
 from decentralizepy.datasets.Dataset import Dataset
 from decentralizepy.datasets.Partitioner import DataPartitioner
+from decentralizepy.mappings.Mapping import Mapping
 from decentralizepy.models.Model import Model

 NUM_CLASSES = 62

@@ -179,9 +180,9 @@ class Femnist(Dataset):
     def __init__(
         self,
-        rank,
-        machine_id,
-        mapping,
+        rank: int,
+        machine_id: int,
+        mapping: Mapping,
         n_procs="",
         train_dir="",
         test_dir="",

@@ -198,12 +199,11 @@ class Femnist(Dataset):
         machine_id : int
             Machine ID
         mapping : decentralizepy.mappings.Mapping
-            Mapping to conver rank, machine_id -> uid for data partitioning
-        n_procs : int, optional
-            The number of processes among which to divide the data. Default value is assigned 1
+            Mapping to convert rank, machine_id -> uid for data partitioning
+            It also provides the total number of global processes
         train_dir : str, optional
             Path to the training data files. Required to instantiate the training set
-            The training set is partitioned according to n_procs and sizes
+            The training set is partitioned according to the number of global processes and sizes
         test_dir : str. optional
             Path to the testing data files Required to instantiate the testing set
         sizes : list(int), optional

@@ -217,7 +217,6 @@ class Femnist(Dataset):
             rank,
             machine_id,
             mapping,
-            n_procs,
             train_dir,
             test_dir,
             sizes,

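Note that the first Femnist hunk keeps an n_procs="" slot in the signature even though the value is no longer forwarded to the base class, so keyword arguments are the safe way to call it. A short sketch with placeholder paths and the base Mapping standing in for a concrete one:

from decentralizepy.datasets.Femnist import Femnist
from decentralizepy.mappings.Mapping import Mapping

mapping = Mapping(16)
dataset = Femnist(
    rank=0,
    machine_id=0,
    mapping=mapping,  # provides the global process count
    train_dir="/path/to/femnist/per_user_data/train",  # placeholder
    test_dir="/path/to/femnist/data/test",             # placeholder
)
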
@@ -18,6 +18,18 @@ class Mapping:
         """
         self.n_procs = n_procs

+    def get_n_procs(self):
+        """
+        Gives the global sum of all processes that are spawned on the machines
+
+        Returns
+        -------
+        int
+            the number of global processes
+
+        """
+        return self.n_procs
+
     def get_uid(self, rank: int, machine_id: int):
         """
         Gives the global unique identifier of the node

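get_n_procs() is the one accessor the datasets now rely on. For illustration only, a toy Mapping subclass showing how a concrete mapping can tie rank/machine_id, uid, and the global process count together; this is not how the library's own mappings are necessarily implemented, and the base constructor taking n_procs is inferred from the context line above.

from decentralizepy.mappings.Mapping import Mapping

class ToyLinear(Mapping):
    """Illustration only: machine-major linear (rank, machine_id) -> uid mapping."""

    def __init__(self, n_machines, procs_per_machine):
        super().__init__(n_machines * procs_per_machine)
        self.procs_per_machine = procs_per_machine

    def get_uid(self, rank: int, machine_id: int):
        return machine_id * self.procs_per_machine + rank

m = ToyLinear(2, 8)
print(m.get_n_procs())  # 16 global processes, the value the datasets consume
print(m.get_uid(3, 1))  # 11
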
@@ -394,8 +394,6 @@ class Node:
             Rank of process local to the machine
         machine_id : int
             Machine ID on which the process in running
-        n_procs_local : int
-            Number of processes on current machine
         mapping : decentralizepy.mappings
             The object containing the mapping rank <--> uid
         graph : decentralizepy.graphs