I am new to PyTorch distributed training, and any input would help. I have code that works on a single GPU and I am trying to make it distributed. I am getting a socket connection error. The code is below (I have omitted the parts that are unlikely to be the problem). I assume this is a socket error.
$> torchrun --nproc_per_node=4 --nnodes=1 train_dist.py
CODE:
import datetime
import os
# os.environ["CUDA_VISIBLE_DEVICES"] = "1"
import time
import sys
import numpy as np
import torch
from torch.utils.data import DataLoader, DistributedSampler
from torch.utils.data.dataloader import default_collate
from torch import nn
import torch.nn.functional as F
import torchvision
from torchvision import transforms
import torch.distributed as dist

import utils
from scheduler import WarmupMultiStepLR
from datasets.ntu60_hoi import NTU60Subject
import models.AR_pcd_flow as Models


# Function to initialize the distributed environment
def init_distributed():
    # Initialization via the environment variables set by torchrun / torch.distributed.launch:
    rank = int(os.environ['RANK'])
    world_size = int(os.environ['WORLD_SIZE'])
    dist.init_process_group(backend='nccl', rank=rank, world_size=world_size)
    # dist.init_process_group(backend='nccl')
    local_rank = int(os.environ['LOCAL_RANK'])
    torch.cuda.set_device(local_rank)
    device = torch.device("cuda", local_rank)
    return device, rank, world_size


# training step
def train_one_epoch(model, criterion, optimizer, lr_scheduler, data_loader, device, epoch, print_freq):
    ...  # training code (omitted in the question)


def evaluate(model, criterion, data_loader, device, num_samples, print_freq):
    ...  # evaluation code (omitted in the question)


# put it all together... Define data and network models
def main(args):
    if args.output_dir:
        utils.mkdir(args.output_dir)

    print(args)
    print("torch version: ", torch.__version__)
    print("torchvision version: ", torchvision.__version__)
    print("Number of GPUs:", torch.cuda.device_count())

    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    print("Creating model")
    Model = getattr(Models, args.model)
    model = Model(radius=args.radius, nsamples=args.nsamples, spatial_stride=args.spatial_stride,
                  temporal_kernel_size=args.temporal_kernel_size, temporal_stride=args.temporal_stride,
                  emb_relu=args.emb_relu,
                  dim=args.dim, depth=args.depth, heads=args.heads, dim_head=args.dim_head,
                  mlp_dim=args.mlp_dim, num_classes=60)

    if torch.cuda.device_count() > 1:
        device, rank, world_size = init_distributed()
        model.to(device)
        # model = nn.DataParallel(model)
        model = nn.parallel.DistributedDataParallel(model, device_ids=[device.index], output_device=device.index)  # local_rank in place of device
    else:
        device = torch.device('cuda')
        model.to(device)

    # Data loading code
    print("Loading data")
    st = time.time()
    dataset_train = NTU60Subject(root='/scratch/NTU60/', train=True)
    dataset_test = NTU60Subject(root='/scratch/NTU60/', train=False)
    # dataset_test = SegDataset(root='/scratch/pgouripe/AS_data_base', train=False)

    print("Creating data loaders")
    if torch.cuda.device_count() > 1:
        sampler_train = DistributedSampler(dataset_train, num_replicas=world_size, rank=rank, shuffle=True)
        sampler_test = DistributedSampler(dataset_test, num_replicas=world_size, rank=rank, shuffle=False)
    else:
        sampler_train = None
        sampler_test = None

    data_loader = torch.utils.data.DataLoader(dataset_train, batch_size=args.batch_size, sampler=sampler_train, num_workers=args.workers, pin_memory=True)
    data_loader_test = torch.utils.data.DataLoader(dataset_test, batch_size=args.batch_size, sampler=sampler_test, num_workers=args.workers, pin_memory=True)

    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)

    # convert scheduler to be per iteration, not per epoch, for warmup that lasts
    # between different epochs
    warmup_iters = args.lr_warmup_epochs * len(data_loader)
    lr_milestones = [len(data_loader) * m for m in args.lr_milestones]
    lr_scheduler = WarmupMultiStepLR(optimizer, milestones=lr_milestones, gamma=args.lr_gamma, warmup_iters=warmup_iters, warmup_factor=1e-5)
    # model_without_ddp = model

    print("Start training")
    start_time = time.time()
    cur_acc = 0
    acc = 0
    for epoch in range(args.start_epoch, args.epochs):
        train_one_epoch(model, criterion, optimizer, lr_scheduler, data_loader, device, epoch, args.print_freq)
        cur_acc = max(acc, evaluate(model, criterion, data_loader_test, device, len(dataset_test), args.print_freq))
        if cur_acc > acc:  # > 0.7 and cur_acc > acc:
            acc = cur_acc
            path = os.path.join(args.output_dir, f"model_{epoch}_ntu60_DTr.pth")
            torch.save(model.state_dict(), path)
            print("model saved")
            with open('NTU60_epoch.txt', 'a') as f:
                f.write(str(epoch) + '\n')
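One variant of init_distributed I am considering (not yet in the script above) selects the GPU before forming the process group and checks torch.cuda.device_count() against LOCAL_RANK, so a mismatch fails with a readable message. This is only a sketch under the assumption that torchrun sets RANK / WORLD_SIZE / LOCAL_RANK, and the error wording is mine:

# Sketch only: a more defensive init_distributed.
# Assumption: torchrun sets RANK / WORLD_SIZE / LOCAL_RANK for the single-node
# launch command shown above; the RuntimeError text is my own wording.
def init_distributed():
    rank = int(os.environ['RANK'])
    world_size = int(os.environ['WORLD_SIZE'])
    local_rank = int(os.environ['LOCAL_RANK'])

    # Guard against LOCAL_RANK pointing past the GPUs this job can actually see.
    n_gpus = torch.cuda.device_count()
    if local_rank >= n_gpus:
        raise RuntimeError(f"LOCAL_RANK={local_rank} but only {n_gpus} visible GPU(s); "
                           f"check --nproc_per_node against the allocation / CUDA_VISIBLE_DEVICES")

    torch.cuda.set_device(local_rank)  # pick the GPU before forming the group
    dist.init_process_group(backend='nccl', rank=rank, world_size=world_size)
    device = torch.device("cuda", local_rank)
    return device, rank, world_size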
Below is the ERROR:
[2025-01-13 16:22:45,345] torch.distributed.run: [WARNING]
[2025-01-13 16:22:45,345] torch.distributed.run: [WARNING] *****************************************
[2025-01-13 16:22:45,345] torch.distributed.run: [WARNING] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
[2025-01-13 16:22:45,345] torch.distributed.run: [WARNING] *****************************************
[W socket.cpp:464] [c10d] The server socket cannot be initialized on [::]:29500 (errno: 97 - Address family not supported by protocol).
[W socket.cpp:697] [c10d] The client socket cannot be initialized to connect to [localhost]:29500 (errno: 97 - Address family not supported by protocol).
[W socket.cpp:697] [c10d] The client socket cannot be initialized to connect to [localhost]:29500 (errno: 97 - Address family not supported by protocol).
[W socket.cpp:697] [c10d] The client socket cannot be initialized to connect to [localhost]:29500 (errno: 97 - Address family not supported by protocol).
[W socket.cpp:697] [c10d] The client socket cannot be initialized to connect to [localhost]:29500 (errno: 97 - Address family not supported by protocol).
[W socket.cpp:697] [c10d] The client socket cannot be initialized to connect to [localhost]:29500 (errno: 97 - Address family not supported by protocol).
Traceback (most recent call last):
File "/home/pgouripe/.conda/envs/py39/lib/python3.9/site-packages/torch/cuda/__init__.py", line 315, in _lazy_init
queued_call()
File "/home/pgouripe/.conda/envs/py39/lib/python3.9/site-packages/torch/cuda/__init__.py", line 183, in _check_capability
capability = get_device_capability(d)
File "/home/pgouripe/.conda/envs/py39/lib/python3.9/site-packages/torch/cuda/__init__.py", line 439, in get_device_capability
prop = get_device_properties(device)
File "/home/pgouripe/.conda/envs/py39/lib/python3.9/site-packages/torch/cuda/__init__.py", line 457, in get_device_properties
return _get_device_properties(device) # type: ignore[name-defined]
RuntimeError: device >= 0 && device < num_gpus INTERNAL ASSERT FAILED at "../aten/src/ATen/cuda/CUDAContext.cpp":50, please report a bug to PyTorch. device=1, num_gpus=
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/scratch/pgouripe/HOI4D_ctr/train_dist.py", line 283, in
main(args)
File "/scratch/pgouripe/HOI4D_ctr/train_dist.py", line 178, in main
device, rank, world_size = init_distributed()
File "/scratch/pgouripe/HOI4D_ctr/train_dist.py", line 34, in init_distributed
torch.cuda.set_device(local_rank)
File "/home/pgouripe/.conda/envs/py39/lib/python3.9/site-packages/torch/cuda/__init__.py", line 408, in set_device
torch._C._cuda_setDevice(device)
File "/home/pgouripe/.conda/envs/py39/lib/python3.9/site-packages/torch/cuda/__init__.py", line 321, in _lazy_init
raise DeferredCudaCallError(msg) from e
torch.cuda.DeferredCudaCallError: CUDA call failed lazily at initialization with error: device >= 0 && device < num_gpus INTERNAL ASSERT FAILED at "../aten/src/ATen/cuda/CUDAContext.cpp":50, please report a bug to PyTorch. device=1, num_gpus=
CUDA call was originally invoked at:
File "/scratch/pgouripe/HOI4D_ctr/train_dist.py", line 9, in
import torch
File "", line 1007, in _find_and_load
File "", line 986, in _find_and_load_unlocked
File "", line 680, in _load_unlocked
File "", line 850, in exec_module
File "", line 228, in _call_with_frames_removed
File "/home/pgouripe/.conda/envs/py39/lib/python3.9/site-packages/torch/__init__.py", line 1427, in
_C._initExtension(manager_path())
File "", line 1007, in _find_and_load
File "", line 986, in _find_and_load_unlocked
File "", line 680, in _load_unlocked
File "", line 850, in exec_module
File "", line 228, in _call_with_frames_removed
File "/home/pgouripe/.conda/envs/py39/lib/python3.9/site-packages/torch/cuda/__init__.py", line 247, in
_lazy_call(_check_capability)
File "/home/pgouripe/.conda/envs/py39/lib/python3.9/site-packages/torch/cuda/__init__.py", line 244, in _lazy_call
_queued_calls.append((callable, traceback.format_stack()))
Traceback (most recent call last):
File "/home/pgouripe/.conda/envs/py39/lib/python3.9/site-packages/torch/cuda/__init__.py", line 315, in _lazy_init
queued_call()
File "/home/pgouripe/.conda/envs/py39/lib/python3.9/site-packages/torch/cuda/__init__.py", line 183, in _check_capability
capability = get_device_capability(d)
File "/home/pgouripe/.conda/envs/py39/lib/python3.9/site-packages/torch/cuda/__init__.py", line 439, in get_device_capability
prop = get_device_properties(device)
File "/home/pgouripe/.conda/envs/py39/lib/python3.9/site-packages/torch/cuda/__init__.py", line 457, in get_device_properties
return _get_device_properties(device) # type: ignore[name-defined]
RuntimeError: device >= 0 && device < num_gpus INTERNAL ASSERT FAILED at "../aten/src/ATen/cuda/CUDAContext.cpp":50, please report a bug to PyTorch. device=1, num_gpus=
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/scratch/pgouripe/HOI4D_ctr/train_dist.py", line 283, in
main(args)
File "/scratch/pgouripe/HOI4D_ctr/train_dist.py", line 178, in main
device, rank, world_size = init_distributed()
File "/scratch/pgouripe/HOI4D_ctr/train_dist.py", line 34, in init_distributed
torch.cuda.set_device(local_rank)
File "/home/pgouripe/.conda/envs/py39/lib/python3.9/site-packages/torch/cuda/__init__.py", line 408, in set_device
torch._C._cuda_setDevice(device)
File "/home/pgouripe/.conda/envs/py39/lib/python3.9/site-packages/torch/cuda/__init__.py", line 321, in _lazy_init
raise DeferredCudaCallError(msg) from e
torch.cuda.DeferredCudaCallError: CUDA call failed lazily at initialization with error: device >= 0 && device < num_gpus INTERNAL ASSERT FAILED at "../aten/src/ATen/cuda/CUDAContext.cpp":50, please report a bug to PyTorch. device=1, num_gpus=
CUDA call was originally invoked at:
File "/scratch/pgouripe/HOI4D_ctr/train_dist.py", line 9, in
import torch
File "", line 1007, in _find_and_load
File "", line 986, in _find_and_load_unlocked
File "", line 680, in _load_unlocked
File "", line 850, in exec_module
File "", line 228, in _call_with_frames_removed
File "/home/pgouripe/.conda/envs/py39/lib/python3.9/site-packages/torch/__init__.py", line 1427, in
_C._initExtension(manager_path())
File "", line 1007, in _find_and_load
File "", line 986, in _find_and_load_unlocked
File "", line 680, in _load_unlocked
File "", line 850, in exec_module
File "", line 228, in _call_with_frames_removed
File "/home/pgouripe/.conda/envs/py39/lib/python3.9/site-packages/torch/cuda/__init__.py", line 247, in
_lazy_call(_check_capability)
File "/home/pgouripe/.conda/envs/py39/lib/python3.9/site-packages/torch/cuda/__init__.py", line 244, in _lazy_call
_queued_calls.append((callable, traceback.format_stack()))
Traceback (most recent call last):
File "/home/pgouripe/.conda/envs/py39/lib/python3.9/site-packages/torch/cuda/__init__.py", line 315, in _lazy_init
queued_call()
File "/home/pgouripe/.conda/envs/py39/lib/python3.9/site-packages/torch/cuda/__init__.py", line 183, in _check_capability
capability = get_device_capability(d)
File "/home/pgouripe/.conda/envs/py39/lib/python3.9/site-packages/torch/cuda/__init__.py", line 439, in get_device_capability
prop = get_device_properties(device)
File "/home/pgouripe/.conda/envs/py39/lib/python3.9/site-packages/torch/cuda/__init__.py", line 457, in get_device_properties
return _get_device_properties(device) # type: ignore[name-defined]
RuntimeError: device >= 0 && device < num_gpus INTERNAL ASSERT FAILED at "../aten/src/ATen/cuda/CUDAContext.cpp":50, please report a bug to PyTorch. device=1, num_gpus=
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/scratch/pgouripe/HOI4D_ctr/train_dist.py", line 283, in
main(args)
File "/scratch/pgouripe/HOI4D_ctr/train_dist.py", line 178, in main
device, rank, world_size = init_distributed()
File "/scratch/pgouripe/HOI4D_ctr/train_dist.py", line 34, in init_distributed
torch.cuda.set_device(local_rank)
File "/home/pgouripe/.conda/envs/py39/lib/python3.9/site-packages/torch/cuda/__init__.py", line 408, in set_device
torch._C._cuda_setDevice(device)
File "/home/pgouripe/.conda/envs/py39/lib/python3.9/site-packages/torch/cuda/__init__.py", line 321, in _lazy_init
raise DeferredCudaCallError(msg) from e
torch.cuda.DeferredCudaCallError: CUDA call failed lazily at initialization with error: device >= 0 && device < num_gpus INTERNAL ASSERT FAILED at "../aten/src/ATen/cuda/CUDAContext.cpp":50, please report a bug to PyTorch. device=1, num_gpus=
CUDA call was originally invoked at:
File "/scratch/pgouripe/HOI4D_ctr/train_dist.py", line 9, in
import torch
File "", line 1007, in _find_and_load
File "", line 986, in _find_and_load_unlocked
File "", line 680, in _load_unlocked
File "", line 850, in exec_module
File "", line 228, in _call_with_frames_removed
File "/home/pgouripe/.conda/envs/py39/lib/python3.9/site-packages/torch/__init__.py", line 1427, in
_C._initExtension(manager_path())
File "", line 1007, in _find_and_load
File "", line 986, in _find_and_load_unlocked
File "", line 680, in _load_unlocked
File "", line 850, in exec_module
File "", line 228, in _call_with_frames_removed
File "/home/pgouripe/.conda/envs/py39/lib/python3.9/site-packages/torch/cuda/__init__.py", line 247, in
_lazy_call(_check_capability)
File "/home/pgouripe/.conda/envs/py39/lib/python3.9/site-packages/torch/cuda/__init__.py", line 244, in _lazy_call
_queued_calls.append((callable, traceback.format_stack()))
Traceback (most recent call last):
File "/home/pgouripe/.conda/envs/py39/lib/python3.9/site-packages/torch/cuda/__init__.py", line 315, in _lazy_init
queued_call()
File "/home/pgouripe/.conda/envs/py39/lib/python3.9/site-packages/torch/cuda/__init__.py", line 183, in _check_capability
capability = get_device_capability(d)
File "/home/pgouripe/.conda/envs/py39/lib/python3.9/site-packages/torch/cuda/__init__.py", line 439, in get_device_capability
prop = get_device_properties(device)
File "/home/pgouripe/.conda/envs/py39/lib/python3.9/site-packages/torch/cuda/__init__.py", line 457, in get_device_properties
return _get_device_properties(device) # type: ignore[name-defined]
RuntimeError: device >= 0 && device < num_gpus INTERNAL ASSERT FAILED at "../aten/src/ATen/cuda/CUDAContext.cpp":50, please report a bug to PyTorch. device=1, num_gpus=
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/scratch/pgouripe/HOI4D_ctr/train_dist.py", line 283, in
main(args)
File "/scratch/pgouripe/HOI4D_ctr/train_dist.py", line 178, in main
device, rank, world_size = init_distributed()
File "/scratch/pgouripe/HOI4D_ctr/train_dist.py", line 34, in init_distributed
torch.cuda.set_device(local_rank)
File "/home/pgouripe/.conda/envs/py39/lib/python3.9/site-packages/torch/cuda/__init__.py", line 408, in set_device
torch._C._cuda_setDevice(device)
File "/home/pgouripe/.conda/envs/py39/lib/python3.9/site-packages/torch/cuda/__init__.py", line 321, in _lazy_init
raise DeferredCudaCallError(msg) from e
torch.cuda.DeferredCudaCallError: CUDA call failed lazily at initialization with error: device >= 0 && device < num_gpus INTERNAL ASSERT FAILED at "../aten/src/ATen/cuda/CUDAContext.cpp":50, please report a bug to PyTorch. device=1, num_gpus=
CUDA call was originally invoked at:
File "/scratch/pgouripe/HOI4D_ctr/train_dist.py", line 9, in
import torch
File "", line 1007, in _find_and_load
File "", line 986, in _find_and_load_unlocked
File "", line 680, in _load_unlocked
File "", line 850, in exec_module
File "", line 228, in _call_with_frames_removed
File "/home/pgouripe/.conda/envs/py39/lib/python3.9/site-packages/torch/__init__.py", line 1427, in
_C._initExtension(manager_path())
File "", line 1007, in _find_and_load
File "", line 986, in _find_and_load_unlocked
File "", line 680, in _load_unlocked
File "", line 850, in exec_module
File "", line 228, in _call_with_frames_removed
File "/home/pgouripe/.conda/envs/py39/lib/python3.9/site-packages/torch/cuda/__init__.py", line 247, in
_lazy_call(_check_capability)
File "/home/pgouripe/.conda/envs/py39/lib/python3.9/site-packages/torch/cuda/__init__.py", line 244, in _lazy_call
_queued_calls.append((callable, traceback.format_stack()))
[2025-01-13 16:22:50,417] torch.distributed.elastic.multiprocessing.api: [ERROR] failed (exitcode: 1) local_rank: 0 (pid: 4178673) of binary: /home/pgouripe/.conda/envs/py39/bin/python
Traceback (most recent call last):
File "/home/pgouripe/.conda/envs/py39/bin/torchrun", line 8, in
sys.exit(main())
File "/home/pgouripe/.conda/envs/py39/lib/python3.9/site-packages/torch/distributed/elastic/multiprocessing/errors/__init__.py", line 347, in wrapper
return f(*args, **kwargs)
File "/home/pgouripe/.conda/envs/py39/lib/python3.9/site-packages/torch/distributed/run.py", line 812, in main
run(args)
File "/home/pgouripe/.conda/envs/py39/lib/python3.9/site-packages/torch/distributed/run.py", line 803, in run
elastic_launch(
File "/home/pgouripe/.conda/envs/py39/lib/python3.9/site-packages/torch/distributed/launcher/api.py", line 135, in __call__
return launch_agent(self._config, self._entrypoint, list(args))
File "/home/pgouripe/.conda/envs/py39/lib/python3.9/site-packages/torch/distributed/launcher/api.py", line 268, in launch_agent
raise ChildFailedError(
torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
============================================================
train_dist.py FAILED
------------------------------------------------------------
Failures:
[1]:
time : 2025-01-13_16:22:50
host : sg050.sol.rc.asu.edu
rank : 1 (local_rank: 1)
exitcode : 1 (pid: 4178674)
error_file:
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
[2]:
time : 2025-01-13_16:22:50
host : sg050.sol.rc.asu.edu
rank : 2 (local_rank: 2)
exitcode : 1 (pid: 4178675)
error_file:
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
[3]:
time : 2025-01-13_16:22:50
host : sg050.sol.rc.asu.edu
rank : 3 (local_rank: 3)
exitcode : 1 (pid: 4178678)
error_file:
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
------------------------------------------------------------
Root Cause (first observed failure):
[0]:
time : 2025-01-13_16:22:50
host : sg050.sol.rc.asu.edu
rank : 0 (local_rank: 0)
exitcode : 1 (pid: 4178673)
error_file:
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
============================================================
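To help narrow this down, here is a minimal standalone check I could launch with the same torchrun command to print what RANK / WORLD_SIZE / LOCAL_RANK and torch.cuda.device_count() each worker actually sees. It is only a sketch; check_dist.py is a placeholder name, and it assumes torchrun provides those environment variables to every worker:

# check_dist.py -- minimal diagnostic, hypothetical file name.
# Assumption: torchrun provides RANK, WORLD_SIZE and LOCAL_RANK to each worker.
import os
import torch
import torch.distributed as dist

rank = int(os.environ["RANK"])
world_size = int(os.environ["WORLD_SIZE"])
local_rank = int(os.environ["LOCAL_RANK"])

# Report what each process sees before any CUDA call is made.
print(f"rank={rank} world_size={world_size} local_rank={local_rank} "
      f"cuda_device_count={torch.cuda.device_count()}", flush=True)

dist.init_process_group(backend="nccl", rank=rank, world_size=world_size)
torch.cuda.set_device(local_rank)
dist.barrier()
if rank == 0:
    print("process group initialized OK", flush=True)
dist.destroy_process_group()

Run it with the same launcher as above, e.g. torchrun --nproc_per_node=4 --nnodes=1 check_dist.py.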
More details here: https://stackoverflow.com/questions/79353689/slrum-torch-distributedthe-client-socket-cannot-be-initialized-to-connect-to-l