| python_code (string, 0–679k chars) | repo_name (string, 9–41 chars) | file_path (string, 6–149 chars) |
|---|---|---|
from .permutation_utilities import *
################################################################################################################
# Greedy Channel Swaps - iterative, deterministic, can be parallelized
# 1. Build a map of the magnitude improvement of involved stripes for all pairs of channel swaps... | apex-master | apex/contrib/sparsity/permutation_search_kernels/channel_swap.py |
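The truncated comment above describes the greedy channel-swap kernel: score every pairwise channel (column) swap by how much magnitude survives 2:4 pruning, then apply the best swap and iterate. A minimal NumPy sketch of one greedy pass, assuming the column count is divisible by 4; `score` is a hypothetical stand-in for the `sum_after_2_to_4` utility imported above:

```python
import numpy as np

def score(matrix):
    # Hypothetical stand-in for the sum_after_2_to_4 utility: total
    # magnitude kept when every group of 4 columns is pruned to its
    # 2 largest-magnitude entries (2:4 structured sparsity).
    groups = np.abs(matrix).reshape(matrix.shape[0], -1, 4)
    return np.sort(groups, axis=-1)[..., 2:].sum()

def greedy_swap_pass(matrix):
    """Try every pairwise column swap; apply the single best one in place."""
    base = score(matrix)
    best_gain, best_pair = 0.0, None
    for i in range(matrix.shape[1]):
        for j in range(i + 1, matrix.shape[1]):
            matrix[:, [i, j]] = matrix[:, [j, i]]   # apply trial swap
            gain = score(matrix) - base
            matrix[:, [i, j]] = matrix[:, [j, i]]   # undo it
            if gain > best_gain:
                best_gain, best_pair = gain, (i, j)
    if best_pair is not None:
        i, j = best_pair
        matrix[:, [i, j]] = matrix[:, [j, i]]
    return best_gain
```

This naive pass re-scores the whole matrix per candidate swap; the comment above indicates the real kernel instead precomputes a map of per-stripe improvements for all swap pairs, which is what makes it deterministic and parallelizable.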
import numpy as np
from .permutation_utilities import *
from .exhaustive_search import Exhaustive_Search
def accelerated_search_for_good_permutation(matrix_group, options=None, verbosity=0):
"""This function is used to call the permutation search CUDA kernels.
    Users can provide a preferred search strategy by provid... | apex-master | apex/contrib/sparsity/permutation_search_kernels/call_permutation_search_kernels.py |
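Only the signature and the first docstring lines survive the truncation. A hedged usage sketch; the `options` dictionary and its `"strategy"` key are illustrative assumptions, not confirmed API:

```python
import numpy as np
from apex.contrib.sparsity.permutation_search_kernels import (
    accelerated_search_for_good_permutation,
)

weight = np.random.rand(64, 64).astype(np.float32)
# The options keys below are hypothetical placeholders for the truncated docstring.
permutation = accelerated_search_for_good_permutation(
    weight, options={"strategy": "exhaustive"}, verbosity=1
)
```

The import path is confirmed by the package `__init__.py` row below, which re-exports `accelerated_search_for_good_permutation`.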
from .call_permutation_search_kernels import accelerated_search_for_good_permutation
from .permutation_utilities import sum_after_2_to_4 | apex-master | apex/contrib/sparsity/permutation_search_kernels/__init__.py |
import numpy as np
import time
import subprocess
import math
gpus_tested = False
gpus_found = 0
kernels_found = True
try:
import permutation_search_cuda as permutation_search_cuda_kernels
print(f"Found permutation search CUDA kernels")
except ImportError:
try:
from . import permutation_search_... | apex-master | apex/contrib/sparsity/permutation_search_kernels/permutation_utilities.py |
from collections import OrderedDict
import torch
from apex.optimizers import FusedAdam
from apex.contrib.sparsity import ASP
def build_model(args):
od = OrderedDict()
for i in range(args.num_layers):
if i == 0:
od['linear_layer_%d' % (i+1)] = torch.nn.Linear(args.input_features, args.hidde... | apex-master | apex/contrib/sparsity/test/toy_problem.py |
import torch
import torch.onnx
from apex.contrib.sparsity.permutation_lib import Permutation
"""
Functional and behavioral correctness checking for network permutations
Each test class is a torch.nn.Module with three required members:
- self.input_shape is used to populate a dummy input
- self.expected_C_params indica... | apex-master | apex/contrib/sparsity/test/test_permutation_application.py |
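The docstring says each test class is a torch.nn.Module with three required members, but only two (`self.input_shape`, `self.expected_C_params`) are visible before the truncation. A skeleton under those assumptions; the layer sizes, the `expected_C_params` value, and the omitted third member are illustrative unknowns:

```python
import torch

class TinyMLP(torch.nn.Module):
    """Skeleton of a permutation test case with the visible required members."""
    def __init__(self):
        super().__init__()
        self.input_shape = [1, 16]    # used to populate a dummy input
        self.expected_C_params = 1    # illustrative; its meaning is truncated above
        self.fc1 = torch.nn.Linear(16, 32)
        self.fc2 = torch.nn.Linear(32, 8)

    def forward(self, x):
        return self.fc2(torch.relu(self.fc1(x)))
```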
from collections import OrderedDict
import torch
from apex.optimizers import FusedAdam
from apex.contrib.sparsity import ASP
def build_model(args):
od = OrderedDict()
for i in range(args.num_layers):
if i == 0:
od['linear_layer_%d' % (i+1)] = torch.nn.Linear(args.input_features, args.hidde... | apex-master | apex/contrib/sparsity/test/checkpointing_test_part2.py |
from collections import OrderedDict
import torch
from apex.optimizers import FusedAdam
from apex.contrib.sparsity import ASP
def build_model(args):
od = OrderedDict()
for i in range(args.num_layers):
if i == 0:
od['linear_layer_%d' % (i+1)] = torch.nn.Linear(args.input_features, args.hidde... | apex-master | apex/contrib/sparsity/test/checkpointing_test_part1.py |
from collections import OrderedDict
import torch
from apex.optimizers import FusedAdam
from apex.contrib.sparsity import ASP
#
# Reference run for checkpointing test (part1 + part2)
#
def build_model(args):
od = OrderedDict()
for i in range(args.num_layers):
if i == 0:
od['linear_layer_%d... | apex-master | apex/contrib/sparsity/test/checkpointing_test_reference.py |
import numpy as np
import time
import sys
# permutation-specifics
sys.path.append("../")
from permutation_search_kernels.permutation_utilities import *
from permutation_search_kernels.exhaustive_search import Exhaustive_Search
from permutation_search_kernels.channel_swap import Channel_Swap
# Arguments
import argpars... | apex-master | apex/contrib/sparsity/permutation_tests/permutation_test.py |
try:
import torch
import bnp
from .batch_norm import BatchNorm2d_NHWC
del torch
del bnp
del batch_norm
except ImportError as err:
print("apex was installed without --bnp flag, contrib.groupbn is not available")
| apex-master | apex/contrib/groupbn/__init__.py |
import torch
import numpy as np
from torch.nn.modules.batchnorm import _BatchNorm
import bnp
class bn_NHWC_impl(torch.autograd.Function):
@staticmethod
def forward(ctx, x, s, b, rm, riv, mini_m, mini_riv, ret_cta, mom, epsilon, fuse_relu, is_train, bn_group, my_data, pair_data, magic, pair_data2, pair_data3, ... | apex-master | apex/contrib/groupbn/batch_norm.py |
from .batch_norm import GroupBatchNorm2d | apex-master | apex/contrib/cudnn_gbn/__init__.py |
import torch
from torch.nn.modules.batchnorm import _BatchNorm
from torch.nn import functional as F
from torch import Tensor
import peer_memory_cuda as pm
import cudnn_gbn_lib
from torch.cuda.amp import custom_fwd, custom_bwd
class _GroupBatchNorm2d(torch.autograd.Function):
@staticmethod
@custom_fwd
def ... | apex-master | apex/contrib/cudnn_gbn/batch_norm.py |
| apex-master | apex/contrib/test/__init__.py |
| apex-master | apex/contrib/test/index_mul_2d/__init__.py |
import random
import unittest
import torch
HAS_INDEX_MUL_2D_RELU = None
try:
from apex.contrib.index_mul_2d import index_mul_2d
except ImportError as e:
HAS_INDEX_MUL_2D_RELU = False
else:
HAS_INDEX_MUL_2D_RELU = True
@unittest.skipIf(not HAS_INDEX_MUL_2D_RELU, "`apex.contrib.index_mul_2d` is not found.... | apex-master | apex/contrib/test/index_mul_2d/test_index_mul_2d.py |
import copy
import typing
import unittest
import torch
import torch.nn as nn
from torch.testing._internal import common_utils
SKIP_TEST = None
from apex.transformer.testing.distributed_test_base import NcclDistributedTestBase
try:
from apex.contrib.cudnn_gbn import GroupBatchNorm2d as GBN
except ImportError as e:... | apex-master | apex/contrib/test/cudnn_gbn/test_cudnn_gbn_with_two_gpus.py |
| apex-master | apex/contrib/test/cudnn_gbn/__init__.py |
import unittest
import torch
import torch.nn.functional as F
reference_available = True
try:
from torchvision.ops.focal_loss import sigmoid_focal_loss
except ImportError:
reference_available = False
SKIP_TEST = None
try:
from apex.contrib.focal_loss import focal_loss
except ImportError as e:
SKIP_TES... | apex-master | apex/contrib/test/focal_loss/test_focal_loss.py |
| apex-master | apex/contrib/test/focal_loss/__init__.py |
| apex-master | apex/contrib/test/xentropy/__init__.py |
import unittest
import random
import time
import numpy as np
import torch
SKIP_TEST = None
try:
from apex.contrib import xentropy as label_smoothing
except ImportError as e:
SKIP_TEST = e
def label_smoothing_raw(x, target, padding_idx, smoothing):
logprobs = torch.nn.functional.log_softmax(x, dim=-1, d... | apex-master | apex/contrib/test/xentropy/test_label_smoothing.py |
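`label_smoothing_raw` is cut off right after the `log_softmax` call. A self-contained sketch of the usual label-smoothed loss it presumably computes (the padding handling and per-token reduction here are assumptions, not the exact apex reference):

```python
import torch
import torch.nn.functional as F

def label_smoothing_sketch(x, target, padding_idx, smoothing):
    # Smoothed NLL: weight (1 - smoothing) on the target class, with the
    # smoothing mass spread uniformly over all classes; padding tokens
    # contribute zero loss.
    logprobs = F.log_softmax(x, dim=-1, dtype=torch.float32)
    nll = -logprobs.gather(dim=-1, index=target.unsqueeze(1)).squeeze(1)
    smooth_loss = -logprobs.mean(dim=-1)
    loss = (1.0 - smoothing) * nll + smoothing * smooth_loss
    return loss.masked_fill(target == padding_idx, 0.0)
```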
import unittest
import os
import torch
from torch.testing._internal import common_utils
from torch.testing._internal.common_device_type import instantiate_device_type_tests
SKIP_TEST = None
try:
from apex import fused_dense
except ImportError as e:
SKIP_TEST = e
@unittest.skipIf(SKIP_TEST, f"{SKIP_TEST}")
c... | apex-master | apex/contrib/test/fused_dense/test_fused_dense.py |
| apex-master | apex/contrib/test/layer_norm/__init__.py |
import unittest
import torch
SKIP_TEST = None
try:
from apex.contrib.layer_norm.layer_norm import FastLayerNorm
import fast_layer_norm as fln
except ImportError as e:
SKIP_TEST = e
class GPUTimer:
def __init__(self, stream):
self.start_ = torch.cuda.Event(enable_timing=True)
self.sto... | apex-master | apex/contrib/test/layer_norm/test_fast_layer_norm.py |
import os
import inspect
import torch
from torch.cuda.amp import GradScaler
from torch.testing._internal import common_utils
from apex.parallel.distributed import flat_dist_call
from apex.contrib.optimizers.distributed_fused_lamb import DistributedFusedLAMB
from apex.transformer.testing.distributed_test_base import Ncc... | apex-master | apex/contrib/test/optimizers/test_distributed_fused_lamb.py |
| apex-master | apex/contrib/test/optimizers/__init__.py |
from contextlib import contextmanager
import io
from typing import Callable, Optional, Tuple
import unittest
import warnings
import torch
from torch.testing._internal import common_utils
SKIP_TEST = None
try:
from apex.contrib.optimizers.distributed_fused_adam import DistributedFusedAdam
except ImportError as e:
... | apex-master | apex/contrib/test/optimizers/test_dist_adam.py |
import unittest
import torch
from torch.testing._internal import common_utils
from apex.transformer.testing.distributed_test_base import NcclDistributedTestBase
SKIP_TEST = None
try:
from apex.contrib.bottleneck import Bottleneck, SpatialBottleneck
from apex.contrib.bottleneck import HaloExchangerPeer
fro... | apex-master | apex/contrib/test/bottleneck/test_bottleneck_module.py |
| apex-master | apex/contrib/test/bottleneck/__init__.py |
| apex-master | apex/contrib/test/conv_bias_relu/__init__.py |
import copy
import math
import random
import unittest
import torch
import torch.nn.functional as F
HAS_CONV_BIAS_RELU = None
try:
from apex.contrib.conv_bias_relu import ConvBiasReLU, ConvBias, ConvBiasMaskReLU, ConvFrozenScaleBiasReLU
except ImportError as e:
HAS_CONV_BIAS_RELU = False
else:
HAS_CONV_BIA... | apex-master | apex/contrib/test/conv_bias_relu/test_conv_bias_relu.py |
import unittest
import torch
SKIP_TEST = None
try:
from apex.contrib.multihead_attn import SelfMultiheadAttn
except ImportError as e:
SKIP_TEST = e
@unittest.skipIf(SKIP_TEST, f"{SKIP_TEST}")
class SelfMultiheadAttnNormAddTest(unittest.TestCase):
def setUp(self, seed=1234):
torch.manual_seed(see... | apex-master | apex/contrib/test/multihead_attn/test_self_multihead_attn_norm_add.py |
| apex-master | apex/contrib/test/multihead_attn/__init__.py |
import unittest
import torch
import torch.nn.functional as F
SKIP_TEST = None
try:
from apex.contrib.multihead_attn import fast_mask_softmax_dropout_func
except ImportError as e:
SKIP_TEST = e
@unittest.skipIf(SKIP_TEST, f"{SKIP_TEST}")
class FusedSoftmaxTest(unittest.TestCase):
def setUp(self, seed=123... | apex-master | apex/contrib/test/multihead_attn/test_mha_fused_softmax.py |
import unittest
import torch
SKIP_TEST = None
try:
from apex.contrib.multihead_attn import EncdecMultiheadAttn
except ImportError as e:
SKIP_TEST = e
@unittest.skipIf(SKIP_TEST, f"{SKIP_TEST}")
class EncdecMultiheadAttnTest(unittest.TestCase):
def setUp(self, seed=1234):
torch.manual_seed(seed)
... | apex-master | apex/contrib/test/multihead_attn/test_encdec_multihead_attn.py |
import unittest
import torch
SKIP_TEST = None
try:
from apex.contrib.multihead_attn import SelfMultiheadAttn
except ImportError as e:
SKIP_TEST = e
@unittest.skipIf(SKIP_TEST, f"{SKIP_TEST}")
class SelfMultiheadAttnTest(unittest.TestCase):
def setUp(self, seed=1234):
torch.manual_seed(seed)
... | apex-master | apex/contrib/test/multihead_attn/test_fast_self_multihead_attn_bias.py |
import unittest
import torch
SKIP_TEST = None
try:
from apex.contrib.multihead_attn import EncdecMultiheadAttn
except ImportError as e:
SKIP_TEST = e
@unittest.skipIf(SKIP_TEST, f"{SKIP_TEST}")
class EncdecMultiheadAttnNormAddTest(unittest.TestCase):
def setUp(self, seed=1234):
torch.manual_seed... | apex-master | apex/contrib/test/multihead_attn/test_encdec_multihead_attn_norm_add.py |
import unittest
import torch
SKIP_TEST = None
try:
from apex.contrib.multihead_attn import SelfMultiheadAttn
except ImportError as e:
SKIP_TEST = e
@unittest.skipIf(SKIP_TEST, f"{SKIP_TEST}")
class SelfMultiheadAttnTest(unittest.TestCase):
def setUp(self, seed=1234):
torch.manual_seed(seed)
... | apex-master | apex/contrib/test/multihead_attn/test_self_multihead_attn.py |
| apex-master | apex/contrib/test/group_norm/__init__.py |
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) 2011-2023, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are not permit-
# ted.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
# IMP... | apex-master | apex/contrib/test/group_norm/test_group_norm.py |
import random
import unittest
import torch
SKIP_TEST = None
try:
from apex.contrib.clip_grad import clip_grad_norm_
except ImportError as e:
SKIP_TEST = e
def make_params(
num_params,
sizes=[1,2,3,4,5],
num_dims=[1,2,3],
dtypes=[torch.float32],
devices=['cuda'],
... | apex-master | apex/contrib/test/clip_grad/test_clip_grad.py |
| apex-master | apex/contrib/test/clip_grad/__init__.py |
import unittest
import torch
SKIP_TEST = None
try:
from apex.contrib.transducer import TransducerJoint
from apex.contrib.transducer import _transducer_ref as transducer_ref
except ImportError as e:
SKIP_TEST = e
@unittest.skipIf(SKIP_TEST, f"{SKIP_TEST}")
class TransducerJointTest(unittest.TestCase):
... | apex-master | apex/contrib/test/transducer/test_transducer_joint.py |
import unittest
import torch
SKIP_TEST = None
try:
from apex.contrib.transducer import TransducerLoss
from apex.contrib.transducer import _transducer_ref as transducer_ref
except ImportError as e:
SKIP_TEST = e
@unittest.skipIf(SKIP_TEST, f"{SKIP_TEST}")
class TransducerLossTest(unittest.TestCase):
... | apex-master | apex/contrib/test/transducer/test_transducer_loss.py |
| apex-master | apex/contrib/test/transducer/__init__.py |
import unittest
import torch
from torch.testing._internal import common_utils
SKIP_TEST = None
from apex.transformer.testing.distributed_test_base import NcclDistributedTestBase
try:
from apex.contrib.peer_memory import PeerMemoryPool, PeerHaloExchanger1d
except ImportError as e:
SKIP_TEST = e
# How to run:
... | apex-master | apex/contrib/test/peer_memory/test_peer_halo_exchange_module.py |
| apex-master | apex/contrib/test/peer_memory/__init__.py |
###############################################################################
# Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistribution... | apex-master | apex/contrib/test/fmha/test_fmha.py |
| apex-master | apex/contrib/test/fmha/__init__.py |
try:
import torch
import focal_loss_cuda
from .focal_loss import focal_loss
del torch
del focal_loss_cuda
del focal_loss
except ImportError as err:
print("apex was installed without --focal_loss flag, apex.contrib.focal_loss is not available")
| apex-master | apex/contrib/focal_loss/__init__.py |
import torch
import focal_loss_cuda
class FocalLoss(torch.autograd.Function):
@staticmethod
def forward(
ctx,
cls_output,
cls_targets_at_level,
num_positives_sum,
num_real_classes,
alpha,
gamma,
label_smoothing=0.0,
):
loss, partial_... | apex-master | apex/contrib/focal_loss/focal_loss.py |
import torch
import xentropy_cuda
class SoftmaxCrossEntropyLoss(torch.autograd.Function):
@staticmethod
def forward(ctx, logits, labels, smoothing=0.0, padding_idx=0, half_to_float=False):
losses, max_log_sum_exp = xentropy_cuda.forward(
logits, labels, smoothing, half_to_float)
l... | apex-master | apex/contrib/xentropy/softmax_xentropy.py |
from .softmax_xentropy import SoftmaxCrossEntropyLoss
__all__ = [
"SoftmaxCrossEntropyLoss",
]
| apex-master | apex/contrib/xentropy/__init__.py |
from .layer_norm import FastLayerNorm
| apex-master | apex/contrib/layer_norm/__init__.py |
import torch
from torch.nn import init
from apex._autocast_utils import _cast_if_autocast_enabled
import fast_layer_norm
class FastLayerNormFN(torch.autograd.Function):
@staticmethod
def forward(ctx, x, gamma, beta, epsilon):
x = x.contiguous()
gamma = gamma.contiguous()
beta = beta.c... | apex-master | apex/contrib/layer_norm/layer_norm.py |
import types
import torch
import importlib
from apex.multi_tensor_apply import multi_tensor_applier
class FusedAdam(torch.optim.Optimizer):
"""Implements Adam algorithm. Currently GPU-only. Requires Apex to be installed via
``python setup.py install --cuda_ext --cpp_ext``.
It has been proposed in `Adam:... | apex-master | apex/contrib/optimizers/fused_adam.py |
from .fp16_optimizer import FP16_Optimizer
from .fused_adam import FusedAdam
from .fused_lamb import FusedLAMB
| apex-master | apex/contrib/optimizers/__init__.py |
import collections
import contextlib
from dataclasses import dataclass
import enum
import inspect
import io
import itertools
import threading
from typing import Any, Callable, Iterable, List, Optional, Set, Tuple, Union
import warnings
import torch
from torch.distributed.distributed_c10d import _get_default_group
from... | apex-master | apex/contrib/optimizers/distributed_fused_adam.py |
import torch
from apex.multi_tensor_apply import multi_tensor_applier
class FP16_Optimizer(object):
"""
:class:`FP16_Optimizer` A cutdown version of apex.fp16_utils.FP16_Optimizer.
Designed only to wrap apex.contrib.optimizers.FusedAdam, FusedSGD.
Refer to apex.fp16_utils documents for more information... | apex-master | apex/contrib/optimizers/fp16_optimizer.py |
import torch
import importlib
import math
from apex.multi_tensor_apply import multi_tensor_applier
class FusedLAMB(torch.optim.Optimizer):
"""Implements LAMB algorithm.
Currently GPU-only. Requires Apex to be installed via
``pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cu... | apex-master | apex/contrib/optimizers/fused_lamb.py |
import types
import torch
from torch.optim.optimizer import Optimizer, required
from apex.multi_tensor_apply import multi_tensor_applier
class FusedSGD(Optimizer):
r"""Implements stochastic gradient descent (optionally with momentum).
This version of fused SGD implements 2 fusions.
* Fusion of the SGD ... | apex-master | apex/contrib/optimizers/fused_sgd.py |
import os
import math
import inspect
import torch
import importlib
import amp_C
from apex.multi_tensor_apply import multi_tensor_applier
import torch.distributed.distributed_c10d as c10d
# Fallback to private fields if using older PyTorch version
try:
import torch.distributed.distributed_c10d.get_process_group_ra... | apex-master | apex/contrib/optimizers/distributed_fused_lamb.py |
import torch
import torch.distributed as dist
from torch import nn
import nccl_p2p_cuda as inc
import peer_memory_cuda as pm
# Communication free halo exchanger.
# NB! This halo exchanger does not exchange halos with neighbors as it should, it merely swaps the inputs
# NB! This is only useful for performance testing.
... | apex-master | apex/contrib/bottleneck/halo_exchangers.py |
from .bottleneck import Bottleneck, SpatialBottleneck
from .halo_exchangers import HaloExchangerNoComm, HaloExchangerAllGather, HaloExchangerSendRecv, HaloExchangerPeer
| apex-master | apex/contrib/bottleneck/__init__.py |
import torch
from bottleneck import Bottleneck
torch.manual_seed(23337)
# use True to print layerwise sum for all outputs in reference code path
DEBUG = False#True
for stride, o_channel in [(1,32), (1,128), (2,32)]:
print("testing stride ==", stride, ", in_channel == 32 , out_channel ==", o_channel)
a_ = torc... | apex-master | apex/contrib/bottleneck/test.py |
import functools as func
import torch
import torch.distributed as dist
from torch import nn
from apex import check_cudnn_version_and_warn
import fast_bottleneck
import nccl_p2p_cuda as inc
assert check_cudnn_version_and_warn(__name__, 8400)
def kaiming_uniform_(tensor, a=0, mode='fan_in', nonlinearity='leaky_relu... | apex-master | apex/contrib/bottleneck/bottleneck.py |
import pdb
import torch
from torch.autograd import gradcheck
from apex import check_cudnn_version_and_warn
import fused_conv_bias_relu
check_cudnn_version_and_warn(__name__, 8400)
class ConvBiasReLU_(torch.autograd.Function):
@staticmethod
@torch.cuda.amp.custom_fwd(cast_inputs=torch.half)
def forward(... | apex-master | apex/contrib/conv_bias_relu/conv_bias_relu.py |
from .conv_bias_relu import ConvBiasReLU, ConvBias, ConvBiasMaskReLU, ConvFrozenScaleBiasReLU
| apex-master | apex/contrib/conv_bias_relu/__init__.py |
import torch
import fast_multihead_attn
class FastEncdecAttnFunc(torch.autograd.Function):
@staticmethod
def forward(
ctx,
use_time_mask,
is_training,
heads,
inputs_q,
inputs_kv,
input_weights_q,
input_weights_kv,
output_weights,
... | apex-master | apex/contrib/multihead_attn/fast_encdec_multihead_attn_func.py |
import torch
import fast_multihead_attn
class MaskSoftmaxDropout(torch.autograd.Function):
@staticmethod
def forward(ctx, is_training, heads, inputs, pad_mask, mask_additive, dropout_prob):
heads_t = torch.tensor([heads])
dropout_prob_t = torch.tensor([dropout_prob])
null_tensor = tor... | apex-master | apex/contrib/multihead_attn/mask_softmax_dropout_func.py |
import torch
import torch.nn.functional as F
class SelfAttnFunc(torch.autograd.Function):
@staticmethod
def forward(
ctx,
use_time_mask,
is_training,
heads,
scale,
inputs,
input_weights,
output_weights,
input_biases,
output_biases... | apex-master | apex/contrib/multihead_attn/self_multihead_attn_func.py |
from .self_multihead_attn import SelfMultiheadAttn
from .encdec_multihead_attn import EncdecMultiheadAttn
from .mask_softmax_dropout_func import fast_mask_softmax_dropout_func
| apex-master | apex/contrib/multihead_attn/__init__.py |
import math
import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F
from .self_multihead_attn_func import self_attn_func
from .fast_self_multihead_attn_func import fast_self_attn_func
from .fast_self_multihead_attn_norm_add_func import fast_self_attn_norm_add_func
from apex.no... | apex-master | apex/contrib/multihead_attn/self_multihead_attn.py |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import torch
import fast_multihea... | apex-master | apex/contrib/multihead_attn/fast_encdec_multihead_attn_norm_add_func.py |
import torch
import torch.nn.functional as F
class EncdecAttnFunc(torch.autograd.Function):
@staticmethod
def forward(
ctx,
use_time_mask,
is_training,
heads,
scale,
inputs_q,
inputs_kv,
input_weights_q,
input_weights_kv,
output_w... | apex-master | apex/contrib/multihead_attn/encdec_multihead_attn_func.py |
import math
import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F
from .encdec_multihead_attn_func import encdec_attn_func
from .fast_encdec_multihead_attn_func import fast_encdec_attn_func
from .fast_encdec_multihead_attn_norm_add_func import fast_encdec_attn_norm_add_func
... | apex-master | apex/contrib/multihead_attn/encdec_multihead_attn.py |
import torch
import fast_multihead_attn
class FastSelfAttnNormAddFunc(torch.autograd.Function):
@staticmethod
def forward(
ctx,
use_time_mask,
is_training,
heads,
inputs,
lyr_nrm_gamma_weights,
lyr_nrm_beta_weights,
input_weights,
output... | apex-master | apex/contrib/multihead_attn/fast_self_multihead_attn_norm_add_func.py |
import torch
import fast_multihead_attn
class FastSelfAttnFunc(torch.autograd.Function):
@staticmethod
def forward(
ctx,
use_time_mask,
is_training,
heads,
inputs,
input_weights,
output_weights,
input_biases,
output_biases,
pad_m... | apex-master | apex/contrib/multihead_attn/fast_self_multihead_attn_func.py |
import torch
import torch.nn.functional as F
import argparse
from apex.contrib.multihead_attn import SelfMultiheadAttn
from apex.contrib.multihead_attn import EncdecMultiheadAttn
parser = argparse.ArgumentParser(description='Multihead Attention Standalone Test')
parser.add_argument('--seq-length', default=64, type=in... | apex-master | apex/contrib/examples/multihead_attn/perf_test_multihead_attn.py |
import torch
import torch.nn.functional as F
import argparse
from apex.contrib.multihead_attn import SelfMultiheadAttn
from apex.contrib.multihead_attn import EncdecMultiheadAttn
parser = argparse.ArgumentParser(description='Multihead Attention Standalone Test')
parser.add_argument('--seq-length', default=64, type=in... | apex-master | apex/contrib/examples/multihead_attn/func_test_multihead_attn.py |
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) 2011-2023, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are not permit-
# ted.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
# IMP... | apex-master | apex/contrib/group_norm/group_norm.py |
from .group_norm import *
| apex-master | apex/contrib/group_norm/__init__.py |
from .clip_grad import clip_grad_norm_
| apex-master | apex/contrib/clip_grad/__init__.py |
from typing import Union, Iterable
import torch
_kernel_import_succeeded = False
try:
import amp_C
from apex.multi_tensor_apply import multi_tensor_applier
_kernel_import_succeeded = True
except ImportError:
_kernel_import_succeeded = False
_tensor_or_tensors = Union[torch.Tensor, Iterable[torch.Tens... | apex-master | apex/contrib/clip_grad/clip_grad.py |
import torch
import transducer_loss_cuda
import transducer_joint_cuda
class TransducerJoint(torch.nn.Module):
"""Transducer joint
Detail of this loss function can be found in: Sequence Transduction with Recurrent Neural
Networks
Arguments:
pack_output (bool, optional): whether to pack the out... | apex-master | apex/contrib/transducer/transducer.py |
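The TransducerJoint docstring cites Graves, "Sequence Transduction with Recurrent Neural Networks," in which the joint network combines an encoder (transcription) feature f_t with a predictor feature g_u for every (t, u) pair. A reference-style sketch of that broadcast-add joint; the shapes and the purely additive form are assumptions about what the fused kernel accelerates, not its exact API:

```python
import torch

def additive_joint_sketch(f, g):
    """f: (B, T, H) encoder features; g: (B, U, H) predictor features.
    Returns the (B, T, U, H) joint tensor h[b, t, u] = f[b, t] + g[b, u]."""
    return f.unsqueeze(2) + g.unsqueeze(1)

h = additive_joint_sketch(torch.randn(2, 5, 8), torch.randn(2, 3, 8))
assert h.shape == (2, 5, 3, 8)
```

The `pack_output` flag visible above suggests the fused version can also emit this tensor packed by valid (f_len, y_len) ranges to skip padded positions; the truncation hides the details.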
from .transducer import TransducerJoint
from .transducer import TransducerLoss
from . import _transducer_ref
| apex-master | apex/contrib/transducer/__init__.py |
import torch
def transducer_loss_reference(x, label, f_len, y_len, blank_idx, loss_grad):
def log_sum_exp(a, b):
if (a >= b):
return a + torch.log(1 + torch.exp(b-a))
else:
return b + torch.log(1 + torch.exp(a-b))
def forward_alpha(x, label, f_len, y_len, blank_idx):
... | apex-master | apex/contrib/transducer/_transducer_ref.py |
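The `log_sum_exp` helper in the reference loss above uses the numerically stable form log(e^a + e^b) = max(a, b) + log(1 + e^(-|a - b|)), factoring the larger term out of the sum so the exponential never overflows. A tiny demonstration of why the naive form fails:

```python
import torch

def log_sum_exp(a, b):
    # Stable form used by the reference: factor out the larger argument.
    m, n = (a, b) if a >= b else (b, a)
    return m + torch.log1p(torch.exp(n - m))

a, b = torch.tensor(1000.0), torch.tensor(999.0)
print(torch.log(torch.exp(a) + torch.exp(b)))  # inf: exp(1000) overflows float32
print(log_sum_exp(a, b))                       # ~1000.3133, finite
```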
import torch
from apex.contrib.peer_memory import PeerMemoryPool
import peer_memory_cuda as pm
class PeerHaloExchanger1d:
def __init__(self, ranks, rank_in_group, peer_pool, half_halo):
self.peer_group_size = len(ranks)
self.ranks = ranks
self.peer_rank = rank_in_group
self.low_neig... | apex-master | apex/contrib/peer_memory/peer_halo_exchanger_1d.py |
from .peer_memory import PeerMemoryPool
from .peer_halo_exchanger_1d import PeerHaloExchanger1d
| apex-master | apex/contrib/peer_memory/__init__.py |
import torch
import numpy as np
import peer_memory_cuda as pm
class PeerMemoryPool(object):
def __init__(self, static_size, dynamic_size, peer_ranks=None):
rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
ngpus = min(torch.cuda.device_count(), world_size)... | apex-master | apex/contrib/peer_memory/peer_memory.py |
###############################################################################
# Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributio... | apex-master | apex/contrib/fmha/fmha.py |
from .fmha import FMHAFun
| apex-master | apex/contrib/fmha/__init__.py |
import torch
from apex.multi_tensor_apply import multi_tensor_applier
class FusedNovoGrad(torch.optim.Optimizer):
"""Implements NovoGrad algorithm.
Currently GPU-only. Requires Apex to be installed via
``pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./``.
Th... | apex-master | apex/optimizers/fused_novograd.py |
import torch
from apex.multi_tensor_apply import multi_tensor_applier
class FusedAdam(torch.optim.Optimizer):
"""Implements Adam algorithm.
Currently GPU-only. Requires Apex to be installed via
``pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./``.
This versi... | apex-master | apex/optimizers/fused_adam.py |
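Both FusedAdam variants in this dump describe themselves as GPU-only replacements for torch.optim.Adam that require apex to be built with its CUDA/C++ extensions. A minimal usage sketch against the `apex.optimizers` version shown above (the model and hyperparameters are illustrative):

```python
import torch
from apex.optimizers import FusedAdam  # requires apex built with --cpp_ext --cuda_ext

model = torch.nn.Linear(128, 64).cuda()
optimizer = FusedAdam(model.parameters(), lr=1e-3, weight_decay=0.01)

# One dummy training step: the fused kernel applies the update for all
# parameters in a single multi-tensor launch instead of one kernel per tensor.
loss = model(torch.randn(32, 128, device="cuda")).pow(2).mean()
loss.backward()
optimizer.step()
optimizer.zero_grad()
```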
from .fused_sgd import FusedSGD
from .fused_adam import FusedAdam
from .fused_novograd import FusedNovoGrad
from .fused_lamb import FusedLAMB
from .fused_adagrad import FusedAdagrad
from .fused_mixed_precision_lamb import FusedMixedPrecisionLamb
| apex-master | apex/optimizers/__init__.py |
import torch
from apex.multi_tensor_apply import multi_tensor_applier
class FusedAdagrad(torch.optim.Optimizer):
"""Implements Adagrad algorithm.
Currently GPU-only. Requires Apex to be installed via
``pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./``.
This... | apex-master | apex/optimizers/fused_adagrad.py |
import torch
from copy import deepcopy
from itertools import chain
from collections import defaultdict, abc as container_abcs
from apex.multi_tensor_apply import multi_tensor_applier
class FusedMixedPrecisionLamb(torch.optim.Optimizer):
def __init__(self, params, lr=1e-3, step=0, bias_correction=True,
... | apex-master | apex/optimizers/fused_mixed_precision_lamb.py |
import torch
from apex.multi_tensor_apply import multi_tensor_applier
class FusedLAMB(torch.optim.Optimizer):
"""Implements LAMB algorithm.
Currently GPU-only. Requires Apex to be installed via
``pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./``.
This versi... | apex-master | apex/optimizers/fused_lamb.py |