code stringlengths 82 54.1k | code_codestyle int64 0 699 | style_context stringlengths 111 35.6k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
"MIT/ast-finetuned-audioset-10-10-0.4593": (
"https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
),
}
class __SCREAMING_SNAKE_CASE ( lowercase):
__SCREAMING_SNAKE_CASE : int = """audio-spectrogram-transformer"""
def __init__( self : List[Any] , __UpperCamelCase : Tuple=768 , __UpperCamelCase : str=12 , __UpperCamelCase : int=12 , __UpperCamelCase : str=3_072 , __UpperCamelCase : Tuple="gelu" , __UpperCamelCase : List[str]=0.0 , __UpperCamelCase : Union[str, Any]=0.0 , __UpperCamelCase : List[Any]=0.02 , __UpperCamelCase : List[Any]=1e-1_2 , __UpperCamelCase : List[Any]=16 , __UpperCamelCase : Any=True , __UpperCamelCase : Optional[Any]=10 , __UpperCamelCase : Tuple=10 , __UpperCamelCase : Dict=1_024 , __UpperCamelCase : List[Any]=128 , **__UpperCamelCase : Optional[Any] , ):
super().__init__(**__UpperCamelCase )
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = patch_size
_UpperCAmelCase = qkv_bias
_UpperCAmelCase = frequency_stride
_UpperCAmelCase = time_stride
_UpperCAmelCase = max_length
_UpperCAmelCase = num_mel_bins
| 684 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
__lowerCAmelCase = {
"vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
"merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
"tokenizer_config_file": {
"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
},
}
__lowerCAmelCase = {"facebook/blenderbot-3B": 1_2_8}
class __SCREAMING_SNAKE_CASE ( lowercase):
__SCREAMING_SNAKE_CASE : Optional[int] = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : str = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : List[Any] = ["""input_ids""", """attention_mask"""]
__SCREAMING_SNAKE_CASE : List[str] = BlenderbotTokenizer
def __init__( self : Tuple , __UpperCamelCase : List[str]=None , __UpperCamelCase : Union[str, Any]=None , __UpperCamelCase : List[str]=None , __UpperCamelCase : Union[str, Any]="replace" , __UpperCamelCase : Tuple="<s>" , __UpperCamelCase : str="</s>" , __UpperCamelCase : Dict="</s>" , __UpperCamelCase : Union[str, Any]="<s>" , __UpperCamelCase : Union[str, Any]="<unk>" , __UpperCamelCase : Tuple="<pad>" , __UpperCamelCase : Optional[int]="<mask>" , __UpperCamelCase : Union[str, Any]=False , __UpperCamelCase : List[str]=True , **__UpperCamelCase : int , ):
super().__init__(
__UpperCamelCase , __UpperCamelCase , tokenizer_file=__UpperCamelCase , errors=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , sep_token=__UpperCamelCase , cls_token=__UpperCamelCase , unk_token=__UpperCamelCase , pad_token=__UpperCamelCase , mask_token=__UpperCamelCase , add_prefix_space=__UpperCamelCase , trim_offsets=__UpperCamelCase , **__UpperCamelCase , )
_UpperCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , __UpperCamelCase ) != add_prefix_space:
_UpperCAmelCase = getattr(__UpperCamelCase , pre_tok_state.pop("type" ) )
_UpperCAmelCase = add_prefix_space
_UpperCAmelCase = pre_tok_class(**__UpperCamelCase )
_UpperCAmelCase = add_prefix_space
_UpperCAmelCase = "post_processor"
_UpperCAmelCase = getattr(self.backend_tokenizer , __UpperCamelCase , __UpperCamelCase )
if tokenizer_component_instance:
_UpperCAmelCase = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_UpperCAmelCase = tuple(state["sep"] )
if "cls" in state:
_UpperCAmelCase = tuple(state["cls"] )
_UpperCAmelCase = False
if state.get("add_prefix_space" , __UpperCamelCase ) != add_prefix_space:
_UpperCAmelCase = add_prefix_space
_UpperCAmelCase = True
if state.get("trim_offsets" , __UpperCamelCase ) != trim_offsets:
_UpperCAmelCase = trim_offsets
_UpperCAmelCase = True
if changes_to_apply:
_UpperCAmelCase = getattr(__UpperCamelCase , state.pop("type" ) )
_UpperCAmelCase = component_class(**__UpperCamelCase )
setattr(self.backend_tokenizer , __UpperCamelCase , __UpperCamelCase )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def UpperCAmelCase__ ( self : Union[str, Any] ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : Union[str, Any] ):
_UpperCAmelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else value
_UpperCAmelCase = value
def UpperCAmelCase__ ( self : int , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : List[Any] ):
_UpperCAmelCase = kwargs.get("is_split_into_words" , __UpperCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__UpperCamelCase , **__UpperCamelCase )
def UpperCAmelCase__ ( self : Tuple , *__UpperCamelCase : int , **__UpperCamelCase : Union[str, Any] ):
_UpperCAmelCase = kwargs.get("is_split_into_words" , __UpperCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__UpperCamelCase , **__UpperCamelCase )
def UpperCAmelCase__ ( self : str , __UpperCamelCase : str , __UpperCamelCase : Optional[str] = None ):
_UpperCAmelCase = self._tokenizer.model.save(__UpperCamelCase , name=__UpperCamelCase )
return tuple(__UpperCamelCase )
def UpperCAmelCase__ ( self : Optional[Any] , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ):
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ):
return token_ids_a + [self.eos_token_id]
def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : "Conversation" ):
_UpperCAmelCase = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(" " + text )
else:
# Generated responses should contain them already.
inputs.append(__UpperCamelCase )
_UpperCAmelCase = " ".join(__UpperCamelCase )
_UpperCAmelCase = self.encode(__UpperCamelCase )
if len(__UpperCamelCase ) > self.model_max_length:
_UpperCAmelCase = input_ids[-self.model_max_length :]
logger.warning(F'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
| 684 | 1 |
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> str:
if index == r:
for j in range(_lowerCAmelCase ):
print(data[j] , end=" " )
print(" " )
return
# When no more elements are there to put in data[]
if i >= n:
return
# current is included, put next at next location
_UpperCAmelCase = arr[i]
combination_util(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , index + 1 , _lowerCAmelCase , i + 1 )
# current is excluded, replace it with
# next (Note that i+1 is passed, but
# index is not changed)
combination_util(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , i + 1 )
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Tuple:
# A temporary array to store all combination one by one
_UpperCAmelCase = [0] * r
# Print all combination using temporary array 'data[]'
combination_util(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , 0 , _lowerCAmelCase , 0 )
if __name__ == "__main__":
# Driver code to check the function above
__lowerCAmelCase = [1_0, 2_0, 3_0, 4_0, 5_0]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
| 684 |
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
__lowerCAmelCase = logging.get_logger(__name__)
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
_UpperCAmelCase = WavaVecaForSequenceClassification.from_pretrained(_lowerCAmelCase , config=_lowerCAmelCase )
_UpperCAmelCase = downstream_dict["projector.weight"]
_UpperCAmelCase = downstream_dict["projector.bias"]
_UpperCAmelCase = downstream_dict["model.post_net.linear.weight"]
_UpperCAmelCase = downstream_dict["model.post_net.linear.bias"]
return model
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> str:
_UpperCAmelCase = WavaVecaForAudioFrameClassification.from_pretrained(_lowerCAmelCase , config=_lowerCAmelCase )
_UpperCAmelCase = downstream_dict["model.linear.weight"]
_UpperCAmelCase = downstream_dict["model.linear.bias"]
return model
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
_UpperCAmelCase = WavaVecaForXVector.from_pretrained(_lowerCAmelCase , config=_lowerCAmelCase )
_UpperCAmelCase = downstream_dict["connector.weight"]
_UpperCAmelCase = downstream_dict["connector.bias"]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
_UpperCAmelCase = downstream_dict[
F'''model.framelevel_feature_extractor.module.{i}.kernel.weight'''
]
_UpperCAmelCase = downstream_dict[F'''model.framelevel_feature_extractor.module.{i}.kernel.bias''']
_UpperCAmelCase = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
_UpperCAmelCase = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
_UpperCAmelCase = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
_UpperCAmelCase = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
_UpperCAmelCase = downstream_dict["objective.W"]
return model
@torch.no_grad()
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
_UpperCAmelCase = torch.load(_lowerCAmelCase , map_location="cpu" )
_UpperCAmelCase = checkpoint["Downstream"]
_UpperCAmelCase = WavaVecaConfig.from_pretrained(_lowerCAmelCase )
_UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(
_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , do_normalize=_lowerCAmelCase )
_UpperCAmelCase = hf_config.architectures[0]
if arch.endswith("ForSequenceClassification" ):
_UpperCAmelCase = convert_classification(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
elif arch.endswith("ForAudioFrameClassification" ):
_UpperCAmelCase = convert_diarization(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
elif arch.endswith("ForXVector" ):
_UpperCAmelCase = convert_xvector(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
else:
raise NotImplementedError(F'''S3PRL weights conversion is not supported for {arch}''' )
if hf_config.use_weighted_layer_sum:
_UpperCAmelCase = checkpoint["Featurizer"]["weights"]
hf_feature_extractor.save_pretrained(_lowerCAmelCase )
hf_model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
)
parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
__lowerCAmelCase = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 684 | 1 |
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
__lowerCAmelCase = logging.get_logger(__name__)
@add_end_docstrings(lowercase)
class __SCREAMING_SNAKE_CASE ( lowercase):
def __init__( self : List[Any] , *__UpperCamelCase : Optional[Any] , **__UpperCamelCase : Any ):
super().__init__(*__UpperCamelCase , **__UpperCamelCase )
requires_backends(self , "decord" )
self.check_model_type(__UpperCamelCase )
def UpperCAmelCase__ ( self : Any , __UpperCamelCase : Dict=None , __UpperCamelCase : List[Any]=None , __UpperCamelCase : Dict=None ):
_UpperCAmelCase = {}
if frame_sampling_rate is not None:
_UpperCAmelCase = frame_sampling_rate
if num_frames is not None:
_UpperCAmelCase = num_frames
_UpperCAmelCase = {}
if top_k is not None:
_UpperCAmelCase = top_k
return preprocess_params, {}, postprocess_params
def __call__( self : List[str] , __UpperCamelCase : Union[str, List[str]] , **__UpperCamelCase : List[str] ):
return super().__call__(__UpperCamelCase , **__UpperCamelCase )
def UpperCAmelCase__ ( self : Optional[int] , __UpperCamelCase : int , __UpperCamelCase : Union[str, Any]=None , __UpperCamelCase : Optional[Any]=1 ):
if num_frames is None:
_UpperCAmelCase = self.model.config.num_frames
if video.startswith("http://" ) or video.startswith("https://" ):
_UpperCAmelCase = BytesIO(requests.get(__UpperCamelCase ).content )
_UpperCAmelCase = VideoReader(__UpperCamelCase )
videoreader.seek(0 )
_UpperCAmelCase = 0
_UpperCAmelCase = num_frames * frame_sampling_rate - 1
_UpperCAmelCase = np.linspace(__UpperCamelCase , __UpperCamelCase , num=__UpperCamelCase , dtype=np.intaa )
_UpperCAmelCase = videoreader.get_batch(__UpperCamelCase ).asnumpy()
_UpperCAmelCase = list(__UpperCamelCase )
_UpperCAmelCase = self.image_processor(__UpperCamelCase , return_tensors=self.framework )
return model_inputs
def UpperCAmelCase__ ( self : List[Any] , __UpperCamelCase : Union[str, Any] ):
_UpperCAmelCase = self.model(**__UpperCamelCase )
return model_outputs
def UpperCAmelCase__ ( self : List[str] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[Any]=5 ):
if top_k > self.model.config.num_labels:
_UpperCAmelCase = self.model.config.num_labels
if self.framework == "pt":
_UpperCAmelCase = model_outputs.logits.softmax(-1 )[0]
_UpperCAmelCase , _UpperCAmelCase = probs.topk(__UpperCamelCase )
else:
raise ValueError(F'''Unsupported framework: {self.framework}''' )
_UpperCAmelCase = scores.tolist()
_UpperCAmelCase = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(__UpperCamelCase , __UpperCamelCase )]
| 684 |
def __lowerCamelCase ( _lowerCAmelCase ) -> str:
_UpperCAmelCase = []
_UpperCAmelCase = set({"(", "[", "{"} )
_UpperCAmelCase = set({")", "]", "}"} )
_UpperCAmelCase = {"{": "}", "[": "]", "(": ")"}
for i in range(len(_lowerCAmelCase ) ):
if s[i] in open_brackets:
stack.append(s[i] )
elif s[i] in closed_brackets and (
len(_lowerCAmelCase ) == 0 or (len(_lowerCAmelCase ) > 0 and open_to_closed[stack.pop()] != s[i])
):
return False
return len(_lowerCAmelCase ) == 0
def __lowerCamelCase ( ) -> str:
_UpperCAmelCase = input("Enter sequence of brackets: " )
if is_balanced(_lowerCAmelCase ):
print(_lowerCAmelCase , "is balanced" )
else:
print(_lowerCAmelCase , "is not balanced" )
if __name__ == "__main__":
main()
| 684 | 1 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __SCREAMING_SNAKE_CASE ( lowercase):
__SCREAMING_SNAKE_CASE : List[Any] = ["""image_processor""", """tokenizer"""]
__SCREAMING_SNAKE_CASE : str = """LayoutLMv2ImageProcessor"""
__SCREAMING_SNAKE_CASE : Tuple = ("""LayoutXLMTokenizer""", """LayoutXLMTokenizerFast""")
def __init__( self : Optional[int] , __UpperCamelCase : List[Any]=None , __UpperCamelCase : Union[str, Any]=None , **__UpperCamelCase : Optional[int] ):
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __UpperCamelCase , )
_UpperCAmelCase = kwargs.pop("feature_extractor" )
_UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__UpperCamelCase , __UpperCamelCase )
def __call__( self : Dict , __UpperCamelCase : str , __UpperCamelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __UpperCamelCase : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , __UpperCamelCase : Union[List[List[int]], List[List[List[int]]]] = None , __UpperCamelCase : Optional[Union[List[int], List[List[int]]]] = None , __UpperCamelCase : bool = True , __UpperCamelCase : Union[bool, str, PaddingStrategy] = False , __UpperCamelCase : Union[bool, str, TruncationStrategy] = None , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : int = 0 , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : Optional[bool] = None , __UpperCamelCase : Optional[bool] = None , __UpperCamelCase : bool = False , __UpperCamelCase : bool = False , __UpperCamelCase : bool = False , __UpperCamelCase : bool = False , __UpperCamelCase : bool = True , __UpperCamelCase : Optional[Union[str, TensorType]] = None , **__UpperCamelCase : str , ):
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"You cannot provide bounding boxes "
"if you initialized the image processor with apply_ocr set to True." )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError("You cannot return overflowing tokens without returning the offsets mapping." )
# first, apply the image processor
_UpperCAmelCase = self.image_processor(images=__UpperCamelCase , return_tensors=__UpperCamelCase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(__UpperCamelCase , __UpperCamelCase ):
_UpperCAmelCase = [text] # add batch dimension (as the image processor always adds a batch dimension)
_UpperCAmelCase = features["words"]
_UpperCAmelCase = self.tokenizer(
text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=__UpperCamelCase , add_special_tokens=__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , stride=__UpperCamelCase , pad_to_multiple_of=__UpperCamelCase , return_token_type_ids=__UpperCamelCase , return_attention_mask=__UpperCamelCase , return_overflowing_tokens=__UpperCamelCase , return_special_tokens_mask=__UpperCamelCase , return_offsets_mapping=__UpperCamelCase , return_length=__UpperCamelCase , verbose=__UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase , )
# add pixel values
_UpperCAmelCase = features.pop("pixel_values" )
if return_overflowing_tokens is True:
_UpperCAmelCase = self.get_overflowing_images(__UpperCamelCase , encoded_inputs["overflow_to_sample_mapping"] )
_UpperCAmelCase = images
return encoded_inputs
def UpperCAmelCase__ ( self : Optional[int] , __UpperCamelCase : int , __UpperCamelCase : Optional[int] ):
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
_UpperCAmelCase = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(__UpperCamelCase ) != len(__UpperCamelCase ):
raise ValueError(
"Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
F''' {len(__UpperCamelCase )} and {len(__UpperCamelCase )}''' )
return images_with_overflow
def UpperCAmelCase__ ( self : Any , *__UpperCamelCase : List[str] , **__UpperCamelCase : int ):
return self.tokenizer.batch_decode(*__UpperCamelCase , **__UpperCamelCase )
def UpperCAmelCase__ ( self : int , *__UpperCamelCase : Tuple , **__UpperCamelCase : Optional[int] ):
return self.tokenizer.decode(*__UpperCamelCase , **__UpperCamelCase )
@property
def UpperCAmelCase__ ( self : Optional[int] ):
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def UpperCAmelCase__ ( self : Optional[int] ):
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __UpperCamelCase , )
return self.image_processor_class
@property
def UpperCAmelCase__ ( self : Any ):
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __UpperCamelCase , )
return self.image_processor
| 684 |
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> tuple[float, float]:
# Check if the input is valid
if not len(_lowerCAmelCase ) == len(_lowerCAmelCase ) == 3:
raise ValueError("Please enter a valid equation." )
if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0:
raise ValueError("Both a & b of two equations can't be zero." )
# Extract the coefficients
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = equationa
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = equationa
# Calculate the determinants of the matrices
_UpperCAmelCase = aa * ba - aa * ba
_UpperCAmelCase = ca * ba - ca * ba
_UpperCAmelCase = aa * ca - aa * ca
# Check if the system of linear equations has a solution (using Cramer's rule)
if determinant == 0:
if determinant_x == determinant_y == 0:
raise ValueError("Infinite solutions. (Consistent system)" )
else:
raise ValueError("No solution. (Inconsistent system)" )
else:
if determinant_x == determinant_y == 0:
# Trivial solution (Inconsistent system)
return (0.0, 0.0)
else:
_UpperCAmelCase = determinant_x / determinant
_UpperCAmelCase = determinant_y / determinant
# Non-Trivial Solution (Consistent system)
return (x, y)
| 684 | 1 |
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
__lowerCAmelCase = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
__lowerCAmelCase = [ord(letter) for letter in string.ascii_lowercase]
__lowerCAmelCase = {ord(char) for char in VALID_CHARS}
__lowerCAmelCase = ["the", "be", "to", "of", "and", "in", "that", "have"]
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> str | None:
_UpperCAmelCase = ""
_UpperCAmelCase = 42
_UpperCAmelCase = 42
_UpperCAmelCase = 42
for keychar, cipherchar in zip(cycle(_lowerCAmelCase ) , _lowerCAmelCase ):
_UpperCAmelCase = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(_lowerCAmelCase )
return decoded
def __lowerCamelCase ( _lowerCAmelCase ) -> list[str]:
_UpperCAmelCase = []
for key in product(_lowerCAmelCase , repeat=3 ):
_UpperCAmelCase = try_key(_lowerCAmelCase , _lowerCAmelCase )
if encoded is not None:
possibles.append(_lowerCAmelCase )
return possibles
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> list[str]:
return [possible for possible in possibles if common_word in possible.lower()]
def __lowerCamelCase ( _lowerCAmelCase = "p059_cipher.txt" ) -> int:
_UpperCAmelCase = 42
_UpperCAmelCase = 42
_UpperCAmelCase = 42
_UpperCAmelCase = 42
_UpperCAmelCase = Path(_lowerCAmelCase ).parent.joinpath(_lowerCAmelCase ).read_text(encoding="utf-8" )
_UpperCAmelCase = [int(_lowerCAmelCase ) for number in data.strip().split("," )]
_UpperCAmelCase = filter_valid_chars(_lowerCAmelCase )
for common_word in COMMON_WORDS:
_UpperCAmelCase = filter_common_word(_lowerCAmelCase , _lowerCAmelCase )
if len(_lowerCAmelCase ) == 1:
break
_UpperCAmelCase = possibles[0]
return sum(ord(_lowerCAmelCase ) for char in decoded_text )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 684 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
# Initialise PyTorch model
_UpperCAmelCase = RemBertConfig.from_json_file(_lowerCAmelCase )
print("Building PyTorch model from configuration: {}".format(str(_lowerCAmelCase ) ) )
_UpperCAmelCase = RemBertModel(_lowerCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# Save pytorch-model
print("Save PyTorch model to {}".format(_lowerCAmelCase ) )
torch.save(model.state_dict() , _lowerCAmelCase )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--rembert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained RemBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__lowerCAmelCase = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 684 | 1 |
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
__lowerCAmelCase = logging.get_logger(__name__)
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
_UpperCAmelCase = WavaVecaForSequenceClassification.from_pretrained(_lowerCAmelCase , config=_lowerCAmelCase )
_UpperCAmelCase = downstream_dict["projector.weight"]
_UpperCAmelCase = downstream_dict["projector.bias"]
_UpperCAmelCase = downstream_dict["model.post_net.linear.weight"]
_UpperCAmelCase = downstream_dict["model.post_net.linear.bias"]
return model
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> str:
_UpperCAmelCase = WavaVecaForAudioFrameClassification.from_pretrained(_lowerCAmelCase , config=_lowerCAmelCase )
_UpperCAmelCase = downstream_dict["model.linear.weight"]
_UpperCAmelCase = downstream_dict["model.linear.bias"]
return model
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
_UpperCAmelCase = WavaVecaForXVector.from_pretrained(_lowerCAmelCase , config=_lowerCAmelCase )
_UpperCAmelCase = downstream_dict["connector.weight"]
_UpperCAmelCase = downstream_dict["connector.bias"]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
_UpperCAmelCase = downstream_dict[
F'''model.framelevel_feature_extractor.module.{i}.kernel.weight'''
]
_UpperCAmelCase = downstream_dict[F'''model.framelevel_feature_extractor.module.{i}.kernel.bias''']
_UpperCAmelCase = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
_UpperCAmelCase = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
_UpperCAmelCase = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
_UpperCAmelCase = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
_UpperCAmelCase = downstream_dict["objective.W"]
return model
@torch.no_grad()
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
_UpperCAmelCase = torch.load(_lowerCAmelCase , map_location="cpu" )
_UpperCAmelCase = checkpoint["Downstream"]
_UpperCAmelCase = WavaVecaConfig.from_pretrained(_lowerCAmelCase )
_UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(
_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , do_normalize=_lowerCAmelCase )
_UpperCAmelCase = hf_config.architectures[0]
if arch.endswith("ForSequenceClassification" ):
_UpperCAmelCase = convert_classification(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
elif arch.endswith("ForAudioFrameClassification" ):
_UpperCAmelCase = convert_diarization(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
elif arch.endswith("ForXVector" ):
_UpperCAmelCase = convert_xvector(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
else:
raise NotImplementedError(F'''S3PRL weights conversion is not supported for {arch}''' )
if hf_config.use_weighted_layer_sum:
_UpperCAmelCase = checkpoint["Featurizer"]["weights"]
hf_feature_extractor.save_pretrained(_lowerCAmelCase )
hf_model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
)
parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
__lowerCAmelCase = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 684 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __SCREAMING_SNAKE_CASE :
@staticmethod
def UpperCAmelCase__ ( *__UpperCamelCase : Dict , **__UpperCamelCase : Optional[int] ):
pass
@is_pipeline_test
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
__SCREAMING_SNAKE_CASE : List[Any] = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : Any , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] ):
_UpperCAmelCase = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" )
_UpperCAmelCase = [
{
"image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"question": "How many cats are there?",
},
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"question": "How many cats are there?",
},
]
return vqa_pipeline, examples
def UpperCAmelCase__ ( self : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[int] ):
_UpperCAmelCase = vqa_pipeline(__UpperCamelCase , top_k=1 )
self.assertEqual(
__UpperCamelCase , [
[{"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}],
[{"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}],
] , )
@require_torch
def UpperCAmelCase__ ( self : List[Any] ):
_UpperCAmelCase = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" )
_UpperCAmelCase = "./tests/fixtures/tests_samples/COCO/000000039769.png"
_UpperCAmelCase = "How many cats are there?"
_UpperCAmelCase = vqa_pipeline(image=__UpperCamelCase , question="How many cats are there?" , top_k=2 )
self.assertEqual(
__UpperCamelCase , [{"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}, {"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}] )
_UpperCAmelCase = vqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
__UpperCamelCase , [{"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}, {"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}] )
@slow
@require_torch
def UpperCAmelCase__ ( self : List[str] ):
_UpperCAmelCase = pipeline("visual-question-answering" , model="dandelin/vilt-b32-finetuned-vqa" )
_UpperCAmelCase = "./tests/fixtures/tests_samples/COCO/000000039769.png"
_UpperCAmelCase = "How many cats are there?"
_UpperCAmelCase = vqa_pipeline(image=__UpperCamelCase , question=__UpperCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}] )
_UpperCAmelCase = vqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}] )
_UpperCAmelCase = vqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2 , )
@require_tf
@unittest.skip("Visual question answering not implemented in TF" )
def UpperCAmelCase__ ( self : Optional[int] ):
pass
| 684 | 1 |
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
__lowerCAmelCase = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
print("Googling.....")
__lowerCAmelCase = F'''https://www.google.com/search?q={query}&num=100'''
__lowerCAmelCase = requests.get(
url,
headers={"User-Agent": str(UserAgent().random)},
)
try:
__lowerCAmelCase = (
BeautifulSoup(res.text, "html.parser")
.find("div", attrs={"class": "yuRUbf"})
.find("a")
.get("href")
)
except AttributeError:
__lowerCAmelCase = parse_qs(
BeautifulSoup(res.text, "html.parser")
.find("div", attrs={"class": "kCrYT"})
.find("a")
.get("href")
)["url"][0]
webbrowser.open(link)
| 684 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 684 | 1 |
import argparse
import os
import re
__lowerCAmelCase = "src/transformers/models/auto"
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
__lowerCAmelCase = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
__lowerCAmelCase = re.compile(r"\s*\(\s*\"(\S[^\"]+)\"")
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase = False ) -> List[Any]:
with open(_lowerCAmelCase , "r" , encoding="utf-8" ) as f:
_UpperCAmelCase = f.read()
_UpperCAmelCase = content.split("\n" )
_UpperCAmelCase = []
_UpperCAmelCase = 0
while line_idx < len(_lowerCAmelCase ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
_UpperCAmelCase = len(re.search(r"^(\s*)\S" , lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(" " * indent + "(" ):
new_lines.append(lines[line_idx] )
line_idx += 1
_UpperCAmelCase = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
_UpperCAmelCase = line_idx
while not lines[line_idx].startswith(" " * indent + ")" ):
line_idx += 1
blocks.append("\n".join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
_UpperCAmelCase = sorted(_lowerCAmelCase , key=lambda _lowerCAmelCase : _re_identifier.search(_lowerCAmelCase ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(_lowerCAmelCase , "w" , encoding="utf-8" ) as f:
f.write("\n".join(_lowerCAmelCase ) )
elif "\n".join(_lowerCAmelCase ) != content:
return True
def __lowerCamelCase ( _lowerCAmelCase = False ) -> Tuple:
_UpperCAmelCase = [os.path.join(_lowerCAmelCase , _lowerCAmelCase ) for f in os.listdir(_lowerCAmelCase ) if f.endswith(".py" )]
_UpperCAmelCase = [sort_auto_mapping(_lowerCAmelCase , overwrite=_lowerCAmelCase ) for fname in fnames]
if not overwrite and any(_lowerCAmelCase ):
_UpperCAmelCase = [f for f, d in zip(_lowerCAmelCase , _lowerCAmelCase ) if d]
raise ValueError(
F'''The following files have auto mappings that need sorting: {", ".join(_lowerCAmelCase )}. Run `make style` to fix'''
" this." )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
__lowerCAmelCase = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 684 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __SCREAMING_SNAKE_CASE ( lowercase):
__SCREAMING_SNAKE_CASE : str = (UniPCMultistepScheduler,)
__SCREAMING_SNAKE_CASE : Dict = (("""num_inference_steps""", 25),)
def UpperCAmelCase__ ( self : str , **__UpperCamelCase : Any ):
_UpperCAmelCase = {
"num_train_timesteps": 1_000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"solver_order": 2,
"solver_type": "bh2",
}
config.update(**__UpperCamelCase )
return config
def UpperCAmelCase__ ( self : int , __UpperCamelCase : Any=0 , **__UpperCamelCase : Any ):
_UpperCAmelCase = dict(self.forward_default_kwargs )
_UpperCAmelCase = kwargs.pop("num_inference_steps" , __UpperCamelCase )
_UpperCAmelCase = self.dummy_sample
_UpperCAmelCase = 0.1 * sample
_UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase )
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
scheduler.set_timesteps(__UpperCamelCase )
# copy over dummy past residuals
_UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__UpperCamelCase )
_UpperCAmelCase = scheduler_class.from_pretrained(__UpperCamelCase )
new_scheduler.set_timesteps(__UpperCamelCase )
# copy over dummy past residuals
_UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
_UpperCAmelCase , _UpperCAmelCase = sample, sample
for t in range(__UpperCamelCase , time_step + scheduler.config.solver_order + 1 ):
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
_UpperCAmelCase = new_scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : Union[str, Any]=0 , **__UpperCamelCase : List[Any] ):
_UpperCAmelCase = dict(self.forward_default_kwargs )
_UpperCAmelCase = kwargs.pop("num_inference_steps" , __UpperCamelCase )
_UpperCAmelCase = self.dummy_sample
_UpperCAmelCase = 0.1 * sample
_UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase = self.get_scheduler_config()
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
scheduler.set_timesteps(__UpperCamelCase )
# copy over dummy past residuals (must be after setting timesteps)
_UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__UpperCamelCase )
_UpperCAmelCase = scheduler_class.from_pretrained(__UpperCamelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(__UpperCamelCase )
# copy over dummy past residual (must be after setting timesteps)
_UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
_UpperCAmelCase = new_scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : Dict=None , **__UpperCamelCase : Optional[Any] ):
if scheduler is None:
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase )
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase )
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
_UpperCAmelCase = 10
_UpperCAmelCase = self.dummy_model()
_UpperCAmelCase = self.dummy_sample_deter
scheduler.set_timesteps(__UpperCamelCase )
for i, t in enumerate(scheduler.timesteps ):
_UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample
return sample
def UpperCAmelCase__ ( self : List[str] ):
_UpperCAmelCase = dict(self.forward_default_kwargs )
_UpperCAmelCase = kwargs.pop("num_inference_steps" , __UpperCamelCase )
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase = self.get_scheduler_config()
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
_UpperCAmelCase = self.dummy_sample
_UpperCAmelCase = 0.1 * sample
if num_inference_steps is not None and hasattr(__UpperCamelCase , "set_timesteps" ):
scheduler.set_timesteps(__UpperCamelCase )
elif num_inference_steps is not None and not hasattr(__UpperCamelCase , "set_timesteps" ):
_UpperCAmelCase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
_UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
_UpperCAmelCase = scheduler.timesteps[5]
_UpperCAmelCase = scheduler.timesteps[6]
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCAmelCase__ ( self : Union[str, Any] ):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
_UpperCAmelCase = UniPCMultistepScheduler(**self.get_scheduler_config() )
_UpperCAmelCase = self.full_loop(scheduler=__UpperCamelCase )
_UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_mean.item() - 0.2464 ) < 1e-3
_UpperCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config )
_UpperCAmelCase = DEISMultistepScheduler.from_config(scheduler.config )
_UpperCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config )
_UpperCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config )
_UpperCAmelCase = self.full_loop(scheduler=__UpperCamelCase )
_UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_mean.item() - 0.2464 ) < 1e-3
def UpperCAmelCase__ ( self : str ):
for timesteps in [25, 50, 100, 999, 1_000]:
self.check_over_configs(num_train_timesteps=__UpperCamelCase )
def UpperCAmelCase__ ( self : int ):
self.check_over_configs(thresholding=__UpperCamelCase )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=__UpperCamelCase , prediction_type=__UpperCamelCase , sample_max_value=__UpperCamelCase , solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , )
def UpperCAmelCase__ ( self : int ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__UpperCamelCase )
def UpperCAmelCase__ ( self : int ):
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , prediction_type=__UpperCamelCase , )
_UpperCAmelCase = self.full_loop(
solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , prediction_type=__UpperCamelCase , )
assert not torch.isnan(__UpperCamelCase ).any(), "Samples have nan numbers"
def UpperCAmelCase__ ( self : Optional[int] ):
self.check_over_configs(lower_order_final=__UpperCamelCase )
self.check_over_configs(lower_order_final=__UpperCamelCase )
def UpperCAmelCase__ ( self : Optional[Any] ):
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]:
self.check_over_forward(num_inference_steps=__UpperCamelCase , time_step=0 )
def UpperCAmelCase__ ( self : List[str] ):
_UpperCAmelCase = self.full_loop()
_UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_mean.item() - 0.2464 ) < 1e-3
def UpperCAmelCase__ ( self : Optional[Any] ):
_UpperCAmelCase = self.full_loop(prediction_type="v_prediction" )
_UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_mean.item() - 0.1014 ) < 1e-3
def UpperCAmelCase__ ( self : Tuple ):
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config(thresholding=__UpperCamelCase , dynamic_thresholding_ratio=0 )
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
_UpperCAmelCase = 10
_UpperCAmelCase = self.dummy_model()
_UpperCAmelCase = self.dummy_sample_deter.half()
scheduler.set_timesteps(__UpperCamelCase )
for i, t in enumerate(scheduler.timesteps ):
_UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample
assert sample.dtype == torch.floataa
def UpperCAmelCase__ ( self : str , **__UpperCamelCase : Optional[Any] ):
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase )
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
| 684 | 1 |
from __future__ import annotations
def __lowerCamelCase ( _lowerCAmelCase ) -> list:
if len(_lowerCAmelCase ) == 0:
return []
_UpperCAmelCase , _UpperCAmelCase = min(_lowerCAmelCase ), max(_lowerCAmelCase )
_UpperCAmelCase = int(max_value - min_value ) + 1
_UpperCAmelCase = [[] for _ in range(_lowerCAmelCase )]
for i in my_list:
buckets[int(i - min_value )].append(_lowerCAmelCase )
return [v for bucket in buckets for v in sorted(_lowerCAmelCase )]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -1_0, 1_5, 2, -2]) == [-1_0, -2, 0, 1, 2, 1_5]
| 684 |
import math
class __SCREAMING_SNAKE_CASE :
def __init__( self : Union[str, Any] , __UpperCamelCase : List[Any]=0 ): # a graph with Node 0,1,...,N-1
_UpperCAmelCase = n
_UpperCAmelCase = [
[math.inf for j in range(0 , __UpperCamelCase )] for i in range(0 , __UpperCamelCase )
] # adjacency matrix for weight
_UpperCAmelCase = [
[math.inf for j in range(0 , __UpperCamelCase )] for i in range(0 , __UpperCamelCase )
] # dp[i][j] stores minimum distance from i to j
def UpperCAmelCase__ ( self : str , __UpperCamelCase : Tuple , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[str] ):
_UpperCAmelCase = w
def UpperCAmelCase__ ( self : Dict ):
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
_UpperCAmelCase = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def UpperCAmelCase__ ( self : List[str] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Any ):
return self.dp[u][v]
if __name__ == "__main__":
__lowerCAmelCase = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 1_0)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 1_0)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 684 | 1 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
__lowerCAmelCase = get_tests_dir("fixtures")
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
def UpperCAmelCase__ ( self : Dict ):
# A mock response for an HTTP head request to emulate server down
_UpperCAmelCase = mock.Mock()
_UpperCAmelCase = 500
_UpperCAmelCase = {}
_UpperCAmelCase = HTTPError
_UpperCAmelCase = {}
# Download this model to make sure it's in the cache.
_UpperCAmelCase = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=__UpperCamelCase ) as mock_head:
_UpperCAmelCase = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit" )
# This check we did call the fake head request
mock_head.assert_called()
def UpperCAmelCase__ ( self : List[Any] ):
# This test is for deprecated behavior and can be removed in v5
_UpperCAmelCase = ViTImageProcessor.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json" )
def UpperCAmelCase__ ( self : Dict ):
with self.assertRaises(__UpperCamelCase ):
# config is in subfolder, the following should not work without specifying the subfolder
_UpperCAmelCase = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants" )
_UpperCAmelCase = AutoImageProcessor.from_pretrained(
"hf-internal-testing/stable-diffusion-all-variants" , subfolder="feature_extractor" )
self.assertIsNotNone(__UpperCamelCase )
@is_staging_test
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
@classmethod
def UpperCAmelCase__ ( cls : str ):
_UpperCAmelCase = TOKEN
HfFolder.save_token(__UpperCamelCase )
@classmethod
def UpperCAmelCase__ ( cls : Optional[Any] ):
try:
delete_repo(token=cls._token , repo_id="test-image-processor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-image-processor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-image-processor" )
except HTTPError:
pass
def UpperCAmelCase__ ( self : Union[str, Any] ):
_UpperCAmelCase = ViTImageProcessor.from_pretrained(__UpperCamelCase )
image_processor.push_to_hub("test-image-processor" , use_auth_token=self._token )
_UpperCAmelCase = ViTImageProcessor.from_pretrained(F'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-image-processor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__UpperCamelCase , repo_id="test-image-processor" , push_to_hub=__UpperCamelCase , use_auth_token=self._token )
_UpperCAmelCase = ViTImageProcessor.from_pretrained(F'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
def UpperCAmelCase__ ( self : Union[str, Any] ):
_UpperCAmelCase = ViTImageProcessor.from_pretrained(__UpperCamelCase )
image_processor.push_to_hub("valid_org/test-image-processor" , use_auth_token=self._token )
_UpperCAmelCase = ViTImageProcessor.from_pretrained("valid_org/test-image-processor" )
for k, v in image_processor.__dict__.items():
self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-image-processor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__UpperCamelCase , repo_id="valid_org/test-image-processor-org" , push_to_hub=__UpperCamelCase , use_auth_token=self._token )
_UpperCAmelCase = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org" )
for k, v in image_processor.__dict__.items():
self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
def UpperCAmelCase__ ( self : int ):
CustomImageProcessor.register_for_auto_class()
_UpperCAmelCase = CustomImageProcessor.from_pretrained(__UpperCamelCase )
image_processor.push_to_hub("test-dynamic-image-processor" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"} , )
_UpperCAmelCase = AutoImageProcessor.from_pretrained(
F'''{USER}/test-dynamic-image-processor''' , trust_remote_code=__UpperCamelCase )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , "CustomImageProcessor" )
| 684 |
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( lowercase , lowercase , unittest.TestCase):
__SCREAMING_SNAKE_CASE : Dict = VQModel
__SCREAMING_SNAKE_CASE : Optional[int] = """sample"""
@property
def UpperCAmelCase__ ( self : List[str] , __UpperCamelCase : Optional[int]=(32, 32) ):
_UpperCAmelCase = 4
_UpperCAmelCase = 3
_UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__UpperCamelCase )
return {"sample": image}
@property
def UpperCAmelCase__ ( self : Tuple ):
return (3, 32, 32)
@property
def UpperCAmelCase__ ( self : str ):
return (3, 32, 32)
def UpperCAmelCase__ ( self : Dict ):
_UpperCAmelCase = {
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 3,
}
_UpperCAmelCase = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Dict ):
pass
def UpperCAmelCase__ ( self : str ):
pass
def UpperCAmelCase__ ( self : List[str] ):
_UpperCAmelCase , _UpperCAmelCase = VQModel.from_pretrained("fusing/vqgan-dummy" , output_loading_info=__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(__UpperCamelCase )
_UpperCAmelCase = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def UpperCAmelCase__ ( self : List[Any] ):
_UpperCAmelCase = VQModel.from_pretrained("fusing/vqgan-dummy" )
model.to(__UpperCamelCase ).eval()
torch.manual_seed(0 )
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0 )
_UpperCAmelCase = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
_UpperCAmelCase = image.to(__UpperCamelCase )
with torch.no_grad():
_UpperCAmelCase = model(__UpperCamelCase ).sample
_UpperCAmelCase = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
_UpperCAmelCase = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143] )
# fmt: on
self.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
| 684 | 1 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
__lowerCAmelCase = "facebook/wmt19-en-de"
__lowerCAmelCase = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
__lowerCAmelCase = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
__lowerCAmelCase = FSMTForConditionalGeneration(config)
print(F'''num of params {tiny_model.num_parameters()}''')
# Test
__lowerCAmelCase = tokenizer(["Making tiny model"], return_tensors="pt")
__lowerCAmelCase = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))
# Save
__lowerCAmelCase = "tiny-wmt19-en-de"
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-de
| 684 |
import requests
__lowerCAmelCase = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="
def __lowerCamelCase ( _lowerCAmelCase ) -> None:
# fetching a list of articles in json format
_UpperCAmelCase = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page["articles"] , 1 ):
print(F'''{i}.) {article["title"]}''' )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
| 684 | 1 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def __lowerCamelCase ( _lowerCAmelCase ) -> List[str]:
return getitem, k
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]:
return setitem, k, v
def __lowerCamelCase ( _lowerCAmelCase ) -> str:
return delitem, k
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , *_lowerCAmelCase ) -> Optional[int]:
try:
return fun(_lowerCAmelCase , *_lowerCAmelCase ), None
except Exception as e:
return None, e
__lowerCAmelCase = (
_set("key_a", "val_a"),
_set("key_b", "val_b"),
)
__lowerCAmelCase = [
_set("key_a", "val_a"),
_set("key_a", "val_b"),
]
__lowerCAmelCase = [
_set("key_a", "val_a"),
_set("key_b", "val_b"),
_del("key_a"),
_del("key_b"),
_set("key_a", "val_a"),
_del("key_a"),
]
__lowerCAmelCase = [
_get("key_a"),
_del("key_a"),
_set("key_a", "val_a"),
_del("key_a"),
_del("key_a"),
_get("key_a"),
]
__lowerCAmelCase = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
__lowerCAmelCase = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("key_a", "val_b"),
]
@pytest.mark.parametrize(
"operations" , (
pytest.param(_add_items , id="add items" ),
pytest.param(_overwrite_items , id="overwrite items" ),
pytest.param(_delete_items , id="delete items" ),
pytest.param(_access_absent_items , id="access absent items" ),
pytest.param(_add_with_resize_up , id="add with resize up" ),
pytest.param(_add_with_resize_down , id="add with resize down" ),
) , )
def __lowerCamelCase ( _lowerCAmelCase ) -> List[str]:
_UpperCAmelCase = HashMap(initial_block_size=4 )
_UpperCAmelCase = {}
for _, (fun, *args) in enumerate(_lowerCAmelCase ):
_UpperCAmelCase , _UpperCAmelCase = _run_operation(_lowerCAmelCase , _lowerCAmelCase , *_lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase = _run_operation(_lowerCAmelCase , _lowerCAmelCase , *_lowerCAmelCase )
assert my_res == py_res
assert str(_lowerCAmelCase ) == str(_lowerCAmelCase )
assert set(_lowerCAmelCase ) == set(_lowerCAmelCase )
assert len(_lowerCAmelCase ) == len(_lowerCAmelCase )
assert set(my.items() ) == set(py.items() )
def __lowerCamelCase ( ) -> List[Any]:
def is_public(_lowerCAmelCase ) -> bool:
return not name.startswith("_" )
_UpperCAmelCase = {name for name in dir({} ) if is_public(_lowerCAmelCase )}
_UpperCAmelCase = {name for name in dir(HashMap() ) if is_public(_lowerCAmelCase )}
assert dict_public_names > hash_public_names
| 684 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
def UpperCAmelCase__ ( self : Any ):
_UpperCAmelCase = 10
def UpperCAmelCase__ ( self : Optional[int] ):
_UpperCAmelCase = [1, 2, 3, 4]
_UpperCAmelCase = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(__UpperCamelCase , self.block_size , 0 ) , __UpperCamelCase )
def UpperCAmelCase__ ( self : List[Any] ):
_UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
_UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(__UpperCamelCase , self.block_size , 0 ) , __UpperCamelCase )
def UpperCAmelCase__ ( self : int ):
_UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
_UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(__UpperCamelCase , self.block_size , 0 ) , __UpperCamelCase )
def UpperCAmelCase__ ( self : Union[str, Any] ):
_UpperCAmelCase = "It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this."
_UpperCAmelCase , _UpperCAmelCase = process_story(__UpperCamelCase )
self.assertEqual(__UpperCamelCase , [] )
def UpperCAmelCase__ ( self : str ):
_UpperCAmelCase = ""
_UpperCAmelCase , _UpperCAmelCase = process_story(__UpperCamelCase )
self.assertEqual(__UpperCamelCase , [] )
self.assertEqual(__UpperCamelCase , [] )
def UpperCAmelCase__ ( self : Optional[Any] ):
_UpperCAmelCase = (
"It was the year of Our Lord one thousand seven hundred and "
"seventy-five\n\nSpiritual revelations were conceded to England "
"at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
)
_UpperCAmelCase , _UpperCAmelCase = process_story(__UpperCamelCase )
_UpperCAmelCase = [
"It was the year of Our Lord one thousand seven hundred and seventy-five.",
"Spiritual revelations were conceded to England at that favoured period, as at this.",
]
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = ["It was the best of times."]
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def UpperCAmelCase__ ( self : Dict ):
_UpperCAmelCase = torch.tensor([1, 2, 3, 4] )
_UpperCAmelCase = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(__UpperCamelCase , 0 ).numpy() , expected.numpy() )
def UpperCAmelCase__ ( self : Optional[Any] ):
_UpperCAmelCase = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
_UpperCAmelCase = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(__UpperCamelCase , 23 ).numpy() , expected.numpy() )
def UpperCAmelCase__ ( self : Optional[int] ):
_UpperCAmelCase = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
_UpperCAmelCase = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(__UpperCamelCase , 1 ).numpy() , expected.numpy() )
def UpperCAmelCase__ ( self : Dict ):
_UpperCAmelCase = 101
_UpperCAmelCase = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
_UpperCAmelCase = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
_UpperCAmelCase = compute_token_type_ids(__UpperCamelCase , __UpperCamelCase )
np.testing.assert_array_equal(__UpperCamelCase , __UpperCamelCase )
| 684 | 1 |
from __future__ import annotations
import math
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> list:
if len(_lowerCAmelCase ) != 2 or len(a[0] ) != 2 or len(_lowerCAmelCase ) != 2 or len(b[0] ) != 2:
raise Exception("Matrices are not 2x2" )
_UpperCAmelCase = [
[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
[a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
]
return new_matrix
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
return [
[matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(_lowerCAmelCase ) )
]
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
return [
[matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(_lowerCAmelCase ) )
]
def __lowerCamelCase ( _lowerCAmelCase ) -> tuple[list, list, list, list]:
if len(_lowerCAmelCase ) % 2 != 0 or len(a[0] ) % 2 != 0:
raise Exception("Odd matrices are not supported!" )
_UpperCAmelCase = len(_lowerCAmelCase )
_UpperCAmelCase = matrix_length // 2
_UpperCAmelCase = [[a[i][j] for j in range(_lowerCAmelCase , _lowerCAmelCase )] for i in range(_lowerCAmelCase )]
_UpperCAmelCase = [
[a[i][j] for j in range(_lowerCAmelCase , _lowerCAmelCase )] for i in range(_lowerCAmelCase , _lowerCAmelCase )
]
_UpperCAmelCase = [[a[i][j] for j in range(_lowerCAmelCase )] for i in range(_lowerCAmelCase )]
_UpperCAmelCase = [[a[i][j] for j in range(_lowerCAmelCase )] for i in range(_lowerCAmelCase , _lowerCAmelCase )]
return top_left, top_right, bot_left, bot_right
def __lowerCamelCase ( _lowerCAmelCase ) -> tuple[int, int]:
return len(_lowerCAmelCase ), len(matrix[0] )
def __lowerCamelCase ( _lowerCAmelCase ) -> None:
print("\n".join(str(_lowerCAmelCase ) for line in matrix ) )
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> list:
if matrix_dimensions(_lowerCAmelCase ) == (2, 2):
return default_matrix_multiplication(_lowerCAmelCase , _lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = split_matrix(_lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = split_matrix(_lowerCAmelCase )
_UpperCAmelCase = actual_strassen(_lowerCAmelCase , matrix_subtraction(_lowerCAmelCase , _lowerCAmelCase ) )
_UpperCAmelCase = actual_strassen(matrix_addition(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
_UpperCAmelCase = actual_strassen(matrix_addition(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
_UpperCAmelCase = actual_strassen(_lowerCAmelCase , matrix_subtraction(_lowerCAmelCase , _lowerCAmelCase ) )
_UpperCAmelCase = actual_strassen(matrix_addition(_lowerCAmelCase , _lowerCAmelCase ) , matrix_addition(_lowerCAmelCase , _lowerCAmelCase ) )
_UpperCAmelCase = actual_strassen(matrix_subtraction(_lowerCAmelCase , _lowerCAmelCase ) , matrix_addition(_lowerCAmelCase , _lowerCAmelCase ) )
_UpperCAmelCase = actual_strassen(matrix_subtraction(_lowerCAmelCase , _lowerCAmelCase ) , matrix_addition(_lowerCAmelCase , _lowerCAmelCase ) )
_UpperCAmelCase = matrix_addition(matrix_subtraction(matrix_addition(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase ) , _lowerCAmelCase )
_UpperCAmelCase = matrix_addition(_lowerCAmelCase , _lowerCAmelCase )
_UpperCAmelCase = matrix_addition(_lowerCAmelCase , _lowerCAmelCase )
_UpperCAmelCase = matrix_subtraction(matrix_subtraction(matrix_addition(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase ) , _lowerCAmelCase )
# construct the new matrix from our 4 quadrants
_UpperCAmelCase = []
for i in range(len(_lowerCAmelCase ) ):
new_matrix.append(top_left[i] + top_right[i] )
for i in range(len(_lowerCAmelCase ) ):
new_matrix.append(bot_left[i] + bot_right[i] )
return new_matrix
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> list:
if matrix_dimensions(_lowerCAmelCase )[1] != matrix_dimensions(_lowerCAmelCase )[0]:
_UpperCAmelCase = (
"Unable to multiply these matrices, please check the dimensions.\n"
F'''Matrix A: {matrixa}\n'''
F'''Matrix B: {matrixa}'''
)
raise Exception(_lowerCAmelCase )
_UpperCAmelCase = matrix_dimensions(_lowerCAmelCase )
_UpperCAmelCase = matrix_dimensions(_lowerCAmelCase )
if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]:
return [matrixa, matrixa]
_UpperCAmelCase = max(*_lowerCAmelCase , *_lowerCAmelCase )
_UpperCAmelCase = int(math.pow(2 , math.ceil(math.loga(_lowerCAmelCase ) ) ) )
_UpperCAmelCase = matrixa
_UpperCAmelCase = matrixa
# Adding zeros to the matrices so that the arrays dimensions are the same and also
# power of 2
for i in range(0 , _lowerCAmelCase ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , _lowerCAmelCase ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
if i < dimensiona[0]:
for _ in range(dimensiona[1] , _lowerCAmelCase ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
_UpperCAmelCase = actual_strassen(_lowerCAmelCase , _lowerCAmelCase )
# Removing the additional zeros
for i in range(0 , _lowerCAmelCase ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , _lowerCAmelCase ):
final_matrix[i].pop()
else:
final_matrix.pop()
return final_matrix
if __name__ == "__main__":
__lowerCAmelCase = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
__lowerCAmelCase = [[0, 2, 1, 1], [1_6, 2, 3, 3], [2, 2, 7, 7], [1_3, 1_1, 2_2, 4]]
print(strassen(matrixa, matrixa))
| 684 |
from __future__ import annotations
from collections import namedtuple
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> tuple:
_UpperCAmelCase = namedtuple("result" , "name value" )
if (voltage, current, power).count(0 ) != 1:
raise ValueError("Only one argument must be 0" )
elif power < 0:
raise ValueError(
"Power cannot be negative in any electrical/electronics system" )
elif voltage == 0:
return result("voltage" , power / current )
elif current == 0:
return result("current" , power / voltage )
elif power == 0:
return result("power" , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 684 | 1 |
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
__lowerCAmelCase = "0.12" # assumed parallelism: 8
if is_torch_available():
import torch
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None ) -> str:
if rng is None:
_UpperCAmelCase = random.Random()
_UpperCAmelCase = 1
for dim in shape:
total_dims *= dim
_UpperCAmelCase = []
for _ in range(_lowerCAmelCase ):
values.append(rng.randint(0 , vocab_size - 1 ) )
_UpperCAmelCase = np.array(_lowerCAmelCase , dtype=jnp.intaa ).reshape(_lowerCAmelCase )
return output
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase=None ) -> Optional[Any]:
_UpperCAmelCase = ids_tensor(_lowerCAmelCase , vocab_size=2 , rng=_lowerCAmelCase )
# make sure that at least one token is attended to for each batch
_UpperCAmelCase = 1
return attn_mask
@require_flax
class __SCREAMING_SNAKE_CASE :
__SCREAMING_SNAKE_CASE : List[str] = None
__SCREAMING_SNAKE_CASE : Union[str, Any] = ()
def UpperCAmelCase__ ( self : int ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
_UpperCAmelCase = 2
_UpperCAmelCase = inputs["input_ids"].shape[-1] // 2
_UpperCAmelCase = inputs["input_ids"][:max_batch_size, :sequence_length]
_UpperCAmelCase = jnp.ones_like(__UpperCamelCase )
_UpperCAmelCase = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
_UpperCAmelCase = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
_UpperCAmelCase = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def UpperCAmelCase__ ( self : str ):
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self._get_input_ids_and_config()
_UpperCAmelCase = False
_UpperCAmelCase = max_length
_UpperCAmelCase = 0
for model_class in self.all_generative_model_classes:
_UpperCAmelCase = model_class(__UpperCamelCase )
_UpperCAmelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
_UpperCAmelCase = getattr(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = pt_model_class(__UpperCamelCase ).eval()
_UpperCAmelCase = load_flax_weights_in_pytorch_model(__UpperCamelCase , flax_model.params )
_UpperCAmelCase = flax_model.generate(__UpperCamelCase ).sequences
_UpperCAmelCase = pt_model.generate(torch.tensor(__UpperCamelCase , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
_UpperCAmelCase = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def UpperCAmelCase__ ( self : List[Any] ):
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self._get_input_ids_and_config()
_UpperCAmelCase = False
_UpperCAmelCase = max_length
for model_class in self.all_generative_model_classes:
_UpperCAmelCase = model_class(__UpperCamelCase )
_UpperCAmelCase = model.generate(__UpperCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCamelCase )
_UpperCAmelCase = jit(model.generate )
_UpperCAmelCase = jit_generate(__UpperCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase__ ( self : str ):
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self._get_input_ids_and_config()
_UpperCAmelCase = True
_UpperCAmelCase = max_length
for model_class in self.all_generative_model_classes:
_UpperCAmelCase = model_class(__UpperCamelCase )
_UpperCAmelCase = model.generate(__UpperCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCamelCase )
_UpperCAmelCase = jit(model.generate )
_UpperCAmelCase = jit_generate(__UpperCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase__ ( self : Union[str, Any] ):
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self._get_input_ids_and_config()
_UpperCAmelCase = False
_UpperCAmelCase = max_length
_UpperCAmelCase = 2
for model_class in self.all_generative_model_classes:
_UpperCAmelCase = model_class(__UpperCamelCase )
_UpperCAmelCase = model.generate(__UpperCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCamelCase )
_UpperCAmelCase = jit(model.generate )
_UpperCAmelCase = jit_generate(__UpperCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase__ ( self : str ):
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self._get_input_ids_and_config()
_UpperCAmelCase = False
_UpperCAmelCase = max_length
_UpperCAmelCase = 2
_UpperCAmelCase = 2
for model_class in self.all_generative_model_classes:
_UpperCAmelCase = model_class(__UpperCamelCase )
_UpperCAmelCase = model.generate(__UpperCamelCase ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def UpperCAmelCase__ ( self : List[str] ):
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self._get_input_ids_and_config()
_UpperCAmelCase = True
_UpperCAmelCase = max_length
_UpperCAmelCase = 0.8
_UpperCAmelCase = 10
_UpperCAmelCase = 0.3
_UpperCAmelCase = 1
_UpperCAmelCase = 8
_UpperCAmelCase = 9
for model_class in self.all_generative_model_classes:
_UpperCAmelCase = model_class(__UpperCamelCase )
_UpperCAmelCase = model.generate(__UpperCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCamelCase )
_UpperCAmelCase = jit(model.generate )
_UpperCAmelCase = jit_generate(__UpperCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase__ ( self : str ):
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self._get_input_ids_and_config()
_UpperCAmelCase = max_length
_UpperCAmelCase = 1
_UpperCAmelCase = 8
_UpperCAmelCase = 9
for model_class in self.all_generative_model_classes:
_UpperCAmelCase = model_class(__UpperCamelCase )
_UpperCAmelCase = model.generate(__UpperCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCamelCase )
_UpperCAmelCase = jit(model.generate )
_UpperCAmelCase = jit_generate(__UpperCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase__ ( self : int ):
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self._get_input_ids_and_config()
_UpperCAmelCase = max_length
_UpperCAmelCase = 2
_UpperCAmelCase = 1
_UpperCAmelCase = 8
_UpperCAmelCase = 9
for model_class in self.all_generative_model_classes:
_UpperCAmelCase = model_class(__UpperCamelCase )
_UpperCAmelCase = model.generate(__UpperCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCamelCase )
_UpperCAmelCase = jit(model.generate )
_UpperCAmelCase = jit_generate(__UpperCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase__ ( self : Optional[int] ):
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self._get_input_ids_and_config()
# pad attention mask on the left
_UpperCAmelCase = attention_mask.at[(0, 0)].set(0 )
_UpperCAmelCase = False
_UpperCAmelCase = max_length
for model_class in self.all_generative_model_classes:
_UpperCAmelCase = model_class(__UpperCamelCase )
_UpperCAmelCase = model.generate(__UpperCamelCase , attention_mask=__UpperCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCamelCase )
_UpperCAmelCase = jit(model.generate )
_UpperCAmelCase = jit_generate(__UpperCamelCase , attention_mask=__UpperCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase__ ( self : Optional[int] ):
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self._get_input_ids_and_config()
# pad attention mask on the left
_UpperCAmelCase = attention_mask.at[(0, 0)].set(0 )
_UpperCAmelCase = True
_UpperCAmelCase = max_length
for model_class in self.all_generative_model_classes:
_UpperCAmelCase = model_class(__UpperCamelCase )
_UpperCAmelCase = model.generate(__UpperCamelCase , attention_mask=__UpperCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCamelCase )
_UpperCAmelCase = jit(model.generate )
_UpperCAmelCase = jit_generate(__UpperCamelCase , attention_mask=__UpperCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase__ ( self : Any ):
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self._get_input_ids_and_config()
# pad attention mask on the left
_UpperCAmelCase = attention_mask.at[(0, 0)].set(0 )
_UpperCAmelCase = 2
_UpperCAmelCase = max_length
for model_class in self.all_generative_model_classes:
_UpperCAmelCase = model_class(__UpperCamelCase )
_UpperCAmelCase = model.generate(__UpperCamelCase , attention_mask=__UpperCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCamelCase )
_UpperCAmelCase = jit(model.generate )
_UpperCAmelCase = jit_generate(__UpperCamelCase , attention_mask=__UpperCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
def UpperCAmelCase__ ( self : Dict ):
_UpperCAmelCase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert" )
_UpperCAmelCase = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only" )
_UpperCAmelCase = "Hello world"
_UpperCAmelCase = tokenizer(__UpperCamelCase , return_tensors="np" ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(__UpperCamelCase , "do_samples" ):
model.generate(__UpperCamelCase , do_samples=__UpperCamelCase )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(__UpperCamelCase , "foo" ):
_UpperCAmelCase = {"foo": "bar"}
model.generate(__UpperCamelCase , **__UpperCamelCase )
| 684 |
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def __lowerCamelCase ( _lowerCAmelCase ) -> Any:
_UpperCAmelCase = {}
_UpperCAmelCase = job["started_at"]
_UpperCAmelCase = job["completed_at"]
_UpperCAmelCase = date_parser.parse(_lowerCAmelCase )
_UpperCAmelCase = date_parser.parse(_lowerCAmelCase )
_UpperCAmelCase = round((end_datetime - start_datetime).total_seconds() / 60.0 )
_UpperCAmelCase = start
_UpperCAmelCase = end
_UpperCAmelCase = duration_in_min
return job_info
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase=None ) -> str:
_UpperCAmelCase = None
if token is not None:
_UpperCAmelCase = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}
_UpperCAmelCase = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
_UpperCAmelCase = requests.get(_lowerCAmelCase , headers=_lowerCAmelCase ).json()
_UpperCAmelCase = {}
try:
job_time.update({job["name"]: extract_time_from_single_job(_lowerCAmelCase ) for job in result["jobs"]} )
_UpperCAmelCase = math.ceil((result["total_count"] - 100) / 100 )
for i in range(_lowerCAmelCase ):
_UpperCAmelCase = requests.get(url + F'''&page={i + 2}''' , headers=_lowerCAmelCase ).json()
job_time.update({job["name"]: extract_time_from_single_job(_lowerCAmelCase ) for job in result["jobs"]} )
return job_time
except Exception:
print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
__lowerCAmelCase = parser.parse_args()
__lowerCAmelCase = get_job_time(args.workflow_run_id)
__lowerCAmelCase = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(F'''{k}: {v["duration"]}''')
| 684 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__lowerCAmelCase = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( lowercase):
__SCREAMING_SNAKE_CASE : Dict = ["""pixel_values"""]
def __init__( self : List[str] , __UpperCamelCase : bool = True , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : float = None , __UpperCamelCase : PILImageResampling = PILImageResampling.BILINEAR , __UpperCamelCase : bool = True , __UpperCamelCase : Union[int, float] = 1 / 255 , __UpperCamelCase : bool = True , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , **__UpperCamelCase : Tuple , ):
super().__init__(**__UpperCamelCase )
_UpperCAmelCase = size if size is not None else {"shortest_edge": 384}
_UpperCAmelCase = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
# Default value set here for backwards compatibility where the value in config is None
_UpperCAmelCase = crop_pct if crop_pct is not None else 224 / 256
_UpperCAmelCase = resample
_UpperCAmelCase = do_rescale
_UpperCAmelCase = rescale_factor
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCAmelCase__ ( self : List[Any] , __UpperCamelCase : np.ndarray , __UpperCamelCase : Dict[str, int] , __UpperCamelCase : float , __UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : Tuple , ):
_UpperCAmelCase = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
if "shortest_edge" not in size:
raise ValueError(F'''Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}''' )
_UpperCAmelCase = size["shortest_edge"]
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
_UpperCAmelCase = int(shortest_edge / crop_pct )
_UpperCAmelCase = get_resize_output_image_size(__UpperCamelCase , size=__UpperCamelCase , default_to_square=__UpperCamelCase )
_UpperCAmelCase = resize(image=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=__UpperCamelCase , size=(shortest_edge, shortest_edge) , data_format=__UpperCamelCase , **__UpperCamelCase )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
__UpperCamelCase , size=(shortest_edge, shortest_edge) , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def UpperCAmelCase__ ( self : Optional[Any] , __UpperCamelCase : np.ndarray , __UpperCamelCase : Union[int, float] , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : int , ):
return rescale(__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def UpperCAmelCase__ ( self : List[Any] , __UpperCamelCase : np.ndarray , __UpperCamelCase : Union[float, List[float]] , __UpperCamelCase : Union[float, List[float]] , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : Optional[int] , ):
return normalize(__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def UpperCAmelCase__ ( self : Optional[Any] , __UpperCamelCase : ImageInput , __UpperCamelCase : bool = None , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : float = None , __UpperCamelCase : PILImageResampling = None , __UpperCamelCase : bool = None , __UpperCamelCase : float = None , __UpperCamelCase : bool = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : Optional[Union[str, TensorType]] = None , __UpperCamelCase : ChannelDimension = ChannelDimension.FIRST , **__UpperCamelCase : int , ):
_UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase = crop_pct if crop_pct is not None else self.crop_pct
_UpperCAmelCase = resample if resample is not None else self.resample
_UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase = image_std if image_std is not None else self.image_std
_UpperCAmelCase = size if size is not None else self.size
_UpperCAmelCase = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
_UpperCAmelCase = make_list_of_images(__UpperCamelCase )
if not valid_images(__UpperCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError("crop_pct must be specified if size < 384." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
_UpperCAmelCase = [to_numpy_array(__UpperCamelCase ) for image in images]
if do_resize:
_UpperCAmelCase = [self.resize(image=__UpperCamelCase , size=__UpperCamelCase , crop_pct=__UpperCamelCase , resample=__UpperCamelCase ) for image in images]
if do_rescale:
_UpperCAmelCase = [self.rescale(image=__UpperCamelCase , scale=__UpperCamelCase ) for image in images]
if do_normalize:
_UpperCAmelCase = [self.normalize(image=__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase ) for image in images]
_UpperCAmelCase = [to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase ) for image in images]
_UpperCAmelCase = {"pixel_values": images}
return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase )
| 684 |
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
__lowerCAmelCase = {
"gwf-440k": {
"url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
"sample_rate": 4_8_0_0_0,
"sample_size": 6_5_5_3_6,
},
"jmann-small-190k": {
"url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
"sample_rate": 4_8_0_0_0,
"sample_size": 6_5_5_3_6,
},
"jmann-large-580k": {
"url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
"sample_rate": 4_8_0_0_0,
"sample_size": 1_3_1_0_7_2,
},
"maestro-uncond-150k": {
"url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
"sample_rate": 1_6_0_0_0,
"sample_size": 6_5_5_3_6,
},
"unlocked-uncond-250k": {
"url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
"sample_rate": 1_6_0_0_0,
"sample_size": 6_5_5_3_6,
},
"honk-140k": {
"url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
"sample_rate": 1_6_0_0_0,
"sample_size": 6_5_5_3_6,
},
}
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
return torch.atana(_lowerCAmelCase , _lowerCAmelCase ) / math.pi * 2
def __lowerCamelCase ( _lowerCAmelCase ) -> Union[str, Any]:
_UpperCAmelCase = torch.sin(t * math.pi / 2 ) ** 2
_UpperCAmelCase = (1 - sigma**2) ** 0.5
return alpha_sigma_to_t(_lowerCAmelCase , _lowerCAmelCase )
class __SCREAMING_SNAKE_CASE ( lowercase):
pass
class __SCREAMING_SNAKE_CASE ( nn.Module):
def __init__( self : str , __UpperCamelCase : Optional[int] ):
super().__init__()
_UpperCAmelCase = DiffusionAttnUnetaD(__UpperCamelCase , n_attn_layers=4 )
_UpperCAmelCase = deepcopy(self.diffusion )
_UpperCAmelCase = torch.quasirandom.SobolEngine(1 , scramble=__UpperCamelCase )
def __lowerCamelCase ( _lowerCAmelCase ) -> int:
_UpperCAmelCase = MODELS_MAP[model_name]["url"]
os.system(F'''wget {url} ./''' )
return F'''./{model_name}.ckpt'''
__lowerCAmelCase = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
}
__lowerCAmelCase = {
"8": "resnets.0",
"9": "attentions.0",
"10": "resnets.1",
"11": "attentions.1",
"12": "resnets.2",
"13": "attentions.2",
}
__lowerCAmelCase = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
"8": "resnets.3",
"9": "attentions.3",
"10": "resnets.4",
"11": "attentions.4",
"12": "resnets.5",
"13": "attentions.5",
}
__lowerCAmelCase = {
"0": "resnets.0",
"1": "resnets.1",
"2": "resnets.2",
"4": "resnets.0",
"5": "resnets.1",
"6": "resnets.2",
}
__lowerCAmelCase = {
"skip": "conv_skip",
"main.0": "conv_1",
"main.1": "group_norm_1",
"main.3": "conv_2",
"main.4": "group_norm_2",
}
__lowerCAmelCase = {
"norm": "group_norm",
"qkv_proj": ["query", "key", "value"],
"out_proj": ["proj_attn"],
}
def __lowerCamelCase ( _lowerCAmelCase ) -> List[str]:
if name.startswith("skip" ):
return name.replace("skip" , RES_CONV_MAP["skip"] )
# name has to be of format main.{digit}
if not name.startswith("main." ):
raise ValueError(F'''ResConvBlock error with {name}''' )
return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def __lowerCamelCase ( _lowerCAmelCase ) -> Optional[Any]:
for key, value in ATTN_MAP.items():
if name.startswith(_lowerCAmelCase ) and not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
return name.replace(_lowerCAmelCase , _lowerCAmelCase )
elif name.startswith(_lowerCAmelCase ):
return [name.replace(_lowerCAmelCase , _lowerCAmelCase ) for v in value]
raise ValueError(F'''Attn error with {name}''' )
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase=13 ) -> List[Any]:
_UpperCAmelCase = input_string
if string.split("." )[0] == "timestep_embed":
return string.replace("timestep_embed" , "time_proj" )
_UpperCAmelCase = 0
if string.startswith("net.3." ):
depth += 1
_UpperCAmelCase = string[6:]
elif string.startswith("net." ):
_UpperCAmelCase = string[4:]
while string.startswith("main.7." ):
depth += 1
_UpperCAmelCase = string[7:]
if string.startswith("main." ):
_UpperCAmelCase = string[5:]
# mid block
if string[:2].isdigit():
_UpperCAmelCase = string[:2]
_UpperCAmelCase = string[2:]
else:
_UpperCAmelCase = string[0]
_UpperCAmelCase = string[1:]
if depth == max_depth:
_UpperCAmelCase = MID_NUM_TO_LAYER[layer_num]
_UpperCAmelCase = "mid_block"
elif depth > 0 and int(_lowerCAmelCase ) < 7:
_UpperCAmelCase = DOWN_NUM_TO_LAYER[layer_num]
_UpperCAmelCase = F'''down_blocks.{depth}'''
elif depth > 0 and int(_lowerCAmelCase ) > 7:
_UpperCAmelCase = UP_NUM_TO_LAYER[layer_num]
_UpperCAmelCase = F'''up_blocks.{max_depth - depth - 1}'''
elif depth == 0:
_UpperCAmelCase = DEPTH_0_TO_LAYER[layer_num]
_UpperCAmelCase = F'''up_blocks.{max_depth - 1}''' if int(_lowerCAmelCase ) > 3 else "down_blocks.0"
if not string_left.startswith("." ):
raise ValueError(F'''Naming error with {input_string} and string_left: {string_left}.''' )
_UpperCAmelCase = string_left[1:]
if "resnets" in new_layer:
_UpperCAmelCase = convert_resconv_naming(_lowerCAmelCase )
elif "attentions" in new_layer:
_UpperCAmelCase = convert_attn_naming(_lowerCAmelCase )
_UpperCAmelCase = new_string_left
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_UpperCAmelCase = prefix + "." + new_layer + "." + string_left
else:
_UpperCAmelCase = [prefix + "." + new_layer + "." + s for s in string_left]
return new_string
def __lowerCamelCase ( _lowerCAmelCase ) -> Optional[int]:
_UpperCAmelCase = {}
for k, v in state_dict.items():
if k.endswith("kernel" ):
# up- and downsample layers, don't have trainable weights
continue
_UpperCAmelCase = rename(_lowerCAmelCase )
# check if we need to transform from Conv => Linear for attention
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_UpperCAmelCase = transform_conv_attns(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
else:
_UpperCAmelCase = v
return new_state_dict
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]:
if len(_lowerCAmelCase ) == 1:
if len(v.shape ) == 3:
# weight
_UpperCAmelCase = v[:, :, 0]
else:
# bias
_UpperCAmelCase = v
else:
# qkv matrices
_UpperCAmelCase = v.shape[0]
_UpperCAmelCase = trippled_shape // 3
for i in range(3 ):
if len(v.shape ) == 3:
_UpperCAmelCase = v[i * single_shape : (i + 1) * single_shape, :, 0]
else:
_UpperCAmelCase = v[i * single_shape : (i + 1) * single_shape]
return new_state_dict
def __lowerCamelCase ( _lowerCAmelCase ) -> Tuple:
_UpperCAmelCase = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
_UpperCAmelCase = args.model_path.split("/" )[-1].split("." )[0]
if not os.path.isfile(args.model_path ):
assert (
model_name == args.model_path
), F'''Make sure to provide one of the official model names {MODELS_MAP.keys()}'''
_UpperCAmelCase = download(_lowerCAmelCase )
_UpperCAmelCase = MODELS_MAP[model_name]["sample_rate"]
_UpperCAmelCase = MODELS_MAP[model_name]["sample_size"]
_UpperCAmelCase = Object()
_UpperCAmelCase = sample_size
_UpperCAmelCase = sample_rate
_UpperCAmelCase = 0
_UpperCAmelCase = UNetaDModel(sample_size=_lowerCAmelCase , sample_rate=_lowerCAmelCase )
_UpperCAmelCase = diffusers_model.state_dict()
_UpperCAmelCase = DiffusionUncond(_lowerCAmelCase )
orig_model.load_state_dict(torch.load(args.model_path , map_location=_lowerCAmelCase )["state_dict"] )
_UpperCAmelCase = orig_model.diffusion_ema.eval()
_UpperCAmelCase = orig_model.state_dict()
_UpperCAmelCase = rename_orig_weights(_lowerCAmelCase )
_UpperCAmelCase = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
_UpperCAmelCase = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
assert len(_lowerCAmelCase ) == 0, F'''Problem with {renamed_minus_diffusers}'''
assert all(k.endswith("kernel" ) for k in list(_lowerCAmelCase ) ), F'''Problem with {diffusers_minus_renamed}'''
for key, value in renamed_state_dict.items():
assert (
diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
), F'''Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'''
if key == "time_proj.weight":
_UpperCAmelCase = value.squeeze()
_UpperCAmelCase = value
diffusers_model.load_state_dict(_lowerCAmelCase )
_UpperCAmelCase = 100
_UpperCAmelCase = 33
_UpperCAmelCase = IPNDMScheduler(num_train_timesteps=_lowerCAmelCase )
_UpperCAmelCase = torch.manual_seed(_lowerCAmelCase )
_UpperCAmelCase = torch.randn([1, 2, config.sample_size] , generator=_lowerCAmelCase ).to(_lowerCAmelCase )
_UpperCAmelCase = torch.linspace(1 , 0 , steps + 1 , device=_lowerCAmelCase )[:-1]
_UpperCAmelCase = get_crash_schedule(_lowerCAmelCase )
_UpperCAmelCase = DanceDiffusionPipeline(unet=_lowerCAmelCase , scheduler=_lowerCAmelCase )
_UpperCAmelCase = torch.manual_seed(33 )
_UpperCAmelCase = pipe(num_inference_steps=_lowerCAmelCase , generator=_lowerCAmelCase ).audios
_UpperCAmelCase = sampling.iplms_sample(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , {} )
_UpperCAmelCase = generated.clamp(-1 , 1 )
_UpperCAmelCase = (generated - audio).abs().sum()
_UpperCAmelCase = (generated - audio).abs().max()
if args.save:
pipe.save_pretrained(args.checkpoint_path )
print("Diff sum" , _lowerCAmelCase )
print("Diff max" , _lowerCAmelCase )
assert diff_max < 1E-3, F'''Diff max: {diff_max} is too much :-/'''
print(F'''Conversion for {model_name} successful!''' )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
parser.add_argument(
"--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
)
parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
__lowerCAmelCase = parser.parse_args()
main(args)
| 684 | 1 |
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __SCREAMING_SNAKE_CASE ( lowercase , unittest.TestCase):
__SCREAMING_SNAKE_CASE : List[str] = VideoToVideoSDPipeline
__SCREAMING_SNAKE_CASE : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"""video"""}) - {"""image""", """width""", """height"""}
__SCREAMING_SNAKE_CASE : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""video"""}) - {"""image"""}
__SCREAMING_SNAKE_CASE : int = PipelineTesterMixin.required_optional_params - {"""latents"""}
__SCREAMING_SNAKE_CASE : List[Any] = False
# No `output_type`.
__SCREAMING_SNAKE_CASE : Union[str, Any] = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""return_dict""",
"""callback""",
"""callback_steps""",
])
def UpperCAmelCase__ ( self : Optional[int] ):
torch.manual_seed(0 )
_UpperCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") , up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") , cross_attention_dim=32 , attention_head_dim=4 , )
_UpperCAmelCase = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=__UpperCamelCase , set_alpha_to_one=__UpperCamelCase , )
torch.manual_seed(0 )
_UpperCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
_UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="gelu" , projection_dim=512 , )
_UpperCAmelCase = CLIPTextModel(__UpperCamelCase )
_UpperCAmelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_UpperCAmelCase = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def UpperCAmelCase__ ( self : Any , __UpperCamelCase : Optional[int] , __UpperCamelCase : Tuple=0 ):
# 3 frames
_UpperCAmelCase = floats_tensor((1, 3, 3, 32, 32) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase )
if str(__UpperCamelCase ).startswith("mps" ):
_UpperCAmelCase = torch.manual_seed(__UpperCamelCase )
else:
_UpperCAmelCase = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase )
_UpperCAmelCase = {
"prompt": "A painting of a squirrel eating a burger",
"video": video,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "pt",
}
return inputs
def UpperCAmelCase__ ( self : int ):
_UpperCAmelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = VideoToVideoSDPipeline(**__UpperCamelCase )
_UpperCAmelCase = sd_pipe.to(__UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCamelCase )
_UpperCAmelCase = self.get_dummy_inputs(__UpperCamelCase )
_UpperCAmelCase = "np"
_UpperCAmelCase = sd_pipe(**__UpperCamelCase ).frames
_UpperCAmelCase = frames[0][-3:, -3:, -1]
assert frames[0].shape == (32, 32, 3)
_UpperCAmelCase = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def UpperCAmelCase__ ( self : Tuple ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__UpperCamelCase , expected_max_diff=5e-3 )
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def UpperCAmelCase__ ( self : Optional[Any] ):
pass
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def UpperCAmelCase__ ( self : List[str] ):
pass
@unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." )
def UpperCAmelCase__ ( self : List[Any] ):
pass
def UpperCAmelCase__ ( self : Optional[Any] ):
return super().test_progress_bar()
@slow
@skip_mps
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
def UpperCAmelCase__ ( self : Union[str, Any] ):
_UpperCAmelCase = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL" , torch_dtype=torch.floataa )
pipe.enable_model_cpu_offload()
# 10 frames
_UpperCAmelCase = torch.Generator(device="cpu" ).manual_seed(0 )
_UpperCAmelCase = torch.randn((1, 10, 3, 1_024, 576) , generator=__UpperCamelCase )
_UpperCAmelCase = video.to("cuda" )
_UpperCAmelCase = "Spiderman is surfing"
_UpperCAmelCase = pipe(__UpperCamelCase , video=__UpperCamelCase , generator=__UpperCamelCase , num_inference_steps=3 , output_type="pt" ).frames
_UpperCAmelCase = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1e-2
| 684 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
__lowerCAmelCase = get_tests_dir("fixtures")
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
def UpperCAmelCase__ ( self : Dict ):
# A mock response for an HTTP head request to emulate server down
_UpperCAmelCase = mock.Mock()
_UpperCAmelCase = 500
_UpperCAmelCase = {}
_UpperCAmelCase = HTTPError
_UpperCAmelCase = {}
# Download this model to make sure it's in the cache.
_UpperCAmelCase = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=__UpperCamelCase ) as mock_head:
_UpperCAmelCase = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit" )
# This check we did call the fake head request
mock_head.assert_called()
def UpperCAmelCase__ ( self : List[Any] ):
# This test is for deprecated behavior and can be removed in v5
_UpperCAmelCase = ViTImageProcessor.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json" )
def UpperCAmelCase__ ( self : Dict ):
with self.assertRaises(__UpperCamelCase ):
# config is in subfolder, the following should not work without specifying the subfolder
_UpperCAmelCase = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants" )
_UpperCAmelCase = AutoImageProcessor.from_pretrained(
"hf-internal-testing/stable-diffusion-all-variants" , subfolder="feature_extractor" )
self.assertIsNotNone(__UpperCamelCase )
@is_staging_test
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
@classmethod
def UpperCAmelCase__ ( cls : str ):
_UpperCAmelCase = TOKEN
HfFolder.save_token(__UpperCamelCase )
@classmethod
def UpperCAmelCase__ ( cls : Optional[Any] ):
try:
delete_repo(token=cls._token , repo_id="test-image-processor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-image-processor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-image-processor" )
except HTTPError:
pass
def UpperCAmelCase__ ( self : Union[str, Any] ):
_UpperCAmelCase = ViTImageProcessor.from_pretrained(__UpperCamelCase )
image_processor.push_to_hub("test-image-processor" , use_auth_token=self._token )
_UpperCAmelCase = ViTImageProcessor.from_pretrained(F'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-image-processor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__UpperCamelCase , repo_id="test-image-processor" , push_to_hub=__UpperCamelCase , use_auth_token=self._token )
_UpperCAmelCase = ViTImageProcessor.from_pretrained(F'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
def UpperCAmelCase__ ( self : Union[str, Any] ):
_UpperCAmelCase = ViTImageProcessor.from_pretrained(__UpperCamelCase )
image_processor.push_to_hub("valid_org/test-image-processor" , use_auth_token=self._token )
_UpperCAmelCase = ViTImageProcessor.from_pretrained("valid_org/test-image-processor" )
for k, v in image_processor.__dict__.items():
self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-image-processor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__UpperCamelCase , repo_id="valid_org/test-image-processor-org" , push_to_hub=__UpperCamelCase , use_auth_token=self._token )
_UpperCAmelCase = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org" )
for k, v in image_processor.__dict__.items():
self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
def UpperCAmelCase__ ( self : int ):
CustomImageProcessor.register_for_auto_class()
_UpperCAmelCase = CustomImageProcessor.from_pretrained(__UpperCamelCase )
image_processor.push_to_hub("test-dynamic-image-processor" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"} , )
_UpperCAmelCase = AutoImageProcessor.from_pretrained(
F'''{USER}/test-dynamic-image-processor''' , trust_remote_code=__UpperCamelCase )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , "CustomImageProcessor" )
| 684 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
def UpperCAmelCase__ ( self : List[str] ):
_UpperCAmelCase = {
"task_specific_params": {
"summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
"summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
"summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
}
}
_UpperCAmelCase = {
"task_specific_params.summarization.length_penalty": 1.0,
"task_specific_params.summarization.max_length": 128,
"task_specific_params.summarization.min_length": 12,
"task_specific_params.summarization.num_beams": 4,
"task_specific_params.summarization_cnn.length_penalty": 2.0,
"task_specific_params.summarization_cnn.max_length": 142,
"task_specific_params.summarization_cnn.min_length": 56,
"task_specific_params.summarization_cnn.num_beams": 4,
"task_specific_params.summarization_xsum.length_penalty": 1.0,
"task_specific_params.summarization_xsum.max_length": 62,
"task_specific_params.summarization_xsum.min_length": 11,
"task_specific_params.summarization_xsum.num_beams": 6,
}
self.assertEqual(flatten_dict(__UpperCamelCase ) , __UpperCamelCase )
def UpperCAmelCase__ ( self : List[Any] ):
_UpperCAmelCase = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(__UpperCamelCase ) , x.transpose() ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(__UpperCamelCase , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def UpperCAmelCase__ ( self : Dict ):
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = torch.tensor(__UpperCamelCase )
self.assertTrue(np.allclose(transpose(__UpperCamelCase ) , transpose(__UpperCamelCase ).numpy() ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
_UpperCAmelCase = torch.tensor(__UpperCamelCase )
self.assertTrue(np.allclose(transpose(__UpperCamelCase , axes=(1, 2, 0) ) , transpose(__UpperCamelCase , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def UpperCAmelCase__ ( self : Optional[int] ):
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = tf.constant(__UpperCamelCase )
self.assertTrue(np.allclose(transpose(__UpperCamelCase ) , transpose(__UpperCamelCase ).numpy() ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
_UpperCAmelCase = tf.constant(__UpperCamelCase )
self.assertTrue(np.allclose(transpose(__UpperCamelCase , axes=(1, 2, 0) ) , transpose(__UpperCamelCase , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def UpperCAmelCase__ ( self : str ):
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = jnp.array(__UpperCamelCase )
self.assertTrue(np.allclose(transpose(__UpperCamelCase ) , np.asarray(transpose(__UpperCamelCase ) ) ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
_UpperCAmelCase = jnp.array(__UpperCamelCase )
self.assertTrue(np.allclose(transpose(__UpperCamelCase , axes=(1, 2, 0) ) , np.asarray(transpose(__UpperCamelCase , axes=(1, 2, 0) ) ) ) )
def UpperCAmelCase__ ( self : Dict ):
_UpperCAmelCase = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(__UpperCamelCase , (4, 3) ) , np.reshape(__UpperCamelCase , (4, 3) ) ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(__UpperCamelCase , (12, 5) ) , np.reshape(__UpperCamelCase , (12, 5) ) ) )
@require_torch
def UpperCAmelCase__ ( self : Optional[Any] ):
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = torch.tensor(__UpperCamelCase )
self.assertTrue(np.allclose(reshape(__UpperCamelCase , (4, 3) ) , reshape(__UpperCamelCase , (4, 3) ).numpy() ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
_UpperCAmelCase = torch.tensor(__UpperCamelCase )
self.assertTrue(np.allclose(reshape(__UpperCamelCase , (12, 5) ) , reshape(__UpperCamelCase , (12, 5) ).numpy() ) )
@require_tf
def UpperCAmelCase__ ( self : Dict ):
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = tf.constant(__UpperCamelCase )
self.assertTrue(np.allclose(reshape(__UpperCamelCase , (4, 3) ) , reshape(__UpperCamelCase , (4, 3) ).numpy() ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
_UpperCAmelCase = tf.constant(__UpperCamelCase )
self.assertTrue(np.allclose(reshape(__UpperCamelCase , (12, 5) ) , reshape(__UpperCamelCase , (12, 5) ).numpy() ) )
@require_flax
def UpperCAmelCase__ ( self : List[Any] ):
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = jnp.array(__UpperCamelCase )
self.assertTrue(np.allclose(reshape(__UpperCamelCase , (4, 3) ) , np.asarray(reshape(__UpperCamelCase , (4, 3) ) ) ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
_UpperCAmelCase = jnp.array(__UpperCamelCase )
self.assertTrue(np.allclose(reshape(__UpperCamelCase , (12, 5) ) , np.asarray(reshape(__UpperCamelCase , (12, 5) ) ) ) )
def UpperCAmelCase__ ( self : str ):
_UpperCAmelCase = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(__UpperCamelCase ) , np.squeeze(__UpperCamelCase ) ) )
_UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(__UpperCamelCase , axis=2 ) , np.squeeze(__UpperCamelCase , axis=2 ) ) )
@require_torch
def UpperCAmelCase__ ( self : Any ):
_UpperCAmelCase = np.random.randn(1 , 3 , 4 )
_UpperCAmelCase = torch.tensor(__UpperCamelCase )
self.assertTrue(np.allclose(squeeze(__UpperCamelCase ) , squeeze(__UpperCamelCase ).numpy() ) )
_UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 )
_UpperCAmelCase = torch.tensor(__UpperCamelCase )
self.assertTrue(np.allclose(squeeze(__UpperCamelCase , axis=2 ) , squeeze(__UpperCamelCase , axis=2 ).numpy() ) )
@require_tf
def UpperCAmelCase__ ( self : Optional[Any] ):
_UpperCAmelCase = np.random.randn(1 , 3 , 4 )
_UpperCAmelCase = tf.constant(__UpperCamelCase )
self.assertTrue(np.allclose(squeeze(__UpperCamelCase ) , squeeze(__UpperCamelCase ).numpy() ) )
_UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 )
_UpperCAmelCase = tf.constant(__UpperCamelCase )
self.assertTrue(np.allclose(squeeze(__UpperCamelCase , axis=2 ) , squeeze(__UpperCamelCase , axis=2 ).numpy() ) )
@require_flax
def UpperCAmelCase__ ( self : int ):
_UpperCAmelCase = np.random.randn(1 , 3 , 4 )
_UpperCAmelCase = jnp.array(__UpperCamelCase )
self.assertTrue(np.allclose(squeeze(__UpperCamelCase ) , np.asarray(squeeze(__UpperCamelCase ) ) ) )
_UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 )
_UpperCAmelCase = jnp.array(__UpperCamelCase )
self.assertTrue(np.allclose(squeeze(__UpperCamelCase , axis=2 ) , np.asarray(squeeze(__UpperCamelCase , axis=2 ) ) ) )
def UpperCAmelCase__ ( self : Tuple ):
_UpperCAmelCase = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(__UpperCamelCase , axis=1 ) , np.expand_dims(__UpperCamelCase , axis=1 ) ) )
@require_torch
def UpperCAmelCase__ ( self : Tuple ):
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = torch.tensor(__UpperCamelCase )
self.assertTrue(np.allclose(expand_dims(__UpperCamelCase , axis=1 ) , expand_dims(__UpperCamelCase , axis=1 ).numpy() ) )
@require_tf
def UpperCAmelCase__ ( self : int ):
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = tf.constant(__UpperCamelCase )
self.assertTrue(np.allclose(expand_dims(__UpperCamelCase , axis=1 ) , expand_dims(__UpperCamelCase , axis=1 ).numpy() ) )
@require_flax
def UpperCAmelCase__ ( self : str ):
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = jnp.array(__UpperCamelCase )
self.assertTrue(np.allclose(expand_dims(__UpperCamelCase , axis=1 ) , np.asarray(expand_dims(__UpperCamelCase , axis=1 ) ) ) )
| 684 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def __lowerCamelCase ( _lowerCAmelCase ) -> List[str]:
return getitem, k
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]:
return setitem, k, v
def __lowerCamelCase ( _lowerCAmelCase ) -> str:
return delitem, k
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , *_lowerCAmelCase ) -> Optional[int]:
try:
return fun(_lowerCAmelCase , *_lowerCAmelCase ), None
except Exception as e:
return None, e
__lowerCAmelCase = (
_set("key_a", "val_a"),
_set("key_b", "val_b"),
)
__lowerCAmelCase = [
_set("key_a", "val_a"),
_set("key_a", "val_b"),
]
__lowerCAmelCase = [
_set("key_a", "val_a"),
_set("key_b", "val_b"),
_del("key_a"),
_del("key_b"),
_set("key_a", "val_a"),
_del("key_a"),
]
__lowerCAmelCase = [
_get("key_a"),
_del("key_a"),
_set("key_a", "val_a"),
_del("key_a"),
_del("key_a"),
_get("key_a"),
]
__lowerCAmelCase = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
__lowerCAmelCase = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("key_a", "val_b"),
]
@pytest.mark.parametrize(
"operations" , (
pytest.param(_add_items , id="add items" ),
pytest.param(_overwrite_items , id="overwrite items" ),
pytest.param(_delete_items , id="delete items" ),
pytest.param(_access_absent_items , id="access absent items" ),
pytest.param(_add_with_resize_up , id="add with resize up" ),
pytest.param(_add_with_resize_down , id="add with resize down" ),
) , )
def __lowerCamelCase ( _lowerCAmelCase ) -> List[str]:
_UpperCAmelCase = HashMap(initial_block_size=4 )
_UpperCAmelCase = {}
for _, (fun, *args) in enumerate(_lowerCAmelCase ):
_UpperCAmelCase , _UpperCAmelCase = _run_operation(_lowerCAmelCase , _lowerCAmelCase , *_lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase = _run_operation(_lowerCAmelCase , _lowerCAmelCase , *_lowerCAmelCase )
assert my_res == py_res
assert str(_lowerCAmelCase ) == str(_lowerCAmelCase )
assert set(_lowerCAmelCase ) == set(_lowerCAmelCase )
assert len(_lowerCAmelCase ) == len(_lowerCAmelCase )
assert set(my.items() ) == set(py.items() )
def __lowerCamelCase ( ) -> List[Any]:
def is_public(_lowerCAmelCase ) -> bool:
return not name.startswith("_" )
_UpperCAmelCase = {name for name in dir({} ) if is_public(_lowerCAmelCase )}
_UpperCAmelCase = {name for name in dir(HashMap() ) if is_public(_lowerCAmelCase )}
assert dict_public_names > hash_public_names
| 684 | 1 |
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
__lowerCAmelCase = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE :
def __init__( self : int , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[int] ):
_UpperCAmelCase = question_encoder
_UpperCAmelCase = generator
_UpperCAmelCase = self.question_encoder
def UpperCAmelCase__ ( self : List[Any] , __UpperCamelCase : str ):
if os.path.isfile(__UpperCamelCase ):
raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' )
os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase )
_UpperCAmelCase = os.path.join(__UpperCamelCase , "question_encoder_tokenizer" )
_UpperCAmelCase = os.path.join(__UpperCamelCase , "generator_tokenizer" )
self.question_encoder.save_pretrained(__UpperCamelCase )
self.generator.save_pretrained(__UpperCamelCase )
@classmethod
def UpperCAmelCase__ ( cls : Union[str, Any] , __UpperCamelCase : Optional[Any] , **__UpperCamelCase : Tuple ):
# dynamically import AutoTokenizer
from ..auto.tokenization_auto import AutoTokenizer
_UpperCAmelCase = kwargs.pop("config" , __UpperCamelCase )
if config is None:
_UpperCAmelCase = RagConfig.from_pretrained(__UpperCamelCase )
_UpperCAmelCase = AutoTokenizer.from_pretrained(
__UpperCamelCase , config=config.question_encoder , subfolder="question_encoder_tokenizer" )
_UpperCAmelCase = AutoTokenizer.from_pretrained(
__UpperCamelCase , config=config.generator , subfolder="generator_tokenizer" )
return cls(question_encoder=__UpperCamelCase , generator=__UpperCamelCase )
def __call__( self : Optional[Any] , *__UpperCamelCase : List[Any] , **__UpperCamelCase : Union[str, Any] ):
return self.current_tokenizer(*__UpperCamelCase , **__UpperCamelCase )
def UpperCAmelCase__ ( self : Any , *__UpperCamelCase : List[Any] , **__UpperCamelCase : Union[str, Any] ):
return self.generator.batch_decode(*__UpperCamelCase , **__UpperCamelCase )
def UpperCAmelCase__ ( self : Tuple , *__UpperCamelCase : Optional[Any] , **__UpperCamelCase : List[Any] ):
return self.generator.decode(*__UpperCamelCase , **__UpperCamelCase )
def UpperCAmelCase__ ( self : Union[str, Any] ):
_UpperCAmelCase = self.question_encoder
def UpperCAmelCase__ ( self : Optional[int] ):
_UpperCAmelCase = self.generator
def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[List[str]] = None , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : str = "longest" , __UpperCamelCase : str = None , __UpperCamelCase : bool = True , **__UpperCamelCase : int , ):
warnings.warn(
"`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
"regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
"context manager to prepare your targets. See the documentation of your specific tokenizer for more "
"details" , __UpperCamelCase , )
if max_length is None:
_UpperCAmelCase = self.current_tokenizer.model_max_length
_UpperCAmelCase = self(
__UpperCamelCase , add_special_tokens=__UpperCamelCase , return_tensors=__UpperCamelCase , max_length=__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , **__UpperCamelCase , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
_UpperCAmelCase = self.current_tokenizer.model_max_length
_UpperCAmelCase = self(
text_target=__UpperCamelCase , add_special_tokens=__UpperCamelCase , return_tensors=__UpperCamelCase , padding=__UpperCamelCase , max_length=__UpperCamelCase , truncation=__UpperCamelCase , **__UpperCamelCase , )
_UpperCAmelCase = labels["input_ids"]
return model_inputs
| 684 |
def __lowerCamelCase ( _lowerCAmelCase ) -> list:
_UpperCAmelCase = len(_lowerCAmelCase )
for i in range(1 , _lowerCAmelCase ):
_UpperCAmelCase = collection[i]
_UpperCAmelCase = 0
_UpperCAmelCase = i - 1
while low <= high:
_UpperCAmelCase = (low + high) // 2
if val < collection[mid]:
_UpperCAmelCase = mid - 1
else:
_UpperCAmelCase = mid + 1
for j in range(_lowerCAmelCase , _lowerCAmelCase , -1 ):
_UpperCAmelCase = collection[j - 1]
_UpperCAmelCase = val
return collection
if __name__ == "__main__":
__lowerCAmelCase = input("Enter numbers separated by a comma:\n").strip()
__lowerCAmelCase = [int(item) for item in user_input.split(",")]
print(binary_insertion_sort(unsorted))
| 684 | 1 |
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
"snap-research/efficientformer-l1-300": (
"https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
),
}
class __SCREAMING_SNAKE_CASE ( lowercase):
__SCREAMING_SNAKE_CASE : List[Any] = """efficientformer"""
def __init__( self : int , __UpperCamelCase : List[int] = [3, 2, 6, 4] , __UpperCamelCase : List[int] = [48, 96, 224, 448] , __UpperCamelCase : List[bool] = [True, True, True, True] , __UpperCamelCase : int = 448 , __UpperCamelCase : int = 32 , __UpperCamelCase : int = 4 , __UpperCamelCase : int = 7 , __UpperCamelCase : int = 5 , __UpperCamelCase : int = 8 , __UpperCamelCase : int = 4 , __UpperCamelCase : float = 0.0 , __UpperCamelCase : int = 16 , __UpperCamelCase : int = 3 , __UpperCamelCase : int = 3 , __UpperCamelCase : int = 3 , __UpperCamelCase : int = 2 , __UpperCamelCase : int = 1 , __UpperCamelCase : float = 0.0 , __UpperCamelCase : int = 1 , __UpperCamelCase : bool = True , __UpperCamelCase : bool = True , __UpperCamelCase : float = 1e-5 , __UpperCamelCase : str = "gelu" , __UpperCamelCase : float = 0.02 , __UpperCamelCase : float = 1e-1_2 , __UpperCamelCase : int = 224 , __UpperCamelCase : float = 1e-0_5 , **__UpperCamelCase : Dict , ):
super().__init__(**__UpperCamelCase )
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = hidden_sizes
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = depths
_UpperCAmelCase = mlp_expansion_ratio
_UpperCAmelCase = downsamples
_UpperCAmelCase = dim
_UpperCAmelCase = key_dim
_UpperCAmelCase = attention_ratio
_UpperCAmelCase = resolution
_UpperCAmelCase = pool_size
_UpperCAmelCase = downsample_patch_size
_UpperCAmelCase = downsample_stride
_UpperCAmelCase = downsample_pad
_UpperCAmelCase = drop_path_rate
_UpperCAmelCase = num_metaad_blocks
_UpperCAmelCase = distillation
_UpperCAmelCase = use_layer_scale
_UpperCAmelCase = layer_scale_init_value
_UpperCAmelCase = image_size
_UpperCAmelCase = batch_norm_eps
| 684 |
__lowerCAmelCase = 2_5_6
# Modulus to hash a string
__lowerCAmelCase = 1_0_0_0_0_0_3
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> bool:
_UpperCAmelCase = len(_lowerCAmelCase )
_UpperCAmelCase = len(_lowerCAmelCase )
if p_len > t_len:
return False
_UpperCAmelCase = 0
_UpperCAmelCase = 0
_UpperCAmelCase = 1
# Calculating the hash of pattern and substring of text
for i in range(_lowerCAmelCase ):
_UpperCAmelCase = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
_UpperCAmelCase = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
_UpperCAmelCase = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
_UpperCAmelCase = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
def __lowerCamelCase ( ) -> None:
_UpperCAmelCase = "abc1abc12"
_UpperCAmelCase = "alskfjaldsabc1abc1abc12k23adsfabcabc"
_UpperCAmelCase = "alskfjaldsk23adsfabcabc"
assert rabin_karp(_lowerCAmelCase , _lowerCAmelCase ) and not rabin_karp(_lowerCAmelCase , _lowerCAmelCase )
# Test 2)
_UpperCAmelCase = "ABABX"
_UpperCAmelCase = "ABABZABABYABABX"
assert rabin_karp(_lowerCAmelCase , _lowerCAmelCase )
# Test 3)
_UpperCAmelCase = "AAAB"
_UpperCAmelCase = "ABAAAAAB"
assert rabin_karp(_lowerCAmelCase , _lowerCAmelCase )
# Test 4)
_UpperCAmelCase = "abcdabcy"
_UpperCAmelCase = "abcxabcdabxabcdabcdabcy"
assert rabin_karp(_lowerCAmelCase , _lowerCAmelCase )
# Test 5)
_UpperCAmelCase = "Lü"
_UpperCAmelCase = "Lüsai"
assert rabin_karp(_lowerCAmelCase , _lowerCAmelCase )
_UpperCAmelCase = "Lue"
assert not rabin_karp(_lowerCAmelCase , _lowerCAmelCase )
print("Success." )
if __name__ == "__main__":
test_rabin_karp()
| 684 | 1 |
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class __SCREAMING_SNAKE_CASE ( lowercase):
def __init__( self : Tuple , __UpperCamelCase : int = 101 ):
_UpperCAmelCase = length
def __len__( self : Optional[Any] ):
return self.length
def __getitem__( self : Dict , __UpperCamelCase : Any ):
return i
class __SCREAMING_SNAKE_CASE :
def __call__( self : Dict , __UpperCamelCase : List[Any] ):
return {"input_ids": torch.tensor(__UpperCamelCase ), "labels": torch.tensor(__UpperCamelCase )}
class __SCREAMING_SNAKE_CASE ( nn.Module):
def __init__( self : int ):
super().__init__()
# Add some (unused) params otherwise DDP will complain.
_UpperCAmelCase = nn.Linear(120 , 80 )
def UpperCAmelCase__ ( self : int , __UpperCamelCase : List[str] , __UpperCamelCase : Dict=None ):
if labels is not None:
return torch.tensor(0.0 , device=input_ids.device ), input_ids
else:
return input_ids
class __SCREAMING_SNAKE_CASE ( lowercase):
@require_torch_neuroncore
def UpperCAmelCase__ ( self : List[str] ):
_UpperCAmelCase = F'''--nproc_per_node=2
--master_port={get_torch_dist_unique_port()}
{self.test_file_dir}/test_trainer_distributed.py
'''.split()
_UpperCAmelCase = self.get_auto_remove_tmp_dir()
_UpperCAmelCase = F'''--output_dir {output_dir}'''.split()
_UpperCAmelCase = ["torchrun"] + distributed_args + args
execute_subprocess_async(__UpperCamelCase , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
class __SCREAMING_SNAKE_CASE ( lowercase):
@require_torch_multi_gpu
def UpperCAmelCase__ ( self : Dict ):
_UpperCAmelCase = F'''--nproc_per_node={torch.cuda.device_count()}
--master_port={get_torch_dist_unique_port()}
{self.test_file_dir}/test_trainer_distributed.py
'''.split()
_UpperCAmelCase = self.get_auto_remove_tmp_dir()
_UpperCAmelCase = F'''--output_dir {output_dir}'''.split()
_UpperCAmelCase = ["torchrun"] + distributed_args + args
execute_subprocess_async(__UpperCamelCase , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
__lowerCAmelCase = HfArgumentParser((TrainingArguments,))
__lowerCAmelCase = parser.parse_args_into_dataclasses()[0]
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '''
F'''distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}'''
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [1_0_1, 4_0, 7]:
__lowerCAmelCase = DummyDataset(dataset_length)
def __lowerCamelCase ( _lowerCAmelCase ) -> Dict:
_UpperCAmelCase = list(range(len(_lowerCAmelCase ) ) )
_UpperCAmelCase = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
"Predictions and/or labels do not match expected results:\n - predictions: "
F'''{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}''' )
return {"success": success}
__lowerCAmelCase = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
__lowerCAmelCase = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
__lowerCAmelCase = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
__lowerCAmelCase = 2
__lowerCAmelCase = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
__lowerCAmelCase = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
__lowerCAmelCase = None
| 684 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
__lowerCAmelCase = random.Random()
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase=1.0 , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> List[str]:
if rng is None:
_UpperCAmelCase = global_rng
_UpperCAmelCase = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
def __init__( self : Optional[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : List[Any]=7 , __UpperCamelCase : Union[str, Any]=400 , __UpperCamelCase : List[Any]=2_000 , __UpperCamelCase : Optional[Any]=10 , __UpperCamelCase : Optional[int]=160 , __UpperCamelCase : Any=8 , __UpperCamelCase : List[Any]=0.0 , __UpperCamelCase : Dict=4_000 , __UpperCamelCase : Optional[int]=False , __UpperCamelCase : Tuple=True , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = min_seq_length
_UpperCAmelCase = max_seq_length
_UpperCAmelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_UpperCAmelCase = padding_value
_UpperCAmelCase = sampling_rate
_UpperCAmelCase = return_attention_mask
_UpperCAmelCase = do_normalize
_UpperCAmelCase = feature_size
_UpperCAmelCase = chunk_length
_UpperCAmelCase = hop_length
def UpperCAmelCase__ ( self : Optional[Any] ):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : Tuple=False , __UpperCamelCase : Dict=False ):
def _flatten(__UpperCamelCase : Any ):
return list(itertools.chain(*__UpperCamelCase ) )
if equal_length:
_UpperCAmelCase = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_UpperCAmelCase = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_UpperCAmelCase = [np.asarray(__UpperCamelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __SCREAMING_SNAKE_CASE ( lowercase , unittest.TestCase):
__SCREAMING_SNAKE_CASE : str = WhisperFeatureExtractor if is_speech_available() else None
def UpperCAmelCase__ ( self : str ):
_UpperCAmelCase = WhisperFeatureExtractionTester(self )
def UpperCAmelCase__ ( self : str ):
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = feat_extract_first.save_pretrained(__UpperCamelCase )[0]
check_json_file_has_correct_format(__UpperCamelCase )
_UpperCAmelCase = self.feature_extraction_class.from_pretrained(__UpperCamelCase )
_UpperCAmelCase = feat_extract_first.to_dict()
_UpperCAmelCase = feat_extract_second.to_dict()
_UpperCAmelCase = feat_extract_first.mel_filters
_UpperCAmelCase = feat_extract_second.mel_filters
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase ) )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def UpperCAmelCase__ ( self : Union[str, Any] ):
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = os.path.join(__UpperCamelCase , "feat_extract.json" )
feat_extract_first.to_json_file(__UpperCamelCase )
_UpperCAmelCase = self.feature_extraction_class.from_json_file(__UpperCamelCase )
_UpperCAmelCase = feat_extract_first.to_dict()
_UpperCAmelCase = feat_extract_second.to_dict()
_UpperCAmelCase = feat_extract_first.mel_filters
_UpperCAmelCase = feat_extract_second.mel_filters
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase ) )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def UpperCAmelCase__ ( self : int ):
# Tests that all call wrap to encode_plus and batch_encode_plus
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_UpperCAmelCase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
_UpperCAmelCase = [np.asarray(__UpperCamelCase ) for speech_input in speech_inputs]
# Test feature size
_UpperCAmelCase = feature_extractor(__UpperCamelCase , padding="max_length" , return_tensors="np" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
_UpperCAmelCase = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_features
_UpperCAmelCase = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_features
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
# Test batched
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
_UpperCAmelCase = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_UpperCAmelCase = np.asarray(__UpperCamelCase )
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
# Test truncation required
_UpperCAmelCase = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
_UpperCAmelCase = [np.asarray(__UpperCamelCase ) for speech_input in speech_inputs]
_UpperCAmelCase = [x[: feature_extractor.n_samples] for x in speech_inputs]
_UpperCAmelCase = [np.asarray(__UpperCamelCase ) for speech_input in speech_inputs_truncated]
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
def UpperCAmelCase__ ( self : Union[str, Any] ):
import torch
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCAmelCase = np.random.rand(100 , 32 ).astype(np.floataa )
_UpperCAmelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_UpperCAmelCase = feature_extractor.pad([{"input_features": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
_UpperCAmelCase = feature_extractor.pad([{"input_features": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : Tuple ):
_UpperCAmelCase = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
_UpperCAmelCase = ds.sort("id" ).select(range(__UpperCamelCase ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def UpperCAmelCase__ ( self : Tuple ):
# fmt: off
_UpperCAmelCase = torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
_UpperCAmelCase = self._load_datasamples(1 )
_UpperCAmelCase = WhisperFeatureExtractor()
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="pt" ).input_features
self.assertEqual(input_features.shape , (1, 80, 3_000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , __UpperCamelCase , atol=1e-4 ) )
def UpperCAmelCase__ ( self : Optional[Any] ):
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCAmelCase = self._load_datasamples(1 )[0]
_UpperCAmelCase = ((audio - audio.min()) / (audio.max() - audio.min())) * 65_535 # Rescale to [0, 65535] to show issue
_UpperCAmelCase = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=__UpperCamelCase )[0]
self.assertTrue(np.all(np.mean(__UpperCamelCase ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(__UpperCamelCase ) - 1 ) < 1e-3 ) )
| 684 | 1 |
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class __SCREAMING_SNAKE_CASE ( lowercase):
__SCREAMING_SNAKE_CASE : int = ComputeEnvironment.AMAZON_SAGEMAKER
__SCREAMING_SNAKE_CASE : str = True
__SCREAMING_SNAKE_CASE : Union[str, Any] = """ml.p3.2xlarge"""
__SCREAMING_SNAKE_CASE : Dict = """accelerate_sagemaker_execution_role"""
__SCREAMING_SNAKE_CASE : Optional[Any] = """hf-sm"""
__SCREAMING_SNAKE_CASE : Any = """us-east-1"""
__SCREAMING_SNAKE_CASE : Optional[int] = 1
__SCREAMING_SNAKE_CASE : Tuple = """accelerate-sagemaker-1"""
__SCREAMING_SNAKE_CASE : Tuple = """1.6"""
__SCREAMING_SNAKE_CASE : int = """4.4"""
__SCREAMING_SNAKE_CASE : Optional[int] = """train.py"""
__SCREAMING_SNAKE_CASE : Dict = [
"""--model_name_or_path""",
"""bert""",
"""--do_train""",
"""False""",
"""--epochs""",
"""3""",
"""--learning_rate""",
"""5e-5""",
"""--max_steps""",
"""50.5""",
]
__SCREAMING_SNAKE_CASE : List[str] = [
"""--model_name_or_path""",
"""bert""",
"""--do_train""",
"""--do_test""",
"""False""",
"""--do_predict""",
"""--epochs""",
"""3""",
"""--learning_rate""",
"""5e-5""",
"""--max_steps""",
"""50.5""",
]
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
def UpperCAmelCase__ ( self : Optional[Any] ):
# If no defaults are changed, `to_kwargs` returns an empty dict.
_UpperCAmelCase = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args )
assert isinstance(converted_args["model_name_or_path"] , __UpperCamelCase )
assert isinstance(converted_args["do_train"] , __UpperCamelCase )
assert isinstance(converted_args["epochs"] , __UpperCamelCase )
assert isinstance(converted_args["learning_rate"] , __UpperCamelCase )
assert isinstance(converted_args["max_steps"] , __UpperCamelCase )
with pytest.raises(__UpperCamelCase ):
_convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args )
| 684 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
__lowerCAmelCase = "\nHuman: <<task>>\n\nAssistant: "
__lowerCAmelCase = "huggingface-tools/default-prompts"
__lowerCAmelCase = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase="run" ) -> Union[str, Any]:
if prompt_or_repo_id is None:
_UpperCAmelCase = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search("\\s" , _lowerCAmelCase ) is not None:
return prompt_or_repo_id
_UpperCAmelCase = cached_file(
_lowerCAmelCase , PROMPT_FILES[mode] , repo_type="dataset" , user_agent={"agent": agent_name} )
with open(_lowerCAmelCase , "r" , encoding="utf-8" ) as f:
return f.read()
| 684 | 1 |
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> str:
if a < 0 or b < 0:
raise ValueError("the value of both inputs must be positive" )
_UpperCAmelCase = str(bin(_lowerCAmelCase ) )[2:] # remove the leading "0b"
_UpperCAmelCase = str(bin(_lowerCAmelCase ) )[2:] # remove the leading "0b"
_UpperCAmelCase = max(len(_lowerCAmelCase ) , len(_lowerCAmelCase ) )
return "0b" + "".join(
str(int(char_a == "1" and char_b == "1" ) )
for char_a, char_b in zip(a_binary.zfill(_lowerCAmelCase ) , b_binary.zfill(_lowerCAmelCase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 684 |
from itertools import permutations
def __lowerCamelCase ( _lowerCAmelCase ) -> bool:
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
_UpperCAmelCase = [7, 11, 13, 17]
for i, test in enumerate(_lowerCAmelCase ):
if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def __lowerCamelCase ( _lowerCAmelCase = 10 ) -> int:
return sum(
int("".join(map(_lowerCAmelCase , _lowerCAmelCase ) ) )
for num in permutations(range(_lowerCAmelCase ) )
if is_substring_divisible(_lowerCAmelCase ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 684 | 1 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class __SCREAMING_SNAKE_CASE ( lowercase , lowercase):
__SCREAMING_SNAKE_CASE : List[str] = 1
@register_to_config
def __init__( self : Tuple , __UpperCamelCase : List[str]=2_000 , __UpperCamelCase : Tuple=0.1 , __UpperCamelCase : Tuple=20 , __UpperCamelCase : int=1e-3 ):
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : int , __UpperCamelCase : Union[str, torch.device] = None ):
_UpperCAmelCase = torch.linspace(1 , self.config.sampling_eps , __UpperCamelCase , device=__UpperCamelCase )
def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : Any , __UpperCamelCase : Optional[Any] , __UpperCamelCase : str , __UpperCamelCase : Any=None ):
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
# TODO(Patrick) better comments + non-PyTorch
# postprocess model score
_UpperCAmelCase = (
-0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
)
_UpperCAmelCase = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
_UpperCAmelCase = std.flatten()
while len(std.shape ) < len(score.shape ):
_UpperCAmelCase = std.unsqueeze(-1 )
_UpperCAmelCase = -score / std
# compute
_UpperCAmelCase = -1.0 / len(self.timesteps )
_UpperCAmelCase = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
_UpperCAmelCase = beta_t.flatten()
while len(beta_t.shape ) < len(x.shape ):
_UpperCAmelCase = beta_t.unsqueeze(-1 )
_UpperCAmelCase = -0.5 * beta_t * x
_UpperCAmelCase = torch.sqrt(__UpperCamelCase )
_UpperCAmelCase = drift - diffusion**2 * score
_UpperCAmelCase = x + drift * dt
# add noise
_UpperCAmelCase = randn_tensor(x.shape , layout=x.layout , generator=__UpperCamelCase , device=x.device , dtype=x.dtype )
_UpperCAmelCase = x_mean + diffusion * math.sqrt(-dt ) * noise
return x, x_mean
def __len__( self : List[str] ):
return self.config.num_train_timesteps
| 684 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
__lowerCAmelCase = {
"vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
"merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
"tokenizer_config_file": {
"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
},
}
__lowerCAmelCase = {"facebook/blenderbot-3B": 1_2_8}
class __SCREAMING_SNAKE_CASE ( lowercase):
__SCREAMING_SNAKE_CASE : Optional[int] = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : str = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : List[Any] = ["""input_ids""", """attention_mask"""]
__SCREAMING_SNAKE_CASE : List[str] = BlenderbotTokenizer
def __init__( self : Tuple , __UpperCamelCase : List[str]=None , __UpperCamelCase : Union[str, Any]=None , __UpperCamelCase : List[str]=None , __UpperCamelCase : Union[str, Any]="replace" , __UpperCamelCase : Tuple="<s>" , __UpperCamelCase : str="</s>" , __UpperCamelCase : Dict="</s>" , __UpperCamelCase : Union[str, Any]="<s>" , __UpperCamelCase : Union[str, Any]="<unk>" , __UpperCamelCase : Tuple="<pad>" , __UpperCamelCase : Optional[int]="<mask>" , __UpperCamelCase : Union[str, Any]=False , __UpperCamelCase : List[str]=True , **__UpperCamelCase : int , ):
super().__init__(
__UpperCamelCase , __UpperCamelCase , tokenizer_file=__UpperCamelCase , errors=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , sep_token=__UpperCamelCase , cls_token=__UpperCamelCase , unk_token=__UpperCamelCase , pad_token=__UpperCamelCase , mask_token=__UpperCamelCase , add_prefix_space=__UpperCamelCase , trim_offsets=__UpperCamelCase , **__UpperCamelCase , )
_UpperCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , __UpperCamelCase ) != add_prefix_space:
_UpperCAmelCase = getattr(__UpperCamelCase , pre_tok_state.pop("type" ) )
_UpperCAmelCase = add_prefix_space
_UpperCAmelCase = pre_tok_class(**__UpperCamelCase )
_UpperCAmelCase = add_prefix_space
_UpperCAmelCase = "post_processor"
_UpperCAmelCase = getattr(self.backend_tokenizer , __UpperCamelCase , __UpperCamelCase )
if tokenizer_component_instance:
_UpperCAmelCase = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_UpperCAmelCase = tuple(state["sep"] )
if "cls" in state:
_UpperCAmelCase = tuple(state["cls"] )
_UpperCAmelCase = False
if state.get("add_prefix_space" , __UpperCamelCase ) != add_prefix_space:
_UpperCAmelCase = add_prefix_space
_UpperCAmelCase = True
if state.get("trim_offsets" , __UpperCamelCase ) != trim_offsets:
_UpperCAmelCase = trim_offsets
_UpperCAmelCase = True
if changes_to_apply:
_UpperCAmelCase = getattr(__UpperCamelCase , state.pop("type" ) )
_UpperCAmelCase = component_class(**__UpperCamelCase )
setattr(self.backend_tokenizer , __UpperCamelCase , __UpperCamelCase )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def UpperCAmelCase__ ( self : Union[str, Any] ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : Union[str, Any] ):
_UpperCAmelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else value
_UpperCAmelCase = value
def UpperCAmelCase__ ( self : int , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : List[Any] ):
_UpperCAmelCase = kwargs.get("is_split_into_words" , __UpperCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__UpperCamelCase , **__UpperCamelCase )
def UpperCAmelCase__ ( self : Tuple , *__UpperCamelCase : int , **__UpperCamelCase : Union[str, Any] ):
_UpperCAmelCase = kwargs.get("is_split_into_words" , __UpperCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__UpperCamelCase , **__UpperCamelCase )
def UpperCAmelCase__ ( self : str , __UpperCamelCase : str , __UpperCamelCase : Optional[str] = None ):
_UpperCAmelCase = self._tokenizer.model.save(__UpperCamelCase , name=__UpperCamelCase )
return tuple(__UpperCamelCase )
def UpperCAmelCase__ ( self : Optional[Any] , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ):
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ):
return token_ids_a + [self.eos_token_id]
def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : "Conversation" ):
_UpperCAmelCase = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(" " + text )
else:
# Generated responses should contain them already.
inputs.append(__UpperCamelCase )
_UpperCAmelCase = " ".join(__UpperCamelCase )
_UpperCAmelCase = self.encode(__UpperCamelCase )
if len(__UpperCamelCase ) > self.model_max_length:
_UpperCAmelCase = input_ids[-self.model_max_length :]
logger.warning(F'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
| 684 | 1 |
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
__lowerCAmelCase = "<<<<<<< This should probably be modified because it mentions: "
__lowerCAmelCase = "=======\n>>>>>>>\n"
__lowerCAmelCase = [
"TextEncoderConfig",
"ByteTextEncoder",
"SubwordTextEncoder",
"encoder_config",
"maybe_build_from_corpus",
"manual_dir",
]
__lowerCAmelCase = [
# (pattern, replacement)
# Order is important here for some replacements
(r"tfds\.core", r"datasets"),
(r"tf\.io\.gfile\.GFile", r"open"),
(r"tf\.([\w\d]+)", r"datasets.Value('\1')"),
(r"tfds\.features\.Text\(\)", r"datasets.Value('string')"),
(r"tfds\.features\.Text\(", r"datasets.Value('string'),"),
(r"features\s*=\s*tfds.features.FeaturesDict\(", r"features=datasets.Features("),
(r"tfds\.features\.FeaturesDict\(", r"dict("),
(r"The TensorFlow Datasets Authors", r"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
(r"tfds\.", r"datasets."),
(r"dl_manager\.manual_dir", r"self.config.data_dir"),
(r"self\.builder_config", r"self.config"),
]
def __lowerCamelCase ( _lowerCAmelCase ) -> Union[str, Any]:
return ConvertCommand(args.tfds_path , args.datasets_directory )
class __SCREAMING_SNAKE_CASE ( lowercase):
@staticmethod
def UpperCAmelCase__ ( __UpperCamelCase : ArgumentParser ):
_UpperCAmelCase = parser.add_parser(
"convert" , help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset." , )
train_parser.add_argument(
"--tfds_path" , type=__UpperCamelCase , required=__UpperCamelCase , help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert." , )
train_parser.add_argument(
"--datasets_directory" , type=__UpperCamelCase , required=__UpperCamelCase , help="Path to the HuggingFace Datasets folder." )
train_parser.set_defaults(func=__UpperCamelCase )
def __init__( self : int , __UpperCamelCase : str , __UpperCamelCase : str , *__UpperCamelCase : List[str] ):
_UpperCAmelCase = get_logger("datasets-cli/converting" )
_UpperCAmelCase = tfds_path
_UpperCAmelCase = datasets_directory
def UpperCAmelCase__ ( self : str ):
if os.path.isdir(self._tfds_path ):
_UpperCAmelCase = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
_UpperCAmelCase = os.path.dirname(self._tfds_path )
else:
raise ValueError("--tfds_path is neither a directory nor a file. Please check path." )
_UpperCAmelCase = os.path.abspath(self._datasets_directory )
self._logger.info(F'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' )
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = {}
if os.path.isdir(self._tfds_path ):
_UpperCAmelCase = os.listdir(__UpperCamelCase )
else:
_UpperCAmelCase = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(F'''Looking at file {f_name}''' )
_UpperCAmelCase = os.path.join(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = os.path.join(__UpperCamelCase , __UpperCamelCase )
if not os.path.isfile(__UpperCamelCase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info("Skipping file" )
continue
with open(__UpperCamelCase , encoding="utf-8" ) as f:
_UpperCAmelCase = f.readlines()
_UpperCAmelCase = []
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = []
for line in lines:
_UpperCAmelCase = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
_UpperCAmelCase = "import datasets\n"
elif "import tensorflow" in out_line:
# order is important here
_UpperCAmelCase = ""
continue
elif "from absl import logging" in out_line:
_UpperCAmelCase = "from datasets import logging\n"
elif "getLogger" in out_line:
_UpperCAmelCase = out_line.replace("getLogger" , "get_logger" )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
_UpperCAmelCase = True
_UpperCAmelCase = list(filter(lambda __UpperCamelCase : e in out_line , __UpperCamelCase ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(__UpperCamelCase ) + "\n" )
out_lines.append(__UpperCamelCase )
out_lines.append(__UpperCamelCase )
continue
else:
for pattern, replacement in TO_CONVERT:
_UpperCAmelCase = re.sub(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
_UpperCAmelCase = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)" , __UpperCamelCase )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split("," ) )
_UpperCAmelCase = "from . import " + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(F'''Error converting {out_line.strip()}''' )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
_UpperCAmelCase = True
out_lines.append(__UpperCamelCase )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
_UpperCAmelCase = f_name.replace(".py" , "" )
_UpperCAmelCase = os.path.join(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = os.path.join(__UpperCamelCase , __UpperCamelCase )
os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase )
self._logger.info(F'''Adding directory {output_dir}''' )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(__UpperCamelCase )
if needs_manual_update:
with_manual_update.append(__UpperCamelCase )
with open(__UpperCamelCase , "w" , encoding="utf-8" ) as f:
f.writelines(__UpperCamelCase )
self._logger.info(F'''Converted in {output_file}''' )
for utils_file in utils_files:
try:
_UpperCAmelCase = os.path.basename(__UpperCamelCase )
_UpperCAmelCase = imports_to_builder_map[f_name.replace(".py" , "" )]
self._logger.info(F'''Moving {dest_folder} to {utils_file}''' )
shutil.copy(__UpperCamelCase , __UpperCamelCase )
except KeyError:
self._logger.error(F'''Cannot find destination folder for {utils_file}. Please copy manually.''' )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
F'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
| 684 |
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
__lowerCAmelCase = logging.get_logger(__name__)
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
_UpperCAmelCase = WavaVecaForSequenceClassification.from_pretrained(_lowerCAmelCase , config=_lowerCAmelCase )
_UpperCAmelCase = downstream_dict["projector.weight"]
_UpperCAmelCase = downstream_dict["projector.bias"]
_UpperCAmelCase = downstream_dict["model.post_net.linear.weight"]
_UpperCAmelCase = downstream_dict["model.post_net.linear.bias"]
return model
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> str:
_UpperCAmelCase = WavaVecaForAudioFrameClassification.from_pretrained(_lowerCAmelCase , config=_lowerCAmelCase )
_UpperCAmelCase = downstream_dict["model.linear.weight"]
_UpperCAmelCase = downstream_dict["model.linear.bias"]
return model
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
_UpperCAmelCase = WavaVecaForXVector.from_pretrained(_lowerCAmelCase , config=_lowerCAmelCase )
_UpperCAmelCase = downstream_dict["connector.weight"]
_UpperCAmelCase = downstream_dict["connector.bias"]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
_UpperCAmelCase = downstream_dict[
F'''model.framelevel_feature_extractor.module.{i}.kernel.weight'''
]
_UpperCAmelCase = downstream_dict[F'''model.framelevel_feature_extractor.module.{i}.kernel.bias''']
_UpperCAmelCase = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
_UpperCAmelCase = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
_UpperCAmelCase = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
_UpperCAmelCase = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
_UpperCAmelCase = downstream_dict["objective.W"]
return model
@torch.no_grad()
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
_UpperCAmelCase = torch.load(_lowerCAmelCase , map_location="cpu" )
_UpperCAmelCase = checkpoint["Downstream"]
_UpperCAmelCase = WavaVecaConfig.from_pretrained(_lowerCAmelCase )
_UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(
_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , do_normalize=_lowerCAmelCase )
_UpperCAmelCase = hf_config.architectures[0]
if arch.endswith("ForSequenceClassification" ):
_UpperCAmelCase = convert_classification(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
elif arch.endswith("ForAudioFrameClassification" ):
_UpperCAmelCase = convert_diarization(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
elif arch.endswith("ForXVector" ):
_UpperCAmelCase = convert_xvector(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
else:
raise NotImplementedError(F'''S3PRL weights conversion is not supported for {arch}''' )
if hf_config.use_weighted_layer_sum:
_UpperCAmelCase = checkpoint["Featurizer"]["weights"]
hf_feature_extractor.save_pretrained(_lowerCAmelCase )
hf_model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
)
parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
__lowerCAmelCase = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 684 | 1 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=lowercase)
class __SCREAMING_SNAKE_CASE ( lowercase):
__SCREAMING_SNAKE_CASE : str = field(default="""image-classification""" , metadata={"""include_in_asdict_even_if_is_default""": True})
__SCREAMING_SNAKE_CASE : ClassVar[Features] = Features({"""image""": Image()})
__SCREAMING_SNAKE_CASE : ClassVar[Features] = Features({"""labels""": ClassLabel})
__SCREAMING_SNAKE_CASE : str = "image"
__SCREAMING_SNAKE_CASE : str = "labels"
def UpperCAmelCase__ ( self : List[str] , __UpperCamelCase : int ):
if self.label_column not in features:
raise ValueError(F'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] , __UpperCamelCase ):
raise ValueError(F'''Column {self.label_column} is not a ClassLabel.''' )
_UpperCAmelCase = copy.deepcopy(self )
_UpperCAmelCase = self.label_schema.copy()
_UpperCAmelCase = features[self.label_column]
_UpperCAmelCase = label_schema
return task_template
@property
def UpperCAmelCase__ ( self : Dict ):
return {
self.image_column: "image",
self.label_column: "labels",
}
| 684 |
def __lowerCamelCase ( _lowerCAmelCase ) -> str:
_UpperCAmelCase = []
_UpperCAmelCase = set({"(", "[", "{"} )
_UpperCAmelCase = set({")", "]", "}"} )
_UpperCAmelCase = {"{": "}", "[": "]", "(": ")"}
for i in range(len(_lowerCAmelCase ) ):
if s[i] in open_brackets:
stack.append(s[i] )
elif s[i] in closed_brackets and (
len(_lowerCAmelCase ) == 0 or (len(_lowerCAmelCase ) > 0 and open_to_closed[stack.pop()] != s[i])
):
return False
return len(_lowerCAmelCase ) == 0
def __lowerCamelCase ( ) -> str:
_UpperCAmelCase = input("Enter sequence of brackets: " )
if is_balanced(_lowerCAmelCase ):
print(_lowerCAmelCase , "is balanced" )
else:
print(_lowerCAmelCase , "is not balanced" )
if __name__ == "__main__":
main()
| 684 | 1 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class __SCREAMING_SNAKE_CASE ( lowercase):
def UpperCAmelCase__ ( self : Dict ):
_UpperCAmelCase = SMALL_MODEL_IDENTIFIER
_UpperCAmelCase = "pt"
_UpperCAmelCase = "tf"
def UpperCAmelCase__ ( self : int , __UpperCamelCase : Optional[int] ):
_UpperCAmelCase = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(__UpperCamelCase )
def UpperCAmelCase__ ( self : str , __UpperCamelCase : List[str] ):
_UpperCAmelCase = TFAutoModel.from_pretrained(self.test_model , from_pt=__UpperCamelCase )
model_tf.save_pretrained(__UpperCamelCase )
def UpperCAmelCase__ ( self : Dict ):
_UpperCAmelCase = "mock_framework"
# Framework provided - return whatever the user provides
_UpperCAmelCase = FeaturesManager.determine_framework(self.test_model , __UpperCamelCase )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(__UpperCamelCase )
_UpperCAmelCase = FeaturesManager.determine_framework(__UpperCamelCase , __UpperCamelCase )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(__UpperCamelCase )
_UpperCAmelCase = FeaturesManager.determine_framework(__UpperCamelCase , __UpperCamelCase )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def UpperCAmelCase__ ( self : int ):
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(__UpperCamelCase )
_UpperCAmelCase = FeaturesManager.determine_framework(__UpperCamelCase )
self.assertEqual(__UpperCamelCase , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(__UpperCamelCase )
_UpperCAmelCase = FeaturesManager.determine_framework(__UpperCamelCase )
self.assertEqual(__UpperCamelCase , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(__UpperCamelCase ):
_UpperCAmelCase = FeaturesManager.determine_framework(__UpperCamelCase )
def UpperCAmelCase__ ( self : List[str] ):
_UpperCAmelCase = MagicMock(return_value=__UpperCamelCase )
with patch("transformers.onnx.features.is_tf_available" , __UpperCamelCase ):
_UpperCAmelCase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__UpperCamelCase , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
_UpperCAmelCase = MagicMock(return_value=__UpperCamelCase )
with patch("transformers.onnx.features.is_torch_available" , __UpperCamelCase ):
_UpperCAmelCase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__UpperCamelCase , self.framework_tf )
# Both in environment -> use PyTorch
_UpperCAmelCase = MagicMock(return_value=__UpperCamelCase )
_UpperCAmelCase = MagicMock(return_value=__UpperCamelCase )
with patch("transformers.onnx.features.is_tf_available" , __UpperCamelCase ), patch(
"transformers.onnx.features.is_torch_available" , __UpperCamelCase ):
_UpperCAmelCase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__UpperCamelCase , self.framework_pt )
# Both not in environment -> raise error
_UpperCAmelCase = MagicMock(return_value=__UpperCamelCase )
_UpperCAmelCase = MagicMock(return_value=__UpperCamelCase )
with patch("transformers.onnx.features.is_tf_available" , __UpperCamelCase ), patch(
"transformers.onnx.features.is_torch_available" , __UpperCamelCase ):
with self.assertRaises(__UpperCamelCase ):
_UpperCAmelCase = FeaturesManager.determine_framework(self.test_model )
| 684 |
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> tuple[float, float]:
# Check if the input is valid
if not len(_lowerCAmelCase ) == len(_lowerCAmelCase ) == 3:
raise ValueError("Please enter a valid equation." )
if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0:
raise ValueError("Both a & b of two equations can't be zero." )
# Extract the coefficients
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = equationa
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = equationa
# Calculate the determinants of the matrices
_UpperCAmelCase = aa * ba - aa * ba
_UpperCAmelCase = ca * ba - ca * ba
_UpperCAmelCase = aa * ca - aa * ca
# Check if the system of linear equations has a solution (using Cramer's rule)
if determinant == 0:
if determinant_x == determinant_y == 0:
raise ValueError("Infinite solutions. (Consistent system)" )
else:
raise ValueError("No solution. (Inconsistent system)" )
else:
if determinant_x == determinant_y == 0:
# Trivial solution (Inconsistent system)
return (0.0, 0.0)
else:
_UpperCAmelCase = determinant_x / determinant
_UpperCAmelCase = determinant_y / determinant
# Non-Trivial Solution (Consistent system)
return (x, y)
| 684 | 1 |
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=1_024 ) -> Optional[int]:
_UpperCAmelCase , _UpperCAmelCase = [], []
_UpperCAmelCase = list(zip(_lowerCAmelCase , _lowerCAmelCase ) )
_UpperCAmelCase , _UpperCAmelCase = sorted_examples[0]
def is_too_big(_lowerCAmelCase ):
return tok(_lowerCAmelCase , return_tensors="pt" ).input_ids.shape[1] > max_tokens
for src, tgt in tqdm(sorted_examples[1:] ):
_UpperCAmelCase = new_src + " " + src
_UpperCAmelCase = new_tgt + " " + tgt
if is_too_big(_lowerCAmelCase ) or is_too_big(_lowerCAmelCase ): # cant fit, finalize example
finished_src.append(_lowerCAmelCase )
finished_tgt.append(_lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase = src, tgt
else: # can fit, keep adding
_UpperCAmelCase , _UpperCAmelCase = cand_src, cand_tgt
# cleanup
if new_src:
assert new_tgt
finished_src.append(_lowerCAmelCase )
finished_tgt.append(_lowerCAmelCase )
return finished_src, finished_tgt
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
_UpperCAmelCase = Path(_lowerCAmelCase )
save_path.mkdir(exist_ok=_lowerCAmelCase )
for split in ["train"]:
_UpperCAmelCase , _UpperCAmelCase = data_dir / F'''{split}.source''', data_dir / F'''{split}.target'''
_UpperCAmelCase = [x.rstrip() for x in Path(_lowerCAmelCase ).open().readlines()]
_UpperCAmelCase = [x.rstrip() for x in Path(_lowerCAmelCase ).open().readlines()]
_UpperCAmelCase , _UpperCAmelCase = pack_examples(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
print(F'''packed {split} split from {len(_lowerCAmelCase )} examples -> {len(_lowerCAmelCase )}.''' )
Path(save_path / F'''{split}.source''' ).open("w" ).write("\n".join(_lowerCAmelCase ) )
Path(save_path / F'''{split}.target''' ).open("w" ).write("\n".join(_lowerCAmelCase ) )
for split in ["val", "test"]:
_UpperCAmelCase , _UpperCAmelCase = data_dir / F'''{split}.source''', data_dir / F'''{split}.target'''
shutil.copyfile(_lowerCAmelCase , save_path / F'''{split}.source''' )
shutil.copyfile(_lowerCAmelCase , save_path / F'''{split}.target''' )
def __lowerCamelCase ( ) -> List[str]:
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("--tok_name" , type=_lowerCAmelCase , help="like facebook/bart-large-cnn,t5-base, etc." )
parser.add_argument("--max_seq_len" , type=_lowerCAmelCase , default=128 )
parser.add_argument("--data_dir" , type=_lowerCAmelCase )
parser.add_argument("--save_path" , type=_lowerCAmelCase )
_UpperCAmelCase = parser.parse_args()
_UpperCAmelCase = AutoTokenizer.from_pretrained(args.tok_name )
return pack_data_dir(_lowerCAmelCase , Path(args.data_dir ) , args.max_seq_len , args.save_path )
if __name__ == "__main__":
packer_cli()
| 684 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
# Initialise PyTorch model
_UpperCAmelCase = RemBertConfig.from_json_file(_lowerCAmelCase )
print("Building PyTorch model from configuration: {}".format(str(_lowerCAmelCase ) ) )
_UpperCAmelCase = RemBertModel(_lowerCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# Save pytorch-model
print("Save PyTorch model to {}".format(_lowerCAmelCase ) )
torch.save(model.state_dict() , _lowerCAmelCase )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--rembert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained RemBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__lowerCAmelCase = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 684 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
"google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
"google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class __SCREAMING_SNAKE_CASE ( lowercase):
__SCREAMING_SNAKE_CASE : List[Any] = """mobilenet_v1"""
def __init__( self : Optional[Any] , __UpperCamelCase : Tuple=3 , __UpperCamelCase : Optional[Any]=224 , __UpperCamelCase : int=1.0 , __UpperCamelCase : Tuple=8 , __UpperCamelCase : Optional[int]="relu6" , __UpperCamelCase : Dict=True , __UpperCamelCase : int=0.999 , __UpperCamelCase : List[Any]=0.02 , __UpperCamelCase : Any=0.001 , **__UpperCamelCase : Any , ):
super().__init__(**__UpperCamelCase )
if depth_multiplier <= 0:
raise ValueError("depth_multiplier must be greater than zero." )
_UpperCAmelCase = num_channels
_UpperCAmelCase = image_size
_UpperCAmelCase = depth_multiplier
_UpperCAmelCase = min_depth
_UpperCAmelCase = hidden_act
_UpperCAmelCase = tf_padding
_UpperCAmelCase = classifier_dropout_prob
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
class __SCREAMING_SNAKE_CASE ( lowercase):
__SCREAMING_SNAKE_CASE : Optional[Any] = version.parse("""1.11""")
@property
def UpperCAmelCase__ ( self : Tuple ):
return OrderedDict([("pixel_values", {0: "batch"})] )
@property
def UpperCAmelCase__ ( self : Union[str, Any] ):
if self.task == "image-classification":
return OrderedDict([("logits", {0: "batch"})] )
else:
return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )
@property
def UpperCAmelCase__ ( self : Any ):
return 1e-4
| 684 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __SCREAMING_SNAKE_CASE :
@staticmethod
def UpperCAmelCase__ ( *__UpperCamelCase : Dict , **__UpperCamelCase : Optional[int] ):
pass
@is_pipeline_test
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
__SCREAMING_SNAKE_CASE : List[Any] = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : Any , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] ):
_UpperCAmelCase = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" )
_UpperCAmelCase = [
{
"image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"question": "How many cats are there?",
},
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"question": "How many cats are there?",
},
]
return vqa_pipeline, examples
def UpperCAmelCase__ ( self : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[int] ):
_UpperCAmelCase = vqa_pipeline(__UpperCamelCase , top_k=1 )
self.assertEqual(
__UpperCamelCase , [
[{"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}],
[{"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}],
] , )
@require_torch
def UpperCAmelCase__ ( self : List[Any] ):
_UpperCAmelCase = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" )
_UpperCAmelCase = "./tests/fixtures/tests_samples/COCO/000000039769.png"
_UpperCAmelCase = "How many cats are there?"
_UpperCAmelCase = vqa_pipeline(image=__UpperCamelCase , question="How many cats are there?" , top_k=2 )
self.assertEqual(
__UpperCamelCase , [{"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}, {"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}] )
_UpperCAmelCase = vqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
__UpperCamelCase , [{"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}, {"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}] )
@slow
@require_torch
def UpperCAmelCase__ ( self : List[str] ):
_UpperCAmelCase = pipeline("visual-question-answering" , model="dandelin/vilt-b32-finetuned-vqa" )
_UpperCAmelCase = "./tests/fixtures/tests_samples/COCO/000000039769.png"
_UpperCAmelCase = "How many cats are there?"
_UpperCAmelCase = vqa_pipeline(image=__UpperCamelCase , question=__UpperCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}] )
_UpperCAmelCase = vqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}] )
_UpperCAmelCase = vqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2 , )
@require_tf
@unittest.skip("Visual question answering not implemented in TF" )
def UpperCAmelCase__ ( self : Optional[int] ):
pass
| 684 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCAmelCase = {
"configuration_distilbert": [
"DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"DistilBertConfig",
"DistilBertOnnxConfig",
],
"tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
"DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DistilBertForMaskedLM",
"DistilBertForMultipleChoice",
"DistilBertForQuestionAnswering",
"DistilBertForSequenceClassification",
"DistilBertForTokenClassification",
"DistilBertModel",
"DistilBertPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
"TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDistilBertForMaskedLM",
"TFDistilBertForMultipleChoice",
"TFDistilBertForQuestionAnswering",
"TFDistilBertForSequenceClassification",
"TFDistilBertForTokenClassification",
"TFDistilBertMainLayer",
"TFDistilBertModel",
"TFDistilBertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
"FlaxDistilBertForMaskedLM",
"FlaxDistilBertForMultipleChoice",
"FlaxDistilBertForQuestionAnswering",
"FlaxDistilBertForSequenceClassification",
"FlaxDistilBertForTokenClassification",
"FlaxDistilBertModel",
"FlaxDistilBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 684 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 684 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/config.json",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/config.json",
"funnel-transformer/medium-base": "https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json",
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/config.json",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json",
"funnel-transformer/xlarge-base": "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json",
}
class __SCREAMING_SNAKE_CASE ( lowercase):
__SCREAMING_SNAKE_CASE : Dict = """funnel"""
__SCREAMING_SNAKE_CASE : Optional[int] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """n_head""",
}
def __init__( self : Dict , __UpperCamelCase : str=30_522 , __UpperCamelCase : int=[4, 4, 4] , __UpperCamelCase : Any=None , __UpperCamelCase : Union[str, Any]=2 , __UpperCamelCase : Tuple=768 , __UpperCamelCase : Tuple=12 , __UpperCamelCase : List[str]=64 , __UpperCamelCase : List[Any]=3_072 , __UpperCamelCase : Optional[Any]="gelu_new" , __UpperCamelCase : List[str]=0.1 , __UpperCamelCase : List[Any]=0.1 , __UpperCamelCase : Union[str, Any]=0.0 , __UpperCamelCase : Optional[int]=0.1 , __UpperCamelCase : Dict=None , __UpperCamelCase : Optional[Any]=1e-9 , __UpperCamelCase : int="mean" , __UpperCamelCase : Tuple="relative_shift" , __UpperCamelCase : Dict=True , __UpperCamelCase : Dict=True , __UpperCamelCase : int=True , **__UpperCamelCase : int , ):
_UpperCAmelCase = vocab_size
_UpperCAmelCase = block_sizes
_UpperCAmelCase = [1] * len(__UpperCamelCase ) if block_repeats is None else block_repeats
assert len(__UpperCamelCase ) == len(
self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
_UpperCAmelCase = num_decoder_layers
_UpperCAmelCase = d_model
_UpperCAmelCase = n_head
_UpperCAmelCase = d_head
_UpperCAmelCase = d_inner
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout
_UpperCAmelCase = attention_dropout
_UpperCAmelCase = activation_dropout
_UpperCAmelCase = initializer_range
_UpperCAmelCase = initializer_std
_UpperCAmelCase = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], F'''Got {pooling_type} for `pooling_type` but only \'mean\' and \'max\' are supported.'''
_UpperCAmelCase = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], F'''Got {attention_type} for `attention_type` but only \'relative_shift\' and \'factorized\' are supported.'''
_UpperCAmelCase = attention_type
_UpperCAmelCase = separate_cls
_UpperCAmelCase = truncate_seq
_UpperCAmelCase = pool_q_only
super().__init__(**__UpperCamelCase )
@property
def UpperCAmelCase__ ( self : str ):
return sum(self.block_sizes )
@num_hidden_layers.setter
def UpperCAmelCase__ ( self : List[str] , __UpperCamelCase : Union[str, Any] ):
raise NotImplementedError(
"This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`." )
@property
def UpperCAmelCase__ ( self : int ):
return len(self.block_sizes )
@num_blocks.setter
def UpperCAmelCase__ ( self : Optional[Any] , __UpperCamelCase : Optional[Any] ):
raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`." )
| 684 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __SCREAMING_SNAKE_CASE ( lowercase):
__SCREAMING_SNAKE_CASE : str = (UniPCMultistepScheduler,)
__SCREAMING_SNAKE_CASE : Dict = (("""num_inference_steps""", 25),)
def UpperCAmelCase__ ( self : str , **__UpperCamelCase : Any ):
_UpperCAmelCase = {
"num_train_timesteps": 1_000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"solver_order": 2,
"solver_type": "bh2",
}
config.update(**__UpperCamelCase )
return config
def UpperCAmelCase__ ( self : int , __UpperCamelCase : Any=0 , **__UpperCamelCase : Any ):
_UpperCAmelCase = dict(self.forward_default_kwargs )
_UpperCAmelCase = kwargs.pop("num_inference_steps" , __UpperCamelCase )
_UpperCAmelCase = self.dummy_sample
_UpperCAmelCase = 0.1 * sample
_UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase )
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
scheduler.set_timesteps(__UpperCamelCase )
# copy over dummy past residuals
_UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__UpperCamelCase )
_UpperCAmelCase = scheduler_class.from_pretrained(__UpperCamelCase )
new_scheduler.set_timesteps(__UpperCamelCase )
# copy over dummy past residuals
_UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
_UpperCAmelCase , _UpperCAmelCase = sample, sample
for t in range(__UpperCamelCase , time_step + scheduler.config.solver_order + 1 ):
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
_UpperCAmelCase = new_scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : Union[str, Any]=0 , **__UpperCamelCase : List[Any] ):
_UpperCAmelCase = dict(self.forward_default_kwargs )
_UpperCAmelCase = kwargs.pop("num_inference_steps" , __UpperCamelCase )
_UpperCAmelCase = self.dummy_sample
_UpperCAmelCase = 0.1 * sample
_UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase = self.get_scheduler_config()
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
scheduler.set_timesteps(__UpperCamelCase )
# copy over dummy past residuals (must be after setting timesteps)
_UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__UpperCamelCase )
_UpperCAmelCase = scheduler_class.from_pretrained(__UpperCamelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(__UpperCamelCase )
# copy over dummy past residual (must be after setting timesteps)
_UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
_UpperCAmelCase = new_scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : Dict=None , **__UpperCamelCase : Optional[Any] ):
if scheduler is None:
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase )
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase )
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
_UpperCAmelCase = 10
_UpperCAmelCase = self.dummy_model()
_UpperCAmelCase = self.dummy_sample_deter
scheduler.set_timesteps(__UpperCamelCase )
for i, t in enumerate(scheduler.timesteps ):
_UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample
return sample
def UpperCAmelCase__ ( self : List[str] ):
_UpperCAmelCase = dict(self.forward_default_kwargs )
_UpperCAmelCase = kwargs.pop("num_inference_steps" , __UpperCamelCase )
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase = self.get_scheduler_config()
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
_UpperCAmelCase = self.dummy_sample
_UpperCAmelCase = 0.1 * sample
if num_inference_steps is not None and hasattr(__UpperCamelCase , "set_timesteps" ):
scheduler.set_timesteps(__UpperCamelCase )
elif num_inference_steps is not None and not hasattr(__UpperCamelCase , "set_timesteps" ):
_UpperCAmelCase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
_UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
_UpperCAmelCase = scheduler.timesteps[5]
_UpperCAmelCase = scheduler.timesteps[6]
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCAmelCase__ ( self : Union[str, Any] ):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
_UpperCAmelCase = UniPCMultistepScheduler(**self.get_scheduler_config() )
_UpperCAmelCase = self.full_loop(scheduler=__UpperCamelCase )
_UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_mean.item() - 0.2464 ) < 1e-3
_UpperCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config )
_UpperCAmelCase = DEISMultistepScheduler.from_config(scheduler.config )
_UpperCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config )
_UpperCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config )
_UpperCAmelCase = self.full_loop(scheduler=__UpperCamelCase )
_UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_mean.item() - 0.2464 ) < 1e-3
def UpperCAmelCase__ ( self : str ):
for timesteps in [25, 50, 100, 999, 1_000]:
self.check_over_configs(num_train_timesteps=__UpperCamelCase )
def UpperCAmelCase__ ( self : int ):
self.check_over_configs(thresholding=__UpperCamelCase )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=__UpperCamelCase , prediction_type=__UpperCamelCase , sample_max_value=__UpperCamelCase , solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , )
def UpperCAmelCase__ ( self : int ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__UpperCamelCase )
def UpperCAmelCase__ ( self : int ):
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , prediction_type=__UpperCamelCase , )
_UpperCAmelCase = self.full_loop(
solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , prediction_type=__UpperCamelCase , )
assert not torch.isnan(__UpperCamelCase ).any(), "Samples have nan numbers"
def UpperCAmelCase__ ( self : Optional[int] ):
self.check_over_configs(lower_order_final=__UpperCamelCase )
self.check_over_configs(lower_order_final=__UpperCamelCase )
def UpperCAmelCase__ ( self : Optional[Any] ):
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]:
self.check_over_forward(num_inference_steps=__UpperCamelCase , time_step=0 )
def UpperCAmelCase__ ( self : List[str] ):
_UpperCAmelCase = self.full_loop()
_UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_mean.item() - 0.2464 ) < 1e-3
def UpperCAmelCase__ ( self : Optional[Any] ):
_UpperCAmelCase = self.full_loop(prediction_type="v_prediction" )
_UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_mean.item() - 0.1014 ) < 1e-3
def UpperCAmelCase__ ( self : Tuple ):
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config(thresholding=__UpperCamelCase , dynamic_thresholding_ratio=0 )
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
_UpperCAmelCase = 10
_UpperCAmelCase = self.dummy_model()
_UpperCAmelCase = self.dummy_sample_deter.half()
scheduler.set_timesteps(__UpperCamelCase )
for i, t in enumerate(scheduler.timesteps ):
_UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample
assert sample.dtype == torch.floataa
def UpperCAmelCase__ ( self : str , **__UpperCamelCase : Optional[Any] ):
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase )
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
| 684 | 1 |
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 1 / sqrt(2 ) ) -> IIRFilter:
_UpperCAmelCase = tau * frequency / samplerate
_UpperCAmelCase = sin(_lowerCAmelCase )
_UpperCAmelCase = cos(_lowerCAmelCase )
_UpperCAmelCase = _sin / (2 * q_factor)
_UpperCAmelCase = (1 - _cos) / 2
_UpperCAmelCase = 1 - _cos
_UpperCAmelCase = 1 + alpha
_UpperCAmelCase = -2 * _cos
_UpperCAmelCase = 1 - alpha
_UpperCAmelCase = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 1 / sqrt(2 ) ) -> IIRFilter:
_UpperCAmelCase = tau * frequency / samplerate
_UpperCAmelCase = sin(_lowerCAmelCase )
_UpperCAmelCase = cos(_lowerCAmelCase )
_UpperCAmelCase = _sin / (2 * q_factor)
_UpperCAmelCase = (1 + _cos) / 2
_UpperCAmelCase = -1 - _cos
_UpperCAmelCase = 1 + alpha
_UpperCAmelCase = -2 * _cos
_UpperCAmelCase = 1 - alpha
_UpperCAmelCase = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 1 / sqrt(2 ) ) -> IIRFilter:
_UpperCAmelCase = tau * frequency / samplerate
_UpperCAmelCase = sin(_lowerCAmelCase )
_UpperCAmelCase = cos(_lowerCAmelCase )
_UpperCAmelCase = _sin / (2 * q_factor)
_UpperCAmelCase = _sin / 2
_UpperCAmelCase = 0
_UpperCAmelCase = -ba
_UpperCAmelCase = 1 + alpha
_UpperCAmelCase = -2 * _cos
_UpperCAmelCase = 1 - alpha
_UpperCAmelCase = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 1 / sqrt(2 ) ) -> IIRFilter:
_UpperCAmelCase = tau * frequency / samplerate
_UpperCAmelCase = sin(_lowerCAmelCase )
_UpperCAmelCase = cos(_lowerCAmelCase )
_UpperCAmelCase = _sin / (2 * q_factor)
_UpperCAmelCase = 1 - alpha
_UpperCAmelCase = -2 * _cos
_UpperCAmelCase = 1 + alpha
_UpperCAmelCase = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] )
return filt
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 1 / sqrt(2 ) , ) -> IIRFilter:
_UpperCAmelCase = tau * frequency / samplerate
_UpperCAmelCase = sin(_lowerCAmelCase )
_UpperCAmelCase = cos(_lowerCAmelCase )
_UpperCAmelCase = _sin / (2 * q_factor)
_UpperCAmelCase = 10 ** (gain_db / 40)
_UpperCAmelCase = 1 + alpha * big_a
_UpperCAmelCase = -2 * _cos
_UpperCAmelCase = 1 - alpha * big_a
_UpperCAmelCase = 1 + alpha / big_a
_UpperCAmelCase = -2 * _cos
_UpperCAmelCase = 1 - alpha / big_a
_UpperCAmelCase = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 1 / sqrt(2 ) , ) -> IIRFilter:
_UpperCAmelCase = tau * frequency / samplerate
_UpperCAmelCase = sin(_lowerCAmelCase )
_UpperCAmelCase = cos(_lowerCAmelCase )
_UpperCAmelCase = _sin / (2 * q_factor)
_UpperCAmelCase = 10 ** (gain_db / 40)
_UpperCAmelCase = (big_a + 1) - (big_a - 1) * _cos
_UpperCAmelCase = (big_a + 1) + (big_a - 1) * _cos
_UpperCAmelCase = (big_a - 1) - (big_a + 1) * _cos
_UpperCAmelCase = (big_a - 1) + (big_a + 1) * _cos
_UpperCAmelCase = 2 * sqrt(_lowerCAmelCase ) * alpha
_UpperCAmelCase = big_a * (pmc + aaa)
_UpperCAmelCase = 2 * big_a * mpc
_UpperCAmelCase = big_a * (pmc - aaa)
_UpperCAmelCase = ppmc + aaa
_UpperCAmelCase = -2 * pmpc
_UpperCAmelCase = ppmc - aaa
_UpperCAmelCase = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 1 / sqrt(2 ) , ) -> IIRFilter:
_UpperCAmelCase = tau * frequency / samplerate
_UpperCAmelCase = sin(_lowerCAmelCase )
_UpperCAmelCase = cos(_lowerCAmelCase )
_UpperCAmelCase = _sin / (2 * q_factor)
_UpperCAmelCase = 10 ** (gain_db / 40)
_UpperCAmelCase = (big_a + 1) - (big_a - 1) * _cos
_UpperCAmelCase = (big_a + 1) + (big_a - 1) * _cos
_UpperCAmelCase = (big_a - 1) - (big_a + 1) * _cos
_UpperCAmelCase = (big_a - 1) + (big_a + 1) * _cos
_UpperCAmelCase = 2 * sqrt(_lowerCAmelCase ) * alpha
_UpperCAmelCase = big_a * (ppmc + aaa)
_UpperCAmelCase = -2 * big_a * pmpc
_UpperCAmelCase = big_a * (ppmc - aaa)
_UpperCAmelCase = pmc + aaa
_UpperCAmelCase = 2 * mpc
_UpperCAmelCase = pmc - aaa
_UpperCAmelCase = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
| 684 |
import math
class __SCREAMING_SNAKE_CASE :
def __init__( self : Union[str, Any] , __UpperCamelCase : List[Any]=0 ): # a graph with Node 0,1,...,N-1
_UpperCAmelCase = n
_UpperCAmelCase = [
[math.inf for j in range(0 , __UpperCamelCase )] for i in range(0 , __UpperCamelCase )
] # adjacency matrix for weight
_UpperCAmelCase = [
[math.inf for j in range(0 , __UpperCamelCase )] for i in range(0 , __UpperCamelCase )
] # dp[i][j] stores minimum distance from i to j
def UpperCAmelCase__ ( self : str , __UpperCamelCase : Tuple , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[str] ):
_UpperCAmelCase = w
def UpperCAmelCase__ ( self : Dict ):
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
_UpperCAmelCase = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def UpperCAmelCase__ ( self : List[str] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Any ):
return self.dp[u][v]
if __name__ == "__main__":
__lowerCAmelCase = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 1_0)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 1_0)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 684 | 1 |
def __lowerCamelCase ( _lowerCAmelCase ) -> List[Any]: # noqa: E741
_UpperCAmelCase = len(_lowerCAmelCase )
_UpperCAmelCase = 0
_UpperCAmelCase = [0] * n
_UpperCAmelCase = [False] * n
_UpperCAmelCase = [False] * n
def dfs(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
if parent == root:
out_edge_count += 1
_UpperCAmelCase = True
_UpperCAmelCase = at
for to in l[at]:
if to == parent:
pass
elif not visited[to]:
_UpperCAmelCase = dfs(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
_UpperCAmelCase = min(low[at] , low[to] )
# AP found via bridge
if at < low[to]:
_UpperCAmelCase = True
# AP found via cycle
if at == low[to]:
_UpperCAmelCase = True
else:
_UpperCAmelCase = min(low[at] , _lowerCAmelCase )
return out_edge_count
for i in range(_lowerCAmelCase ):
if not visited[i]:
_UpperCAmelCase = 0
_UpperCAmelCase = dfs(_lowerCAmelCase , _lowerCAmelCase , -1 , _lowerCAmelCase )
_UpperCAmelCase = out_edge_count > 1
for x in range(len(_lowerCAmelCase ) ):
if is_art[x] is True:
print(_lowerCAmelCase )
# Adjacency list of graph
__lowerCAmelCase = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
| 684 |
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( lowercase , lowercase , unittest.TestCase):
__SCREAMING_SNAKE_CASE : Dict = VQModel
__SCREAMING_SNAKE_CASE : Optional[int] = """sample"""
@property
def UpperCAmelCase__ ( self : List[str] , __UpperCamelCase : Optional[int]=(32, 32) ):
_UpperCAmelCase = 4
_UpperCAmelCase = 3
_UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__UpperCamelCase )
return {"sample": image}
@property
def UpperCAmelCase__ ( self : Tuple ):
return (3, 32, 32)
@property
def UpperCAmelCase__ ( self : str ):
return (3, 32, 32)
def UpperCAmelCase__ ( self : Dict ):
_UpperCAmelCase = {
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 3,
}
_UpperCAmelCase = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Dict ):
pass
def UpperCAmelCase__ ( self : str ):
pass
def UpperCAmelCase__ ( self : List[str] ):
_UpperCAmelCase , _UpperCAmelCase = VQModel.from_pretrained("fusing/vqgan-dummy" , output_loading_info=__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(__UpperCamelCase )
_UpperCAmelCase = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def UpperCAmelCase__ ( self : List[Any] ):
_UpperCAmelCase = VQModel.from_pretrained("fusing/vqgan-dummy" )
model.to(__UpperCamelCase ).eval()
torch.manual_seed(0 )
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0 )
_UpperCAmelCase = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
_UpperCAmelCase = image.to(__UpperCamelCase )
with torch.no_grad():
_UpperCAmelCase = model(__UpperCamelCase ).sample
_UpperCAmelCase = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
_UpperCAmelCase = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143] )
# fmt: on
self.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
| 684 | 1 |
def __lowerCamelCase ( _lowerCAmelCase ) -> list:
_UpperCAmelCase = len(_lowerCAmelCase )
for i in range(1 , _lowerCAmelCase ):
_UpperCAmelCase = collection[i]
_UpperCAmelCase = 0
_UpperCAmelCase = i - 1
while low <= high:
_UpperCAmelCase = (low + high) // 2
if val < collection[mid]:
_UpperCAmelCase = mid - 1
else:
_UpperCAmelCase = mid + 1
for j in range(_lowerCAmelCase , _lowerCAmelCase , -1 ):
_UpperCAmelCase = collection[j - 1]
_UpperCAmelCase = val
return collection
if __name__ == "__main__":
__lowerCAmelCase = input("Enter numbers separated by a comma:\n").strip()
__lowerCAmelCase = [int(item) for item in user_input.split(",")]
print(binary_insertion_sort(unsorted))
| 684 |
import requests
__lowerCAmelCase = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="
def __lowerCamelCase ( _lowerCAmelCase ) -> None:
# fetching a list of articles in json format
_UpperCAmelCase = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page["articles"] , 1 ):
print(F'''{i}.) {article["title"]}''' )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
| 684 | 1 |
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class __SCREAMING_SNAKE_CASE ( lowercase):
def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : float ):
return 0.0
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> tuple[int | float, int | float]:
_UpperCAmelCase = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
_UpperCAmelCase = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
return lowest, highest
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> None:
_UpperCAmelCase = 512
_UpperCAmelCase = [1] + [0] * (size - 1)
_UpperCAmelCase = [filter_type.process(_lowerCAmelCase ) for item in inputs]
_UpperCAmelCase = [0] * (samplerate - size) # zero-padding
outputs += filler
_UpperCAmelCase = np.abs(np.fft.fft(_lowerCAmelCase ) )
_UpperCAmelCase = 20 * np.logaa(_lowerCAmelCase )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel("Frequency (Hz)" )
plt.xscale("log" )
# Display within reasonable bounds
_UpperCAmelCase = get_bounds(_lowerCAmelCase , _lowerCAmelCase )
plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) )
plt.ylabel("Gain (dB)" )
plt.plot(_lowerCAmelCase )
plt.show()
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> None:
_UpperCAmelCase = 512
_UpperCAmelCase = [1] + [0] * (size - 1)
_UpperCAmelCase = [filter_type.process(_lowerCAmelCase ) for item in inputs]
_UpperCAmelCase = [0] * (samplerate - size) # zero-padding
outputs += filler
_UpperCAmelCase = np.angle(np.fft.fft(_lowerCAmelCase ) )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel("Frequency (Hz)" )
plt.xscale("log" )
plt.ylim(-2 * pi , 2 * pi )
plt.ylabel("Phase shift (Radians)" )
plt.plot(np.unwrap(_lowerCAmelCase , -2 * pi ) )
plt.show()
| 684 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
def UpperCAmelCase__ ( self : Any ):
_UpperCAmelCase = 10
def UpperCAmelCase__ ( self : Optional[int] ):
_UpperCAmelCase = [1, 2, 3, 4]
_UpperCAmelCase = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(__UpperCamelCase , self.block_size , 0 ) , __UpperCamelCase )
def UpperCAmelCase__ ( self : List[Any] ):
_UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
_UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(__UpperCamelCase , self.block_size , 0 ) , __UpperCamelCase )
def UpperCAmelCase__ ( self : int ):
_UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
_UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(__UpperCamelCase , self.block_size , 0 ) , __UpperCamelCase )
def UpperCAmelCase__ ( self : Union[str, Any] ):
_UpperCAmelCase = "It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this."
_UpperCAmelCase , _UpperCAmelCase = process_story(__UpperCamelCase )
self.assertEqual(__UpperCamelCase , [] )
def UpperCAmelCase__ ( self : str ):
_UpperCAmelCase = ""
_UpperCAmelCase , _UpperCAmelCase = process_story(__UpperCamelCase )
self.assertEqual(__UpperCamelCase , [] )
self.assertEqual(__UpperCamelCase , [] )
def UpperCAmelCase__ ( self : Optional[Any] ):
_UpperCAmelCase = (
"It was the year of Our Lord one thousand seven hundred and "
"seventy-five\n\nSpiritual revelations were conceded to England "
"at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
)
_UpperCAmelCase , _UpperCAmelCase = process_story(__UpperCamelCase )
_UpperCAmelCase = [
"It was the year of Our Lord one thousand seven hundred and seventy-five.",
"Spiritual revelations were conceded to England at that favoured period, as at this.",
]
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = ["It was the best of times."]
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def UpperCAmelCase__ ( self : Dict ):
_UpperCAmelCase = torch.tensor([1, 2, 3, 4] )
_UpperCAmelCase = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(__UpperCamelCase , 0 ).numpy() , expected.numpy() )
def UpperCAmelCase__ ( self : Optional[Any] ):
_UpperCAmelCase = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
_UpperCAmelCase = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(__UpperCamelCase , 23 ).numpy() , expected.numpy() )
def UpperCAmelCase__ ( self : Optional[int] ):
_UpperCAmelCase = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
_UpperCAmelCase = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(__UpperCamelCase , 1 ).numpy() , expected.numpy() )
def UpperCAmelCase__ ( self : Dict ):
_UpperCAmelCase = 101
_UpperCAmelCase = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
_UpperCAmelCase = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
_UpperCAmelCase = compute_token_type_ids(__UpperCamelCase , __UpperCamelCase )
np.testing.assert_array_equal(__UpperCamelCase , __UpperCamelCase )
| 684 | 1 |
import datasets
from .evaluate import evaluate
__lowerCAmelCase = "\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n"
__lowerCAmelCase = "\nThis metric wrap the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n"
__lowerCAmelCase = "\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair as given in the references (see below)\n - 'prediction_text': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair (see above),\n - 'answers': a Dict in the CUAD dataset format\n {\n 'text': list of possible texts for the answer, as a list of strings\n 'answer_start': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n 'exact_match': Exact match (the normalized answer exactly match the gold answer)\n 'f1': The F-score of predicted tokens versus the gold answer\n 'aupr': Area Under the Precision-Recall curve\n 'prec_at_80_recall': Precision at 80% recall\n 'prec_at_90_recall': Precision at 90% recall\nExamples:\n >>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n >>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n >>> cuad_metric = datasets.load_metric(\"cuad\")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class __SCREAMING_SNAKE_CASE ( datasets.Metric):
def UpperCAmelCase__ ( self : Any ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": {
"id": datasets.Value("string" ),
"prediction_text": datasets.features.Sequence(datasets.Value("string" ) ),
},
"references": {
"id": datasets.Value("string" ),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
},
} ) , codebase_urls=["https://www.atticusprojectai.org/cuad"] , reference_urls=["https://www.atticusprojectai.org/cuad"] , )
def UpperCAmelCase__ ( self : str , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any] ):
_UpperCAmelCase = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
_UpperCAmelCase = [
{
"paragraphs": [
{
"qas": [
{
"answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
"id": ref["id"],
}
for ref in references
]
}
]
}
]
_UpperCAmelCase = evaluate(dataset=__UpperCamelCase , predictions=__UpperCamelCase )
return score
| 684 |
from __future__ import annotations
from collections import namedtuple
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> tuple:
_UpperCAmelCase = namedtuple("result" , "name value" )
if (voltage, current, power).count(0 ) != 1:
raise ValueError("Only one argument must be 0" )
elif power < 0:
raise ValueError(
"Power cannot be negative in any electrical/electronics system" )
elif voltage == 0:
return result("voltage" , power / current )
elif current == 0:
return result("current" , power / voltage )
elif power == 0:
return result("power" , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 684 | 1 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
def UpperCAmelCase__ ( self : Any ):
_UpperCAmelCase = 10
def UpperCAmelCase__ ( self : Optional[int] ):
_UpperCAmelCase = [1, 2, 3, 4]
_UpperCAmelCase = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(__UpperCamelCase , self.block_size , 0 ) , __UpperCamelCase )
def UpperCAmelCase__ ( self : List[Any] ):
_UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
_UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(__UpperCamelCase , self.block_size , 0 ) , __UpperCamelCase )
def UpperCAmelCase__ ( self : int ):
_UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
_UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(__UpperCamelCase , self.block_size , 0 ) , __UpperCamelCase )
def UpperCAmelCase__ ( self : Union[str, Any] ):
_UpperCAmelCase = "It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this."
_UpperCAmelCase , _UpperCAmelCase = process_story(__UpperCamelCase )
self.assertEqual(__UpperCamelCase , [] )
def UpperCAmelCase__ ( self : str ):
_UpperCAmelCase = ""
_UpperCAmelCase , _UpperCAmelCase = process_story(__UpperCamelCase )
self.assertEqual(__UpperCamelCase , [] )
self.assertEqual(__UpperCamelCase , [] )
def UpperCAmelCase__ ( self : Optional[Any] ):
_UpperCAmelCase = (
"It was the year of Our Lord one thousand seven hundred and "
"seventy-five\n\nSpiritual revelations were conceded to England "
"at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
)
_UpperCAmelCase , _UpperCAmelCase = process_story(__UpperCamelCase )
_UpperCAmelCase = [
"It was the year of Our Lord one thousand seven hundred and seventy-five.",
"Spiritual revelations were conceded to England at that favoured period, as at this.",
]
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = ["It was the best of times."]
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def UpperCAmelCase__ ( self : Dict ):
_UpperCAmelCase = torch.tensor([1, 2, 3, 4] )
_UpperCAmelCase = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(__UpperCamelCase , 0 ).numpy() , expected.numpy() )
def UpperCAmelCase__ ( self : Optional[Any] ):
_UpperCAmelCase = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
_UpperCAmelCase = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(__UpperCamelCase , 23 ).numpy() , expected.numpy() )
def UpperCAmelCase__ ( self : Optional[int] ):
_UpperCAmelCase = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
_UpperCAmelCase = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(__UpperCamelCase , 1 ).numpy() , expected.numpy() )
def UpperCAmelCase__ ( self : Dict ):
_UpperCAmelCase = 101
_UpperCAmelCase = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
_UpperCAmelCase = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
_UpperCAmelCase = compute_token_type_ids(__UpperCamelCase , __UpperCamelCase )
np.testing.assert_array_equal(__UpperCamelCase , __UpperCamelCase )
| 684 |
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def __lowerCamelCase ( _lowerCAmelCase ) -> Any:
_UpperCAmelCase = {}
_UpperCAmelCase = job["started_at"]
_UpperCAmelCase = job["completed_at"]
_UpperCAmelCase = date_parser.parse(_lowerCAmelCase )
_UpperCAmelCase = date_parser.parse(_lowerCAmelCase )
_UpperCAmelCase = round((end_datetime - start_datetime).total_seconds() / 60.0 )
_UpperCAmelCase = start
_UpperCAmelCase = end
_UpperCAmelCase = duration_in_min
return job_info
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase=None ) -> str:
_UpperCAmelCase = None
if token is not None:
_UpperCAmelCase = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}
_UpperCAmelCase = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
_UpperCAmelCase = requests.get(_lowerCAmelCase , headers=_lowerCAmelCase ).json()
_UpperCAmelCase = {}
try:
job_time.update({job["name"]: extract_time_from_single_job(_lowerCAmelCase ) for job in result["jobs"]} )
_UpperCAmelCase = math.ceil((result["total_count"] - 100) / 100 )
for i in range(_lowerCAmelCase ):
_UpperCAmelCase = requests.get(url + F'''&page={i + 2}''' , headers=_lowerCAmelCase ).json()
job_time.update({job["name"]: extract_time_from_single_job(_lowerCAmelCase ) for job in result["jobs"]} )
return job_time
except Exception:
print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
__lowerCAmelCase = parser.parse_args()
__lowerCAmelCase = get_job_time(args.workflow_run_id)
__lowerCAmelCase = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(F'''{k}: {v["duration"]}''')
| 684 | 1 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def __lowerCamelCase ( _lowerCAmelCase ) -> Any:
# A local function to see if a dot lands in the circle.
def is_in_circle(_lowerCAmelCase , _lowerCAmelCase ) -> bool:
_UpperCAmelCase = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
_UpperCAmelCase = mean(
int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
for _ in range(_lowerCAmelCase ) )
# The ratio of the area for circle to square is pi/4.
_UpperCAmelCase = proportion * 4
print(F'''The estimated value of pi is {pi_estimate}''' )
print(F'''The numpy value of pi is {pi}''' )
print(F'''The total error is {abs(pi - pi_estimate )}''' )
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 0.0 , _lowerCAmelCase = 1.0 , ) -> float:
return mean(
function_to_integrate(uniform(_lowerCAmelCase , _lowerCAmelCase ) ) for _ in range(_lowerCAmelCase ) ) * (max_value - min_value)
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase = 0.0 , _lowerCAmelCase = 1.0 ) -> None:
def identity_function(_lowerCAmelCase ) -> float:
return x
_UpperCAmelCase = area_under_curve_estimator(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
_UpperCAmelCase = (max_value * max_value - min_value * min_value) / 2
print("******************" )
print(F'''Estimating area under y=x where x varies from {min_value} to {max_value}''' )
print(F'''Estimated value is {estimated_value}''' )
print(F'''Expected value is {expected_value}''' )
print(F'''Total error is {abs(estimated_value - expected_value )}''' )
print("******************" )
def __lowerCamelCase ( _lowerCAmelCase ) -> None:
def function_to_integrate(_lowerCAmelCase ) -> float:
return sqrt(4.0 - x * x )
_UpperCAmelCase = area_under_curve_estimator(
_lowerCAmelCase , _lowerCAmelCase , 0.0 , 2.0 )
print("******************" )
print("Estimating pi using area_under_curve_estimator" )
print(F'''Estimated value is {estimated_value}''' )
print(F'''Expected value is {pi}''' )
print(F'''Total error is {abs(estimated_value - pi )}''' )
print("******************" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 684 |
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
__lowerCAmelCase = {
"gwf-440k": {
"url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
"sample_rate": 4_8_0_0_0,
"sample_size": 6_5_5_3_6,
},
"jmann-small-190k": {
"url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
"sample_rate": 4_8_0_0_0,
"sample_size": 6_5_5_3_6,
},
"jmann-large-580k": {
"url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
"sample_rate": 4_8_0_0_0,
"sample_size": 1_3_1_0_7_2,
},
"maestro-uncond-150k": {
"url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
"sample_rate": 1_6_0_0_0,
"sample_size": 6_5_5_3_6,
},
"unlocked-uncond-250k": {
"url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
"sample_rate": 1_6_0_0_0,
"sample_size": 6_5_5_3_6,
},
"honk-140k": {
"url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
"sample_rate": 1_6_0_0_0,
"sample_size": 6_5_5_3_6,
},
}
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
return torch.atana(_lowerCAmelCase , _lowerCAmelCase ) / math.pi * 2
def __lowerCamelCase ( _lowerCAmelCase ) -> Union[str, Any]:
_UpperCAmelCase = torch.sin(t * math.pi / 2 ) ** 2
_UpperCAmelCase = (1 - sigma**2) ** 0.5
return alpha_sigma_to_t(_lowerCAmelCase , _lowerCAmelCase )
class __SCREAMING_SNAKE_CASE ( lowercase):
pass
class __SCREAMING_SNAKE_CASE ( nn.Module):
def __init__( self : str , __UpperCamelCase : Optional[int] ):
super().__init__()
_UpperCAmelCase = DiffusionAttnUnetaD(__UpperCamelCase , n_attn_layers=4 )
_UpperCAmelCase = deepcopy(self.diffusion )
_UpperCAmelCase = torch.quasirandom.SobolEngine(1 , scramble=__UpperCamelCase )
def __lowerCamelCase ( _lowerCAmelCase ) -> int:
_UpperCAmelCase = MODELS_MAP[model_name]["url"]
os.system(F'''wget {url} ./''' )
return F'''./{model_name}.ckpt'''
__lowerCAmelCase = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
}
__lowerCAmelCase = {
"8": "resnets.0",
"9": "attentions.0",
"10": "resnets.1",
"11": "attentions.1",
"12": "resnets.2",
"13": "attentions.2",
}
__lowerCAmelCase = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
"8": "resnets.3",
"9": "attentions.3",
"10": "resnets.4",
"11": "attentions.4",
"12": "resnets.5",
"13": "attentions.5",
}
__lowerCAmelCase = {
"0": "resnets.0",
"1": "resnets.1",
"2": "resnets.2",
"4": "resnets.0",
"5": "resnets.1",
"6": "resnets.2",
}
__lowerCAmelCase = {
"skip": "conv_skip",
"main.0": "conv_1",
"main.1": "group_norm_1",
"main.3": "conv_2",
"main.4": "group_norm_2",
}
__lowerCAmelCase = {
"norm": "group_norm",
"qkv_proj": ["query", "key", "value"],
"out_proj": ["proj_attn"],
}
def __lowerCamelCase ( _lowerCAmelCase ) -> List[str]:
if name.startswith("skip" ):
return name.replace("skip" , RES_CONV_MAP["skip"] )
# name has to be of format main.{digit}
if not name.startswith("main." ):
raise ValueError(F'''ResConvBlock error with {name}''' )
return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def __lowerCamelCase ( _lowerCAmelCase ) -> Optional[Any]:
for key, value in ATTN_MAP.items():
if name.startswith(_lowerCAmelCase ) and not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
return name.replace(_lowerCAmelCase , _lowerCAmelCase )
elif name.startswith(_lowerCAmelCase ):
return [name.replace(_lowerCAmelCase , _lowerCAmelCase ) for v in value]
raise ValueError(F'''Attn error with {name}''' )
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase=13 ) -> List[Any]:
_UpperCAmelCase = input_string
if string.split("." )[0] == "timestep_embed":
return string.replace("timestep_embed" , "time_proj" )
_UpperCAmelCase = 0
if string.startswith("net.3." ):
depth += 1
_UpperCAmelCase = string[6:]
elif string.startswith("net." ):
_UpperCAmelCase = string[4:]
while string.startswith("main.7." ):
depth += 1
_UpperCAmelCase = string[7:]
if string.startswith("main." ):
_UpperCAmelCase = string[5:]
# mid block
if string[:2].isdigit():
_UpperCAmelCase = string[:2]
_UpperCAmelCase = string[2:]
else:
_UpperCAmelCase = string[0]
_UpperCAmelCase = string[1:]
if depth == max_depth:
_UpperCAmelCase = MID_NUM_TO_LAYER[layer_num]
_UpperCAmelCase = "mid_block"
elif depth > 0 and int(_lowerCAmelCase ) < 7:
_UpperCAmelCase = DOWN_NUM_TO_LAYER[layer_num]
_UpperCAmelCase = F'''down_blocks.{depth}'''
elif depth > 0 and int(_lowerCAmelCase ) > 7:
_UpperCAmelCase = UP_NUM_TO_LAYER[layer_num]
_UpperCAmelCase = F'''up_blocks.{max_depth - depth - 1}'''
elif depth == 0:
_UpperCAmelCase = DEPTH_0_TO_LAYER[layer_num]
_UpperCAmelCase = F'''up_blocks.{max_depth - 1}''' if int(_lowerCAmelCase ) > 3 else "down_blocks.0"
if not string_left.startswith("." ):
raise ValueError(F'''Naming error with {input_string} and string_left: {string_left}.''' )
_UpperCAmelCase = string_left[1:]
if "resnets" in new_layer:
_UpperCAmelCase = convert_resconv_naming(_lowerCAmelCase )
elif "attentions" in new_layer:
_UpperCAmelCase = convert_attn_naming(_lowerCAmelCase )
_UpperCAmelCase = new_string_left
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_UpperCAmelCase = prefix + "." + new_layer + "." + string_left
else:
_UpperCAmelCase = [prefix + "." + new_layer + "." + s for s in string_left]
return new_string
def __lowerCamelCase ( _lowerCAmelCase ) -> Optional[int]:
_UpperCAmelCase = {}
for k, v in state_dict.items():
if k.endswith("kernel" ):
# up- and downsample layers, don't have trainable weights
continue
_UpperCAmelCase = rename(_lowerCAmelCase )
# check if we need to transform from Conv => Linear for attention
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_UpperCAmelCase = transform_conv_attns(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
else:
_UpperCAmelCase = v
return new_state_dict
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]:
if len(_lowerCAmelCase ) == 1:
if len(v.shape ) == 3:
# weight
_UpperCAmelCase = v[:, :, 0]
else:
# bias
_UpperCAmelCase = v
else:
# qkv matrices
_UpperCAmelCase = v.shape[0]
_UpperCAmelCase = trippled_shape // 3
for i in range(3 ):
if len(v.shape ) == 3:
_UpperCAmelCase = v[i * single_shape : (i + 1) * single_shape, :, 0]
else:
_UpperCAmelCase = v[i * single_shape : (i + 1) * single_shape]
return new_state_dict
def __lowerCamelCase ( _lowerCAmelCase ) -> Tuple:
_UpperCAmelCase = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
_UpperCAmelCase = args.model_path.split("/" )[-1].split("." )[0]
if not os.path.isfile(args.model_path ):
assert (
model_name == args.model_path
), F'''Make sure to provide one of the official model names {MODELS_MAP.keys()}'''
_UpperCAmelCase = download(_lowerCAmelCase )
_UpperCAmelCase = MODELS_MAP[model_name]["sample_rate"]
_UpperCAmelCase = MODELS_MAP[model_name]["sample_size"]
_UpperCAmelCase = Object()
_UpperCAmelCase = sample_size
_UpperCAmelCase = sample_rate
_UpperCAmelCase = 0
_UpperCAmelCase = UNetaDModel(sample_size=_lowerCAmelCase , sample_rate=_lowerCAmelCase )
_UpperCAmelCase = diffusers_model.state_dict()
_UpperCAmelCase = DiffusionUncond(_lowerCAmelCase )
orig_model.load_state_dict(torch.load(args.model_path , map_location=_lowerCAmelCase )["state_dict"] )
_UpperCAmelCase = orig_model.diffusion_ema.eval()
_UpperCAmelCase = orig_model.state_dict()
_UpperCAmelCase = rename_orig_weights(_lowerCAmelCase )
_UpperCAmelCase = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
_UpperCAmelCase = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
assert len(_lowerCAmelCase ) == 0, F'''Problem with {renamed_minus_diffusers}'''
assert all(k.endswith("kernel" ) for k in list(_lowerCAmelCase ) ), F'''Problem with {diffusers_minus_renamed}'''
for key, value in renamed_state_dict.items():
assert (
diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
), F'''Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'''
if key == "time_proj.weight":
_UpperCAmelCase = value.squeeze()
_UpperCAmelCase = value
diffusers_model.load_state_dict(_lowerCAmelCase )
_UpperCAmelCase = 100
_UpperCAmelCase = 33
_UpperCAmelCase = IPNDMScheduler(num_train_timesteps=_lowerCAmelCase )
_UpperCAmelCase = torch.manual_seed(_lowerCAmelCase )
_UpperCAmelCase = torch.randn([1, 2, config.sample_size] , generator=_lowerCAmelCase ).to(_lowerCAmelCase )
_UpperCAmelCase = torch.linspace(1 , 0 , steps + 1 , device=_lowerCAmelCase )[:-1]
_UpperCAmelCase = get_crash_schedule(_lowerCAmelCase )
_UpperCAmelCase = DanceDiffusionPipeline(unet=_lowerCAmelCase , scheduler=_lowerCAmelCase )
_UpperCAmelCase = torch.manual_seed(33 )
_UpperCAmelCase = pipe(num_inference_steps=_lowerCAmelCase , generator=_lowerCAmelCase ).audios
_UpperCAmelCase = sampling.iplms_sample(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , {} )
_UpperCAmelCase = generated.clamp(-1 , 1 )
_UpperCAmelCase = (generated - audio).abs().sum()
_UpperCAmelCase = (generated - audio).abs().max()
if args.save:
pipe.save_pretrained(args.checkpoint_path )
print("Diff sum" , _lowerCAmelCase )
print("Diff max" , _lowerCAmelCase )
assert diff_max < 1E-3, F'''Diff max: {diff_max} is too much :-/'''
print(F'''Conversion for {model_name} successful!''' )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
parser.add_argument(
"--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
)
parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
__lowerCAmelCase = parser.parse_args()
main(args)
| 684 | 1 |
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> int:
if exponent == 1:
return base
if exponent % 2 == 0:
_UpperCAmelCase = _modexpt(_lowerCAmelCase , exponent // 2 , _lowerCAmelCase ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(_lowerCAmelCase , exponent - 1 , _lowerCAmelCase )) % modulo_value
def __lowerCamelCase ( _lowerCAmelCase = 1_777 , _lowerCAmelCase = 1_855 , _lowerCAmelCase = 8 ) -> int:
_UpperCAmelCase = base
for _ in range(1 , _lowerCAmelCase ):
_UpperCAmelCase = _modexpt(_lowerCAmelCase , _lowerCAmelCase , 10**digits )
return result
if __name__ == "__main__":
print(F'''{solution() = }''')
| 684 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
__lowerCAmelCase = get_tests_dir("fixtures")
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
def UpperCAmelCase__ ( self : Dict ):
# A mock response for an HTTP head request to emulate server down
_UpperCAmelCase = mock.Mock()
_UpperCAmelCase = 500
_UpperCAmelCase = {}
_UpperCAmelCase = HTTPError
_UpperCAmelCase = {}
# Download this model to make sure it's in the cache.
_UpperCAmelCase = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=__UpperCamelCase ) as mock_head:
_UpperCAmelCase = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit" )
# This check we did call the fake head request
mock_head.assert_called()
def UpperCAmelCase__ ( self : List[Any] ):
# This test is for deprecated behavior and can be removed in v5
_UpperCAmelCase = ViTImageProcessor.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json" )
def UpperCAmelCase__ ( self : Dict ):
with self.assertRaises(__UpperCamelCase ):
# config is in subfolder, the following should not work without specifying the subfolder
_UpperCAmelCase = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants" )
_UpperCAmelCase = AutoImageProcessor.from_pretrained(
"hf-internal-testing/stable-diffusion-all-variants" , subfolder="feature_extractor" )
self.assertIsNotNone(__UpperCamelCase )
@is_staging_test
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
@classmethod
def UpperCAmelCase__ ( cls : str ):
_UpperCAmelCase = TOKEN
HfFolder.save_token(__UpperCamelCase )
@classmethod
def UpperCAmelCase__ ( cls : Optional[Any] ):
try:
delete_repo(token=cls._token , repo_id="test-image-processor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-image-processor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-image-processor" )
except HTTPError:
pass
def UpperCAmelCase__ ( self : Union[str, Any] ):
_UpperCAmelCase = ViTImageProcessor.from_pretrained(__UpperCamelCase )
image_processor.push_to_hub("test-image-processor" , use_auth_token=self._token )
_UpperCAmelCase = ViTImageProcessor.from_pretrained(F'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-image-processor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__UpperCamelCase , repo_id="test-image-processor" , push_to_hub=__UpperCamelCase , use_auth_token=self._token )
_UpperCAmelCase = ViTImageProcessor.from_pretrained(F'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
def UpperCAmelCase__ ( self : Union[str, Any] ):
_UpperCAmelCase = ViTImageProcessor.from_pretrained(__UpperCamelCase )
image_processor.push_to_hub("valid_org/test-image-processor" , use_auth_token=self._token )
_UpperCAmelCase = ViTImageProcessor.from_pretrained("valid_org/test-image-processor" )
for k, v in image_processor.__dict__.items():
self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-image-processor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__UpperCamelCase , repo_id="valid_org/test-image-processor-org" , push_to_hub=__UpperCamelCase , use_auth_token=self._token )
_UpperCAmelCase = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org" )
for k, v in image_processor.__dict__.items():
self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
def UpperCAmelCase__ ( self : int ):
CustomImageProcessor.register_for_auto_class()
_UpperCAmelCase = CustomImageProcessor.from_pretrained(__UpperCamelCase )
image_processor.push_to_hub("test-dynamic-image-processor" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"} , )
_UpperCAmelCase = AutoImageProcessor.from_pretrained(
F'''{USER}/test-dynamic-image-processor''' , trust_remote_code=__UpperCamelCase )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , "CustomImageProcessor" )
| 684 | 1 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
__lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class __SCREAMING_SNAKE_CASE ( lowercase):
def __init__( self : Optional[Any] , __UpperCamelCase : WhisperForConditionalGeneration , __UpperCamelCase : WhisperProcessor , __UpperCamelCase : AutoencoderKL , __UpperCamelCase : CLIPTextModel , __UpperCamelCase : CLIPTokenizer , __UpperCamelCase : UNetaDConditionModel , __UpperCamelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __UpperCamelCase : StableDiffusionSafetyChecker , __UpperCamelCase : CLIPImageProcessor , ):
super().__init__()
if safety_checker is None:
logger.warning(
F'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
self.register_modules(
speech_model=__UpperCamelCase , speech_processor=__UpperCamelCase , vae=__UpperCamelCase , text_encoder=__UpperCamelCase , tokenizer=__UpperCamelCase , unet=__UpperCamelCase , scheduler=__UpperCamelCase , feature_extractor=__UpperCamelCase , )
def UpperCAmelCase__ ( self : str , __UpperCamelCase : Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
_UpperCAmelCase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__UpperCamelCase )
def UpperCAmelCase__ ( self : List[str] ):
self.enable_attention_slicing(__UpperCamelCase )
@torch.no_grad()
def __call__( self : List[Any] , __UpperCamelCase : Dict , __UpperCamelCase : List[str]=16_000 , __UpperCamelCase : int = 512 , __UpperCamelCase : int = 512 , __UpperCamelCase : int = 50 , __UpperCamelCase : float = 7.5 , __UpperCamelCase : Optional[Union[str, List[str]]] = None , __UpperCamelCase : Optional[int] = 1 , __UpperCamelCase : float = 0.0 , __UpperCamelCase : Optional[torch.Generator] = None , __UpperCamelCase : Optional[torch.FloatTensor] = None , __UpperCamelCase : Optional[str] = "pil" , __UpperCamelCase : bool = True , __UpperCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __UpperCamelCase : int = 1 , **__UpperCamelCase : str , ):
_UpperCAmelCase = self.speech_processor.feature_extractor(
__UpperCamelCase , return_tensors="pt" , sampling_rate=__UpperCamelCase ).input_features.to(self.device )
_UpperCAmelCase = self.speech_model.generate(__UpperCamelCase , max_length=480_000 )
_UpperCAmelCase = self.speech_processor.tokenizer.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase , normalize=__UpperCamelCase )[
0
]
if isinstance(__UpperCamelCase , __UpperCamelCase ):
_UpperCAmelCase = 1
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
_UpperCAmelCase = len(__UpperCamelCase )
else:
raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(__UpperCamelCase )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__UpperCamelCase , __UpperCamelCase ) or callback_steps <= 0)
):
raise ValueError(
F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
F''' {type(__UpperCamelCase )}.''' )
# get prompt text embeddings
_UpperCAmelCase = self.tokenizer(
__UpperCamelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
_UpperCAmelCase = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
_UpperCAmelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
_UpperCAmelCase = text_input_ids[:, : self.tokenizer.model_max_length]
_UpperCAmelCase = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = text_embeddings.shape
_UpperCAmelCase = text_embeddings.repeat(1 , __UpperCamelCase , 1 )
_UpperCAmelCase = text_embeddings.view(bs_embed * num_images_per_prompt , __UpperCamelCase , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_UpperCAmelCase = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_UpperCAmelCase = 42
if negative_prompt is None:
_UpperCAmelCase = [""] * batch_size
elif type(__UpperCamelCase ) is not type(__UpperCamelCase ):
raise TypeError(
F'''`negative_prompt` should be the same type to `prompt`, but got {type(__UpperCamelCase )} !='''
F''' {type(__UpperCamelCase )}.''' )
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
_UpperCAmelCase = [negative_prompt]
elif batch_size != len(__UpperCamelCase ):
raise ValueError(
F'''`negative_prompt`: {negative_prompt} has batch size {len(__UpperCamelCase )}, but `prompt`:'''
F''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
" the batch size of `prompt`." )
else:
_UpperCAmelCase = negative_prompt
_UpperCAmelCase = text_input_ids.shape[-1]
_UpperCAmelCase = self.tokenizer(
__UpperCamelCase , padding="max_length" , max_length=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors="pt" , )
_UpperCAmelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
_UpperCAmelCase = uncond_embeddings.shape[1]
_UpperCAmelCase = uncond_embeddings.repeat(1 , __UpperCamelCase , 1 )
_UpperCAmelCase = uncond_embeddings.view(batch_size * num_images_per_prompt , __UpperCamelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_UpperCAmelCase = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_UpperCAmelCase = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
_UpperCAmelCase = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
_UpperCAmelCase = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device="cpu" , dtype=__UpperCamelCase ).to(
self.device )
else:
_UpperCAmelCase = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase )
else:
if latents.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
_UpperCAmelCase = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(__UpperCamelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
_UpperCAmelCase = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
_UpperCAmelCase = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_UpperCAmelCase = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_UpperCAmelCase = {}
if accepts_eta:
_UpperCAmelCase = eta
for i, t in enumerate(self.progress_bar(__UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
_UpperCAmelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_UpperCAmelCase = self.scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase )
# predict the noise residual
_UpperCAmelCase = self.unet(__UpperCamelCase , __UpperCamelCase , encoder_hidden_states=__UpperCamelCase ).sample
# perform guidance
if do_classifier_free_guidance:
_UpperCAmelCase , _UpperCAmelCase = noise_pred.chunk(2 )
_UpperCAmelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
_UpperCAmelCase = self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = 1 / 0.18215 * latents
_UpperCAmelCase = self.vae.decode(__UpperCamelCase ).sample
_UpperCAmelCase = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
_UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_UpperCAmelCase = self.numpy_to_pil(__UpperCamelCase )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=__UpperCamelCase , nsfw_content_detected=__UpperCamelCase )
| 684 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def __lowerCamelCase ( _lowerCAmelCase ) -> List[str]:
return getitem, k
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]:
return setitem, k, v
def __lowerCamelCase ( _lowerCAmelCase ) -> str:
return delitem, k
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , *_lowerCAmelCase ) -> Optional[int]:
try:
return fun(_lowerCAmelCase , *_lowerCAmelCase ), None
except Exception as e:
return None, e
__lowerCAmelCase = (
_set("key_a", "val_a"),
_set("key_b", "val_b"),
)
__lowerCAmelCase = [
_set("key_a", "val_a"),
_set("key_a", "val_b"),
]
__lowerCAmelCase = [
_set("key_a", "val_a"),
_set("key_b", "val_b"),
_del("key_a"),
_del("key_b"),
_set("key_a", "val_a"),
_del("key_a"),
]
__lowerCAmelCase = [
_get("key_a"),
_del("key_a"),
_set("key_a", "val_a"),
_del("key_a"),
_del("key_a"),
_get("key_a"),
]
__lowerCAmelCase = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
__lowerCAmelCase = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("key_a", "val_b"),
]
@pytest.mark.parametrize(
"operations" , (
pytest.param(_add_items , id="add items" ),
pytest.param(_overwrite_items , id="overwrite items" ),
pytest.param(_delete_items , id="delete items" ),
pytest.param(_access_absent_items , id="access absent items" ),
pytest.param(_add_with_resize_up , id="add with resize up" ),
pytest.param(_add_with_resize_down , id="add with resize down" ),
) , )
def __lowerCamelCase ( _lowerCAmelCase ) -> List[str]:
_UpperCAmelCase = HashMap(initial_block_size=4 )
_UpperCAmelCase = {}
for _, (fun, *args) in enumerate(_lowerCAmelCase ):
_UpperCAmelCase , _UpperCAmelCase = _run_operation(_lowerCAmelCase , _lowerCAmelCase , *_lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase = _run_operation(_lowerCAmelCase , _lowerCAmelCase , *_lowerCAmelCase )
assert my_res == py_res
assert str(_lowerCAmelCase ) == str(_lowerCAmelCase )
assert set(_lowerCAmelCase ) == set(_lowerCAmelCase )
assert len(_lowerCAmelCase ) == len(_lowerCAmelCase )
assert set(my.items() ) == set(py.items() )
def __lowerCamelCase ( ) -> List[Any]:
def is_public(_lowerCAmelCase ) -> bool:
return not name.startswith("_" )
_UpperCAmelCase = {name for name in dir({} ) if is_public(_lowerCAmelCase )}
_UpperCAmelCase = {name for name in dir(HashMap() ) if is_public(_lowerCAmelCase )}
assert dict_public_names > hash_public_names
| 684 | 1 |
from __future__ import annotations
from collections import namedtuple
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> tuple:
_UpperCAmelCase = namedtuple("result" , "name value" )
if (voltage, current, power).count(0 ) != 1:
raise ValueError("Only one argument must be 0" )
elif power < 0:
raise ValueError(
"Power cannot be negative in any electrical/electronics system" )
elif voltage == 0:
return result("voltage" , power / current )
elif current == 0:
return result("current" , power / voltage )
elif power == 0:
return result("power" , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 684 |
def __lowerCamelCase ( _lowerCAmelCase ) -> list:
_UpperCAmelCase = len(_lowerCAmelCase )
for i in range(1 , _lowerCAmelCase ):
_UpperCAmelCase = collection[i]
_UpperCAmelCase = 0
_UpperCAmelCase = i - 1
while low <= high:
_UpperCAmelCase = (low + high) // 2
if val < collection[mid]:
_UpperCAmelCase = mid - 1
else:
_UpperCAmelCase = mid + 1
for j in range(_lowerCAmelCase , _lowerCAmelCase , -1 ):
_UpperCAmelCase = collection[j - 1]
_UpperCAmelCase = val
return collection
if __name__ == "__main__":
__lowerCAmelCase = input("Enter numbers separated by a comma:\n").strip()
__lowerCAmelCase = [int(item) for item in user_input.split(",")]
print(binary_insertion_sort(unsorted))
| 684 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
__lowerCAmelCase = {
"configuration_audio_spectrogram_transformer": [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ASTConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ASTForAudioClassification",
"ASTModel",
"ASTPreTrainedModel",
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ["ASTFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 684 |
__lowerCAmelCase = 2_5_6
# Modulus to hash a string
__lowerCAmelCase = 1_0_0_0_0_0_3
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> bool:
_UpperCAmelCase = len(_lowerCAmelCase )
_UpperCAmelCase = len(_lowerCAmelCase )
if p_len > t_len:
return False
_UpperCAmelCase = 0
_UpperCAmelCase = 0
_UpperCAmelCase = 1
# Calculating the hash of pattern and substring of text
for i in range(_lowerCAmelCase ):
_UpperCAmelCase = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
_UpperCAmelCase = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
_UpperCAmelCase = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
_UpperCAmelCase = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
def __lowerCamelCase ( ) -> None:
_UpperCAmelCase = "abc1abc12"
_UpperCAmelCase = "alskfjaldsabc1abc1abc12k23adsfabcabc"
_UpperCAmelCase = "alskfjaldsk23adsfabcabc"
assert rabin_karp(_lowerCAmelCase , _lowerCAmelCase ) and not rabin_karp(_lowerCAmelCase , _lowerCAmelCase )
# Test 2)
_UpperCAmelCase = "ABABX"
_UpperCAmelCase = "ABABZABABYABABX"
assert rabin_karp(_lowerCAmelCase , _lowerCAmelCase )
# Test 3)
_UpperCAmelCase = "AAAB"
_UpperCAmelCase = "ABAAAAAB"
assert rabin_karp(_lowerCAmelCase , _lowerCAmelCase )
# Test 4)
_UpperCAmelCase = "abcdabcy"
_UpperCAmelCase = "abcxabcdabxabcdabcdabcy"
assert rabin_karp(_lowerCAmelCase , _lowerCAmelCase )
# Test 5)
_UpperCAmelCase = "Lü"
_UpperCAmelCase = "Lüsai"
assert rabin_karp(_lowerCAmelCase , _lowerCAmelCase )
_UpperCAmelCase = "Lue"
assert not rabin_karp(_lowerCAmelCase , _lowerCAmelCase )
print("Success." )
if __name__ == "__main__":
test_rabin_karp()
| 684 | 1 |
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
__lowerCAmelCase = {
"/attention/": "/0/SelfAttention/",
"/self_attention/": "/0/SelfAttention/",
"/encoder_decoder_attention/": "/1/EncDecAttention/",
"value": "v",
"query": "q",
"key": "k",
"out": "o",
"pre_self_attention_layer_norm": "0/layer_norm",
"pre_cross_attention_layer_norm": "1/layer_norm",
"pre_attention_layer_norm": "0/layer_norm", # previously 1, but seems wrong
"token_embedder": "shared",
"encoder_norm": "final_layer_norm",
"decoder_norm": "final_layer_norm",
"relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
"router/router_weights/w/": "router/classifier/",
"roer/roer_weights/w/": "router/classifier/",
"logits_dense": "lm_head",
}
def __lowerCamelCase ( _lowerCAmelCase ) -> Optional[int]:
# 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
# the original model
_UpperCAmelCase = list(s_dict.keys() )
for key in keys:
_UpperCAmelCase = r".*/layers_(\d+)"
_UpperCAmelCase = key
if re.match(_lowerCAmelCase , _lowerCAmelCase ):
_UpperCAmelCase = re.sub(r"layers_(\d+)" , r"block/\1/layer" , _lowerCAmelCase )
_UpperCAmelCase = r"(encoder|decoder)\/"
if re.match(_lowerCAmelCase , _lowerCAmelCase ):
_UpperCAmelCase = re.match(_lowerCAmelCase , _lowerCAmelCase ).groups()
if groups[0] == "encoder":
_UpperCAmelCase = re.sub(r"/mlp/" , r"/1/mlp/" , _lowerCAmelCase )
_UpperCAmelCase = re.sub(r"/pre_mlp_layer_norm/" , r"/1/layer_norm/" , _lowerCAmelCase )
elif groups[0] == "decoder":
_UpperCAmelCase = re.sub(r"/mlp/" , r"/2/mlp/" , _lowerCAmelCase )
_UpperCAmelCase = re.sub(r"/pre_mlp_layer_norm/" , r"/2/layer_norm/" , _lowerCAmelCase )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
_UpperCAmelCase = new_key.replace(_lowerCAmelCase , _lowerCAmelCase )
print(F'''{key} -> {new_key}''' )
_UpperCAmelCase = s_dict.pop(_lowerCAmelCase )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
_UpperCAmelCase = s_dict[
"encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
_UpperCAmelCase = s_dict[
"decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
_UpperCAmelCase = s_dict[key].shape[0]
_UpperCAmelCase = s_dict[key]
for idx in range(_lowerCAmelCase ):
_UpperCAmelCase = expert_weihts[idx]
print(F'''{key} -> {key.replace("expert/" , "nested fstring" )}''' )
s_dict.pop(_lowerCAmelCase )
return s_dict
__lowerCAmelCase = {
"NUM_ENCODER_LAYERS": "num_layers",
"NUM_DECODER_LAYERS": "num_decoder_layers",
"NUM_HEADS": "num_heads",
"HEAD_DIM": "d_kv",
"EMBED_DIM": "d_model",
"MLP_DIM": "d_ff",
"NUM_SELECTED_EXPERTS": "num_selected_experts",
"NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
"NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
"dense.MlpBlock.activations": "feed_forward_proj",
}
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]:
# Convert a google style config to the hugging face fromat
import regex as re
with open(_lowerCAmelCase , "r" ) as f:
_UpperCAmelCase = f.read()
_UpperCAmelCase = re.findall(r"(.*) = ([0-9.]*)" , _lowerCAmelCase )
_UpperCAmelCase = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
_UpperCAmelCase = float(_lowerCAmelCase ) if "." in value else int(_lowerCAmelCase )
_UpperCAmelCase = re.findall(r"(.*activations) = \(\'(.*)\',\)" , _lowerCAmelCase )[0]
_UpperCAmelCase = str(activation[1] )
_UpperCAmelCase = num_experts
_UpperCAmelCase = SwitchTransformersConfig(**_lowerCAmelCase )
return config
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase="./" , _lowerCAmelCase=8 ) -> Optional[Any]:
# Initialise PyTorch model
print(F'''Loading flax weights from : {flax_checkpoint_path}''' )
_UpperCAmelCase = checkpoints.load_tax_checkpoint(_lowerCAmelCase )
if gin_file is not None:
_UpperCAmelCase = convert_gin_to_config(_lowerCAmelCase , _lowerCAmelCase )
else:
_UpperCAmelCase = SwitchTransformersConfig.from_pretrained(_lowerCAmelCase )
_UpperCAmelCase = SwitchTransformersForConditionalGeneration(_lowerCAmelCase )
_UpperCAmelCase = flax_params["target"]
_UpperCAmelCase = flatten_dict(_lowerCAmelCase , sep="/" )
_UpperCAmelCase = rename_keys(_lowerCAmelCase )
_UpperCAmelCase = unflatten_dict(_lowerCAmelCase , sep="/" )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(_lowerCAmelCase , _lowerCAmelCase )
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
pt_model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
" model architecture. If not provided, a `gin_file` has to be provided."
),
)
parser.add_argument(
"--gin_file",
default=None,
type=str,
required=False,
help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
)
parser.add_argument(
"--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
)
parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
__lowerCAmelCase = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
args.switch_tax_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 684 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
__lowerCAmelCase = random.Random()
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase=1.0 , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> List[str]:
if rng is None:
_UpperCAmelCase = global_rng
_UpperCAmelCase = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
def __init__( self : Optional[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : List[Any]=7 , __UpperCamelCase : Union[str, Any]=400 , __UpperCamelCase : List[Any]=2_000 , __UpperCamelCase : Optional[Any]=10 , __UpperCamelCase : Optional[int]=160 , __UpperCamelCase : Any=8 , __UpperCamelCase : List[Any]=0.0 , __UpperCamelCase : Dict=4_000 , __UpperCamelCase : Optional[int]=False , __UpperCamelCase : Tuple=True , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = min_seq_length
_UpperCAmelCase = max_seq_length
_UpperCAmelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_UpperCAmelCase = padding_value
_UpperCAmelCase = sampling_rate
_UpperCAmelCase = return_attention_mask
_UpperCAmelCase = do_normalize
_UpperCAmelCase = feature_size
_UpperCAmelCase = chunk_length
_UpperCAmelCase = hop_length
def UpperCAmelCase__ ( self : Optional[Any] ):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : Tuple=False , __UpperCamelCase : Dict=False ):
def _flatten(__UpperCamelCase : Any ):
return list(itertools.chain(*__UpperCamelCase ) )
if equal_length:
_UpperCAmelCase = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_UpperCAmelCase = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_UpperCAmelCase = [np.asarray(__UpperCamelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __SCREAMING_SNAKE_CASE ( lowercase , unittest.TestCase):
__SCREAMING_SNAKE_CASE : str = WhisperFeatureExtractor if is_speech_available() else None
def UpperCAmelCase__ ( self : str ):
_UpperCAmelCase = WhisperFeatureExtractionTester(self )
def UpperCAmelCase__ ( self : str ):
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = feat_extract_first.save_pretrained(__UpperCamelCase )[0]
check_json_file_has_correct_format(__UpperCamelCase )
_UpperCAmelCase = self.feature_extraction_class.from_pretrained(__UpperCamelCase )
_UpperCAmelCase = feat_extract_first.to_dict()
_UpperCAmelCase = feat_extract_second.to_dict()
_UpperCAmelCase = feat_extract_first.mel_filters
_UpperCAmelCase = feat_extract_second.mel_filters
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase ) )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def UpperCAmelCase__ ( self : Union[str, Any] ):
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = os.path.join(__UpperCamelCase , "feat_extract.json" )
feat_extract_first.to_json_file(__UpperCamelCase )
_UpperCAmelCase = self.feature_extraction_class.from_json_file(__UpperCamelCase )
_UpperCAmelCase = feat_extract_first.to_dict()
_UpperCAmelCase = feat_extract_second.to_dict()
_UpperCAmelCase = feat_extract_first.mel_filters
_UpperCAmelCase = feat_extract_second.mel_filters
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase ) )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def UpperCAmelCase__ ( self : int ):
# Tests that all call wrap to encode_plus and batch_encode_plus
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_UpperCAmelCase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
_UpperCAmelCase = [np.asarray(__UpperCamelCase ) for speech_input in speech_inputs]
# Test feature size
_UpperCAmelCase = feature_extractor(__UpperCamelCase , padding="max_length" , return_tensors="np" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
_UpperCAmelCase = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_features
_UpperCAmelCase = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_features
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
# Test batched
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
_UpperCAmelCase = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_UpperCAmelCase = np.asarray(__UpperCamelCase )
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
# Test truncation required
_UpperCAmelCase = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
_UpperCAmelCase = [np.asarray(__UpperCamelCase ) for speech_input in speech_inputs]
_UpperCAmelCase = [x[: feature_extractor.n_samples] for x in speech_inputs]
_UpperCAmelCase = [np.asarray(__UpperCamelCase ) for speech_input in speech_inputs_truncated]
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
def UpperCAmelCase__ ( self : Union[str, Any] ):
import torch
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCAmelCase = np.random.rand(100 , 32 ).astype(np.floataa )
_UpperCAmelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_UpperCAmelCase = feature_extractor.pad([{"input_features": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
_UpperCAmelCase = feature_extractor.pad([{"input_features": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : Tuple ):
_UpperCAmelCase = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
_UpperCAmelCase = ds.sort("id" ).select(range(__UpperCamelCase ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def UpperCAmelCase__ ( self : Tuple ):
# fmt: off
_UpperCAmelCase = torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
_UpperCAmelCase = self._load_datasamples(1 )
_UpperCAmelCase = WhisperFeatureExtractor()
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="pt" ).input_features
self.assertEqual(input_features.shape , (1, 80, 3_000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , __UpperCamelCase , atol=1e-4 ) )
def UpperCAmelCase__ ( self : Optional[Any] ):
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCAmelCase = self._load_datasamples(1 )[0]
_UpperCAmelCase = ((audio - audio.min()) / (audio.max() - audio.min())) * 65_535 # Rescale to [0, 65535] to show issue
_UpperCAmelCase = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=__UpperCamelCase )[0]
self.assertTrue(np.all(np.mean(__UpperCamelCase ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(__UpperCamelCase ) - 1 ) < 1e-3 ) )
| 684 | 1 |
import requests
__lowerCAmelCase = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="
def __lowerCamelCase ( _lowerCAmelCase ) -> None:
# fetching a list of articles in json format
_UpperCAmelCase = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page["articles"] , 1 ):
print(F'''{i}.) {article["title"]}''' )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
| 684 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
__lowerCAmelCase = "\nHuman: <<task>>\n\nAssistant: "
__lowerCAmelCase = "huggingface-tools/default-prompts"
__lowerCAmelCase = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase="run" ) -> Union[str, Any]:
if prompt_or_repo_id is None:
_UpperCAmelCase = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search("\\s" , _lowerCAmelCase ) is not None:
return prompt_or_repo_id
_UpperCAmelCase = cached_file(
_lowerCAmelCase , PROMPT_FILES[mode] , repo_type="dataset" , user_agent={"agent": agent_name} )
with open(_lowerCAmelCase , "r" , encoding="utf-8" ) as f:
return f.read()
| 684 | 1 |
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
@parameterized.expand([(None,), ("foo.json",)] )
def UpperCAmelCase__ ( self : str , __UpperCamelCase : Optional[int] ):
_UpperCAmelCase = GenerationConfig(
do_sample=__UpperCamelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__UpperCamelCase , config_name=__UpperCamelCase )
_UpperCAmelCase = GenerationConfig.from_pretrained(__UpperCamelCase , config_name=__UpperCamelCase )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , __UpperCamelCase )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , __UpperCamelCase )
def UpperCAmelCase__ ( self : Any ):
_UpperCAmelCase = AutoConfig.from_pretrained("gpt2" )
_UpperCAmelCase = GenerationConfig.from_model_config(__UpperCamelCase )
_UpperCAmelCase = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(__UpperCamelCase , __UpperCamelCase )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def UpperCAmelCase__ ( self : Optional[int] ):
_UpperCAmelCase = GenerationConfig()
_UpperCAmelCase = {
"max_new_tokens": 1_024,
"foo": "bar",
}
_UpperCAmelCase = copy.deepcopy(__UpperCamelCase )
_UpperCAmelCase = generation_config.update(**__UpperCamelCase )
# update_kwargs was not modified (no side effects)
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1_024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(__UpperCamelCase , {"foo": "bar"} )
def UpperCAmelCase__ ( self : str ):
_UpperCAmelCase = GenerationConfig()
_UpperCAmelCase = "bar"
with tempfile.TemporaryDirectory("test-generation-config" ) as tmp_dir:
generation_config.save_pretrained(__UpperCamelCase )
_UpperCAmelCase = GenerationConfig.from_pretrained(__UpperCamelCase )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , "bar" )
_UpperCAmelCase = GenerationConfig.from_model_config(__UpperCamelCase )
assert not hasattr(__UpperCamelCase , "foo" ) # no new kwargs should be initialized if from config
def UpperCAmelCase__ ( self : Optional[int] ):
_UpperCAmelCase = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , __UpperCamelCase )
self.assertEqual(default_config.num_beams , 1 )
_UpperCAmelCase = GenerationConfig(
do_sample=__UpperCamelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , __UpperCamelCase )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__UpperCamelCase )
_UpperCAmelCase = GenerationConfig.from_pretrained(__UpperCamelCase , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , __UpperCamelCase )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
@classmethod
def UpperCAmelCase__ ( cls : List[Any] ):
_UpperCAmelCase = TOKEN
HfFolder.save_token(__UpperCamelCase )
@classmethod
def UpperCAmelCase__ ( cls : Dict ):
try:
delete_repo(token=cls._token , repo_id="test-generation-config" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-generation-config-org" )
except HTTPError:
pass
def UpperCAmelCase__ ( self : Dict ):
_UpperCAmelCase = GenerationConfig(
do_sample=__UpperCamelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("test-generation-config" , use_auth_token=self._token )
_UpperCAmelCase = GenerationConfig.from_pretrained(F'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-generation-config" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__UpperCamelCase , repo_id="test-generation-config" , push_to_hub=__UpperCamelCase , use_auth_token=self._token )
_UpperCAmelCase = GenerationConfig.from_pretrained(F'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
def UpperCAmelCase__ ( self : List[Any] ):
_UpperCAmelCase = GenerationConfig(
do_sample=__UpperCamelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("valid_org/test-generation-config-org" , use_auth_token=self._token )
_UpperCAmelCase = GenerationConfig.from_pretrained("valid_org/test-generation-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-generation-config-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__UpperCamelCase , repo_id="valid_org/test-generation-config-org" , push_to_hub=__UpperCamelCase , use_auth_token=self._token )
_UpperCAmelCase = GenerationConfig.from_pretrained("valid_org/test-generation-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
| 684 |
from itertools import permutations
def __lowerCamelCase ( _lowerCAmelCase ) -> bool:
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
_UpperCAmelCase = [7, 11, 13, 17]
for i, test in enumerate(_lowerCAmelCase ):
if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def __lowerCamelCase ( _lowerCAmelCase = 10 ) -> int:
return sum(
int("".join(map(_lowerCAmelCase , _lowerCAmelCase ) ) )
for num in permutations(range(_lowerCAmelCase ) )
if is_substring_divisible(_lowerCAmelCase ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 684 | 1 |
def __lowerCamelCase ( ) -> Union[str, Any]:
_UpperCAmelCase = []
_UpperCAmelCase = 1
while len(_lowerCAmelCase ) < 1E6:
constant.append(str(_lowerCAmelCase ) )
i += 1
_UpperCAmelCase = "".join(_lowerCAmelCase )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9_999] )
* int(constant[99_999] )
* int(constant[999_999] )
)
if __name__ == "__main__":
print(solution())
| 684 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
__lowerCAmelCase = {
"vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
"merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
"tokenizer_config_file": {
"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
},
}
__lowerCAmelCase = {"facebook/blenderbot-3B": 1_2_8}
class __SCREAMING_SNAKE_CASE ( lowercase):
__SCREAMING_SNAKE_CASE : Optional[int] = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : str = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : List[Any] = ["""input_ids""", """attention_mask"""]
__SCREAMING_SNAKE_CASE : List[str] = BlenderbotTokenizer
def __init__( self : Tuple , __UpperCamelCase : List[str]=None , __UpperCamelCase : Union[str, Any]=None , __UpperCamelCase : List[str]=None , __UpperCamelCase : Union[str, Any]="replace" , __UpperCamelCase : Tuple="<s>" , __UpperCamelCase : str="</s>" , __UpperCamelCase : Dict="</s>" , __UpperCamelCase : Union[str, Any]="<s>" , __UpperCamelCase : Union[str, Any]="<unk>" , __UpperCamelCase : Tuple="<pad>" , __UpperCamelCase : Optional[int]="<mask>" , __UpperCamelCase : Union[str, Any]=False , __UpperCamelCase : List[str]=True , **__UpperCamelCase : int , ):
super().__init__(
__UpperCamelCase , __UpperCamelCase , tokenizer_file=__UpperCamelCase , errors=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , sep_token=__UpperCamelCase , cls_token=__UpperCamelCase , unk_token=__UpperCamelCase , pad_token=__UpperCamelCase , mask_token=__UpperCamelCase , add_prefix_space=__UpperCamelCase , trim_offsets=__UpperCamelCase , **__UpperCamelCase , )
_UpperCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , __UpperCamelCase ) != add_prefix_space:
_UpperCAmelCase = getattr(__UpperCamelCase , pre_tok_state.pop("type" ) )
_UpperCAmelCase = add_prefix_space
_UpperCAmelCase = pre_tok_class(**__UpperCamelCase )
_UpperCAmelCase = add_prefix_space
_UpperCAmelCase = "post_processor"
_UpperCAmelCase = getattr(self.backend_tokenizer , __UpperCamelCase , __UpperCamelCase )
if tokenizer_component_instance:
_UpperCAmelCase = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_UpperCAmelCase = tuple(state["sep"] )
if "cls" in state:
_UpperCAmelCase = tuple(state["cls"] )
_UpperCAmelCase = False
if state.get("add_prefix_space" , __UpperCamelCase ) != add_prefix_space:
_UpperCAmelCase = add_prefix_space
_UpperCAmelCase = True
if state.get("trim_offsets" , __UpperCamelCase ) != trim_offsets:
_UpperCAmelCase = trim_offsets
_UpperCAmelCase = True
if changes_to_apply:
_UpperCAmelCase = getattr(__UpperCamelCase , state.pop("type" ) )
_UpperCAmelCase = component_class(**__UpperCamelCase )
setattr(self.backend_tokenizer , __UpperCamelCase , __UpperCamelCase )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def UpperCAmelCase__ ( self : Union[str, Any] ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : Union[str, Any] ):
_UpperCAmelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else value
_UpperCAmelCase = value
def UpperCAmelCase__ ( self : int , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : List[Any] ):
_UpperCAmelCase = kwargs.get("is_split_into_words" , __UpperCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__UpperCamelCase , **__UpperCamelCase )
def UpperCAmelCase__ ( self : Tuple , *__UpperCamelCase : int , **__UpperCamelCase : Union[str, Any] ):
_UpperCAmelCase = kwargs.get("is_split_into_words" , __UpperCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__UpperCamelCase , **__UpperCamelCase )
def UpperCAmelCase__ ( self : str , __UpperCamelCase : str , __UpperCamelCase : Optional[str] = None ):
_UpperCAmelCase = self._tokenizer.model.save(__UpperCamelCase , name=__UpperCamelCase )
return tuple(__UpperCamelCase )
def UpperCAmelCase__ ( self : Optional[Any] , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ):
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ):
return token_ids_a + [self.eos_token_id]
def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : "Conversation" ):
_UpperCAmelCase = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(" " + text )
else:
# Generated responses should contain them already.
inputs.append(__UpperCamelCase )
_UpperCAmelCase = " ".join(__UpperCamelCase )
_UpperCAmelCase = self.encode(__UpperCamelCase )
if len(__UpperCamelCase ) > self.model_max_length:
_UpperCAmelCase = input_ids[-self.model_max_length :]
logger.warning(F'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
| 684 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class __SCREAMING_SNAKE_CASE ( lowercase):
__SCREAMING_SNAKE_CASE : Tuple = (
"""This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."""
"""It takes two arguments named `image` which should be the original image, and `label` which should be a text """
"""describing the elements what should be identified in the segmentation mask. The tool returns the mask."""
)
__SCREAMING_SNAKE_CASE : Dict = """CIDAS/clipseg-rd64-refined"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = """image_segmenter"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPSegForImageSegmentation
__SCREAMING_SNAKE_CASE : Dict = ["""image""", """text"""]
__SCREAMING_SNAKE_CASE : Dict = ["""image"""]
def __init__( self : Optional[Any] , *__UpperCamelCase : List[Any] , **__UpperCamelCase : List[Any] ):
requires_backends(self , ["vision"] )
super().__init__(*__UpperCamelCase , **__UpperCamelCase )
def UpperCAmelCase__ ( self : List[str] , __UpperCamelCase : "Image" , __UpperCamelCase : str ):
return self.pre_processor(text=[label] , images=[image] , padding=__UpperCamelCase , return_tensors="pt" )
def UpperCAmelCase__ ( self : Optional[Any] , __UpperCamelCase : Dict ):
with torch.no_grad():
_UpperCAmelCase = self.model(**__UpperCamelCase ).logits
return logits
def UpperCAmelCase__ ( self : List[str] , __UpperCamelCase : Any ):
_UpperCAmelCase = outputs.cpu().detach().numpy()
_UpperCAmelCase = 0
_UpperCAmelCase = 1
return Image.fromarray((array * 255).astype(np.uinta ) )
| 684 |
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
__lowerCAmelCase = logging.get_logger(__name__)
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
_UpperCAmelCase = WavaVecaForSequenceClassification.from_pretrained(_lowerCAmelCase , config=_lowerCAmelCase )
_UpperCAmelCase = downstream_dict["projector.weight"]
_UpperCAmelCase = downstream_dict["projector.bias"]
_UpperCAmelCase = downstream_dict["model.post_net.linear.weight"]
_UpperCAmelCase = downstream_dict["model.post_net.linear.bias"]
return model
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> str:
_UpperCAmelCase = WavaVecaForAudioFrameClassification.from_pretrained(_lowerCAmelCase , config=_lowerCAmelCase )
_UpperCAmelCase = downstream_dict["model.linear.weight"]
_UpperCAmelCase = downstream_dict["model.linear.bias"]
return model
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
_UpperCAmelCase = WavaVecaForXVector.from_pretrained(_lowerCAmelCase , config=_lowerCAmelCase )
_UpperCAmelCase = downstream_dict["connector.weight"]
_UpperCAmelCase = downstream_dict["connector.bias"]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
_UpperCAmelCase = downstream_dict[
F'''model.framelevel_feature_extractor.module.{i}.kernel.weight'''
]
_UpperCAmelCase = downstream_dict[F'''model.framelevel_feature_extractor.module.{i}.kernel.bias''']
_UpperCAmelCase = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
_UpperCAmelCase = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
_UpperCAmelCase = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
_UpperCAmelCase = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
_UpperCAmelCase = downstream_dict["objective.W"]
return model
@torch.no_grad()
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
_UpperCAmelCase = torch.load(_lowerCAmelCase , map_location="cpu" )
_UpperCAmelCase = checkpoint["Downstream"]
_UpperCAmelCase = WavaVecaConfig.from_pretrained(_lowerCAmelCase )
_UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(
_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , do_normalize=_lowerCAmelCase )
_UpperCAmelCase = hf_config.architectures[0]
if arch.endswith("ForSequenceClassification" ):
_UpperCAmelCase = convert_classification(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
elif arch.endswith("ForAudioFrameClassification" ):
_UpperCAmelCase = convert_diarization(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
elif arch.endswith("ForXVector" ):
_UpperCAmelCase = convert_xvector(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
else:
raise NotImplementedError(F'''S3PRL weights conversion is not supported for {arch}''' )
if hf_config.use_weighted_layer_sum:
_UpperCAmelCase = checkpoint["Featurizer"]["weights"]
hf_feature_extractor.save_pretrained(_lowerCAmelCase )
hf_model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
)
parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
__lowerCAmelCase = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 684 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowerCAmelCase = {
"configuration_squeezebert": [
"SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SqueezeBertConfig",
"SqueezeBertOnnxConfig",
],
"tokenization_squeezebert": ["SqueezeBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ["SqueezeBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
"SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"SqueezeBertForMaskedLM",
"SqueezeBertForMultipleChoice",
"SqueezeBertForQuestionAnswering",
"SqueezeBertForSequenceClassification",
"SqueezeBertForTokenClassification",
"SqueezeBertModel",
"SqueezeBertModule",
"SqueezeBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 684 |
def __lowerCamelCase ( _lowerCAmelCase ) -> str:
_UpperCAmelCase = []
_UpperCAmelCase = set({"(", "[", "{"} )
_UpperCAmelCase = set({")", "]", "}"} )
_UpperCAmelCase = {"{": "}", "[": "]", "(": ")"}
for i in range(len(_lowerCAmelCase ) ):
if s[i] in open_brackets:
stack.append(s[i] )
elif s[i] in closed_brackets and (
len(_lowerCAmelCase ) == 0 or (len(_lowerCAmelCase ) > 0 and open_to_closed[stack.pop()] != s[i])
):
return False
return len(_lowerCAmelCase ) == 0
def __lowerCamelCase ( ) -> str:
_UpperCAmelCase = input("Enter sequence of brackets: " )
if is_balanced(_lowerCAmelCase ):
print(_lowerCAmelCase , "is balanced" )
else:
print(_lowerCAmelCase , "is not balanced" )
if __name__ == "__main__":
main()
| 684 | 1 |
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
__lowerCAmelCase = TypeVar("T")
class __SCREAMING_SNAKE_CASE ( Generic[T]):
__SCREAMING_SNAKE_CASE : deque[T] # Cache store of keys
__SCREAMING_SNAKE_CASE : set[T] # References of the keys in cache
__SCREAMING_SNAKE_CASE : int = 10 # Maximum capacity of cache
def __init__( self : Optional[int] , __UpperCamelCase : int ):
_UpperCAmelCase = deque()
_UpperCAmelCase = set()
if not n:
_UpperCAmelCase = sys.maxsize
elif n < 0:
raise ValueError("n should be an integer greater than 0." )
else:
_UpperCAmelCase = n
def UpperCAmelCase__ ( self : List[str] , __UpperCamelCase : T ):
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
_UpperCAmelCase = self.dq_store.pop()
self.key_reference.remove(__UpperCamelCase )
else:
self.dq_store.remove(__UpperCamelCase )
self.dq_store.appendleft(__UpperCamelCase )
self.key_reference.add(__UpperCamelCase )
def UpperCAmelCase__ ( self : str ):
for k in self.dq_store:
print(__UpperCamelCase )
def __repr__( self : Dict ):
return F'''LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCAmelCase = LRUCache(4)
lru_cache.refer("A")
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer("A")
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 684 |
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> tuple[float, float]:
# Check if the input is valid
if not len(_lowerCAmelCase ) == len(_lowerCAmelCase ) == 3:
raise ValueError("Please enter a valid equation." )
if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0:
raise ValueError("Both a & b of two equations can't be zero." )
# Extract the coefficients
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = equationa
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = equationa
# Calculate the determinants of the matrices
_UpperCAmelCase = aa * ba - aa * ba
_UpperCAmelCase = ca * ba - ca * ba
_UpperCAmelCase = aa * ca - aa * ca
# Check if the system of linear equations has a solution (using Cramer's rule)
if determinant == 0:
if determinant_x == determinant_y == 0:
raise ValueError("Infinite solutions. (Consistent system)" )
else:
raise ValueError("No solution. (Inconsistent system)" )
else:
if determinant_x == determinant_y == 0:
# Trivial solution (Inconsistent system)
return (0.0, 0.0)
else:
_UpperCAmelCase = determinant_x / determinant
_UpperCAmelCase = determinant_y / determinant
# Non-Trivial Solution (Consistent system)
return (x, y)
| 684 | 1 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __SCREAMING_SNAKE_CASE ( lowercase):
__SCREAMING_SNAKE_CASE : str = (UniPCMultistepScheduler,)
__SCREAMING_SNAKE_CASE : Dict = (("""num_inference_steps""", 25),)
def UpperCAmelCase__ ( self : str , **__UpperCamelCase : Any ):
_UpperCAmelCase = {
"num_train_timesteps": 1_000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"solver_order": 2,
"solver_type": "bh2",
}
config.update(**__UpperCamelCase )
return config
def UpperCAmelCase__ ( self : int , __UpperCamelCase : Any=0 , **__UpperCamelCase : Any ):
_UpperCAmelCase = dict(self.forward_default_kwargs )
_UpperCAmelCase = kwargs.pop("num_inference_steps" , __UpperCamelCase )
_UpperCAmelCase = self.dummy_sample
_UpperCAmelCase = 0.1 * sample
_UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase )
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
scheduler.set_timesteps(__UpperCamelCase )
# copy over dummy past residuals
_UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__UpperCamelCase )
_UpperCAmelCase = scheduler_class.from_pretrained(__UpperCamelCase )
new_scheduler.set_timesteps(__UpperCamelCase )
# copy over dummy past residuals
_UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
_UpperCAmelCase , _UpperCAmelCase = sample, sample
for t in range(__UpperCamelCase , time_step + scheduler.config.solver_order + 1 ):
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
_UpperCAmelCase = new_scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : Union[str, Any]=0 , **__UpperCamelCase : List[Any] ):
_UpperCAmelCase = dict(self.forward_default_kwargs )
_UpperCAmelCase = kwargs.pop("num_inference_steps" , __UpperCamelCase )
_UpperCAmelCase = self.dummy_sample
_UpperCAmelCase = 0.1 * sample
_UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase = self.get_scheduler_config()
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
scheduler.set_timesteps(__UpperCamelCase )
# copy over dummy past residuals (must be after setting timesteps)
_UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__UpperCamelCase )
_UpperCAmelCase = scheduler_class.from_pretrained(__UpperCamelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(__UpperCamelCase )
# copy over dummy past residual (must be after setting timesteps)
_UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
_UpperCAmelCase = new_scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : Dict=None , **__UpperCamelCase : Optional[Any] ):
if scheduler is None:
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase )
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase )
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
_UpperCAmelCase = 10
_UpperCAmelCase = self.dummy_model()
_UpperCAmelCase = self.dummy_sample_deter
scheduler.set_timesteps(__UpperCamelCase )
for i, t in enumerate(scheduler.timesteps ):
_UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample
return sample
def UpperCAmelCase__ ( self : List[str] ):
_UpperCAmelCase = dict(self.forward_default_kwargs )
_UpperCAmelCase = kwargs.pop("num_inference_steps" , __UpperCamelCase )
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase = self.get_scheduler_config()
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
_UpperCAmelCase = self.dummy_sample
_UpperCAmelCase = 0.1 * sample
if num_inference_steps is not None and hasattr(__UpperCamelCase , "set_timesteps" ):
scheduler.set_timesteps(__UpperCamelCase )
elif num_inference_steps is not None and not hasattr(__UpperCamelCase , "set_timesteps" ):
_UpperCAmelCase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
_UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
_UpperCAmelCase = scheduler.timesteps[5]
_UpperCAmelCase = scheduler.timesteps[6]
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCAmelCase__ ( self : Union[str, Any] ):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
_UpperCAmelCase = UniPCMultistepScheduler(**self.get_scheduler_config() )
_UpperCAmelCase = self.full_loop(scheduler=__UpperCamelCase )
_UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_mean.item() - 0.2464 ) < 1e-3
_UpperCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config )
_UpperCAmelCase = DEISMultistepScheduler.from_config(scheduler.config )
_UpperCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config )
_UpperCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config )
_UpperCAmelCase = self.full_loop(scheduler=__UpperCamelCase )
_UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_mean.item() - 0.2464 ) < 1e-3
def UpperCAmelCase__ ( self : str ):
for timesteps in [25, 50, 100, 999, 1_000]:
self.check_over_configs(num_train_timesteps=__UpperCamelCase )
def UpperCAmelCase__ ( self : int ):
self.check_over_configs(thresholding=__UpperCamelCase )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=__UpperCamelCase , prediction_type=__UpperCamelCase , sample_max_value=__UpperCamelCase , solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , )
def UpperCAmelCase__ ( self : int ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__UpperCamelCase )
def UpperCAmelCase__ ( self : int ):
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , prediction_type=__UpperCamelCase , )
_UpperCAmelCase = self.full_loop(
solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , prediction_type=__UpperCamelCase , )
assert not torch.isnan(__UpperCamelCase ).any(), "Samples have nan numbers"
def UpperCAmelCase__ ( self : Optional[int] ):
self.check_over_configs(lower_order_final=__UpperCamelCase )
self.check_over_configs(lower_order_final=__UpperCamelCase )
def UpperCAmelCase__ ( self : Optional[Any] ):
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]:
self.check_over_forward(num_inference_steps=__UpperCamelCase , time_step=0 )
def UpperCAmelCase__ ( self : List[str] ):
_UpperCAmelCase = self.full_loop()
_UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_mean.item() - 0.2464 ) < 1e-3
def UpperCAmelCase__ ( self : Optional[Any] ):
_UpperCAmelCase = self.full_loop(prediction_type="v_prediction" )
_UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_mean.item() - 0.1014 ) < 1e-3
def UpperCAmelCase__ ( self : Tuple ):
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config(thresholding=__UpperCamelCase , dynamic_thresholding_ratio=0 )
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
_UpperCAmelCase = 10
_UpperCAmelCase = self.dummy_model()
_UpperCAmelCase = self.dummy_sample_deter.half()
scheduler.set_timesteps(__UpperCamelCase )
for i, t in enumerate(scheduler.timesteps ):
_UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample
assert sample.dtype == torch.floataa
def UpperCAmelCase__ ( self : str , **__UpperCamelCase : Optional[Any] ):
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase )
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
| 684 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
# Initialise PyTorch model
_UpperCAmelCase = RemBertConfig.from_json_file(_lowerCAmelCase )
print("Building PyTorch model from configuration: {}".format(str(_lowerCAmelCase ) ) )
_UpperCAmelCase = RemBertModel(_lowerCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# Save pytorch-model
print("Save PyTorch model to {}".format(_lowerCAmelCase ) )
torch.save(model.state_dict() , _lowerCAmelCase )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--rembert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained RemBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__lowerCAmelCase = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 684 | 1 |
import math
class __SCREAMING_SNAKE_CASE :
def __init__( self : Union[str, Any] , __UpperCamelCase : List[Any]=0 ): # a graph with Node 0,1,...,N-1
_UpperCAmelCase = n
_UpperCAmelCase = [
[math.inf for j in range(0 , __UpperCamelCase )] for i in range(0 , __UpperCamelCase )
] # adjacency matrix for weight
_UpperCAmelCase = [
[math.inf for j in range(0 , __UpperCamelCase )] for i in range(0 , __UpperCamelCase )
] # dp[i][j] stores minimum distance from i to j
def UpperCAmelCase__ ( self : str , __UpperCamelCase : Tuple , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[str] ):
_UpperCAmelCase = w
def UpperCAmelCase__ ( self : Dict ):
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
_UpperCAmelCase = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def UpperCAmelCase__ ( self : List[str] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Any ):
return self.dp[u][v]
if __name__ == "__main__":
__lowerCAmelCase = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 1_0)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 1_0)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 684 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __SCREAMING_SNAKE_CASE :
@staticmethod
def UpperCAmelCase__ ( *__UpperCamelCase : Dict , **__UpperCamelCase : Optional[int] ):
pass
@is_pipeline_test
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
__SCREAMING_SNAKE_CASE : List[Any] = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : Any , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] ):
_UpperCAmelCase = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" )
_UpperCAmelCase = [
{
"image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"question": "How many cats are there?",
},
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"question": "How many cats are there?",
},
]
return vqa_pipeline, examples
def UpperCAmelCase__ ( self : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[int] ):
_UpperCAmelCase = vqa_pipeline(__UpperCamelCase , top_k=1 )
self.assertEqual(
__UpperCamelCase , [
[{"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}],
[{"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}],
] , )
@require_torch
def UpperCAmelCase__ ( self : List[Any] ):
_UpperCAmelCase = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" )
_UpperCAmelCase = "./tests/fixtures/tests_samples/COCO/000000039769.png"
_UpperCAmelCase = "How many cats are there?"
_UpperCAmelCase = vqa_pipeline(image=__UpperCamelCase , question="How many cats are there?" , top_k=2 )
self.assertEqual(
__UpperCamelCase , [{"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}, {"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}] )
_UpperCAmelCase = vqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
__UpperCamelCase , [{"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}, {"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}] )
@slow
@require_torch
def UpperCAmelCase__ ( self : List[str] ):
_UpperCAmelCase = pipeline("visual-question-answering" , model="dandelin/vilt-b32-finetuned-vqa" )
_UpperCAmelCase = "./tests/fixtures/tests_samples/COCO/000000039769.png"
_UpperCAmelCase = "How many cats are there?"
_UpperCAmelCase = vqa_pipeline(image=__UpperCamelCase , question=__UpperCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}] )
_UpperCAmelCase = vqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}] )
_UpperCAmelCase = vqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2 , )
@require_tf
@unittest.skip("Visual question answering not implemented in TF" )
def UpperCAmelCase__ ( self : Optional[int] ):
pass
| 684 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCAmelCase = {
"configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
"tokenization_convbert": ["ConvBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
"CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvBertForMaskedLM",
"ConvBertForMultipleChoice",
"ConvBertForQuestionAnswering",
"ConvBertForSequenceClassification",
"ConvBertForTokenClassification",
"ConvBertLayer",
"ConvBertModel",
"ConvBertPreTrainedModel",
"load_tf_weights_in_convbert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
"TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFConvBertForMaskedLM",
"TFConvBertForMultipleChoice",
"TFConvBertForQuestionAnswering",
"TFConvBertForSequenceClassification",
"TFConvBertForTokenClassification",
"TFConvBertLayer",
"TFConvBertModel",
"TFConvBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 684 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 684 | 1 |
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
__lowerCAmelCase = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[int]:
for attribute in key.split("." ):
_UpperCAmelCase = getattr(_lowerCAmelCase , _lowerCAmelCase )
if weight_type is not None:
_UpperCAmelCase = getattr(_lowerCAmelCase , _lowerCAmelCase ).shape
else:
_UpperCAmelCase = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
_UpperCAmelCase = value
elif weight_type == "weight_g":
_UpperCAmelCase = value
elif weight_type == "weight_v":
_UpperCAmelCase = value
elif weight_type == "bias":
_UpperCAmelCase = value
else:
_UpperCAmelCase = value
logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
_UpperCAmelCase = []
_UpperCAmelCase = fairseq_model.state_dict()
_UpperCAmelCase = hf_model.feature_extractor
_UpperCAmelCase = hf_model.adapter
for name, value in fairseq_dict.items():
_UpperCAmelCase = False
if "conv_layers" in name:
load_conv_layer(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , hf_model.config.feat_extract_norm == "group" , )
_UpperCAmelCase = True
elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."] ):
load_adapter(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
_UpperCAmelCase = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
_UpperCAmelCase = True
if "*" in mapped_key:
_UpperCAmelCase = name.split(_lowerCAmelCase )[0].split("." )[-2]
_UpperCAmelCase = mapped_key.replace("*" , _lowerCAmelCase )
if "weight_g" in name:
_UpperCAmelCase = "weight_g"
elif "weight_v" in name:
_UpperCAmelCase = "weight_v"
elif "bias" in name:
_UpperCAmelCase = "bias"
elif "weight" in name:
_UpperCAmelCase = "weight"
else:
_UpperCAmelCase = None
set_recursively(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
continue
if not is_used:
unused_weights.append(_lowerCAmelCase )
logger.warning(F'''Unused weights: {unused_weights}''' )
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> int:
_UpperCAmelCase = full_name.split("conv_layers." )[-1]
_UpperCAmelCase = name.split("." )
_UpperCAmelCase = int(items[0] )
_UpperCAmelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
_UpperCAmelCase = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
_UpperCAmelCase = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
_UpperCAmelCase = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
_UpperCAmelCase = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(_lowerCAmelCase )
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
_UpperCAmelCase = full_name.split("adaptor." )[-1]
_UpperCAmelCase = name.split("." )
if items[1].isdigit():
_UpperCAmelCase = int(items[1] )
else:
_UpperCAmelCase = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'''
_UpperCAmelCase = value
logger.info(F'''Adapter proj layer norm bias was initialized from {full_name}.''' )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'''
_UpperCAmelCase = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'''
_UpperCAmelCase = value
logger.info(F'''Adapter proj layer bias was initialized from {full_name}.''' )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'''
_UpperCAmelCase = value
logger.info(F'''Adapter proj layer weight was initialized from {full_name}.''' )
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'''
_UpperCAmelCase = value
logger.info(F'''Adapter layer {layer_id} bias was initialized from {full_name}.''' )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'''
_UpperCAmelCase = value
logger.info(F'''Adapter layer {layer_id} bias was initialized from {full_name}.''' )
else:
unused_weights.append(_lowerCAmelCase )
def __lowerCamelCase ( _lowerCAmelCase ) -> Dict:
_UpperCAmelCase , _UpperCAmelCase = emb.weight.shape
_UpperCAmelCase = nn.Linear(_lowerCAmelCase , _lowerCAmelCase , bias=_lowerCAmelCase )
_UpperCAmelCase = emb.weight.data
return lin_layer
@torch.no_grad()
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ) -> str:
_UpperCAmelCase = WavaVecaConfig.from_pretrained(
_lowerCAmelCase , add_adapter=_lowerCAmelCase , adapter_stride=_lowerCAmelCase , adapter_kernel_size=_lowerCAmelCase , use_auth_token=_lowerCAmelCase , output_hidden_size=_lowerCAmelCase , )
_UpperCAmelCase = MBartConfig.from_pretrained(_lowerCAmelCase )
# load model
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={
"config_yaml": config_yaml_path,
"data": "/".join(dict_path.split("/" )[:-1] ),
"w2v_path": checkpoint_path,
"load_pretrained_decoder_from": None,
} , )
_UpperCAmelCase = model[0].eval()
# load feature extractor
_UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(_lowerCAmelCase , use_auth_token=_lowerCAmelCase )
# set weights for wav2vec2 encoder
_UpperCAmelCase = WavaVecaModel(_lowerCAmelCase )
recursively_load_weights_wavaveca(model.encoder , _lowerCAmelCase )
# load decoder weights
_UpperCAmelCase = MBartForCausalLM(_lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=_lowerCAmelCase )
logger.warning(F'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
logger.warning(F'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
_UpperCAmelCase = SpeechEncoderDecoderModel(encoder=_lowerCAmelCase , decoder=_lowerCAmelCase )
_UpperCAmelCase = False
_UpperCAmelCase = MBartaaTokenizer(_lowerCAmelCase )
tokenizer.save_pretrained(_lowerCAmelCase )
_UpperCAmelCase = hf_wavavec.config.to_dict()
_UpperCAmelCase = tokenizer.pad_token_id
_UpperCAmelCase = tokenizer.bos_token_id
_UpperCAmelCase = tokenizer.eos_token_id
_UpperCAmelCase = "mbart50"
_UpperCAmelCase = "wav2vec2"
_UpperCAmelCase = tokenizer.eos_token_id
_UpperCAmelCase = 250_004
_UpperCAmelCase = tokenizer.eos_token_id
_UpperCAmelCase = SpeechEncoderDecoderConfig.from_dict(_lowerCAmelCase )
hf_wavavec.save_pretrained(_lowerCAmelCase )
feature_extractor.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-xls-r-1b",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/mbart-large-50-one-to-many-mmt",
type=str,
help="Path to hf decoder checkpoint config",
)
parser.add_argument("--add_adapter", default=True, type=bool, help="whethere to add model adapter layers")
parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers")
parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers")
parser.add_argument("--encoder_output_dim", default=1_0_2_4, type=int, help="encoder output dim")
parser.add_argument("--start_token_id", default=2_5_0_0_0_4, type=int, help="`decoder_start_token_id` of model config")
__lowerCAmelCase = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
| 684 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __SCREAMING_SNAKE_CASE ( lowercase):
__SCREAMING_SNAKE_CASE : str = (UniPCMultistepScheduler,)
__SCREAMING_SNAKE_CASE : Dict = (("""num_inference_steps""", 25),)
def UpperCAmelCase__ ( self : str , **__UpperCamelCase : Any ):
_UpperCAmelCase = {
"num_train_timesteps": 1_000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"solver_order": 2,
"solver_type": "bh2",
}
config.update(**__UpperCamelCase )
return config
def UpperCAmelCase__ ( self : int , __UpperCamelCase : Any=0 , **__UpperCamelCase : Any ):
_UpperCAmelCase = dict(self.forward_default_kwargs )
_UpperCAmelCase = kwargs.pop("num_inference_steps" , __UpperCamelCase )
_UpperCAmelCase = self.dummy_sample
_UpperCAmelCase = 0.1 * sample
_UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase )
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
scheduler.set_timesteps(__UpperCamelCase )
# copy over dummy past residuals
_UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__UpperCamelCase )
_UpperCAmelCase = scheduler_class.from_pretrained(__UpperCamelCase )
new_scheduler.set_timesteps(__UpperCamelCase )
# copy over dummy past residuals
_UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
_UpperCAmelCase , _UpperCAmelCase = sample, sample
for t in range(__UpperCamelCase , time_step + scheduler.config.solver_order + 1 ):
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
_UpperCAmelCase = new_scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : Union[str, Any]=0 , **__UpperCamelCase : List[Any] ):
_UpperCAmelCase = dict(self.forward_default_kwargs )
_UpperCAmelCase = kwargs.pop("num_inference_steps" , __UpperCamelCase )
_UpperCAmelCase = self.dummy_sample
_UpperCAmelCase = 0.1 * sample
_UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase = self.get_scheduler_config()
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
scheduler.set_timesteps(__UpperCamelCase )
# copy over dummy past residuals (must be after setting timesteps)
_UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__UpperCamelCase )
_UpperCAmelCase = scheduler_class.from_pretrained(__UpperCamelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(__UpperCamelCase )
# copy over dummy past residual (must be after setting timesteps)
_UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
_UpperCAmelCase = new_scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : Dict=None , **__UpperCamelCase : Optional[Any] ):
if scheduler is None:
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase )
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase )
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
_UpperCAmelCase = 10
_UpperCAmelCase = self.dummy_model()
_UpperCAmelCase = self.dummy_sample_deter
scheduler.set_timesteps(__UpperCamelCase )
for i, t in enumerate(scheduler.timesteps ):
_UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample
return sample
def UpperCAmelCase__ ( self : List[str] ):
_UpperCAmelCase = dict(self.forward_default_kwargs )
_UpperCAmelCase = kwargs.pop("num_inference_steps" , __UpperCamelCase )
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase = self.get_scheduler_config()
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
_UpperCAmelCase = self.dummy_sample
_UpperCAmelCase = 0.1 * sample
if num_inference_steps is not None and hasattr(__UpperCamelCase , "set_timesteps" ):
scheduler.set_timesteps(__UpperCamelCase )
elif num_inference_steps is not None and not hasattr(__UpperCamelCase , "set_timesteps" ):
_UpperCAmelCase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
_UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
_UpperCAmelCase = scheduler.timesteps[5]
_UpperCAmelCase = scheduler.timesteps[6]
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCAmelCase__ ( self : Union[str, Any] ):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
_UpperCAmelCase = UniPCMultistepScheduler(**self.get_scheduler_config() )
_UpperCAmelCase = self.full_loop(scheduler=__UpperCamelCase )
_UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_mean.item() - 0.2464 ) < 1e-3
_UpperCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config )
_UpperCAmelCase = DEISMultistepScheduler.from_config(scheduler.config )
_UpperCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config )
_UpperCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config )
_UpperCAmelCase = self.full_loop(scheduler=__UpperCamelCase )
_UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_mean.item() - 0.2464 ) < 1e-3
def UpperCAmelCase__ ( self : str ):
for timesteps in [25, 50, 100, 999, 1_000]:
self.check_over_configs(num_train_timesteps=__UpperCamelCase )
def UpperCAmelCase__ ( self : int ):
self.check_over_configs(thresholding=__UpperCamelCase )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=__UpperCamelCase , prediction_type=__UpperCamelCase , sample_max_value=__UpperCamelCase , solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , )
def UpperCAmelCase__ ( self : int ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__UpperCamelCase )
def UpperCAmelCase__ ( self : int ):
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , prediction_type=__UpperCamelCase , )
_UpperCAmelCase = self.full_loop(
solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , prediction_type=__UpperCamelCase , )
assert not torch.isnan(__UpperCamelCase ).any(), "Samples have nan numbers"
def UpperCAmelCase__ ( self : Optional[int] ):
self.check_over_configs(lower_order_final=__UpperCamelCase )
self.check_over_configs(lower_order_final=__UpperCamelCase )
def UpperCAmelCase__ ( self : Optional[Any] ):
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]:
self.check_over_forward(num_inference_steps=__UpperCamelCase , time_step=0 )
def UpperCAmelCase__ ( self : List[str] ):
_UpperCAmelCase = self.full_loop()
_UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_mean.item() - 0.2464 ) < 1e-3
def UpperCAmelCase__ ( self : Optional[Any] ):
_UpperCAmelCase = self.full_loop(prediction_type="v_prediction" )
_UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_mean.item() - 0.1014 ) < 1e-3
def UpperCAmelCase__ ( self : Tuple ):
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config(thresholding=__UpperCamelCase , dynamic_thresholding_ratio=0 )
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
_UpperCAmelCase = 10
_UpperCAmelCase = self.dummy_model()
_UpperCAmelCase = self.dummy_sample_deter.half()
scheduler.set_timesteps(__UpperCamelCase )
for i, t in enumerate(scheduler.timesteps ):
_UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample
assert sample.dtype == torch.floataa
def UpperCAmelCase__ ( self : str , **__UpperCamelCase : Optional[Any] ):
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase )
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
| 684 | 1 |
from ..utils import DummyObject, requires_backends
class __SCREAMING_SNAKE_CASE ( metaclass=lowercase):
__SCREAMING_SNAKE_CASE : Dict = ["""flax""", """transformers"""]
def __init__( self : Optional[int] , *__UpperCamelCase : Union[str, Any] , **__UpperCamelCase : str ):
requires_backends(self , ["flax", "transformers"] )
@classmethod
def UpperCAmelCase__ ( cls : Optional[int] , *__UpperCamelCase : Any , **__UpperCamelCase : List[str] ):
requires_backends(cls , ["flax", "transformers"] )
@classmethod
def UpperCAmelCase__ ( cls : Dict , *__UpperCamelCase : List[str] , **__UpperCamelCase : Dict ):
requires_backends(cls , ["flax", "transformers"] )
class __SCREAMING_SNAKE_CASE ( metaclass=lowercase):
__SCREAMING_SNAKE_CASE : List[Any] = ["""flax""", """transformers"""]
def __init__( self : str , *__UpperCamelCase : Optional[Any] , **__UpperCamelCase : Tuple ):
requires_backends(self , ["flax", "transformers"] )
@classmethod
def UpperCAmelCase__ ( cls : Dict , *__UpperCamelCase : Tuple , **__UpperCamelCase : Optional[Any] ):
requires_backends(cls , ["flax", "transformers"] )
@classmethod
def UpperCAmelCase__ ( cls : List[str] , *__UpperCamelCase : str , **__UpperCamelCase : str ):
requires_backends(cls , ["flax", "transformers"] )
class __SCREAMING_SNAKE_CASE ( metaclass=lowercase):
__SCREAMING_SNAKE_CASE : Tuple = ["""flax""", """transformers"""]
def __init__( self : Dict , *__UpperCamelCase : Tuple , **__UpperCamelCase : Optional[int] ):
requires_backends(self , ["flax", "transformers"] )
@classmethod
def UpperCAmelCase__ ( cls : Any , *__UpperCamelCase : str , **__UpperCamelCase : str ):
requires_backends(cls , ["flax", "transformers"] )
@classmethod
def UpperCAmelCase__ ( cls : Union[str, Any] , *__UpperCamelCase : List[Any] , **__UpperCamelCase : str ):
requires_backends(cls , ["flax", "transformers"] )
class __SCREAMING_SNAKE_CASE ( metaclass=lowercase):
__SCREAMING_SNAKE_CASE : List[Any] = ["""flax""", """transformers"""]
def __init__( self : str , *__UpperCamelCase : Optional[Any] , **__UpperCamelCase : List[str] ):
requires_backends(self , ["flax", "transformers"] )
@classmethod
def UpperCAmelCase__ ( cls : str , *__UpperCamelCase : Optional[Any] , **__UpperCamelCase : Optional[Any] ):
requires_backends(cls , ["flax", "transformers"] )
@classmethod
def UpperCAmelCase__ ( cls : int , *__UpperCamelCase : Union[str, Any] , **__UpperCamelCase : Optional[int] ):
requires_backends(cls , ["flax", "transformers"] )
| 684 |
import math
class __SCREAMING_SNAKE_CASE :
def __init__( self : Union[str, Any] , __UpperCamelCase : List[Any]=0 ): # a graph with Node 0,1,...,N-1
_UpperCAmelCase = n
_UpperCAmelCase = [
[math.inf for j in range(0 , __UpperCamelCase )] for i in range(0 , __UpperCamelCase )
] # adjacency matrix for weight
_UpperCAmelCase = [
[math.inf for j in range(0 , __UpperCamelCase )] for i in range(0 , __UpperCamelCase )
] # dp[i][j] stores minimum distance from i to j
def UpperCAmelCase__ ( self : str , __UpperCamelCase : Tuple , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[str] ):
_UpperCAmelCase = w
def UpperCAmelCase__ ( self : Dict ):
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
_UpperCAmelCase = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def UpperCAmelCase__ ( self : List[str] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Any ):
return self.dp[u][v]
if __name__ == "__main__":
__lowerCAmelCase = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 1_0)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 1_0)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 684 | 1 |
import os
def __lowerCamelCase ( ) -> int:
with open(os.path.dirname(_lowerCAmelCase ) + "/grid.txt" ) as f:
_UpperCAmelCase = [] # noqa: E741
for _ in range(20 ):
l.append([int(_lowerCAmelCase ) for x in f.readline().split()] )
_UpperCAmelCase = 0
# right
for i in range(20 ):
for j in range(17 ):
_UpperCAmelCase = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
if temp > maximum:
_UpperCAmelCase = temp
# down
for i in range(17 ):
for j in range(20 ):
_UpperCAmelCase = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
if temp > maximum:
_UpperCAmelCase = temp
# diagonal 1
for i in range(17 ):
for j in range(17 ):
_UpperCAmelCase = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
if temp > maximum:
_UpperCAmelCase = temp
# diagonal 2
for i in range(17 ):
for j in range(3 , 20 ):
_UpperCAmelCase = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
if temp > maximum:
_UpperCAmelCase = temp
return maximum
if __name__ == "__main__":
print(solution())
| 684 |
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( lowercase , lowercase , unittest.TestCase):
__SCREAMING_SNAKE_CASE : Dict = VQModel
__SCREAMING_SNAKE_CASE : Optional[int] = """sample"""
@property
def UpperCAmelCase__ ( self : List[str] , __UpperCamelCase : Optional[int]=(32, 32) ):
_UpperCAmelCase = 4
_UpperCAmelCase = 3
_UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__UpperCamelCase )
return {"sample": image}
@property
def UpperCAmelCase__ ( self : Tuple ):
return (3, 32, 32)
@property
def UpperCAmelCase__ ( self : str ):
return (3, 32, 32)
def UpperCAmelCase__ ( self : Dict ):
_UpperCAmelCase = {
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 3,
}
_UpperCAmelCase = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Dict ):
pass
def UpperCAmelCase__ ( self : str ):
pass
def UpperCAmelCase__ ( self : List[str] ):
_UpperCAmelCase , _UpperCAmelCase = VQModel.from_pretrained("fusing/vqgan-dummy" , output_loading_info=__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(__UpperCamelCase )
_UpperCAmelCase = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def UpperCAmelCase__ ( self : List[Any] ):
_UpperCAmelCase = VQModel.from_pretrained("fusing/vqgan-dummy" )
model.to(__UpperCamelCase ).eval()
torch.manual_seed(0 )
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0 )
_UpperCAmelCase = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
_UpperCAmelCase = image.to(__UpperCamelCase )
with torch.no_grad():
_UpperCAmelCase = model(__UpperCamelCase ).sample
_UpperCAmelCase = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
_UpperCAmelCase = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143] )
# fmt: on
self.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
| 684 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCAmelCase = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 684 |
import requests
__lowerCAmelCase = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="
def __lowerCamelCase ( _lowerCAmelCase ) -> None:
# fetching a list of articles in json format
_UpperCAmelCase = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page["articles"] , 1 ):
print(F'''{i}.) {article["title"]}''' )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
| 684 | 1 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
# Initialise PyTorch model
_UpperCAmelCase = RemBertConfig.from_json_file(_lowerCAmelCase )
print("Building PyTorch model from configuration: {}".format(str(_lowerCAmelCase ) ) )
_UpperCAmelCase = RemBertModel(_lowerCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# Save pytorch-model
print("Save PyTorch model to {}".format(_lowerCAmelCase ) )
torch.save(model.state_dict() , _lowerCAmelCase )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--rembert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained RemBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__lowerCAmelCase = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 684 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
def UpperCAmelCase__ ( self : Any ):
_UpperCAmelCase = 10
def UpperCAmelCase__ ( self : Optional[int] ):
_UpperCAmelCase = [1, 2, 3, 4]
_UpperCAmelCase = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(__UpperCamelCase , self.block_size , 0 ) , __UpperCamelCase )
def UpperCAmelCase__ ( self : List[Any] ):
_UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
_UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(__UpperCamelCase , self.block_size , 0 ) , __UpperCamelCase )
def UpperCAmelCase__ ( self : int ):
_UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
_UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(__UpperCamelCase , self.block_size , 0 ) , __UpperCamelCase )
def UpperCAmelCase__ ( self : Union[str, Any] ):
_UpperCAmelCase = "It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this."
_UpperCAmelCase , _UpperCAmelCase = process_story(__UpperCamelCase )
self.assertEqual(__UpperCamelCase , [] )
def UpperCAmelCase__ ( self : str ):
_UpperCAmelCase = ""
_UpperCAmelCase , _UpperCAmelCase = process_story(__UpperCamelCase )
self.assertEqual(__UpperCamelCase , [] )
self.assertEqual(__UpperCamelCase , [] )
def UpperCAmelCase__ ( self : Optional[Any] ):
_UpperCAmelCase = (
"It was the year of Our Lord one thousand seven hundred and "
"seventy-five\n\nSpiritual revelations were conceded to England "
"at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
)
_UpperCAmelCase , _UpperCAmelCase = process_story(__UpperCamelCase )
_UpperCAmelCase = [
"It was the year of Our Lord one thousand seven hundred and seventy-five.",
"Spiritual revelations were conceded to England at that favoured period, as at this.",
]
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = ["It was the best of times."]
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def UpperCAmelCase__ ( self : Dict ):
_UpperCAmelCase = torch.tensor([1, 2, 3, 4] )
_UpperCAmelCase = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(__UpperCamelCase , 0 ).numpy() , expected.numpy() )
def UpperCAmelCase__ ( self : Optional[Any] ):
_UpperCAmelCase = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
_UpperCAmelCase = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(__UpperCamelCase , 23 ).numpy() , expected.numpy() )
def UpperCAmelCase__ ( self : Optional[int] ):
_UpperCAmelCase = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
_UpperCAmelCase = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(__UpperCamelCase , 1 ).numpy() , expected.numpy() )
def UpperCAmelCase__ ( self : Dict ):
_UpperCAmelCase = 101
_UpperCAmelCase = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
_UpperCAmelCase = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
_UpperCAmelCase = compute_token_type_ids(__UpperCamelCase , __UpperCamelCase )
np.testing.assert_array_equal(__UpperCamelCase , __UpperCamelCase )
| 684 | 1 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class __SCREAMING_SNAKE_CASE ( unittest.TestCase , lowercase):
def UpperCAmelCase__ ( self : Tuple ):
_UpperCAmelCase = load_tool("text-classification" )
self.tool.setup()
_UpperCAmelCase = load_tool("text-classification" , remote=__UpperCamelCase )
def UpperCAmelCase__ ( self : Dict ):
_UpperCAmelCase = self.tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(__UpperCamelCase , "positive" )
def UpperCAmelCase__ ( self : Optional[Any] ):
_UpperCAmelCase = self.remote_tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(__UpperCamelCase , "positive" )
def UpperCAmelCase__ ( self : str ):
_UpperCAmelCase = self.tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(__UpperCamelCase , "positive" )
def UpperCAmelCase__ ( self : Optional[int] ):
_UpperCAmelCase = self.remote_tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(__UpperCamelCase , "positive" )
| 684 |
from __future__ import annotations
from collections import namedtuple
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> tuple:
_UpperCAmelCase = namedtuple("result" , "name value" )
if (voltage, current, power).count(0 ) != 1:
raise ValueError("Only one argument must be 0" )
elif power < 0:
raise ValueError(
"Power cannot be negative in any electrical/electronics system" )
elif voltage == 0:
return result("voltage" , power / current )
elif current == 0:
return result("current" , power / voltage )
elif power == 0:
return result("power" , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 684 | 1 |
from __future__ import annotations
from statistics import mean
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> list[int]:
_UpperCAmelCase = [0] * no_of_processes
_UpperCAmelCase = [0] * no_of_processes
# Initialize remaining_time to waiting_time.
for i in range(_lowerCAmelCase ):
_UpperCAmelCase = burst_time[i]
_UpperCAmelCase = []
_UpperCAmelCase = 0
_UpperCAmelCase = 0
# When processes are not completed,
# A process whose arrival time has passed \
# and has remaining execution time is put into the ready_process.
# The shortest process in the ready_process, target_process is executed.
while completed != no_of_processes:
_UpperCAmelCase = []
_UpperCAmelCase = -1
for i in range(_lowerCAmelCase ):
if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
ready_process.append(_lowerCAmelCase )
if len(_lowerCAmelCase ) > 0:
_UpperCAmelCase = ready_process[0]
for i in ready_process:
if remaining_time[i] < remaining_time[target_process]:
_UpperCAmelCase = i
total_time += burst_time[target_process]
completed += 1
_UpperCAmelCase = 0
_UpperCAmelCase = (
total_time - arrival_time[target_process] - burst_time[target_process]
)
else:
total_time += 1
return waiting_time
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> list[int]:
_UpperCAmelCase = [0] * no_of_processes
for i in range(_lowerCAmelCase ):
_UpperCAmelCase = burst_time[i] + waiting_time[i]
return turn_around_time
if __name__ == "__main__":
print("[TEST CASE 01]")
__lowerCAmelCase = 4
__lowerCAmelCase = [2, 5, 3, 7]
__lowerCAmelCase = [0, 0, 0, 0]
__lowerCAmelCase = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
__lowerCAmelCase = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
for i, process_id in enumerate(list(range(1, 5))):
print(
F'''{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t'''
F'''{waiting_time[i]}\t\t\t\t{turn_around_time[i]}'''
)
print(F'''\nAverage waiting time = {mean(waiting_time):.5f}''')
print(F'''Average turnaround time = {mean(turn_around_time):.5f}''')
| 684 |
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def __lowerCamelCase ( _lowerCAmelCase ) -> Any:
_UpperCAmelCase = {}
_UpperCAmelCase = job["started_at"]
_UpperCAmelCase = job["completed_at"]
_UpperCAmelCase = date_parser.parse(_lowerCAmelCase )
_UpperCAmelCase = date_parser.parse(_lowerCAmelCase )
_UpperCAmelCase = round((end_datetime - start_datetime).total_seconds() / 60.0 )
_UpperCAmelCase = start
_UpperCAmelCase = end
_UpperCAmelCase = duration_in_min
return job_info
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase=None ) -> str:
_UpperCAmelCase = None
if token is not None:
_UpperCAmelCase = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}
_UpperCAmelCase = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
_UpperCAmelCase = requests.get(_lowerCAmelCase , headers=_lowerCAmelCase ).json()
_UpperCAmelCase = {}
try:
job_time.update({job["name"]: extract_time_from_single_job(_lowerCAmelCase ) for job in result["jobs"]} )
_UpperCAmelCase = math.ceil((result["total_count"] - 100) / 100 )
for i in range(_lowerCAmelCase ):
_UpperCAmelCase = requests.get(url + F'''&page={i + 2}''' , headers=_lowerCAmelCase ).json()
job_time.update({job["name"]: extract_time_from_single_job(_lowerCAmelCase ) for job in result["jobs"]} )
return job_time
except Exception:
print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
__lowerCAmelCase = parser.parse_args()
__lowerCAmelCase = get_job_time(args.workflow_run_id)
__lowerCAmelCase = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(F'''{k}: {v["duration"]}''')
| 684 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
"microsoft/table-transformer-detection": (
"https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
),
}
class __SCREAMING_SNAKE_CASE ( lowercase):
__SCREAMING_SNAKE_CASE : List[Any] = """table-transformer"""
__SCREAMING_SNAKE_CASE : List[Any] = ["""past_key_values"""]
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self : Union[str, Any] , __UpperCamelCase : Optional[Any]=True , __UpperCamelCase : Tuple=None , __UpperCamelCase : str=3 , __UpperCamelCase : Dict=100 , __UpperCamelCase : Any=6 , __UpperCamelCase : Optional[int]=2_048 , __UpperCamelCase : List[str]=8 , __UpperCamelCase : Union[str, Any]=6 , __UpperCamelCase : List[Any]=2_048 , __UpperCamelCase : int=8 , __UpperCamelCase : int=0.0 , __UpperCamelCase : Any=0.0 , __UpperCamelCase : Any=True , __UpperCamelCase : Any="relu" , __UpperCamelCase : Optional[int]=256 , __UpperCamelCase : List[str]=0.1 , __UpperCamelCase : Optional[Any]=0.0 , __UpperCamelCase : Union[str, Any]=0.0 , __UpperCamelCase : List[str]=0.02 , __UpperCamelCase : Optional[int]=1.0 , __UpperCamelCase : Tuple=False , __UpperCamelCase : List[str]="sine" , __UpperCamelCase : Optional[int]="resnet50" , __UpperCamelCase : Any=True , __UpperCamelCase : Any=False , __UpperCamelCase : Any=1 , __UpperCamelCase : int=5 , __UpperCamelCase : List[str]=2 , __UpperCamelCase : Optional[int]=1 , __UpperCamelCase : str=1 , __UpperCamelCase : int=5 , __UpperCamelCase : Optional[Any]=2 , __UpperCamelCase : List[str]=0.1 , **__UpperCamelCase : int , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
_UpperCAmelCase = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
_UpperCAmelCase = backbone_config.get("model_type" )
_UpperCAmelCase = CONFIG_MAPPING[backbone_model_type]
_UpperCAmelCase = config_class.from_dict(__UpperCamelCase )
# set timm attributes to None
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None, None, None
_UpperCAmelCase = use_timm_backbone
_UpperCAmelCase = backbone_config
_UpperCAmelCase = num_channels
_UpperCAmelCase = num_queries
_UpperCAmelCase = d_model
_UpperCAmelCase = encoder_ffn_dim
_UpperCAmelCase = encoder_layers
_UpperCAmelCase = encoder_attention_heads
_UpperCAmelCase = decoder_ffn_dim
_UpperCAmelCase = decoder_layers
_UpperCAmelCase = decoder_attention_heads
_UpperCAmelCase = dropout
_UpperCAmelCase = attention_dropout
_UpperCAmelCase = activation_dropout
_UpperCAmelCase = activation_function
_UpperCAmelCase = init_std
_UpperCAmelCase = init_xavier_std
_UpperCAmelCase = encoder_layerdrop
_UpperCAmelCase = decoder_layerdrop
_UpperCAmelCase = encoder_layers
_UpperCAmelCase = auxiliary_loss
_UpperCAmelCase = position_embedding_type
_UpperCAmelCase = backbone
_UpperCAmelCase = use_pretrained_backbone
_UpperCAmelCase = dilation
# Hungarian matcher
_UpperCAmelCase = class_cost
_UpperCAmelCase = bbox_cost
_UpperCAmelCase = giou_cost
# Loss coefficients
_UpperCAmelCase = mask_loss_coefficient
_UpperCAmelCase = dice_loss_coefficient
_UpperCAmelCase = bbox_loss_coefficient
_UpperCAmelCase = giou_loss_coefficient
_UpperCAmelCase = eos_coefficient
super().__init__(is_encoder_decoder=__UpperCamelCase , **__UpperCamelCase )
@property
def UpperCAmelCase__ ( self : int ):
return self.encoder_attention_heads
@property
def UpperCAmelCase__ ( self : List[str] ):
return self.d_model
class __SCREAMING_SNAKE_CASE ( lowercase):
__SCREAMING_SNAKE_CASE : List[str] = version.parse("""1.11""")
@property
def UpperCAmelCase__ ( self : int ):
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def UpperCAmelCase__ ( self : Optional[int] ):
return 1e-5
@property
def UpperCAmelCase__ ( self : Tuple ):
return 12
| 684 |
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
__lowerCAmelCase = {
"gwf-440k": {
"url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
"sample_rate": 4_8_0_0_0,
"sample_size": 6_5_5_3_6,
},
"jmann-small-190k": {
"url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
"sample_rate": 4_8_0_0_0,
"sample_size": 6_5_5_3_6,
},
"jmann-large-580k": {
"url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
"sample_rate": 4_8_0_0_0,
"sample_size": 1_3_1_0_7_2,
},
"maestro-uncond-150k": {
"url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
"sample_rate": 1_6_0_0_0,
"sample_size": 6_5_5_3_6,
},
"unlocked-uncond-250k": {
"url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
"sample_rate": 1_6_0_0_0,
"sample_size": 6_5_5_3_6,
},
"honk-140k": {
"url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
"sample_rate": 1_6_0_0_0,
"sample_size": 6_5_5_3_6,
},
}
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
return torch.atana(_lowerCAmelCase , _lowerCAmelCase ) / math.pi * 2
def __lowerCamelCase ( _lowerCAmelCase ) -> Union[str, Any]:
_UpperCAmelCase = torch.sin(t * math.pi / 2 ) ** 2
_UpperCAmelCase = (1 - sigma**2) ** 0.5
return alpha_sigma_to_t(_lowerCAmelCase , _lowerCAmelCase )
class __SCREAMING_SNAKE_CASE ( lowercase):
pass
class __SCREAMING_SNAKE_CASE ( nn.Module):
def __init__( self : str , __UpperCamelCase : Optional[int] ):
super().__init__()
_UpperCAmelCase = DiffusionAttnUnetaD(__UpperCamelCase , n_attn_layers=4 )
_UpperCAmelCase = deepcopy(self.diffusion )
_UpperCAmelCase = torch.quasirandom.SobolEngine(1 , scramble=__UpperCamelCase )
def __lowerCamelCase ( _lowerCAmelCase ) -> int:
_UpperCAmelCase = MODELS_MAP[model_name]["url"]
os.system(F'''wget {url} ./''' )
return F'''./{model_name}.ckpt'''
__lowerCAmelCase = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
}
__lowerCAmelCase = {
"8": "resnets.0",
"9": "attentions.0",
"10": "resnets.1",
"11": "attentions.1",
"12": "resnets.2",
"13": "attentions.2",
}
__lowerCAmelCase = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
"8": "resnets.3",
"9": "attentions.3",
"10": "resnets.4",
"11": "attentions.4",
"12": "resnets.5",
"13": "attentions.5",
}
__lowerCAmelCase = {
"0": "resnets.0",
"1": "resnets.1",
"2": "resnets.2",
"4": "resnets.0",
"5": "resnets.1",
"6": "resnets.2",
}
__lowerCAmelCase = {
"skip": "conv_skip",
"main.0": "conv_1",
"main.1": "group_norm_1",
"main.3": "conv_2",
"main.4": "group_norm_2",
}
__lowerCAmelCase = {
"norm": "group_norm",
"qkv_proj": ["query", "key", "value"],
"out_proj": ["proj_attn"],
}
def __lowerCamelCase ( _lowerCAmelCase ) -> List[str]:
if name.startswith("skip" ):
return name.replace("skip" , RES_CONV_MAP["skip"] )
# name has to be of format main.{digit}
if not name.startswith("main." ):
raise ValueError(F'''ResConvBlock error with {name}''' )
return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def __lowerCamelCase ( _lowerCAmelCase ) -> Optional[Any]:
for key, value in ATTN_MAP.items():
if name.startswith(_lowerCAmelCase ) and not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
return name.replace(_lowerCAmelCase , _lowerCAmelCase )
elif name.startswith(_lowerCAmelCase ):
return [name.replace(_lowerCAmelCase , _lowerCAmelCase ) for v in value]
raise ValueError(F'''Attn error with {name}''' )
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase=13 ) -> List[Any]:
_UpperCAmelCase = input_string
if string.split("." )[0] == "timestep_embed":
return string.replace("timestep_embed" , "time_proj" )
_UpperCAmelCase = 0
if string.startswith("net.3." ):
depth += 1
_UpperCAmelCase = string[6:]
elif string.startswith("net." ):
_UpperCAmelCase = string[4:]
while string.startswith("main.7." ):
depth += 1
_UpperCAmelCase = string[7:]
if string.startswith("main." ):
_UpperCAmelCase = string[5:]
# mid block
if string[:2].isdigit():
_UpperCAmelCase = string[:2]
_UpperCAmelCase = string[2:]
else:
_UpperCAmelCase = string[0]
_UpperCAmelCase = string[1:]
if depth == max_depth:
_UpperCAmelCase = MID_NUM_TO_LAYER[layer_num]
_UpperCAmelCase = "mid_block"
elif depth > 0 and int(_lowerCAmelCase ) < 7:
_UpperCAmelCase = DOWN_NUM_TO_LAYER[layer_num]
_UpperCAmelCase = F'''down_blocks.{depth}'''
elif depth > 0 and int(_lowerCAmelCase ) > 7:
_UpperCAmelCase = UP_NUM_TO_LAYER[layer_num]
_UpperCAmelCase = F'''up_blocks.{max_depth - depth - 1}'''
elif depth == 0:
_UpperCAmelCase = DEPTH_0_TO_LAYER[layer_num]
_UpperCAmelCase = F'''up_blocks.{max_depth - 1}''' if int(_lowerCAmelCase ) > 3 else "down_blocks.0"
if not string_left.startswith("." ):
raise ValueError(F'''Naming error with {input_string} and string_left: {string_left}.''' )
_UpperCAmelCase = string_left[1:]
if "resnets" in new_layer:
_UpperCAmelCase = convert_resconv_naming(_lowerCAmelCase )
elif "attentions" in new_layer:
_UpperCAmelCase = convert_attn_naming(_lowerCAmelCase )
_UpperCAmelCase = new_string_left
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_UpperCAmelCase = prefix + "." + new_layer + "." + string_left
else:
_UpperCAmelCase = [prefix + "." + new_layer + "." + s for s in string_left]
return new_string
def __lowerCamelCase ( _lowerCAmelCase ) -> Optional[int]:
_UpperCAmelCase = {}
for k, v in state_dict.items():
if k.endswith("kernel" ):
# up- and downsample layers, don't have trainable weights
continue
_UpperCAmelCase = rename(_lowerCAmelCase )
# check if we need to transform from Conv => Linear for attention
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_UpperCAmelCase = transform_conv_attns(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
else:
_UpperCAmelCase = v
return new_state_dict
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]:
if len(_lowerCAmelCase ) == 1:
if len(v.shape ) == 3:
# weight
_UpperCAmelCase = v[:, :, 0]
else:
# bias
_UpperCAmelCase = v
else:
# qkv matrices
_UpperCAmelCase = v.shape[0]
_UpperCAmelCase = trippled_shape // 3
for i in range(3 ):
if len(v.shape ) == 3:
_UpperCAmelCase = v[i * single_shape : (i + 1) * single_shape, :, 0]
else:
_UpperCAmelCase = v[i * single_shape : (i + 1) * single_shape]
return new_state_dict
def __lowerCamelCase ( _lowerCAmelCase ) -> Tuple:
_UpperCAmelCase = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
_UpperCAmelCase = args.model_path.split("/" )[-1].split("." )[0]
if not os.path.isfile(args.model_path ):
assert (
model_name == args.model_path
), F'''Make sure to provide one of the official model names {MODELS_MAP.keys()}'''
_UpperCAmelCase = download(_lowerCAmelCase )
_UpperCAmelCase = MODELS_MAP[model_name]["sample_rate"]
_UpperCAmelCase = MODELS_MAP[model_name]["sample_size"]
_UpperCAmelCase = Object()
_UpperCAmelCase = sample_size
_UpperCAmelCase = sample_rate
_UpperCAmelCase = 0
_UpperCAmelCase = UNetaDModel(sample_size=_lowerCAmelCase , sample_rate=_lowerCAmelCase )
_UpperCAmelCase = diffusers_model.state_dict()
_UpperCAmelCase = DiffusionUncond(_lowerCAmelCase )
orig_model.load_state_dict(torch.load(args.model_path , map_location=_lowerCAmelCase )["state_dict"] )
_UpperCAmelCase = orig_model.diffusion_ema.eval()
_UpperCAmelCase = orig_model.state_dict()
_UpperCAmelCase = rename_orig_weights(_lowerCAmelCase )
_UpperCAmelCase = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
_UpperCAmelCase = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
assert len(_lowerCAmelCase ) == 0, F'''Problem with {renamed_minus_diffusers}'''
assert all(k.endswith("kernel" ) for k in list(_lowerCAmelCase ) ), F'''Problem with {diffusers_minus_renamed}'''
for key, value in renamed_state_dict.items():
assert (
diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
), F'''Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'''
if key == "time_proj.weight":
_UpperCAmelCase = value.squeeze()
_UpperCAmelCase = value
diffusers_model.load_state_dict(_lowerCAmelCase )
_UpperCAmelCase = 100
_UpperCAmelCase = 33
_UpperCAmelCase = IPNDMScheduler(num_train_timesteps=_lowerCAmelCase )
_UpperCAmelCase = torch.manual_seed(_lowerCAmelCase )
_UpperCAmelCase = torch.randn([1, 2, config.sample_size] , generator=_lowerCAmelCase ).to(_lowerCAmelCase )
_UpperCAmelCase = torch.linspace(1 , 0 , steps + 1 , device=_lowerCAmelCase )[:-1]
_UpperCAmelCase = get_crash_schedule(_lowerCAmelCase )
_UpperCAmelCase = DanceDiffusionPipeline(unet=_lowerCAmelCase , scheduler=_lowerCAmelCase )
_UpperCAmelCase = torch.manual_seed(33 )
_UpperCAmelCase = pipe(num_inference_steps=_lowerCAmelCase , generator=_lowerCAmelCase ).audios
_UpperCAmelCase = sampling.iplms_sample(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , {} )
_UpperCAmelCase = generated.clamp(-1 , 1 )
_UpperCAmelCase = (generated - audio).abs().sum()
_UpperCAmelCase = (generated - audio).abs().max()
if args.save:
pipe.save_pretrained(args.checkpoint_path )
print("Diff sum" , _lowerCAmelCase )
print("Diff max" , _lowerCAmelCase )
assert diff_max < 1E-3, F'''Diff max: {diff_max} is too much :-/'''
print(F'''Conversion for {model_name} successful!''' )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
parser.add_argument(
"--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
)
parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
__lowerCAmelCase = parser.parse_args()
main(args)
| 684 | 1 |
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
__lowerCAmelCase = 2_0_0
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
__lowerCAmelCase = 5_0
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
__lowerCAmelCase = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1_0_0_0))
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> tuple[str, float]:
_UpperCAmelCase = len([g for position, g in enumerate(_lowerCAmelCase ) if g == main_target[position]] )
return (item, float(_lowerCAmelCase ))
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> tuple[str, str]:
_UpperCAmelCase = random.randint(0 , len(_lowerCAmelCase ) - 1 )
_UpperCAmelCase = parent_a[:random_slice] + parent_a[random_slice:]
_UpperCAmelCase = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> str:
_UpperCAmelCase = list(_lowerCAmelCase )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
_UpperCAmelCase = random.choice(_lowerCAmelCase )
return "".join(_lowerCAmelCase )
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ) -> list[str]:
_UpperCAmelCase = []
# Generate more children proportionally to the fitness score.
_UpperCAmelCase = int(parent_a[1] * 100 ) + 1
_UpperCAmelCase = 10 if child_n >= 10 else child_n
for _ in range(_lowerCAmelCase ):
_UpperCAmelCase = population_score[random.randint(0 , _lowerCAmelCase )][0]
_UpperCAmelCase , _UpperCAmelCase = crossover(parent_a[0] , _lowerCAmelCase )
# Append new string to the population list.
pop.append(mutate(_lowerCAmelCase , _lowerCAmelCase ) )
pop.append(mutate(_lowerCAmelCase , _lowerCAmelCase ) )
return pop
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = True ) -> tuple[int, int, str]:
# Verify if N_POPULATION is bigger than N_SELECTED
if N_POPULATION < N_SELECTED:
_UpperCAmelCase = F'''{N_POPULATION} must be bigger than {N_SELECTED}'''
raise ValueError(_lowerCAmelCase )
# Verify that the target contains no genes besides the ones inside genes variable.
_UpperCAmelCase = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
_UpperCAmelCase = F'''{not_in_genes_list} is not in genes list, evolution cannot converge'''
raise ValueError(_lowerCAmelCase )
# Generate random starting population.
_UpperCAmelCase = []
for _ in range(_lowerCAmelCase ):
population.append("".join([random.choice(_lowerCAmelCase ) for i in range(len(_lowerCAmelCase ) )] ) )
# Just some logs to know what the algorithms is doing.
_UpperCAmelCase , _UpperCAmelCase = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(_lowerCAmelCase )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
_UpperCAmelCase = [evaluate(_lowerCAmelCase , _lowerCAmelCase ) for item in population]
# Check if there is a matching evolution.
_UpperCAmelCase = sorted(_lowerCAmelCase , key=lambda _lowerCAmelCase : x[1] , reverse=_lowerCAmelCase )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F'''\nGeneration: {generation}'''
F'''\nTotal Population:{total_population}'''
F'''\nBest score: {population_score[0][1]}'''
F'''\nBest string: {population_score[0][0]}''' )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
_UpperCAmelCase = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(_lowerCAmelCase )
# Normalize population score to be between 0 and 1.
_UpperCAmelCase = [
(item, score / len(_lowerCAmelCase )) for item, score in population_score
]
# This is selection
for i in range(_lowerCAmelCase ):
population.extend(select(population_score[int(_lowerCAmelCase )] , _lowerCAmelCase , _lowerCAmelCase ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
if len(_lowerCAmelCase ) > N_POPULATION:
break
if __name__ == "__main__":
__lowerCAmelCase = (
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
__lowerCAmelCase = list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = basic(target_str, genes_list)
print(
F'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
| 684 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
__lowerCAmelCase = get_tests_dir("fixtures")
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
def UpperCAmelCase__ ( self : Dict ):
# A mock response for an HTTP head request to emulate server down
_UpperCAmelCase = mock.Mock()
_UpperCAmelCase = 500
_UpperCAmelCase = {}
_UpperCAmelCase = HTTPError
_UpperCAmelCase = {}
# Download this model to make sure it's in the cache.
_UpperCAmelCase = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=__UpperCamelCase ) as mock_head:
_UpperCAmelCase = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit" )
# This check we did call the fake head request
mock_head.assert_called()
def UpperCAmelCase__ ( self : List[Any] ):
# This test is for deprecated behavior and can be removed in v5
_UpperCAmelCase = ViTImageProcessor.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json" )
def UpperCAmelCase__ ( self : Dict ):
with self.assertRaises(__UpperCamelCase ):
# config is in subfolder, the following should not work without specifying the subfolder
_UpperCAmelCase = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants" )
_UpperCAmelCase = AutoImageProcessor.from_pretrained(
"hf-internal-testing/stable-diffusion-all-variants" , subfolder="feature_extractor" )
self.assertIsNotNone(__UpperCamelCase )
@is_staging_test
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
@classmethod
def UpperCAmelCase__ ( cls : str ):
_UpperCAmelCase = TOKEN
HfFolder.save_token(__UpperCamelCase )
@classmethod
def UpperCAmelCase__ ( cls : Optional[Any] ):
try:
delete_repo(token=cls._token , repo_id="test-image-processor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-image-processor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-image-processor" )
except HTTPError:
pass
def UpperCAmelCase__ ( self : Union[str, Any] ):
_UpperCAmelCase = ViTImageProcessor.from_pretrained(__UpperCamelCase )
image_processor.push_to_hub("test-image-processor" , use_auth_token=self._token )
_UpperCAmelCase = ViTImageProcessor.from_pretrained(F'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-image-processor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__UpperCamelCase , repo_id="test-image-processor" , push_to_hub=__UpperCamelCase , use_auth_token=self._token )
_UpperCAmelCase = ViTImageProcessor.from_pretrained(F'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
def UpperCAmelCase__ ( self : Union[str, Any] ):
_UpperCAmelCase = ViTImageProcessor.from_pretrained(__UpperCamelCase )
image_processor.push_to_hub("valid_org/test-image-processor" , use_auth_token=self._token )
_UpperCAmelCase = ViTImageProcessor.from_pretrained("valid_org/test-image-processor" )
for k, v in image_processor.__dict__.items():
self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-image-processor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__UpperCamelCase , repo_id="valid_org/test-image-processor-org" , push_to_hub=__UpperCamelCase , use_auth_token=self._token )
_UpperCAmelCase = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org" )
for k, v in image_processor.__dict__.items():
self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
def UpperCAmelCase__ ( self : int ):
CustomImageProcessor.register_for_auto_class()
_UpperCAmelCase = CustomImageProcessor.from_pretrained(__UpperCamelCase )
image_processor.push_to_hub("test-dynamic-image-processor" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"} , )
_UpperCAmelCase = AutoImageProcessor.from_pretrained(
F'''{USER}/test-dynamic-image-processor''' , trust_remote_code=__UpperCamelCase )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , "CustomImageProcessor" )
| 684 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__lowerCAmelCase = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
"VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTMAEForPreTraining",
"ViTMAELayer",
"ViTMAEModel",
"ViTMAEPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
"TFViTMAEForPreTraining",
"TFViTMAEModel",
"TFViTMAEPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 684 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def __lowerCamelCase ( _lowerCAmelCase ) -> List[str]:
return getitem, k
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]:
return setitem, k, v
def __lowerCamelCase ( _lowerCAmelCase ) -> str:
return delitem, k
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , *_lowerCAmelCase ) -> Optional[int]:
try:
return fun(_lowerCAmelCase , *_lowerCAmelCase ), None
except Exception as e:
return None, e
__lowerCAmelCase = (
_set("key_a", "val_a"),
_set("key_b", "val_b"),
)
__lowerCAmelCase = [
_set("key_a", "val_a"),
_set("key_a", "val_b"),
]
__lowerCAmelCase = [
_set("key_a", "val_a"),
_set("key_b", "val_b"),
_del("key_a"),
_del("key_b"),
_set("key_a", "val_a"),
_del("key_a"),
]
__lowerCAmelCase = [
_get("key_a"),
_del("key_a"),
_set("key_a", "val_a"),
_del("key_a"),
_del("key_a"),
_get("key_a"),
]
__lowerCAmelCase = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
__lowerCAmelCase = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("key_a", "val_b"),
]
@pytest.mark.parametrize(
"operations" , (
pytest.param(_add_items , id="add items" ),
pytest.param(_overwrite_items , id="overwrite items" ),
pytest.param(_delete_items , id="delete items" ),
pytest.param(_access_absent_items , id="access absent items" ),
pytest.param(_add_with_resize_up , id="add with resize up" ),
pytest.param(_add_with_resize_down , id="add with resize down" ),
) , )
def __lowerCamelCase ( _lowerCAmelCase ) -> List[str]:
_UpperCAmelCase = HashMap(initial_block_size=4 )
_UpperCAmelCase = {}
for _, (fun, *args) in enumerate(_lowerCAmelCase ):
_UpperCAmelCase , _UpperCAmelCase = _run_operation(_lowerCAmelCase , _lowerCAmelCase , *_lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase = _run_operation(_lowerCAmelCase , _lowerCAmelCase , *_lowerCAmelCase )
assert my_res == py_res
assert str(_lowerCAmelCase ) == str(_lowerCAmelCase )
assert set(_lowerCAmelCase ) == set(_lowerCAmelCase )
assert len(_lowerCAmelCase ) == len(_lowerCAmelCase )
assert set(my.items() ) == set(py.items() )
def __lowerCamelCase ( ) -> List[Any]:
def is_public(_lowerCAmelCase ) -> bool:
return not name.startswith("_" )
_UpperCAmelCase = {name for name in dir({} ) if is_public(_lowerCAmelCase )}
_UpperCAmelCase = {name for name in dir(HashMap() ) if is_public(_lowerCAmelCase )}
assert dict_public_names > hash_public_names
| 684 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class __SCREAMING_SNAKE_CASE ( lowercase):
__SCREAMING_SNAKE_CASE : List[str] = """speech_to_text"""
__SCREAMING_SNAKE_CASE : List[Any] = ["""past_key_values"""]
__SCREAMING_SNAKE_CASE : Tuple = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self : List[str] , __UpperCamelCase : Dict=10_000 , __UpperCamelCase : Dict=12 , __UpperCamelCase : List[Any]=2_048 , __UpperCamelCase : Optional[int]=4 , __UpperCamelCase : List[str]=6 , __UpperCamelCase : Optional[Any]=2_048 , __UpperCamelCase : Optional[int]=4 , __UpperCamelCase : Union[str, Any]=0.0 , __UpperCamelCase : List[Any]=0.0 , __UpperCamelCase : Any=True , __UpperCamelCase : Union[str, Any]=True , __UpperCamelCase : str="relu" , __UpperCamelCase : Dict=256 , __UpperCamelCase : str=0.1 , __UpperCamelCase : Optional[int]=0.0 , __UpperCamelCase : Dict=0.0 , __UpperCamelCase : Any=0.02 , __UpperCamelCase : str=2 , __UpperCamelCase : List[Any]=True , __UpperCamelCase : Optional[Any]=1 , __UpperCamelCase : Dict=0 , __UpperCamelCase : Any=2 , __UpperCamelCase : Any=6_000 , __UpperCamelCase : str=1_024 , __UpperCamelCase : List[str]=2 , __UpperCamelCase : Any=(5, 5) , __UpperCamelCase : Optional[Any]=1_024 , __UpperCamelCase : List[str]=80 , __UpperCamelCase : List[Any]=1 , **__UpperCamelCase : List[str] , ):
_UpperCAmelCase = vocab_size
_UpperCAmelCase = d_model
_UpperCAmelCase = encoder_ffn_dim
_UpperCAmelCase = encoder_layers
_UpperCAmelCase = encoder_attention_heads
_UpperCAmelCase = decoder_ffn_dim
_UpperCAmelCase = decoder_layers
_UpperCAmelCase = decoder_attention_heads
_UpperCAmelCase = dropout
_UpperCAmelCase = attention_dropout
_UpperCAmelCase = activation_dropout
_UpperCAmelCase = activation_function
_UpperCAmelCase = init_std
_UpperCAmelCase = encoder_layerdrop
_UpperCAmelCase = decoder_layerdrop
_UpperCAmelCase = use_cache
_UpperCAmelCase = encoder_layers
_UpperCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True
_UpperCAmelCase = max_source_positions
_UpperCAmelCase = max_target_positions
_UpperCAmelCase = num_conv_layers
_UpperCAmelCase = list(__UpperCamelCase )
_UpperCAmelCase = conv_channels
_UpperCAmelCase = input_feat_per_channel
_UpperCAmelCase = input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
"Configuration for convolutional module is incorrect. "
"It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
F'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, '''
F'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
super().__init__(
pad_token_id=__UpperCamelCase , bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , is_encoder_decoder=__UpperCamelCase , decoder_start_token_id=__UpperCamelCase , **__UpperCamelCase , )
| 684 |
def __lowerCamelCase ( _lowerCAmelCase ) -> list:
_UpperCAmelCase = len(_lowerCAmelCase )
for i in range(1 , _lowerCAmelCase ):
_UpperCAmelCase = collection[i]
_UpperCAmelCase = 0
_UpperCAmelCase = i - 1
while low <= high:
_UpperCAmelCase = (low + high) // 2
if val < collection[mid]:
_UpperCAmelCase = mid - 1
else:
_UpperCAmelCase = mid + 1
for j in range(_lowerCAmelCase , _lowerCAmelCase , -1 ):
_UpperCAmelCase = collection[j - 1]
_UpperCAmelCase = val
return collection
if __name__ == "__main__":
__lowerCAmelCase = input("Enter numbers separated by a comma:\n").strip()
__lowerCAmelCase = [int(item) for item in user_input.split(",")]
print(binary_insertion_sort(unsorted))
| 684 | 1 |
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class __SCREAMING_SNAKE_CASE ( nn.Module):
__SCREAMING_SNAKE_CASE : int
__SCREAMING_SNAKE_CASE : int
__SCREAMING_SNAKE_CASE : float = 0.0
__SCREAMING_SNAKE_CASE : int = 1
__SCREAMING_SNAKE_CASE : int = 1
__SCREAMING_SNAKE_CASE : bool = True
__SCREAMING_SNAKE_CASE : bool = False
__SCREAMING_SNAKE_CASE : bool = False
__SCREAMING_SNAKE_CASE : bool = False
__SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa
def UpperCAmelCase__ ( self : Optional[Any] ):
_UpperCAmelCase = []
_UpperCAmelCase = []
for i in range(self.num_layers ):
_UpperCAmelCase = self.in_channels if i == 0 else self.out_channels
_UpperCAmelCase = FlaxResnetBlockaD(
in_channels=__UpperCamelCase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__UpperCamelCase )
_UpperCAmelCase = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__UpperCamelCase )
_UpperCAmelCase = resnets
_UpperCAmelCase = attentions
if self.add_downsample:
_UpperCAmelCase = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : Dict , __UpperCamelCase : Optional[int] , __UpperCamelCase : str , __UpperCamelCase : Optional[Any] , __UpperCamelCase : int=True ):
_UpperCAmelCase = ()
for resnet, attn in zip(self.resnets , self.attentions ):
_UpperCAmelCase = resnet(__UpperCamelCase , __UpperCamelCase , deterministic=__UpperCamelCase )
_UpperCAmelCase = attn(__UpperCamelCase , __UpperCamelCase , deterministic=__UpperCamelCase )
output_states += (hidden_states,)
if self.add_downsample:
_UpperCAmelCase = self.downsamplers_a(__UpperCamelCase )
output_states += (hidden_states,)
return hidden_states, output_states
class __SCREAMING_SNAKE_CASE ( nn.Module):
__SCREAMING_SNAKE_CASE : int
__SCREAMING_SNAKE_CASE : int
__SCREAMING_SNAKE_CASE : float = 0.0
__SCREAMING_SNAKE_CASE : int = 1
__SCREAMING_SNAKE_CASE : bool = True
__SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa
def UpperCAmelCase__ ( self : str ):
_UpperCAmelCase = []
for i in range(self.num_layers ):
_UpperCAmelCase = self.in_channels if i == 0 else self.out_channels
_UpperCAmelCase = FlaxResnetBlockaD(
in_channels=__UpperCamelCase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__UpperCamelCase )
_UpperCAmelCase = resnets
if self.add_downsample:
_UpperCAmelCase = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : Optional[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Tuple , __UpperCamelCase : Union[str, Any]=True ):
_UpperCAmelCase = ()
for resnet in self.resnets:
_UpperCAmelCase = resnet(__UpperCamelCase , __UpperCamelCase , deterministic=__UpperCamelCase )
output_states += (hidden_states,)
if self.add_downsample:
_UpperCAmelCase = self.downsamplers_a(__UpperCamelCase )
output_states += (hidden_states,)
return hidden_states, output_states
class __SCREAMING_SNAKE_CASE ( nn.Module):
__SCREAMING_SNAKE_CASE : int
__SCREAMING_SNAKE_CASE : int
__SCREAMING_SNAKE_CASE : int
__SCREAMING_SNAKE_CASE : float = 0.0
__SCREAMING_SNAKE_CASE : int = 1
__SCREAMING_SNAKE_CASE : int = 1
__SCREAMING_SNAKE_CASE : bool = True
__SCREAMING_SNAKE_CASE : bool = False
__SCREAMING_SNAKE_CASE : bool = False
__SCREAMING_SNAKE_CASE : bool = False
__SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa
def UpperCAmelCase__ ( self : Tuple ):
_UpperCAmelCase = []
_UpperCAmelCase = []
for i in range(self.num_layers ):
_UpperCAmelCase = self.in_channels if (i == self.num_layers - 1) else self.out_channels
_UpperCAmelCase = self.prev_output_channel if i == 0 else self.out_channels
_UpperCAmelCase = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__UpperCamelCase )
_UpperCAmelCase = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__UpperCamelCase )
_UpperCAmelCase = resnets
_UpperCAmelCase = attentions
if self.add_upsample:
_UpperCAmelCase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : int , __UpperCamelCase : str , __UpperCamelCase : List[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Optional[int]=True ):
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
_UpperCAmelCase = res_hidden_states_tuple[-1]
_UpperCAmelCase = res_hidden_states_tuple[:-1]
_UpperCAmelCase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
_UpperCAmelCase = resnet(__UpperCamelCase , __UpperCamelCase , deterministic=__UpperCamelCase )
_UpperCAmelCase = attn(__UpperCamelCase , __UpperCamelCase , deterministic=__UpperCamelCase )
if self.add_upsample:
_UpperCAmelCase = self.upsamplers_a(__UpperCamelCase )
return hidden_states
class __SCREAMING_SNAKE_CASE ( nn.Module):
__SCREAMING_SNAKE_CASE : int
__SCREAMING_SNAKE_CASE : int
__SCREAMING_SNAKE_CASE : int
__SCREAMING_SNAKE_CASE : float = 0.0
__SCREAMING_SNAKE_CASE : int = 1
__SCREAMING_SNAKE_CASE : bool = True
__SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa
def UpperCAmelCase__ ( self : Dict ):
_UpperCAmelCase = []
for i in range(self.num_layers ):
_UpperCAmelCase = self.in_channels if (i == self.num_layers - 1) else self.out_channels
_UpperCAmelCase = self.prev_output_channel if i == 0 else self.out_channels
_UpperCAmelCase = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__UpperCamelCase )
_UpperCAmelCase = resnets
if self.add_upsample:
_UpperCAmelCase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : Any , __UpperCamelCase : str , __UpperCamelCase : str , __UpperCamelCase : List[str] , __UpperCamelCase : List[Any]=True ):
for resnet in self.resnets:
# pop res hidden states
_UpperCAmelCase = res_hidden_states_tuple[-1]
_UpperCAmelCase = res_hidden_states_tuple[:-1]
_UpperCAmelCase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
_UpperCAmelCase = resnet(__UpperCamelCase , __UpperCamelCase , deterministic=__UpperCamelCase )
if self.add_upsample:
_UpperCAmelCase = self.upsamplers_a(__UpperCamelCase )
return hidden_states
class __SCREAMING_SNAKE_CASE ( nn.Module):
__SCREAMING_SNAKE_CASE : int
__SCREAMING_SNAKE_CASE : float = 0.0
__SCREAMING_SNAKE_CASE : int = 1
__SCREAMING_SNAKE_CASE : int = 1
__SCREAMING_SNAKE_CASE : bool = False
__SCREAMING_SNAKE_CASE : bool = False
__SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa
def UpperCAmelCase__ ( self : Any ):
# there is always at least one resnet
_UpperCAmelCase = [
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
_UpperCAmelCase = []
for _ in range(self.num_layers ):
_UpperCAmelCase = FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__UpperCamelCase )
_UpperCAmelCase = FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__UpperCamelCase )
_UpperCAmelCase = resnets
_UpperCAmelCase = attentions
def __call__( self : Tuple , __UpperCamelCase : List[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : int , __UpperCamelCase : str=True ):
_UpperCAmelCase = self.resnets[0](__UpperCamelCase , __UpperCamelCase )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
_UpperCAmelCase = attn(__UpperCamelCase , __UpperCamelCase , deterministic=__UpperCamelCase )
_UpperCAmelCase = resnet(__UpperCamelCase , __UpperCamelCase , deterministic=__UpperCamelCase )
return hidden_states
| 684 |
__lowerCAmelCase = 2_5_6
# Modulus to hash a string
__lowerCAmelCase = 1_0_0_0_0_0_3
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> bool:
_UpperCAmelCase = len(_lowerCAmelCase )
_UpperCAmelCase = len(_lowerCAmelCase )
if p_len > t_len:
return False
_UpperCAmelCase = 0
_UpperCAmelCase = 0
_UpperCAmelCase = 1
# Calculating the hash of pattern and substring of text
for i in range(_lowerCAmelCase ):
_UpperCAmelCase = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
_UpperCAmelCase = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
_UpperCAmelCase = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
_UpperCAmelCase = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
def __lowerCamelCase ( ) -> None:
_UpperCAmelCase = "abc1abc12"
_UpperCAmelCase = "alskfjaldsabc1abc1abc12k23adsfabcabc"
_UpperCAmelCase = "alskfjaldsk23adsfabcabc"
assert rabin_karp(_lowerCAmelCase , _lowerCAmelCase ) and not rabin_karp(_lowerCAmelCase , _lowerCAmelCase )
# Test 2)
_UpperCAmelCase = "ABABX"
_UpperCAmelCase = "ABABZABABYABABX"
assert rabin_karp(_lowerCAmelCase , _lowerCAmelCase )
# Test 3)
_UpperCAmelCase = "AAAB"
_UpperCAmelCase = "ABAAAAAB"
assert rabin_karp(_lowerCAmelCase , _lowerCAmelCase )
# Test 4)
_UpperCAmelCase = "abcdabcy"
_UpperCAmelCase = "abcxabcdabxabcdabcdabcy"
assert rabin_karp(_lowerCAmelCase , _lowerCAmelCase )
# Test 5)
_UpperCAmelCase = "Lü"
_UpperCAmelCase = "Lüsai"
assert rabin_karp(_lowerCAmelCase , _lowerCAmelCase )
_UpperCAmelCase = "Lue"
assert not rabin_karp(_lowerCAmelCase , _lowerCAmelCase )
print("Success." )
if __name__ == "__main__":
test_rabin_karp()
| 684 | 1 |
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase = False ) -> str:
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_UpperCAmelCase = F'''Expected string as input, found {type(_lowerCAmelCase )}'''
raise ValueError(_lowerCAmelCase )
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_UpperCAmelCase = F'''Expected boolean as use_pascal parameter, found {type(_lowerCAmelCase )}'''
raise ValueError(_lowerCAmelCase )
_UpperCAmelCase = input_str.split("_" )
_UpperCAmelCase = 0 if use_pascal else 1
_UpperCAmelCase = words[start_index:]
_UpperCAmelCase = [word[0].upper() + word[1:] for word in words_to_capitalize]
_UpperCAmelCase = "" if use_pascal else words[0]
return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 684 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
__lowerCAmelCase = random.Random()
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase=1.0 , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> List[str]:
if rng is None:
_UpperCAmelCase = global_rng
_UpperCAmelCase = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
def __init__( self : Optional[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : List[Any]=7 , __UpperCamelCase : Union[str, Any]=400 , __UpperCamelCase : List[Any]=2_000 , __UpperCamelCase : Optional[Any]=10 , __UpperCamelCase : Optional[int]=160 , __UpperCamelCase : Any=8 , __UpperCamelCase : List[Any]=0.0 , __UpperCamelCase : Dict=4_000 , __UpperCamelCase : Optional[int]=False , __UpperCamelCase : Tuple=True , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = min_seq_length
_UpperCAmelCase = max_seq_length
_UpperCAmelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_UpperCAmelCase = padding_value
_UpperCAmelCase = sampling_rate
_UpperCAmelCase = return_attention_mask
_UpperCAmelCase = do_normalize
_UpperCAmelCase = feature_size
_UpperCAmelCase = chunk_length
_UpperCAmelCase = hop_length
def UpperCAmelCase__ ( self : Optional[Any] ):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : Tuple=False , __UpperCamelCase : Dict=False ):
def _flatten(__UpperCamelCase : Any ):
return list(itertools.chain(*__UpperCamelCase ) )
if equal_length:
_UpperCAmelCase = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_UpperCAmelCase = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_UpperCAmelCase = [np.asarray(__UpperCamelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __SCREAMING_SNAKE_CASE ( lowercase , unittest.TestCase):
__SCREAMING_SNAKE_CASE : str = WhisperFeatureExtractor if is_speech_available() else None
def UpperCAmelCase__ ( self : str ):
_UpperCAmelCase = WhisperFeatureExtractionTester(self )
def UpperCAmelCase__ ( self : str ):
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = feat_extract_first.save_pretrained(__UpperCamelCase )[0]
check_json_file_has_correct_format(__UpperCamelCase )
_UpperCAmelCase = self.feature_extraction_class.from_pretrained(__UpperCamelCase )
_UpperCAmelCase = feat_extract_first.to_dict()
_UpperCAmelCase = feat_extract_second.to_dict()
_UpperCAmelCase = feat_extract_first.mel_filters
_UpperCAmelCase = feat_extract_second.mel_filters
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase ) )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def UpperCAmelCase__ ( self : Union[str, Any] ):
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = os.path.join(__UpperCamelCase , "feat_extract.json" )
feat_extract_first.to_json_file(__UpperCamelCase )
_UpperCAmelCase = self.feature_extraction_class.from_json_file(__UpperCamelCase )
_UpperCAmelCase = feat_extract_first.to_dict()
_UpperCAmelCase = feat_extract_second.to_dict()
_UpperCAmelCase = feat_extract_first.mel_filters
_UpperCAmelCase = feat_extract_second.mel_filters
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase ) )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def UpperCAmelCase__ ( self : int ):
# Tests that all call wrap to encode_plus and batch_encode_plus
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_UpperCAmelCase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
_UpperCAmelCase = [np.asarray(__UpperCamelCase ) for speech_input in speech_inputs]
# Test feature size
_UpperCAmelCase = feature_extractor(__UpperCamelCase , padding="max_length" , return_tensors="np" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
_UpperCAmelCase = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_features
_UpperCAmelCase = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_features
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
# Test batched
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
_UpperCAmelCase = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_UpperCAmelCase = np.asarray(__UpperCamelCase )
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
# Test truncation required
_UpperCAmelCase = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
_UpperCAmelCase = [np.asarray(__UpperCamelCase ) for speech_input in speech_inputs]
_UpperCAmelCase = [x[: feature_extractor.n_samples] for x in speech_inputs]
_UpperCAmelCase = [np.asarray(__UpperCamelCase ) for speech_input in speech_inputs_truncated]
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
def UpperCAmelCase__ ( self : Union[str, Any] ):
import torch
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCAmelCase = np.random.rand(100 , 32 ).astype(np.floataa )
_UpperCAmelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_UpperCAmelCase = feature_extractor.pad([{"input_features": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
_UpperCAmelCase = feature_extractor.pad([{"input_features": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : Tuple ):
_UpperCAmelCase = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
_UpperCAmelCase = ds.sort("id" ).select(range(__UpperCamelCase ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def UpperCAmelCase__ ( self : Tuple ):
# fmt: off
_UpperCAmelCase = torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
_UpperCAmelCase = self._load_datasamples(1 )
_UpperCAmelCase = WhisperFeatureExtractor()
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="pt" ).input_features
self.assertEqual(input_features.shape , (1, 80, 3_000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , __UpperCamelCase , atol=1e-4 ) )
def UpperCAmelCase__ ( self : Optional[Any] ):
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCAmelCase = self._load_datasamples(1 )[0]
_UpperCAmelCase = ((audio - audio.min()) / (audio.max() - audio.min())) * 65_535 # Rescale to [0, 65535] to show issue
_UpperCAmelCase = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=__UpperCamelCase )[0]
self.assertTrue(np.all(np.mean(__UpperCamelCase ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(__UpperCamelCase ) - 1 ) < 1e-3 ) )
| 684 | 1 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __SCREAMING_SNAKE_CASE :
@staticmethod
def UpperCAmelCase__ ( *__UpperCamelCase : Dict , **__UpperCamelCase : Optional[int] ):
pass
@is_pipeline_test
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
__SCREAMING_SNAKE_CASE : List[Any] = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : Any , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] ):
_UpperCAmelCase = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" )
_UpperCAmelCase = [
{
"image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"question": "How many cats are there?",
},
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"question": "How many cats are there?",
},
]
return vqa_pipeline, examples
def UpperCAmelCase__ ( self : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[int] ):
_UpperCAmelCase = vqa_pipeline(__UpperCamelCase , top_k=1 )
self.assertEqual(
__UpperCamelCase , [
[{"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}],
[{"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}],
] , )
@require_torch
def UpperCAmelCase__ ( self : List[Any] ):
_UpperCAmelCase = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" )
_UpperCAmelCase = "./tests/fixtures/tests_samples/COCO/000000039769.png"
_UpperCAmelCase = "How many cats are there?"
_UpperCAmelCase = vqa_pipeline(image=__UpperCamelCase , question="How many cats are there?" , top_k=2 )
self.assertEqual(
__UpperCamelCase , [{"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}, {"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}] )
_UpperCAmelCase = vqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
__UpperCamelCase , [{"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}, {"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}] )
@slow
@require_torch
def UpperCAmelCase__ ( self : List[str] ):
_UpperCAmelCase = pipeline("visual-question-answering" , model="dandelin/vilt-b32-finetuned-vqa" )
_UpperCAmelCase = "./tests/fixtures/tests_samples/COCO/000000039769.png"
_UpperCAmelCase = "How many cats are there?"
_UpperCAmelCase = vqa_pipeline(image=__UpperCamelCase , question=__UpperCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}] )
_UpperCAmelCase = vqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}] )
_UpperCAmelCase = vqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2 , )
@require_tf
@unittest.skip("Visual question answering not implemented in TF" )
def UpperCAmelCase__ ( self : Optional[int] ):
pass
| 684 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
__lowerCAmelCase = "\nHuman: <<task>>\n\nAssistant: "
__lowerCAmelCase = "huggingface-tools/default-prompts"
__lowerCAmelCase = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase="run" ) -> Union[str, Any]:
if prompt_or_repo_id is None:
_UpperCAmelCase = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search("\\s" , _lowerCAmelCase ) is not None:
return prompt_or_repo_id
_UpperCAmelCase = cached_file(
_lowerCAmelCase , PROMPT_FILES[mode] , repo_type="dataset" , user_agent={"agent": agent_name} )
with open(_lowerCAmelCase , "r" , encoding="utf-8" ) as f:
return f.read()
| 684 | 1 |
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class __SCREAMING_SNAKE_CASE :
def __init__( self : Optional[int] , __UpperCamelCase : int , __UpperCamelCase : Any=13 , __UpperCamelCase : List[Any]=30 , __UpperCamelCase : Union[str, Any]=2 , __UpperCamelCase : Dict=3 , __UpperCamelCase : Any=True , __UpperCamelCase : List[str]=True , __UpperCamelCase : Tuple=32 , __UpperCamelCase : List[Any]=5 , __UpperCamelCase : Dict=4 , __UpperCamelCase : Any=37 , __UpperCamelCase : int="gelu" , __UpperCamelCase : Any=0.1 , __UpperCamelCase : List[str]=0.1 , __UpperCamelCase : Optional[int]=10 , __UpperCamelCase : List[str]=0.02 , __UpperCamelCase : int=3 , __UpperCamelCase : Optional[int]=None , __UpperCamelCase : str=2 , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = scope
_UpperCAmelCase = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
_UpperCAmelCase = (image_size // patch_size) ** 2
_UpperCAmelCase = num_patches + 2
def UpperCAmelCase__ ( self : Optional[Any] ):
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__ ( self : Any ):
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def UpperCAmelCase__ ( self : Optional[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : Any , __UpperCamelCase : List[str] ):
_UpperCAmelCase = DeiTModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self : int , __UpperCamelCase : List[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : List[Any] ):
_UpperCAmelCase = DeiTForMaskedImageModeling(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_UpperCAmelCase = 1
_UpperCAmelCase = DeiTForMaskedImageModeling(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def UpperCAmelCase__ ( self : Any , __UpperCamelCase : str , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[str] ):
_UpperCAmelCase = self.type_sequence_label_size
_UpperCAmelCase = DeiTForImageClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_UpperCAmelCase = 1
_UpperCAmelCase = DeiTForImageClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_UpperCAmelCase = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase__ ( self : str ):
_UpperCAmelCase = self.prepare_config_and_inputs()
(
(
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) ,
) = config_and_inputs
_UpperCAmelCase = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( lowercase , lowercase , unittest.TestCase):
__SCREAMING_SNAKE_CASE : Union[str, Any] = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
__SCREAMING_SNAKE_CASE : Dict = (
{
"""feature-extraction""": DeiTModel,
"""image-classification""": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE : int = False
__SCREAMING_SNAKE_CASE : List[str] = False
__SCREAMING_SNAKE_CASE : Union[str, Any] = False
def UpperCAmelCase__ ( self : Any ):
_UpperCAmelCase = DeiTModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def UpperCAmelCase__ ( self : Tuple ):
self.config_tester.run_common_tests()
@unittest.skip(reason="DeiT does not use inputs_embeds" )
def UpperCAmelCase__ ( self : str ):
pass
def UpperCAmelCase__ ( self : int ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def UpperCAmelCase__ ( self : List[Any] ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(__UpperCamelCase )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def UpperCAmelCase__ ( self : Tuple ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def UpperCAmelCase__ ( self : List[Any] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__UpperCamelCase )
def UpperCAmelCase__ ( self : Dict ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase )
def UpperCAmelCase__ ( self : List[Any] , __UpperCamelCase : Any , __UpperCamelCase : int , __UpperCamelCase : Any=False ):
_UpperCAmelCase = super()._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def UpperCAmelCase__ ( self : Tuple ):
if not self.model_tester.is_training:
return
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(__UpperCamelCase )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
_UpperCAmelCase = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.train()
_UpperCAmelCase = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
_UpperCAmelCase = model(**__UpperCamelCase ).loss
loss.backward()
def UpperCAmelCase__ ( self : Union[str, Any] ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
_UpperCAmelCase = False
_UpperCAmelCase = True
for model_class in self.all_model_classes:
if model_class in get_values(__UpperCamelCase ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
_UpperCAmelCase = model_class(__UpperCamelCase )
model.gradient_checkpointing_enable()
model.to(__UpperCamelCase )
model.train()
_UpperCAmelCase = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
_UpperCAmelCase = model(**__UpperCamelCase ).loss
loss.backward()
def UpperCAmelCase__ ( self : Optional[Any] ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = [
{"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
{"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
{"title": "regression", "num_labels": 1, "dtype": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(__UpperCamelCase ),
*get_values(__UpperCamelCase ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'''Testing {model_class} with {problem_type["title"]}''' ):
_UpperCAmelCase = problem_type["title"]
_UpperCAmelCase = problem_type["num_labels"]
_UpperCAmelCase = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.train()
_UpperCAmelCase = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
if problem_type["num_labels"] > 1:
_UpperCAmelCase = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] )
_UpperCAmelCase = inputs["labels"].to(problem_type["dtype"] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=__UpperCamelCase ) as warning_list:
_UpperCAmelCase = model(**__UpperCamelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F'''Something is going wrong in the regression problem: intercepted {w.message}''' )
loss.backward()
@slow
def UpperCAmelCase__ ( self : List[str] ):
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = DeiTModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def __lowerCamelCase ( ) -> Dict:
_UpperCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
@cached_property
def UpperCAmelCase__ ( self : Union[str, Any] ):
return (
DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase__ ( self : Optional[Any] ):
_UpperCAmelCase = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" ).to(
__UpperCamelCase )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=__UpperCamelCase , return_tensors="pt" ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(**__UpperCamelCase )
# verify the logits
_UpperCAmelCase = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
_UpperCAmelCase = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def UpperCAmelCase__ ( self : Union[str, Any] ):
_UpperCAmelCase = DeiTModel.from_pretrained(
"facebook/deit-base-distilled-patch16-224" , torch_dtype=torch.floataa , device_map="auto" )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=__UpperCamelCase , return_tensors="pt" )
_UpperCAmelCase = inputs.pixel_values.to(__UpperCamelCase )
# forward pass to make sure inference works in fp16
with torch.no_grad():
_UpperCAmelCase = model(__UpperCamelCase )
| 684 |
from itertools import permutations
def __lowerCamelCase ( _lowerCAmelCase ) -> bool:
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
_UpperCAmelCase = [7, 11, 13, 17]
for i, test in enumerate(_lowerCAmelCase ):
if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def __lowerCamelCase ( _lowerCAmelCase = 10 ) -> int:
return sum(
int("".join(map(_lowerCAmelCase , _lowerCAmelCase ) ) )
for num in permutations(range(_lowerCAmelCase ) )
if is_substring_divisible(_lowerCAmelCase ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 684 | 1 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
__lowerCAmelCase = random.Random()
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase=1.0 , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> List[str]:
if rng is None:
_UpperCAmelCase = global_rng
_UpperCAmelCase = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
def __init__( self : Optional[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : List[Any]=7 , __UpperCamelCase : Union[str, Any]=400 , __UpperCamelCase : List[Any]=2_000 , __UpperCamelCase : Optional[Any]=10 , __UpperCamelCase : Optional[int]=160 , __UpperCamelCase : Any=8 , __UpperCamelCase : List[Any]=0.0 , __UpperCamelCase : Dict=4_000 , __UpperCamelCase : Optional[int]=False , __UpperCamelCase : Tuple=True , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = min_seq_length
_UpperCAmelCase = max_seq_length
_UpperCAmelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_UpperCAmelCase = padding_value
_UpperCAmelCase = sampling_rate
_UpperCAmelCase = return_attention_mask
_UpperCAmelCase = do_normalize
_UpperCAmelCase = feature_size
_UpperCAmelCase = chunk_length
_UpperCAmelCase = hop_length
def UpperCAmelCase__ ( self : Optional[Any] ):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : Tuple=False , __UpperCamelCase : Dict=False ):
def _flatten(__UpperCamelCase : Any ):
return list(itertools.chain(*__UpperCamelCase ) )
if equal_length:
_UpperCAmelCase = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_UpperCAmelCase = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_UpperCAmelCase = [np.asarray(__UpperCamelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __SCREAMING_SNAKE_CASE ( lowercase , unittest.TestCase):
__SCREAMING_SNAKE_CASE : str = WhisperFeatureExtractor if is_speech_available() else None
def UpperCAmelCase__ ( self : str ):
_UpperCAmelCase = WhisperFeatureExtractionTester(self )
def UpperCAmelCase__ ( self : str ):
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = feat_extract_first.save_pretrained(__UpperCamelCase )[0]
check_json_file_has_correct_format(__UpperCamelCase )
_UpperCAmelCase = self.feature_extraction_class.from_pretrained(__UpperCamelCase )
_UpperCAmelCase = feat_extract_first.to_dict()
_UpperCAmelCase = feat_extract_second.to_dict()
_UpperCAmelCase = feat_extract_first.mel_filters
_UpperCAmelCase = feat_extract_second.mel_filters
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase ) )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def UpperCAmelCase__ ( self : Union[str, Any] ):
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = os.path.join(__UpperCamelCase , "feat_extract.json" )
feat_extract_first.to_json_file(__UpperCamelCase )
_UpperCAmelCase = self.feature_extraction_class.from_json_file(__UpperCamelCase )
_UpperCAmelCase = feat_extract_first.to_dict()
_UpperCAmelCase = feat_extract_second.to_dict()
_UpperCAmelCase = feat_extract_first.mel_filters
_UpperCAmelCase = feat_extract_second.mel_filters
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase ) )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def UpperCAmelCase__ ( self : int ):
# Tests that all call wrap to encode_plus and batch_encode_plus
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_UpperCAmelCase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
_UpperCAmelCase = [np.asarray(__UpperCamelCase ) for speech_input in speech_inputs]
# Test feature size
_UpperCAmelCase = feature_extractor(__UpperCamelCase , padding="max_length" , return_tensors="np" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
_UpperCAmelCase = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_features
_UpperCAmelCase = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_features
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
# Test batched
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
_UpperCAmelCase = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_UpperCAmelCase = np.asarray(__UpperCamelCase )
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
# Test truncation required
_UpperCAmelCase = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
_UpperCAmelCase = [np.asarray(__UpperCamelCase ) for speech_input in speech_inputs]
_UpperCAmelCase = [x[: feature_extractor.n_samples] for x in speech_inputs]
_UpperCAmelCase = [np.asarray(__UpperCamelCase ) for speech_input in speech_inputs_truncated]
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
def UpperCAmelCase__ ( self : Union[str, Any] ):
import torch
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCAmelCase = np.random.rand(100 , 32 ).astype(np.floataa )
_UpperCAmelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_UpperCAmelCase = feature_extractor.pad([{"input_features": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
_UpperCAmelCase = feature_extractor.pad([{"input_features": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : Tuple ):
_UpperCAmelCase = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
_UpperCAmelCase = ds.sort("id" ).select(range(__UpperCamelCase ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def UpperCAmelCase__ ( self : Tuple ):
# fmt: off
_UpperCAmelCase = torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
_UpperCAmelCase = self._load_datasamples(1 )
_UpperCAmelCase = WhisperFeatureExtractor()
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="pt" ).input_features
self.assertEqual(input_features.shape , (1, 80, 3_000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , __UpperCamelCase , atol=1e-4 ) )
def UpperCAmelCase__ ( self : Optional[Any] ):
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCAmelCase = self._load_datasamples(1 )[0]
_UpperCAmelCase = ((audio - audio.min()) / (audio.max() - audio.min())) * 65_535 # Rescale to [0, 65535] to show issue
_UpperCAmelCase = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=__UpperCamelCase )[0]
self.assertTrue(np.all(np.mean(__UpperCamelCase ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(__UpperCamelCase ) - 1 ) < 1e-3 ) )
| 684 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
__lowerCAmelCase = {
"vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
"merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
"tokenizer_config_file": {
"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
},
}
__lowerCAmelCase = {"facebook/blenderbot-3B": 1_2_8}
class __SCREAMING_SNAKE_CASE ( lowercase):
__SCREAMING_SNAKE_CASE : Optional[int] = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : str = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : List[Any] = ["""input_ids""", """attention_mask"""]
__SCREAMING_SNAKE_CASE : List[str] = BlenderbotTokenizer
def __init__( self : Tuple , __UpperCamelCase : List[str]=None , __UpperCamelCase : Union[str, Any]=None , __UpperCamelCase : List[str]=None , __UpperCamelCase : Union[str, Any]="replace" , __UpperCamelCase : Tuple="<s>" , __UpperCamelCase : str="</s>" , __UpperCamelCase : Dict="</s>" , __UpperCamelCase : Union[str, Any]="<s>" , __UpperCamelCase : Union[str, Any]="<unk>" , __UpperCamelCase : Tuple="<pad>" , __UpperCamelCase : Optional[int]="<mask>" , __UpperCamelCase : Union[str, Any]=False , __UpperCamelCase : List[str]=True , **__UpperCamelCase : int , ):
super().__init__(
__UpperCamelCase , __UpperCamelCase , tokenizer_file=__UpperCamelCase , errors=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , sep_token=__UpperCamelCase , cls_token=__UpperCamelCase , unk_token=__UpperCamelCase , pad_token=__UpperCamelCase , mask_token=__UpperCamelCase , add_prefix_space=__UpperCamelCase , trim_offsets=__UpperCamelCase , **__UpperCamelCase , )
_UpperCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , __UpperCamelCase ) != add_prefix_space:
_UpperCAmelCase = getattr(__UpperCamelCase , pre_tok_state.pop("type" ) )
_UpperCAmelCase = add_prefix_space
_UpperCAmelCase = pre_tok_class(**__UpperCamelCase )
_UpperCAmelCase = add_prefix_space
_UpperCAmelCase = "post_processor"
_UpperCAmelCase = getattr(self.backend_tokenizer , __UpperCamelCase , __UpperCamelCase )
if tokenizer_component_instance:
_UpperCAmelCase = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_UpperCAmelCase = tuple(state["sep"] )
if "cls" in state:
_UpperCAmelCase = tuple(state["cls"] )
_UpperCAmelCase = False
if state.get("add_prefix_space" , __UpperCamelCase ) != add_prefix_space:
_UpperCAmelCase = add_prefix_space
_UpperCAmelCase = True
if state.get("trim_offsets" , __UpperCamelCase ) != trim_offsets:
_UpperCAmelCase = trim_offsets
_UpperCAmelCase = True
if changes_to_apply:
_UpperCAmelCase = getattr(__UpperCamelCase , state.pop("type" ) )
_UpperCAmelCase = component_class(**__UpperCamelCase )
setattr(self.backend_tokenizer , __UpperCamelCase , __UpperCamelCase )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def UpperCAmelCase__ ( self : Union[str, Any] ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : Union[str, Any] ):
_UpperCAmelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else value
_UpperCAmelCase = value
def UpperCAmelCase__ ( self : int , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : List[Any] ):
_UpperCAmelCase = kwargs.get("is_split_into_words" , __UpperCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__UpperCamelCase , **__UpperCamelCase )
def UpperCAmelCase__ ( self : Tuple , *__UpperCamelCase : int , **__UpperCamelCase : Union[str, Any] ):
_UpperCAmelCase = kwargs.get("is_split_into_words" , __UpperCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__UpperCamelCase , **__UpperCamelCase )
def UpperCAmelCase__ ( self : str , __UpperCamelCase : str , __UpperCamelCase : Optional[str] = None ):
_UpperCAmelCase = self._tokenizer.model.save(__UpperCamelCase , name=__UpperCamelCase )
return tuple(__UpperCamelCase )
def UpperCAmelCase__ ( self : Optional[Any] , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ):
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ):
return token_ids_a + [self.eos_token_id]
def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : "Conversation" ):
_UpperCAmelCase = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(" " + text )
else:
# Generated responses should contain them already.
inputs.append(__UpperCamelCase )
_UpperCAmelCase = " ".join(__UpperCamelCase )
_UpperCAmelCase = self.encode(__UpperCamelCase )
if len(__UpperCamelCase ) > self.model_max_length:
_UpperCAmelCase = input_ids[-self.model_max_length :]
logger.warning(F'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
| 684 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( lowercase , unittest.TestCase):
__SCREAMING_SNAKE_CASE : Any = KandinskyInpaintPipeline
__SCREAMING_SNAKE_CASE : Any = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
__SCREAMING_SNAKE_CASE : str = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
__SCREAMING_SNAKE_CASE : str = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
__SCREAMING_SNAKE_CASE : Union[str, Any] = False
@property
def UpperCAmelCase__ ( self : int ):
return 32
@property
def UpperCAmelCase__ ( self : Tuple ):
return 32
@property
def UpperCAmelCase__ ( self : Tuple ):
return self.time_input_dim
@property
def UpperCAmelCase__ ( self : List[Any] ):
return self.time_input_dim * 4
@property
def UpperCAmelCase__ ( self : Optional[Any] ):
return 100
@property
def UpperCAmelCase__ ( self : Union[str, Any] ):
_UpperCAmelCase = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" )
return tokenizer
@property
def UpperCAmelCase__ ( self : Dict ):
torch.manual_seed(0 )
_UpperCAmelCase = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_005 , )
_UpperCAmelCase = MultilingualCLIP(__UpperCamelCase )
_UpperCAmelCase = text_encoder.eval()
return text_encoder
@property
def UpperCAmelCase__ ( self : Tuple ):
torch.manual_seed(0 )
_UpperCAmelCase = {
"in_channels": 9,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "text_image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "text_image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
_UpperCAmelCase = UNetaDConditionModel(**__UpperCamelCase )
return model
@property
def UpperCAmelCase__ ( self : str ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def UpperCAmelCase__ ( self : str ):
torch.manual_seed(0 )
_UpperCAmelCase = VQModel(**self.dummy_movq_kwargs )
return model
def UpperCAmelCase__ ( self : Union[str, Any] ):
_UpperCAmelCase = self.dummy_text_encoder
_UpperCAmelCase = self.dummy_tokenizer
_UpperCAmelCase = self.dummy_unet
_UpperCAmelCase = self.dummy_movq
_UpperCAmelCase = DDIMScheduler(
num_train_timesteps=1_000 , beta_schedule="linear" , beta_start=0.00085 , beta_end=0.012 , clip_sample=__UpperCamelCase , set_alpha_to_one=__UpperCamelCase , steps_offset=1 , prediction_type="epsilon" , thresholding=__UpperCamelCase , )
_UpperCAmelCase = {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def UpperCAmelCase__ ( self : int , __UpperCamelCase : str , __UpperCamelCase : Optional[Any]=0 ):
_UpperCAmelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase )
_UpperCAmelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__UpperCamelCase )
# create init_image
_UpperCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase )
_UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_UpperCAmelCase = Image.fromarray(np.uinta(__UpperCamelCase ) ).convert("RGB" ).resize((256, 256) )
# create mask
_UpperCAmelCase = np.ones((64, 64) , dtype=np.floataa )
_UpperCAmelCase = 0
if str(__UpperCamelCase ).startswith("mps" ):
_UpperCAmelCase = torch.manual_seed(__UpperCamelCase )
else:
_UpperCAmelCase = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase )
_UpperCAmelCase = {
"prompt": "horse",
"image": init_image,
"mask_image": mask,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 2,
"guidance_scale": 4.0,
"output_type": "np",
}
return inputs
def UpperCAmelCase__ ( self : Optional[int] ):
_UpperCAmelCase = "cpu"
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = self.pipeline_class(**__UpperCamelCase )
_UpperCAmelCase = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
_UpperCAmelCase = pipe(**self.get_dummy_inputs(__UpperCamelCase ) )
_UpperCAmelCase = output.images
_UpperCAmelCase = pipe(
**self.get_dummy_inputs(__UpperCamelCase ) , return_dict=__UpperCamelCase , )[0]
_UpperCAmelCase = image[0, -3:, -3:, -1]
_UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1]
print(F'''image.shape {image.shape}''' )
assert image.shape == (1, 64, 64, 3)
_UpperCAmelCase = np.array(
[0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
def UpperCAmelCase__ ( self : Union[str, Any] ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
def UpperCAmelCase__ ( self : List[str] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self : List[str] ):
_UpperCAmelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy" )
_UpperCAmelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
_UpperCAmelCase = np.ones((768, 768) , dtype=np.floataa )
_UpperCAmelCase = 0
_UpperCAmelCase = "a hat"
_UpperCAmelCase = KandinskyPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa )
pipe_prior.to(__UpperCamelCase )
_UpperCAmelCase = KandinskyInpaintPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-inpaint" , torch_dtype=torch.floataa )
_UpperCAmelCase = pipeline.to(__UpperCamelCase )
pipeline.set_progress_bar_config(disable=__UpperCamelCase )
_UpperCAmelCase = torch.Generator(device="cpu" ).manual_seed(0 )
_UpperCAmelCase , _UpperCAmelCase = pipe_prior(
__UpperCamelCase , generator=__UpperCamelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
_UpperCAmelCase = pipeline(
__UpperCamelCase , image=__UpperCamelCase , mask_image=__UpperCamelCase , image_embeds=__UpperCamelCase , negative_image_embeds=__UpperCamelCase , generator=__UpperCamelCase , num_inference_steps=100 , height=768 , width=768 , output_type="np" , )
_UpperCAmelCase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__UpperCamelCase , __UpperCamelCase )
| 684 |
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
__lowerCAmelCase = logging.get_logger(__name__)
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
_UpperCAmelCase = WavaVecaForSequenceClassification.from_pretrained(_lowerCAmelCase , config=_lowerCAmelCase )
_UpperCAmelCase = downstream_dict["projector.weight"]
_UpperCAmelCase = downstream_dict["projector.bias"]
_UpperCAmelCase = downstream_dict["model.post_net.linear.weight"]
_UpperCAmelCase = downstream_dict["model.post_net.linear.bias"]
return model
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> str:
_UpperCAmelCase = WavaVecaForAudioFrameClassification.from_pretrained(_lowerCAmelCase , config=_lowerCAmelCase )
_UpperCAmelCase = downstream_dict["model.linear.weight"]
_UpperCAmelCase = downstream_dict["model.linear.bias"]
return model
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
_UpperCAmelCase = WavaVecaForXVector.from_pretrained(_lowerCAmelCase , config=_lowerCAmelCase )
_UpperCAmelCase = downstream_dict["connector.weight"]
_UpperCAmelCase = downstream_dict["connector.bias"]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
_UpperCAmelCase = downstream_dict[
F'''model.framelevel_feature_extractor.module.{i}.kernel.weight'''
]
_UpperCAmelCase = downstream_dict[F'''model.framelevel_feature_extractor.module.{i}.kernel.bias''']
_UpperCAmelCase = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
_UpperCAmelCase = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
_UpperCAmelCase = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
_UpperCAmelCase = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
_UpperCAmelCase = downstream_dict["objective.W"]
return model
@torch.no_grad()
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
_UpperCAmelCase = torch.load(_lowerCAmelCase , map_location="cpu" )
_UpperCAmelCase = checkpoint["Downstream"]
_UpperCAmelCase = WavaVecaConfig.from_pretrained(_lowerCAmelCase )
_UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(
_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , do_normalize=_lowerCAmelCase )
_UpperCAmelCase = hf_config.architectures[0]
if arch.endswith("ForSequenceClassification" ):
_UpperCAmelCase = convert_classification(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
elif arch.endswith("ForAudioFrameClassification" ):
_UpperCAmelCase = convert_diarization(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
elif arch.endswith("ForXVector" ):
_UpperCAmelCase = convert_xvector(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
else:
raise NotImplementedError(F'''S3PRL weights conversion is not supported for {arch}''' )
if hf_config.use_weighted_layer_sum:
_UpperCAmelCase = checkpoint["Featurizer"]["weights"]
hf_feature_extractor.save_pretrained(_lowerCAmelCase )
hf_model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
)
parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
__lowerCAmelCase = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 684 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
"facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
"facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
"facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
"facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
"facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
"facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
"facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
"facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
"facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}
class __SCREAMING_SNAKE_CASE ( lowercase):
__SCREAMING_SNAKE_CASE : str = """xmod"""
def __init__( self : str , __UpperCamelCase : Optional[int]=30_522 , __UpperCamelCase : List[Any]=768 , __UpperCamelCase : Union[str, Any]=12 , __UpperCamelCase : Dict=12 , __UpperCamelCase : Tuple=3_072 , __UpperCamelCase : Union[str, Any]="gelu" , __UpperCamelCase : List[Any]=0.1 , __UpperCamelCase : int=0.1 , __UpperCamelCase : Optional[int]=512 , __UpperCamelCase : Tuple=2 , __UpperCamelCase : Union[str, Any]=0.02 , __UpperCamelCase : Optional[Any]=1e-1_2 , __UpperCamelCase : str=1 , __UpperCamelCase : Optional[int]=0 , __UpperCamelCase : str=2 , __UpperCamelCase : List[str]="absolute" , __UpperCamelCase : Optional[Any]=True , __UpperCamelCase : Optional[Any]=None , __UpperCamelCase : Optional[int]=False , __UpperCamelCase : int=2 , __UpperCamelCase : List[Any]=False , __UpperCamelCase : int=True , __UpperCamelCase : List[Any]=True , __UpperCamelCase : Any=("en_XX",) , __UpperCamelCase : Tuple=None , **__UpperCamelCase : Tuple , ):
super().__init__(pad_token_id=__UpperCamelCase , bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , **__UpperCamelCase )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = hidden_act
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = position_embedding_type
_UpperCAmelCase = use_cache
_UpperCAmelCase = classifier_dropout
_UpperCAmelCase = pre_norm
_UpperCAmelCase = adapter_reduction_factor
_UpperCAmelCase = adapter_layer_norm
_UpperCAmelCase = adapter_reuse_layer_norm
_UpperCAmelCase = ln_before_adapter
_UpperCAmelCase = list(__UpperCamelCase )
_UpperCAmelCase = default_language
class __SCREAMING_SNAKE_CASE ( lowercase):
@property
def UpperCAmelCase__ ( self : Any ):
if self.task == "multiple-choice":
_UpperCAmelCase = {0: "batch", 1: "choice", 2: "sequence"}
else:
_UpperCAmelCase = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 684 |
def __lowerCamelCase ( _lowerCAmelCase ) -> str:
_UpperCAmelCase = []
_UpperCAmelCase = set({"(", "[", "{"} )
_UpperCAmelCase = set({")", "]", "}"} )
_UpperCAmelCase = {"{": "}", "[": "]", "(": ")"}
for i in range(len(_lowerCAmelCase ) ):
if s[i] in open_brackets:
stack.append(s[i] )
elif s[i] in closed_brackets and (
len(_lowerCAmelCase ) == 0 or (len(_lowerCAmelCase ) > 0 and open_to_closed[stack.pop()] != s[i])
):
return False
return len(_lowerCAmelCase ) == 0
def __lowerCamelCase ( ) -> str:
_UpperCAmelCase = input("Enter sequence of brackets: " )
if is_balanced(_lowerCAmelCase ):
print(_lowerCAmelCase , "is balanced" )
else:
print(_lowerCAmelCase , "is not balanced" )
if __name__ == "__main__":
main()
| 684 | 1 |
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
__lowerCAmelCase = "scheduler_config.json"
class __SCREAMING_SNAKE_CASE ( lowercase):
__SCREAMING_SNAKE_CASE : List[Any] = 1
__SCREAMING_SNAKE_CASE : Tuple = 2
__SCREAMING_SNAKE_CASE : int = 3
__SCREAMING_SNAKE_CASE : Union[str, Any] = 4
__SCREAMING_SNAKE_CASE : List[Any] = 5
__SCREAMING_SNAKE_CASE : Optional[Any] = 6
__SCREAMING_SNAKE_CASE : int = 7
__SCREAMING_SNAKE_CASE : Union[str, Any] = 8
__SCREAMING_SNAKE_CASE : Any = 9
__SCREAMING_SNAKE_CASE : List[Any] = 10
__SCREAMING_SNAKE_CASE : Any = 11
__SCREAMING_SNAKE_CASE : int = 12
__SCREAMING_SNAKE_CASE : int = 13
__SCREAMING_SNAKE_CASE : Optional[int] = 14
@dataclass
class __SCREAMING_SNAKE_CASE ( lowercase):
__SCREAMING_SNAKE_CASE : torch.FloatTensor
class __SCREAMING_SNAKE_CASE :
__SCREAMING_SNAKE_CASE : int = SCHEDULER_CONFIG_NAME
__SCREAMING_SNAKE_CASE : Optional[Any] = []
__SCREAMING_SNAKE_CASE : List[Any] = True
@classmethod
def UpperCAmelCase__ ( cls : List[Any] , __UpperCamelCase : Dict[str, Any] = None , __UpperCamelCase : Optional[str] = None , __UpperCamelCase : int=False , **__UpperCamelCase : str , ):
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = cls.load_config(
pretrained_model_name_or_path=__UpperCamelCase , subfolder=__UpperCamelCase , return_unused_kwargs=__UpperCamelCase , return_commit_hash=__UpperCamelCase , **__UpperCamelCase , )
return cls.from_config(__UpperCamelCase , return_unused_kwargs=__UpperCamelCase , **__UpperCamelCase )
def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : Union[str, os.PathLike] , __UpperCamelCase : bool = False , **__UpperCamelCase : Union[str, Any] ):
self.save_config(save_directory=__UpperCamelCase , push_to_hub=__UpperCamelCase , **__UpperCamelCase )
@property
def UpperCAmelCase__ ( self : Dict ):
return self._get_compatibles()
@classmethod
def UpperCAmelCase__ ( cls : Any ):
_UpperCAmelCase = list(set([cls.__name__] + cls._compatibles ) )
_UpperCAmelCase = importlib.import_module(__name__.split("." )[0] )
_UpperCAmelCase = [
getattr(__UpperCamelCase , __UpperCamelCase ) for c in compatible_classes_str if hasattr(__UpperCamelCase , __UpperCamelCase )
]
return compatible_classes
| 684 |
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> tuple[float, float]:
# Check if the input is valid
if not len(_lowerCAmelCase ) == len(_lowerCAmelCase ) == 3:
raise ValueError("Please enter a valid equation." )
if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0:
raise ValueError("Both a & b of two equations can't be zero." )
# Extract the coefficients
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = equationa
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = equationa
# Calculate the determinants of the matrices
_UpperCAmelCase = aa * ba - aa * ba
_UpperCAmelCase = ca * ba - ca * ba
_UpperCAmelCase = aa * ca - aa * ca
# Check if the system of linear equations has a solution (using Cramer's rule)
if determinant == 0:
if determinant_x == determinant_y == 0:
raise ValueError("Infinite solutions. (Consistent system)" )
else:
raise ValueError("No solution. (Inconsistent system)" )
else:
if determinant_x == determinant_y == 0:
# Trivial solution (Inconsistent system)
return (0.0, 0.0)
else:
_UpperCAmelCase = determinant_x / determinant
_UpperCAmelCase = determinant_y / determinant
# Non-Trivial Solution (Consistent system)
return (x, y)
| 684 | 1 |
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( lowercase , lowercase , unittest.TestCase):
__SCREAMING_SNAKE_CASE : Dict = VQModel
__SCREAMING_SNAKE_CASE : Optional[int] = """sample"""
@property
def UpperCAmelCase__ ( self : List[str] , __UpperCamelCase : Optional[int]=(32, 32) ):
_UpperCAmelCase = 4
_UpperCAmelCase = 3
_UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__UpperCamelCase )
return {"sample": image}
@property
def UpperCAmelCase__ ( self : Tuple ):
return (3, 32, 32)
@property
def UpperCAmelCase__ ( self : str ):
return (3, 32, 32)
def UpperCAmelCase__ ( self : Dict ):
_UpperCAmelCase = {
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 3,
}
_UpperCAmelCase = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Dict ):
pass
def UpperCAmelCase__ ( self : str ):
pass
def UpperCAmelCase__ ( self : List[str] ):
_UpperCAmelCase , _UpperCAmelCase = VQModel.from_pretrained("fusing/vqgan-dummy" , output_loading_info=__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(__UpperCamelCase )
_UpperCAmelCase = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def UpperCAmelCase__ ( self : List[Any] ):
_UpperCAmelCase = VQModel.from_pretrained("fusing/vqgan-dummy" )
model.to(__UpperCamelCase ).eval()
torch.manual_seed(0 )
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0 )
_UpperCAmelCase = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
_UpperCAmelCase = image.to(__UpperCamelCase )
with torch.no_grad():
_UpperCAmelCase = model(__UpperCamelCase ).sample
_UpperCAmelCase = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
_UpperCAmelCase = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143] )
# fmt: on
self.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
| 684 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
# Initialise PyTorch model
_UpperCAmelCase = RemBertConfig.from_json_file(_lowerCAmelCase )
print("Building PyTorch model from configuration: {}".format(str(_lowerCAmelCase ) ) )
_UpperCAmelCase = RemBertModel(_lowerCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# Save pytorch-model
print("Save PyTorch model to {}".format(_lowerCAmelCase ) )
torch.save(model.state_dict() , _lowerCAmelCase )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--rembert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained RemBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__lowerCAmelCase = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 684 | 1 |
from itertools import permutations
def __lowerCamelCase ( _lowerCAmelCase ) -> bool:
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
_UpperCAmelCase = [7, 11, 13, 17]
for i, test in enumerate(_lowerCAmelCase ):
if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def __lowerCamelCase ( _lowerCAmelCase = 10 ) -> int:
return sum(
int("".join(map(_lowerCAmelCase , _lowerCAmelCase ) ) )
for num in permutations(range(_lowerCAmelCase ) )
if is_substring_divisible(_lowerCAmelCase ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 684 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __SCREAMING_SNAKE_CASE :
@staticmethod
def UpperCAmelCase__ ( *__UpperCamelCase : Dict , **__UpperCamelCase : Optional[int] ):
pass
@is_pipeline_test
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
__SCREAMING_SNAKE_CASE : List[Any] = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : Any , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] ):
_UpperCAmelCase = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" )
_UpperCAmelCase = [
{
"image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"question": "How many cats are there?",
},
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"question": "How many cats are there?",
},
]
return vqa_pipeline, examples
def UpperCAmelCase__ ( self : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[int] ):
_UpperCAmelCase = vqa_pipeline(__UpperCamelCase , top_k=1 )
self.assertEqual(
__UpperCamelCase , [
[{"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}],
[{"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}],
] , )
@require_torch
def UpperCAmelCase__ ( self : List[Any] ):
_UpperCAmelCase = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" )
_UpperCAmelCase = "./tests/fixtures/tests_samples/COCO/000000039769.png"
_UpperCAmelCase = "How many cats are there?"
_UpperCAmelCase = vqa_pipeline(image=__UpperCamelCase , question="How many cats are there?" , top_k=2 )
self.assertEqual(
__UpperCamelCase , [{"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}, {"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}] )
_UpperCAmelCase = vqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
__UpperCamelCase , [{"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}, {"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}] )
@slow
@require_torch
def UpperCAmelCase__ ( self : List[str] ):
_UpperCAmelCase = pipeline("visual-question-answering" , model="dandelin/vilt-b32-finetuned-vqa" )
_UpperCAmelCase = "./tests/fixtures/tests_samples/COCO/000000039769.png"
_UpperCAmelCase = "How many cats are there?"
_UpperCAmelCase = vqa_pipeline(image=__UpperCamelCase , question=__UpperCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}] )
_UpperCAmelCase = vqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}] )
_UpperCAmelCase = vqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2 , )
@require_tf
@unittest.skip("Visual question answering not implemented in TF" )
def UpperCAmelCase__ ( self : Optional[int] ):
pass
| 684 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class __SCREAMING_SNAKE_CASE ( lowercase):
__SCREAMING_SNAKE_CASE : List[str] = """naver-clova-ix/donut-base-finetuned-docvqa"""
__SCREAMING_SNAKE_CASE : Any = (
"""This is a tool that answers a question about an document (pdf). It takes an input named `document` which """
"""should be the document containing the information, as well as a `question` that is the question about the """
"""document. It returns a text that contains the answer to the question."""
)
__SCREAMING_SNAKE_CASE : Optional[int] = """document_qa"""
__SCREAMING_SNAKE_CASE : int = AutoProcessor
__SCREAMING_SNAKE_CASE : Any = VisionEncoderDecoderModel
__SCREAMING_SNAKE_CASE : Optional[Any] = ["""image""", """text"""]
__SCREAMING_SNAKE_CASE : Optional[int] = ["""text"""]
def __init__( self : Any , *__UpperCamelCase : str , **__UpperCamelCase : str ):
if not is_vision_available():
raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool." )
super().__init__(*__UpperCamelCase , **__UpperCamelCase )
def UpperCAmelCase__ ( self : str , __UpperCamelCase : "Image" , __UpperCamelCase : str ):
_UpperCAmelCase = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
_UpperCAmelCase = task_prompt.replace("{user_input}" , __UpperCamelCase )
_UpperCAmelCase = self.pre_processor.tokenizer(
__UpperCamelCase , add_special_tokens=__UpperCamelCase , return_tensors="pt" ).input_ids
_UpperCAmelCase = self.pre_processor(__UpperCamelCase , return_tensors="pt" ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def UpperCAmelCase__ ( self : int , __UpperCamelCase : str ):
return self.model.generate(
inputs["pixel_values"].to(self.device ) , decoder_input_ids=inputs["decoder_input_ids"].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=__UpperCamelCase , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=__UpperCamelCase , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=__UpperCamelCase , ).sequences
def UpperCAmelCase__ ( self : Any , __UpperCamelCase : List[Any] ):
_UpperCAmelCase = self.pre_processor.batch_decode(__UpperCamelCase )[0]
_UpperCAmelCase = sequence.replace(self.pre_processor.tokenizer.eos_token , "" )
_UpperCAmelCase = sequence.replace(self.pre_processor.tokenizer.pad_token , "" )
_UpperCAmelCase = re.sub(r"<.*?>" , "" , __UpperCamelCase , count=1 ).strip() # remove first task start token
_UpperCAmelCase = self.pre_processor.tokenajson(__UpperCamelCase )
return sequence["answer"]
| 684 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 684 | 1 |
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> str:
# "extended trapezoidal rule"
# int(f) = dx/2 * (f1 + 2f2 + ... + fn)
_UpperCAmelCase = (boundary[1] - boundary[0]) / steps
_UpperCAmelCase = boundary[0]
_UpperCAmelCase = boundary[1]
_UpperCAmelCase = make_points(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
_UpperCAmelCase = 0.0
y += (h / 2.0) * f(_lowerCAmelCase )
for i in x_i:
# print(i)
y += h * f(_lowerCAmelCase )
y += (h / 2.0) * f(_lowerCAmelCase )
return y
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[int]:
_UpperCAmelCase = a + h
while x < (b - h):
yield x
_UpperCAmelCase = x + h
def __lowerCamelCase ( _lowerCAmelCase ) -> List[str]: # enter your function here
_UpperCAmelCase = (x - 0) * (x - 0)
return y
def __lowerCamelCase ( ) -> Union[str, Any]:
_UpperCAmelCase = 0.0 # Lower bound of integration
_UpperCAmelCase = 1.0 # Upper bound of integration
_UpperCAmelCase = 10.0 # define number of steps or resolution
_UpperCAmelCase = [a, b] # define boundary of integration
_UpperCAmelCase = method_a(_lowerCAmelCase , _lowerCAmelCase )
print(F'''y = {y}''' )
if __name__ == "__main__":
main()
| 684 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __SCREAMING_SNAKE_CASE ( lowercase):
__SCREAMING_SNAKE_CASE : str = (UniPCMultistepScheduler,)
__SCREAMING_SNAKE_CASE : Dict = (("""num_inference_steps""", 25),)
def UpperCAmelCase__ ( self : str , **__UpperCamelCase : Any ):
_UpperCAmelCase = {
"num_train_timesteps": 1_000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"solver_order": 2,
"solver_type": "bh2",
}
config.update(**__UpperCamelCase )
return config
def UpperCAmelCase__ ( self : int , __UpperCamelCase : Any=0 , **__UpperCamelCase : Any ):
_UpperCAmelCase = dict(self.forward_default_kwargs )
_UpperCAmelCase = kwargs.pop("num_inference_steps" , __UpperCamelCase )
_UpperCAmelCase = self.dummy_sample
_UpperCAmelCase = 0.1 * sample
_UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase )
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
scheduler.set_timesteps(__UpperCamelCase )
# copy over dummy past residuals
_UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__UpperCamelCase )
_UpperCAmelCase = scheduler_class.from_pretrained(__UpperCamelCase )
new_scheduler.set_timesteps(__UpperCamelCase )
# copy over dummy past residuals
_UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
_UpperCAmelCase , _UpperCAmelCase = sample, sample
for t in range(__UpperCamelCase , time_step + scheduler.config.solver_order + 1 ):
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
_UpperCAmelCase = new_scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : Union[str, Any]=0 , **__UpperCamelCase : List[Any] ):
_UpperCAmelCase = dict(self.forward_default_kwargs )
_UpperCAmelCase = kwargs.pop("num_inference_steps" , __UpperCamelCase )
_UpperCAmelCase = self.dummy_sample
_UpperCAmelCase = 0.1 * sample
_UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase = self.get_scheduler_config()
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
scheduler.set_timesteps(__UpperCamelCase )
# copy over dummy past residuals (must be after setting timesteps)
_UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__UpperCamelCase )
_UpperCAmelCase = scheduler_class.from_pretrained(__UpperCamelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(__UpperCamelCase )
# copy over dummy past residual (must be after setting timesteps)
_UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
_UpperCAmelCase = new_scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : Dict=None , **__UpperCamelCase : Optional[Any] ):
if scheduler is None:
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase )
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase )
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
_UpperCAmelCase = 10
_UpperCAmelCase = self.dummy_model()
_UpperCAmelCase = self.dummy_sample_deter
scheduler.set_timesteps(__UpperCamelCase )
for i, t in enumerate(scheduler.timesteps ):
_UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample
return sample
def UpperCAmelCase__ ( self : List[str] ):
_UpperCAmelCase = dict(self.forward_default_kwargs )
_UpperCAmelCase = kwargs.pop("num_inference_steps" , __UpperCamelCase )
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase = self.get_scheduler_config()
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
_UpperCAmelCase = self.dummy_sample
_UpperCAmelCase = 0.1 * sample
if num_inference_steps is not None and hasattr(__UpperCamelCase , "set_timesteps" ):
scheduler.set_timesteps(__UpperCamelCase )
elif num_inference_steps is not None and not hasattr(__UpperCamelCase , "set_timesteps" ):
_UpperCAmelCase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
_UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
_UpperCAmelCase = scheduler.timesteps[5]
_UpperCAmelCase = scheduler.timesteps[6]
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCAmelCase__ ( self : Union[str, Any] ):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
_UpperCAmelCase = UniPCMultistepScheduler(**self.get_scheduler_config() )
_UpperCAmelCase = self.full_loop(scheduler=__UpperCamelCase )
_UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_mean.item() - 0.2464 ) < 1e-3
_UpperCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config )
_UpperCAmelCase = DEISMultistepScheduler.from_config(scheduler.config )
_UpperCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config )
_UpperCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config )
_UpperCAmelCase = self.full_loop(scheduler=__UpperCamelCase )
_UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_mean.item() - 0.2464 ) < 1e-3
def UpperCAmelCase__ ( self : str ):
for timesteps in [25, 50, 100, 999, 1_000]:
self.check_over_configs(num_train_timesteps=__UpperCamelCase )
def UpperCAmelCase__ ( self : int ):
self.check_over_configs(thresholding=__UpperCamelCase )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=__UpperCamelCase , prediction_type=__UpperCamelCase , sample_max_value=__UpperCamelCase , solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , )
def UpperCAmelCase__ ( self : int ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__UpperCamelCase )
def UpperCAmelCase__ ( self : int ):
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , prediction_type=__UpperCamelCase , )
_UpperCAmelCase = self.full_loop(
solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , prediction_type=__UpperCamelCase , )
assert not torch.isnan(__UpperCamelCase ).any(), "Samples have nan numbers"
def UpperCAmelCase__ ( self : Optional[int] ):
self.check_over_configs(lower_order_final=__UpperCamelCase )
self.check_over_configs(lower_order_final=__UpperCamelCase )
def UpperCAmelCase__ ( self : Optional[Any] ):
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]:
self.check_over_forward(num_inference_steps=__UpperCamelCase , time_step=0 )
def UpperCAmelCase__ ( self : List[str] ):
_UpperCAmelCase = self.full_loop()
_UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_mean.item() - 0.2464 ) < 1e-3
def UpperCAmelCase__ ( self : Optional[Any] ):
_UpperCAmelCase = self.full_loop(prediction_type="v_prediction" )
_UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_mean.item() - 0.1014 ) < 1e-3
def UpperCAmelCase__ ( self : Tuple ):
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config(thresholding=__UpperCamelCase , dynamic_thresholding_ratio=0 )
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
_UpperCAmelCase = 10
_UpperCAmelCase = self.dummy_model()
_UpperCAmelCase = self.dummy_sample_deter.half()
scheduler.set_timesteps(__UpperCamelCase )
for i, t in enumerate(scheduler.timesteps ):
_UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample
assert sample.dtype == torch.floataa
def UpperCAmelCase__ ( self : str , **__UpperCamelCase : Optional[Any] ):
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase )
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
| 684 | 1 |
import os
from collections.abc import Iterator
def __lowerCamelCase ( _lowerCAmelCase = "." ) -> Iterator[str]:
for dir_path, dir_names, filenames in os.walk(_lowerCAmelCase ):
_UpperCAmelCase = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(_lowerCAmelCase )[1] in (".py", ".ipynb"):
yield os.path.join(_lowerCAmelCase , _lowerCAmelCase ).lstrip("./" )
def __lowerCamelCase ( _lowerCAmelCase ) -> Any:
return F'''{i * " "}*''' if i else "\n##"
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> str:
_UpperCAmelCase = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(_lowerCAmelCase ) or old_parts[i] != new_part) and new_part:
print(F'''{md_prefix(_lowerCAmelCase )} {new_part.replace("_" , " " ).title()}''' )
return new_path
def __lowerCamelCase ( _lowerCAmelCase = "." ) -> None:
_UpperCAmelCase = ""
for filepath in sorted(good_file_paths(_lowerCAmelCase ) ):
_UpperCAmelCase , _UpperCAmelCase = os.path.split(_lowerCAmelCase )
if filepath != old_path:
_UpperCAmelCase = print_path(_lowerCAmelCase , _lowerCAmelCase )
_UpperCAmelCase = (filepath.count(os.sep ) + 1) if filepath else 0
_UpperCAmelCase = F'''{filepath}/{filename}'''.replace(" " , "%20" )
_UpperCAmelCase = os.path.splitext(filename.replace("_" , " " ).title() )[0]
print(F'''{md_prefix(_lowerCAmelCase )} [{filename}]({url})''' )
if __name__ == "__main__":
print_directory_md(".")
| 684 |
import math
class __SCREAMING_SNAKE_CASE :
def __init__( self : Union[str, Any] , __UpperCamelCase : List[Any]=0 ): # a graph with Node 0,1,...,N-1
_UpperCAmelCase = n
_UpperCAmelCase = [
[math.inf for j in range(0 , __UpperCamelCase )] for i in range(0 , __UpperCamelCase )
] # adjacency matrix for weight
_UpperCAmelCase = [
[math.inf for j in range(0 , __UpperCamelCase )] for i in range(0 , __UpperCamelCase )
] # dp[i][j] stores minimum distance from i to j
def UpperCAmelCase__ ( self : str , __UpperCamelCase : Tuple , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[str] ):
_UpperCAmelCase = w
def UpperCAmelCase__ ( self : Dict ):
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
_UpperCAmelCase = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def UpperCAmelCase__ ( self : List[str] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Any ):
return self.dp[u][v]
if __name__ == "__main__":
__lowerCAmelCase = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 1_0)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 1_0)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 684 | 1 |
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class __SCREAMING_SNAKE_CASE ( lowercase):
def __init__( self : List[str] , __UpperCamelCase : str = "▁" , __UpperCamelCase : bool = True , __UpperCamelCase : Union[str, AddedToken] = "<unk>" , __UpperCamelCase : Union[str, AddedToken] = "</s>" , __UpperCamelCase : Union[str, AddedToken] = "<pad>" , ):
_UpperCAmelCase = {
"pad": {"id": 0, "token": pad_token},
"eos": {"id": 1, "token": eos_token},
"unk": {"id": 2, "token": unk_token},
}
_UpperCAmelCase = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
_UpperCAmelCase = token_dict["token"]
_UpperCAmelCase = Tokenizer(Unigram() )
_UpperCAmelCase = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(" {2,}" ) , " " ),
normalizers.Lowercase(),
] )
_UpperCAmelCase = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=__UpperCamelCase , add_prefix_space=__UpperCamelCase ),
pre_tokenizers.Digits(individual_digits=__UpperCamelCase ),
pre_tokenizers.Punctuation(),
] )
_UpperCAmelCase = decoders.Metaspace(replacement=__UpperCamelCase , add_prefix_space=__UpperCamelCase )
_UpperCAmelCase = TemplateProcessing(
single=F'''$A {self.special_tokens["eos"]["token"]}''' , special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])] , )
_UpperCAmelCase = {
"model": "SentencePieceUnigram",
"replacement": replacement,
"add_prefix_space": add_prefix_space,
}
super().__init__(__UpperCamelCase , __UpperCamelCase )
def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : Union[str, List[str]] , __UpperCamelCase : int = 8_000 , __UpperCamelCase : bool = True , ):
_UpperCAmelCase = trainers.UnigramTrainer(
vocab_size=__UpperCamelCase , special_tokens=self.special_tokens_list , show_progress=__UpperCamelCase , )
if isinstance(__UpperCamelCase , __UpperCamelCase ):
_UpperCAmelCase = [files]
self._tokenizer.train(__UpperCamelCase , trainer=__UpperCamelCase )
self.add_unk_id()
def UpperCAmelCase__ ( self : Optional[Any] , __UpperCamelCase : Union[Iterator[str], Iterator[Iterator[str]]] , __UpperCamelCase : int = 8_000 , __UpperCamelCase : bool = True , ):
_UpperCAmelCase = trainers.UnigramTrainer(
vocab_size=__UpperCamelCase , special_tokens=self.special_tokens_list , show_progress=__UpperCamelCase , )
self._tokenizer.train_from_iterator(__UpperCamelCase , trainer=__UpperCamelCase )
self.add_unk_id()
def UpperCAmelCase__ ( self : Dict ):
_UpperCAmelCase = json.loads(self._tokenizer.to_str() )
_UpperCAmelCase = self.special_tokens["unk"]["id"]
_UpperCAmelCase = Tokenizer.from_str(json.dumps(__UpperCamelCase ) )
| 684 |
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( lowercase , lowercase , unittest.TestCase):
__SCREAMING_SNAKE_CASE : Dict = VQModel
__SCREAMING_SNAKE_CASE : Optional[int] = """sample"""
@property
def UpperCAmelCase__ ( self : List[str] , __UpperCamelCase : Optional[int]=(32, 32) ):
_UpperCAmelCase = 4
_UpperCAmelCase = 3
_UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__UpperCamelCase )
return {"sample": image}
@property
def UpperCAmelCase__ ( self : Tuple ):
return (3, 32, 32)
@property
def UpperCAmelCase__ ( self : str ):
return (3, 32, 32)
def UpperCAmelCase__ ( self : Dict ):
_UpperCAmelCase = {
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 3,
}
_UpperCAmelCase = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Dict ):
pass
def UpperCAmelCase__ ( self : str ):
pass
def UpperCAmelCase__ ( self : List[str] ):
_UpperCAmelCase , _UpperCAmelCase = VQModel.from_pretrained("fusing/vqgan-dummy" , output_loading_info=__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(__UpperCamelCase )
_UpperCAmelCase = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def UpperCAmelCase__ ( self : List[Any] ):
_UpperCAmelCase = VQModel.from_pretrained("fusing/vqgan-dummy" )
model.to(__UpperCamelCase ).eval()
torch.manual_seed(0 )
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0 )
_UpperCAmelCase = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
_UpperCAmelCase = image.to(__UpperCamelCase )
with torch.no_grad():
_UpperCAmelCase = model(__UpperCamelCase ).sample
_UpperCAmelCase = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
_UpperCAmelCase = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143] )
# fmt: on
self.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
| 684 | 1 |
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {"vocab_file": "spiece.model"}
__lowerCAmelCase = {
"vocab_file": {
"AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
}
}
__lowerCAmelCase = {
"AI-Sweden/gpt-sw3-126m": 2_0_4_8,
"AI-Sweden/gpt-sw3-350m": 2_0_4_8,
"AI-Sweden/gpt-sw3-1.6b": 2_0_4_8,
"AI-Sweden/gpt-sw3-6.7b": 2_0_4_8,
"AI-Sweden/gpt-sw3-20b": 2_0_4_8,
}
class __SCREAMING_SNAKE_CASE ( lowercase):
__SCREAMING_SNAKE_CASE : str = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : Dict = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : str = ["""input_ids""", """attention_mask"""]
def __init__( self : List[str] , __UpperCamelCase : Any , __UpperCamelCase : List[str]=False , __UpperCamelCase : Dict=False , __UpperCamelCase : str=False , __UpperCamelCase : Any=None , __UpperCamelCase : Dict=None , __UpperCamelCase : List[str]=None , __UpperCamelCase : Optional[Any]=None , __UpperCamelCase : Optional[Dict[str, Any]] = None , **__UpperCamelCase : List[str] , ):
_UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
_UpperCAmelCase = kwargs.get("name_or_path" )
if name_or_path is None:
logger.warning(
"name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
" you are testing the model, this can safely be ignored" )
_UpperCAmelCase = "None"
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
_UpperCAmelCase = "<|endoftext|>" if eos_token is None else eos_token
_UpperCAmelCase = "<unk>" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
_UpperCAmelCase = unk_token if pad_token is None else pad_token
_UpperCAmelCase = eos_token if bos_token is None else bos_token
else:
_UpperCAmelCase = "<pad>" if pad_token is None else pad_token
_UpperCAmelCase = "<s>" if bos_token is None else bos_token
super().__init__(
do_lower_case=__UpperCamelCase , remove_space=__UpperCamelCase , keep_accents=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , pad_token=__UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCamelCase , )
_UpperCAmelCase = do_lower_case
_UpperCAmelCase = remove_space
_UpperCAmelCase = keep_accents
_UpperCAmelCase = vocab_file
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__UpperCamelCase )
# Used for whitespace normalization in input texts
# fmt : off
_UpperCAmelCase = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
# fmt : on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
_UpperCAmelCase = re.compile(
F'''[{"".join(map(__UpperCamelCase , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8_203] ) )}]''' )
def __getstate__( self : int ):
_UpperCAmelCase = self.__dict__.copy()
_UpperCAmelCase = None
return state
def __setstate__( self : str , __UpperCamelCase : List[str] ):
_UpperCAmelCase = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
_UpperCAmelCase = {}
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def UpperCAmelCase__ ( self : List[Any] ):
return len(self.sp_model )
def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : str ):
_UpperCAmelCase = self.non_printing_characters_re.sub("" , __UpperCamelCase )
# Normalize whitespaces
_UpperCAmelCase = "".join([char if char not in self.whitespaces else " " for char in text] )
# NFC Unicode normalization
_UpperCAmelCase = unicodedata.normalize("NFC" , __UpperCamelCase )
return text
def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : str , **__UpperCamelCase : List[Any] ):
_UpperCAmelCase = self.preprocess_text(__UpperCamelCase )
return self.sp_model.encode(__UpperCamelCase , out_type=__UpperCamelCase )
def UpperCAmelCase__ ( self : Any , __UpperCamelCase : str ):
return self.sp_model.PieceToId(__UpperCamelCase )
def UpperCAmelCase__ ( self : Optional[int] , __UpperCamelCase : int ):
return self.sp_model.IdToPiece(__UpperCamelCase )
@staticmethod
def UpperCAmelCase__ ( __UpperCamelCase : str ):
return out_string
def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : List[str] ):
_UpperCAmelCase = []
_UpperCAmelCase = ""
_UpperCAmelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__UpperCamelCase ) + token
_UpperCAmelCase = True
_UpperCAmelCase = []
else:
current_sub_tokens.append(__UpperCamelCase )
_UpperCAmelCase = False
out_string += self.sp_model.decode(__UpperCamelCase )
return out_string
def UpperCAmelCase__ ( self : Dict ):
_UpperCAmelCase = {self.convert_ids_to_tokens(__UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : str , __UpperCamelCase : Optional[str] = None ):
if not os.path.isdir(__UpperCamelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_UpperCAmelCase = os.path.join(
__UpperCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCamelCase , "wb" ) as fi:
_UpperCAmelCase = self.sp_model.serialized_model_proto()
fi.write(__UpperCamelCase )
return (out_vocab_file,)
def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : Union[str, List[str]] , __UpperCamelCase : Union[str, bool] = False ):
if isinstance(__UpperCamelCase , __UpperCamelCase ):
_UpperCAmelCase = self.preprocess_text(__UpperCamelCase )
_UpperCAmelCase = self.sp_model.encode(__UpperCamelCase )
else:
_UpperCAmelCase = [self.preprocess_text(__UpperCamelCase ) for t in text]
_UpperCAmelCase = self.sp_model.encode(__UpperCamelCase )
if return_tensors is True or return_tensors == "pt":
_UpperCAmelCase = torch.tensor(__UpperCamelCase )
return token_ids
def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : Union[int, List[int]] ):
return self.sp_model.decode(__UpperCamelCase )
def UpperCAmelCase__ ( self : List[Any] , __UpperCamelCase : "Conversation" ):
_UpperCAmelCase = [F'''User: {text}''' if is_user else F'''Bot: {text}''' for is_user, text in conversation.iter_texts()]
_UpperCAmelCase = (
F'''{self.eos_token}{self.bos_token}''' + F'''{self.bos_token}'''.join(__UpperCamelCase ) + F'''{self.bos_token}Bot:'''
)
return self.encode(text=__UpperCamelCase )
| 684 |
import requests
__lowerCAmelCase = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="
def __lowerCamelCase ( _lowerCAmelCase ) -> None:
# fetching a list of articles in json format
_UpperCAmelCase = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page["articles"] , 1 ):
print(F'''{i}.) {article["title"]}''' )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
| 684 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
def __init__( self : Optional[int] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[int]=7 , __UpperCamelCase : str=3 , __UpperCamelCase : Union[str, Any]=18 , __UpperCamelCase : str=30 , __UpperCamelCase : Optional[Any]=400 , __UpperCamelCase : List[str]=True , __UpperCamelCase : Union[str, Any]=None , __UpperCamelCase : int=True , __UpperCamelCase : Union[str, Any]=None , __UpperCamelCase : Optional[Any]=True , ):
_UpperCAmelCase = size if size is not None else {"shortest_edge": 20}
_UpperCAmelCase = crop_size if crop_size is not None else {"height": 18, "width": 18}
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = image_size
_UpperCAmelCase = min_resolution
_UpperCAmelCase = max_resolution
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = do_center_crop
_UpperCAmelCase = crop_size
_UpperCAmelCase = do_flip_channel_order
def UpperCAmelCase__ ( self : List[str] ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( lowercase , unittest.TestCase):
__SCREAMING_SNAKE_CASE : Union[str, Any] = MobileViTImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self : Tuple ):
_UpperCAmelCase = MobileViTImageProcessingTester(self )
@property
def UpperCAmelCase__ ( self : Optional[Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self : str ):
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCamelCase , "do_resize" ) )
self.assertTrue(hasattr(__UpperCamelCase , "size" ) )
self.assertTrue(hasattr(__UpperCamelCase , "do_center_crop" ) )
self.assertTrue(hasattr(__UpperCamelCase , "center_crop" ) )
self.assertTrue(hasattr(__UpperCamelCase , "do_flip_channel_order" ) )
def UpperCAmelCase__ ( self : Optional[Any] ):
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 20} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def UpperCAmelCase__ ( self : Union[str, Any] ):
pass
def UpperCAmelCase__ ( self : Any ):
# Initialize image_processing
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , Image.Image )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
_UpperCAmelCase = image_processing(__UpperCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def UpperCAmelCase__ ( self : Dict ):
# Initialize image_processing
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , numpify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , np.ndarray )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
_UpperCAmelCase = image_processing(__UpperCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def UpperCAmelCase__ ( self : Any ):
# Initialize image_processing
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , torchify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , torch.Tensor )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
_UpperCAmelCase = image_processing(__UpperCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 684 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
def UpperCAmelCase__ ( self : Any ):
_UpperCAmelCase = 10
def UpperCAmelCase__ ( self : Optional[int] ):
_UpperCAmelCase = [1, 2, 3, 4]
_UpperCAmelCase = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(__UpperCamelCase , self.block_size , 0 ) , __UpperCamelCase )
def UpperCAmelCase__ ( self : List[Any] ):
_UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
_UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(__UpperCamelCase , self.block_size , 0 ) , __UpperCamelCase )
def UpperCAmelCase__ ( self : int ):
_UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
_UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(__UpperCamelCase , self.block_size , 0 ) , __UpperCamelCase )
def UpperCAmelCase__ ( self : Union[str, Any] ):
_UpperCAmelCase = "It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this."
_UpperCAmelCase , _UpperCAmelCase = process_story(__UpperCamelCase )
self.assertEqual(__UpperCamelCase , [] )
def UpperCAmelCase__ ( self : str ):
_UpperCAmelCase = ""
_UpperCAmelCase , _UpperCAmelCase = process_story(__UpperCamelCase )
self.assertEqual(__UpperCamelCase , [] )
self.assertEqual(__UpperCamelCase , [] )
def UpperCAmelCase__ ( self : Optional[Any] ):
_UpperCAmelCase = (
"It was the year of Our Lord one thousand seven hundred and "
"seventy-five\n\nSpiritual revelations were conceded to England "
"at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
)
_UpperCAmelCase , _UpperCAmelCase = process_story(__UpperCamelCase )
_UpperCAmelCase = [
"It was the year of Our Lord one thousand seven hundred and seventy-five.",
"Spiritual revelations were conceded to England at that favoured period, as at this.",
]
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = ["It was the best of times."]
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def UpperCAmelCase__ ( self : Dict ):
_UpperCAmelCase = torch.tensor([1, 2, 3, 4] )
_UpperCAmelCase = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(__UpperCamelCase , 0 ).numpy() , expected.numpy() )
def UpperCAmelCase__ ( self : Optional[Any] ):
_UpperCAmelCase = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
_UpperCAmelCase = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(__UpperCamelCase , 23 ).numpy() , expected.numpy() )
def UpperCAmelCase__ ( self : Optional[int] ):
_UpperCAmelCase = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
_UpperCAmelCase = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(__UpperCamelCase , 1 ).numpy() , expected.numpy() )
def UpperCAmelCase__ ( self : Dict ):
_UpperCAmelCase = 101
_UpperCAmelCase = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
_UpperCAmelCase = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
_UpperCAmelCase = compute_token_type_ids(__UpperCamelCase , __UpperCamelCase )
np.testing.assert_array_equal(__UpperCamelCase , __UpperCamelCase )
| 684 | 1 |
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
__lowerCAmelCase = getLogger(__name__)
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 8 , _lowerCAmelCase = 1_024 , _lowerCAmelCase="val" , _lowerCAmelCase=None , _lowerCAmelCase=False , _lowerCAmelCase="summarization" , _lowerCAmelCase=None , _lowerCAmelCase=1 , _lowerCAmelCase = None , _lowerCAmelCase="" , **_lowerCAmelCase , ) -> Dict:
_UpperCAmelCase = str(_lowerCAmelCase )
assert local_rank is not None
torch.distributed.init_process_group(backend="nccl" , rank=_lowerCAmelCase )
_UpperCAmelCase = Path(_lowerCAmelCase )
_UpperCAmelCase = save_dir.joinpath(F'''rank_{local_rank}_output.json''' )
torch.cuda.set_device(_lowerCAmelCase )
_UpperCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(_lowerCAmelCase ).cuda()
if fpaa:
_UpperCAmelCase = model.half()
# determine if we need to increase num_beams
use_task_specific_params(_lowerCAmelCase , _lowerCAmelCase ) # update config with task specific params
_UpperCAmelCase = generate_kwargs.pop("num_beams" , model.config.num_beams ) # AttributeError risk?
if num_return_sequences > num_beams:
_UpperCAmelCase = num_return_sequences
_UpperCAmelCase = AutoTokenizer.from_pretrained(_lowerCAmelCase )
logger.info(F'''Inferred tokenizer type: {tokenizer.__class__}''' ) # if this is wrong, check config.model_type.
if max_source_length is None:
_UpperCAmelCase = tokenizer.model_max_length
if prefix is None:
_UpperCAmelCase = prefix or getattr(model.config , "prefix" , "" ) or ""
_UpperCAmelCase = SeqaSeqDataset(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , max_target_length=1_024 , type_path=_lowerCAmelCase , n_obs=_lowerCAmelCase , prefix=_lowerCAmelCase , **_lowerCAmelCase , )
# I set shuffle=True for a more accurate progress bar.
# If all the longest samples are first, the prog bar estimate is too high at the beginning.
_UpperCAmelCase = ds.make_sortish_sampler(_lowerCAmelCase , distributed=_lowerCAmelCase , add_extra_examples=_lowerCAmelCase , shuffle=_lowerCAmelCase )
_UpperCAmelCase = DataLoader(_lowerCAmelCase , sampler=_lowerCAmelCase , batch_size=_lowerCAmelCase , collate_fn=ds.collate_fn )
_UpperCAmelCase = []
for batch in tqdm(_lowerCAmelCase ):
_UpperCAmelCase = model.generate(
input_ids=batch["input_ids"].to(model.device ) , attention_mask=batch["attention_mask"].to(model.device ) , num_return_sequences=_lowerCAmelCase , num_beams=_lowerCAmelCase , **_lowerCAmelCase , )
_UpperCAmelCase = tokenizer.batch_decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase )
_UpperCAmelCase = batch["ids"]
if num_return_sequences > 1:
_UpperCAmelCase = chunks(_lowerCAmelCase , _lowerCAmelCase ) # batch size chunks, each of size num_return_seq
for i, pred in enumerate(_lowerCAmelCase ):
results.append({"pred": pred, "id": ids[i].item()} )
save_json(_lowerCAmelCase , _lowerCAmelCase )
return results, sampler.num_replicas
def __lowerCamelCase ( ) -> Optional[int]:
_UpperCAmelCase = argparse.ArgumentParser(
epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate" )
parser.add_argument("--data_dir" , type=_lowerCAmelCase , help="like cnn_dm/test.source" )
parser.add_argument(
"--model_name" , type=_lowerCAmelCase , help="like facebook/bart-large-cnn,t5-base, etc." , default="sshleifer/distilbart-xsum-12-3" , )
parser.add_argument("--save_dir" , type=_lowerCAmelCase , help="where to save" , default="tmp_gen" )
parser.add_argument("--max_source_length" , type=_lowerCAmelCase , default=_lowerCAmelCase )
parser.add_argument(
"--type_path" , type=_lowerCAmelCase , default="test" , help="which subset to evaluate typically train/val/test" )
parser.add_argument("--task" , type=_lowerCAmelCase , default="summarization" , help="used for task_specific_params + metrics" )
parser.add_argument("--bs" , type=_lowerCAmelCase , default=8 , required=_lowerCAmelCase , help="batch size" )
parser.add_argument(
"--local_rank" , type=_lowerCAmelCase , default=-1 , required=_lowerCAmelCase , help="should be passed by distributed.launch" )
parser.add_argument(
"--n_obs" , type=_lowerCAmelCase , default=_lowerCAmelCase , required=_lowerCAmelCase , help="How many observations. Defaults to all." )
parser.add_argument(
"--num_return_sequences" , type=_lowerCAmelCase , default=1 , required=_lowerCAmelCase , help="How many sequences to return" )
parser.add_argument(
"--sync_timeout" , type=_lowerCAmelCase , default=600 , required=_lowerCAmelCase , help="How long should master process wait for other processes to finish." , )
parser.add_argument("--src_lang" , type=_lowerCAmelCase , default=_lowerCAmelCase , required=_lowerCAmelCase )
parser.add_argument("--tgt_lang" , type=_lowerCAmelCase , default=_lowerCAmelCase , required=_lowerCAmelCase )
parser.add_argument(
"--prefix" , type=_lowerCAmelCase , required=_lowerCAmelCase , default=_lowerCAmelCase , help="will be added to the begininng of src examples" )
parser.add_argument("--fp16" , action="store_true" )
parser.add_argument("--debug" , action="store_true" )
_UpperCAmelCase = time.time()
_UpperCAmelCase , _UpperCAmelCase = parser.parse_known_args()
_UpperCAmelCase = parse_numeric_n_bool_cl_kwargs(_lowerCAmelCase )
if generate_kwargs and args.local_rank <= 0:
print(F'''parsed the following generate kwargs: {generate_kwargs}''' )
_UpperCAmelCase = Path(args.save_dir + "_tmp" )
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase ) # this handles locking.
_UpperCAmelCase = list(json_save_dir.glob("rank_*.json" ) )
if intermediate_files:
raise ValueError(F'''Found files at {json_save_dir} please move or remove them.''' )
# In theory, a node could finish and save before another node hits this. If this happens, we can address later.
_UpperCAmelCase = {}
if args.src_lang is not None:
_UpperCAmelCase = args.src_lang
if args.tgt_lang is not None:
_UpperCAmelCase = args.tgt_lang
Path(args.save_dir ).mkdir(exist_ok=_lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase = eval_data_dir(
args.data_dir , _lowerCAmelCase , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=_lowerCAmelCase , **_lowerCAmelCase , )
if args.local_rank <= 0:
_UpperCAmelCase = Path(args.save_dir )
save_dir.mkdir(exist_ok=_lowerCAmelCase )
_UpperCAmelCase = gather_results_from_each_node(_lowerCAmelCase , _lowerCAmelCase , args.sync_timeout )
_UpperCAmelCase = combine_partial_results(_lowerCAmelCase )
if args.num_return_sequences > 1:
_UpperCAmelCase = save_dir.joinpath("pseudolabel_results.json" )
print(F'''Saving aggregated results at {save_path}, intermediate in {json_save_dir}/''' )
save_json(_lowerCAmelCase , _lowerCAmelCase )
return
_UpperCAmelCase = Path(args.data_dir ).joinpath(args.type_path + ".target" )
with open(_lowerCAmelCase ) as f:
_UpperCAmelCase = [x.rstrip() for x in f.readlines()][: len(_lowerCAmelCase )]
# Calculate metrics, save metrics, and save _generations.txt
_UpperCAmelCase = "translation" in args.task
_UpperCAmelCase = calculate_bleu if calc_bleu else calculate_rouge
_UpperCAmelCase = "bleu" if calc_bleu else "rouge"
_UpperCAmelCase = score_fn(_lowerCAmelCase , _lowerCAmelCase )
_UpperCAmelCase = len(_lowerCAmelCase )
_UpperCAmelCase = time.time() - start_time
_UpperCAmelCase = round(runtime / metrics["n_obs"] , 4 )
_UpperCAmelCase = num_replicas
# TODO(@stas00): add whatever metadata to metrics
_UpperCAmelCase = save_dir.joinpath(F'''{args.type_path}_{metric_name}.json''' )
save_json(_lowerCAmelCase , _lowerCAmelCase , indent=_lowerCAmelCase )
print(_lowerCAmelCase )
write_txt_file(_lowerCAmelCase , save_dir.joinpath(F'''{args.type_path}_generations.txt''' ) )
if args.debug:
write_txt_file(_lowerCAmelCase , save_dir.joinpath(F'''{args.type_path}.target''' ) )
else:
shutil.rmtree(_lowerCAmelCase )
def __lowerCamelCase ( _lowerCAmelCase ) -> List:
_UpperCAmelCase = []
for partial_result in partial_results:
records.extend(_lowerCAmelCase )
_UpperCAmelCase = sorted(_lowerCAmelCase , key=lambda _lowerCAmelCase : x["id"] )
_UpperCAmelCase = [x["pred"] for x in records]
return preds
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[Dict[str, List]]:
# WAIT FOR lots of .json files
_UpperCAmelCase = time.time()
logger.info("waiting for all nodes to finish" )
_UpperCAmelCase = None
while (time.time() - start_wait) < timeout:
_UpperCAmelCase = list(save_dir.glob("rank_*.json" ) )
if len(_lowerCAmelCase ) < num_replicas:
continue
try:
# make sure all json files are fully saved
_UpperCAmelCase = lmap(_lowerCAmelCase , _lowerCAmelCase )
return json_data
except JSONDecodeError:
continue
else:
raise TimeoutError("Rank 0 gave up on waiting for other processes" )
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
| 684 |
from __future__ import annotations
from collections import namedtuple
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> tuple:
_UpperCAmelCase = namedtuple("result" , "name value" )
if (voltage, current, power).count(0 ) != 1:
raise ValueError("Only one argument must be 0" )
elif power < 0:
raise ValueError(
"Power cannot be negative in any electrical/electronics system" )
elif voltage == 0:
return result("voltage" , power / current )
elif current == 0:
return result("current" , power / voltage )
elif power == 0:
return result("power" , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 684 | 1 |
from __future__ import annotations
__lowerCAmelCase = [-1_0, -5, 0, 5, 5.1, 1_1, 1_3, 2_1, 3, 4, -2_1, -1_0, -5, -1, 0]
__lowerCAmelCase = [-5, 0, 5, 5.1, 1_1, 1_3, 2_1, -1, 4, -1, -1_0, -5, -1, 0, -1]
def __lowerCamelCase ( _lowerCAmelCase ) -> list[float]:
_UpperCAmelCase = []
_UpperCAmelCase = len(_lowerCAmelCase )
for i in range(_lowerCAmelCase ):
_UpperCAmelCase = -1
for j in range(i + 1 , _lowerCAmelCase ):
if arr[i] < arr[j]:
_UpperCAmelCase = arr[j]
break
result.append(_lowerCAmelCase )
return result
def __lowerCamelCase ( _lowerCAmelCase ) -> list[float]:
_UpperCAmelCase = []
for i, outer in enumerate(_lowerCAmelCase ):
_UpperCAmelCase = -1
for inner in arr[i + 1 :]:
if outer < inner:
_UpperCAmelCase = inner
break
result.append(_lowerCAmelCase )
return result
def __lowerCamelCase ( _lowerCAmelCase ) -> list[float]:
_UpperCAmelCase = len(_lowerCAmelCase )
_UpperCAmelCase = []
_UpperCAmelCase = [-1] * arr_size
for index in reversed(range(_lowerCAmelCase ) ):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
if stack:
_UpperCAmelCase = stack[-1]
stack.append(arr[index] )
return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
__lowerCAmelCase = (
"from __main__ import arr, next_greatest_element_slow, "
"next_greatest_element_fast, next_greatest_element"
)
print(
"next_greatest_element_slow():",
timeit("next_greatest_element_slow(arr)", setup=setup),
)
print(
"next_greatest_element_fast():",
timeit("next_greatest_element_fast(arr)", setup=setup),
)
print(
" next_greatest_element():",
timeit("next_greatest_element(arr)", setup=setup),
)
| 684 |
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def __lowerCamelCase ( _lowerCAmelCase ) -> Any:
_UpperCAmelCase = {}
_UpperCAmelCase = job["started_at"]
_UpperCAmelCase = job["completed_at"]
_UpperCAmelCase = date_parser.parse(_lowerCAmelCase )
_UpperCAmelCase = date_parser.parse(_lowerCAmelCase )
_UpperCAmelCase = round((end_datetime - start_datetime).total_seconds() / 60.0 )
_UpperCAmelCase = start
_UpperCAmelCase = end
_UpperCAmelCase = duration_in_min
return job_info
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase=None ) -> str:
_UpperCAmelCase = None
if token is not None:
_UpperCAmelCase = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}
_UpperCAmelCase = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
_UpperCAmelCase = requests.get(_lowerCAmelCase , headers=_lowerCAmelCase ).json()
_UpperCAmelCase = {}
try:
job_time.update({job["name"]: extract_time_from_single_job(_lowerCAmelCase ) for job in result["jobs"]} )
_UpperCAmelCase = math.ceil((result["total_count"] - 100) / 100 )
for i in range(_lowerCAmelCase ):
_UpperCAmelCase = requests.get(url + F'''&page={i + 2}''' , headers=_lowerCAmelCase ).json()
job_time.update({job["name"]: extract_time_from_single_job(_lowerCAmelCase ) for job in result["jobs"]} )
return job_time
except Exception:
print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
__lowerCAmelCase = parser.parse_args()
__lowerCAmelCase = get_job_time(args.workflow_run_id)
__lowerCAmelCase = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(F'''{k}: {v["duration"]}''')
| 684 | 1 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class lowerCamelCase_ :
a__ = None
a__ = False
a__ = False
a__ = False
a__ = None
a__ = None
a__ = False
a__ = False
a__ = False
a__ = True
a__ = None
a__ = 1
a__ = None
a__ = False
a__ = None
a__ = None
def A ( self ):
"""simple docstring"""
return self.__class__(**{k: copy.deepcopy(__lowerCAmelCase ) for k, v in self.__dict__.items()} )
| 0 |
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
__lowerCAmelCase = {
"gwf-440k": {
"url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
"sample_rate": 4_8_0_0_0,
"sample_size": 6_5_5_3_6,
},
"jmann-small-190k": {
"url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
"sample_rate": 4_8_0_0_0,
"sample_size": 6_5_5_3_6,
},
"jmann-large-580k": {
"url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
"sample_rate": 4_8_0_0_0,
"sample_size": 1_3_1_0_7_2,
},
"maestro-uncond-150k": {
"url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
"sample_rate": 1_6_0_0_0,
"sample_size": 6_5_5_3_6,
},
"unlocked-uncond-250k": {
"url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
"sample_rate": 1_6_0_0_0,
"sample_size": 6_5_5_3_6,
},
"honk-140k": {
"url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
"sample_rate": 1_6_0_0_0,
"sample_size": 6_5_5_3_6,
},
}
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
return torch.atana(_lowerCAmelCase , _lowerCAmelCase ) / math.pi * 2
def __lowerCamelCase ( _lowerCAmelCase ) -> Union[str, Any]:
_UpperCAmelCase = torch.sin(t * math.pi / 2 ) ** 2
_UpperCAmelCase = (1 - sigma**2) ** 0.5
return alpha_sigma_to_t(_lowerCAmelCase , _lowerCAmelCase )
class __SCREAMING_SNAKE_CASE ( lowercase):
pass
class __SCREAMING_SNAKE_CASE ( nn.Module):
def __init__( self : str , __UpperCamelCase : Optional[int] ):
super().__init__()
_UpperCAmelCase = DiffusionAttnUnetaD(__UpperCamelCase , n_attn_layers=4 )
_UpperCAmelCase = deepcopy(self.diffusion )
_UpperCAmelCase = torch.quasirandom.SobolEngine(1 , scramble=__UpperCamelCase )
def __lowerCamelCase ( _lowerCAmelCase ) -> int:
_UpperCAmelCase = MODELS_MAP[model_name]["url"]
os.system(F'''wget {url} ./''' )
return F'''./{model_name}.ckpt'''
__lowerCAmelCase = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
}
__lowerCAmelCase = {
"8": "resnets.0",
"9": "attentions.0",
"10": "resnets.1",
"11": "attentions.1",
"12": "resnets.2",
"13": "attentions.2",
}
__lowerCAmelCase = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
"8": "resnets.3",
"9": "attentions.3",
"10": "resnets.4",
"11": "attentions.4",
"12": "resnets.5",
"13": "attentions.5",
}
__lowerCAmelCase = {
"0": "resnets.0",
"1": "resnets.1",
"2": "resnets.2",
"4": "resnets.0",
"5": "resnets.1",
"6": "resnets.2",
}
__lowerCAmelCase = {
"skip": "conv_skip",
"main.0": "conv_1",
"main.1": "group_norm_1",
"main.3": "conv_2",
"main.4": "group_norm_2",
}
__lowerCAmelCase = {
"norm": "group_norm",
"qkv_proj": ["query", "key", "value"],
"out_proj": ["proj_attn"],
}
def __lowerCamelCase ( _lowerCAmelCase ) -> List[str]:
if name.startswith("skip" ):
return name.replace("skip" , RES_CONV_MAP["skip"] )
# name has to be of format main.{digit}
if not name.startswith("main." ):
raise ValueError(F'''ResConvBlock error with {name}''' )
return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def __lowerCamelCase ( _lowerCAmelCase ) -> Optional[Any]:
for key, value in ATTN_MAP.items():
if name.startswith(_lowerCAmelCase ) and not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
return name.replace(_lowerCAmelCase , _lowerCAmelCase )
elif name.startswith(_lowerCAmelCase ):
return [name.replace(_lowerCAmelCase , _lowerCAmelCase ) for v in value]
raise ValueError(F'''Attn error with {name}''' )
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase=13 ) -> List[Any]:
_UpperCAmelCase = input_string
if string.split("." )[0] == "timestep_embed":
return string.replace("timestep_embed" , "time_proj" )
_UpperCAmelCase = 0
if string.startswith("net.3." ):
depth += 1
_UpperCAmelCase = string[6:]
elif string.startswith("net." ):
_UpperCAmelCase = string[4:]
while string.startswith("main.7." ):
depth += 1
_UpperCAmelCase = string[7:]
if string.startswith("main." ):
_UpperCAmelCase = string[5:]
# mid block
if string[:2].isdigit():
_UpperCAmelCase = string[:2]
_UpperCAmelCase = string[2:]
else:
_UpperCAmelCase = string[0]
_UpperCAmelCase = string[1:]
if depth == max_depth:
_UpperCAmelCase = MID_NUM_TO_LAYER[layer_num]
_UpperCAmelCase = "mid_block"
elif depth > 0 and int(_lowerCAmelCase ) < 7:
_UpperCAmelCase = DOWN_NUM_TO_LAYER[layer_num]
_UpperCAmelCase = F'''down_blocks.{depth}'''
elif depth > 0 and int(_lowerCAmelCase ) > 7:
_UpperCAmelCase = UP_NUM_TO_LAYER[layer_num]
_UpperCAmelCase = F'''up_blocks.{max_depth - depth - 1}'''
elif depth == 0:
_UpperCAmelCase = DEPTH_0_TO_LAYER[layer_num]
_UpperCAmelCase = F'''up_blocks.{max_depth - 1}''' if int(_lowerCAmelCase ) > 3 else "down_blocks.0"
if not string_left.startswith("." ):
raise ValueError(F'''Naming error with {input_string} and string_left: {string_left}.''' )
_UpperCAmelCase = string_left[1:]
if "resnets" in new_layer:
_UpperCAmelCase = convert_resconv_naming(_lowerCAmelCase )
elif "attentions" in new_layer:
_UpperCAmelCase = convert_attn_naming(_lowerCAmelCase )
_UpperCAmelCase = new_string_left
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_UpperCAmelCase = prefix + "." + new_layer + "." + string_left
else:
_UpperCAmelCase = [prefix + "." + new_layer + "." + s for s in string_left]
return new_string
def __lowerCamelCase ( _lowerCAmelCase ) -> Optional[int]:
_UpperCAmelCase = {}
for k, v in state_dict.items():
if k.endswith("kernel" ):
# up- and downsample layers, don't have trainable weights
continue
_UpperCAmelCase = rename(_lowerCAmelCase )
# check if we need to transform from Conv => Linear for attention
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_UpperCAmelCase = transform_conv_attns(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
else:
_UpperCAmelCase = v
return new_state_dict
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]:
if len(_lowerCAmelCase ) == 1:
if len(v.shape ) == 3:
# weight
_UpperCAmelCase = v[:, :, 0]
else:
# bias
_UpperCAmelCase = v
else:
# qkv matrices
_UpperCAmelCase = v.shape[0]
_UpperCAmelCase = trippled_shape // 3
for i in range(3 ):
if len(v.shape ) == 3:
_UpperCAmelCase = v[i * single_shape : (i + 1) * single_shape, :, 0]
else:
_UpperCAmelCase = v[i * single_shape : (i + 1) * single_shape]
return new_state_dict
def __lowerCamelCase ( _lowerCAmelCase ) -> Tuple:
_UpperCAmelCase = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
_UpperCAmelCase = args.model_path.split("/" )[-1].split("." )[0]
if not os.path.isfile(args.model_path ):
assert (
model_name == args.model_path
), F'''Make sure to provide one of the official model names {MODELS_MAP.keys()}'''
_UpperCAmelCase = download(_lowerCAmelCase )
_UpperCAmelCase = MODELS_MAP[model_name]["sample_rate"]
_UpperCAmelCase = MODELS_MAP[model_name]["sample_size"]
_UpperCAmelCase = Object()
_UpperCAmelCase = sample_size
_UpperCAmelCase = sample_rate
_UpperCAmelCase = 0
_UpperCAmelCase = UNetaDModel(sample_size=_lowerCAmelCase , sample_rate=_lowerCAmelCase )
_UpperCAmelCase = diffusers_model.state_dict()
_UpperCAmelCase = DiffusionUncond(_lowerCAmelCase )
orig_model.load_state_dict(torch.load(args.model_path , map_location=_lowerCAmelCase )["state_dict"] )
_UpperCAmelCase = orig_model.diffusion_ema.eval()
_UpperCAmelCase = orig_model.state_dict()
_UpperCAmelCase = rename_orig_weights(_lowerCAmelCase )
_UpperCAmelCase = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
_UpperCAmelCase = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
assert len(_lowerCAmelCase ) == 0, F'''Problem with {renamed_minus_diffusers}'''
assert all(k.endswith("kernel" ) for k in list(_lowerCAmelCase ) ), F'''Problem with {diffusers_minus_renamed}'''
for key, value in renamed_state_dict.items():
assert (
diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
), F'''Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'''
if key == "time_proj.weight":
_UpperCAmelCase = value.squeeze()
_UpperCAmelCase = value
diffusers_model.load_state_dict(_lowerCAmelCase )
_UpperCAmelCase = 100
_UpperCAmelCase = 33
_UpperCAmelCase = IPNDMScheduler(num_train_timesteps=_lowerCAmelCase )
_UpperCAmelCase = torch.manual_seed(_lowerCAmelCase )
_UpperCAmelCase = torch.randn([1, 2, config.sample_size] , generator=_lowerCAmelCase ).to(_lowerCAmelCase )
_UpperCAmelCase = torch.linspace(1 , 0 , steps + 1 , device=_lowerCAmelCase )[:-1]
_UpperCAmelCase = get_crash_schedule(_lowerCAmelCase )
_UpperCAmelCase = DanceDiffusionPipeline(unet=_lowerCAmelCase , scheduler=_lowerCAmelCase )
_UpperCAmelCase = torch.manual_seed(33 )
_UpperCAmelCase = pipe(num_inference_steps=_lowerCAmelCase , generator=_lowerCAmelCase ).audios
_UpperCAmelCase = sampling.iplms_sample(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , {} )
_UpperCAmelCase = generated.clamp(-1 , 1 )
_UpperCAmelCase = (generated - audio).abs().sum()
_UpperCAmelCase = (generated - audio).abs().max()
if args.save:
pipe.save_pretrained(args.checkpoint_path )
print("Diff sum" , _lowerCAmelCase )
print("Diff max" , _lowerCAmelCase )
assert diff_max < 1E-3, F'''Diff max: {diff_max} is too much :-/'''
print(F'''Conversion for {model_name} successful!''' )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
parser.add_argument(
"--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
)
parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
__lowerCAmelCase = parser.parse_args()
main(args)
| 684 | 0 |
def _A ( _lowercase ) -> str:
"""simple docstring"""
if number > 0:
raise ValueError('input must be a negative integer' )
__UpperCamelCase = len(bin(_lowercase )[3:] )
__UpperCamelCase = bin(abs(_lowercase ) - (1 << binary_number_length) )[3:]
__UpperCamelCase = (
(
'1'
+ '0' * (binary_number_length - len(_lowercase ))
+ twos_complement_number
)
if number < 0
else '0'
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 1 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
__lowerCAmelCase = get_tests_dir("fixtures")
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
def UpperCAmelCase__ ( self : Dict ):
# A mock response for an HTTP head request to emulate server down
_UpperCAmelCase = mock.Mock()
_UpperCAmelCase = 500
_UpperCAmelCase = {}
_UpperCAmelCase = HTTPError
_UpperCAmelCase = {}
# Download this model to make sure it's in the cache.
_UpperCAmelCase = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=__UpperCamelCase ) as mock_head:
_UpperCAmelCase = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit" )
# This check we did call the fake head request
mock_head.assert_called()
def UpperCAmelCase__ ( self : List[Any] ):
# This test is for deprecated behavior and can be removed in v5
_UpperCAmelCase = ViTImageProcessor.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json" )
def UpperCAmelCase__ ( self : Dict ):
with self.assertRaises(__UpperCamelCase ):
# config is in subfolder, the following should not work without specifying the subfolder
_UpperCAmelCase = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants" )
_UpperCAmelCase = AutoImageProcessor.from_pretrained(
"hf-internal-testing/stable-diffusion-all-variants" , subfolder="feature_extractor" )
self.assertIsNotNone(__UpperCamelCase )
@is_staging_test
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
@classmethod
def UpperCAmelCase__ ( cls : str ):
_UpperCAmelCase = TOKEN
HfFolder.save_token(__UpperCamelCase )
@classmethod
def UpperCAmelCase__ ( cls : Optional[Any] ):
try:
delete_repo(token=cls._token , repo_id="test-image-processor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-image-processor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-image-processor" )
except HTTPError:
pass
def UpperCAmelCase__ ( self : Union[str, Any] ):
_UpperCAmelCase = ViTImageProcessor.from_pretrained(__UpperCamelCase )
image_processor.push_to_hub("test-image-processor" , use_auth_token=self._token )
_UpperCAmelCase = ViTImageProcessor.from_pretrained(F'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-image-processor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__UpperCamelCase , repo_id="test-image-processor" , push_to_hub=__UpperCamelCase , use_auth_token=self._token )
_UpperCAmelCase = ViTImageProcessor.from_pretrained(F'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
def UpperCAmelCase__ ( self : Union[str, Any] ):
_UpperCAmelCase = ViTImageProcessor.from_pretrained(__UpperCamelCase )
image_processor.push_to_hub("valid_org/test-image-processor" , use_auth_token=self._token )
_UpperCAmelCase = ViTImageProcessor.from_pretrained("valid_org/test-image-processor" )
for k, v in image_processor.__dict__.items():
self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-image-processor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__UpperCamelCase , repo_id="valid_org/test-image-processor-org" , push_to_hub=__UpperCamelCase , use_auth_token=self._token )
_UpperCAmelCase = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org" )
for k, v in image_processor.__dict__.items():
self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
def UpperCAmelCase__ ( self : int ):
CustomImageProcessor.register_for_auto_class()
_UpperCAmelCase = CustomImageProcessor.from_pretrained(__UpperCamelCase )
image_processor.push_to_hub("test-dynamic-image-processor" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"} , )
_UpperCAmelCase = AutoImageProcessor.from_pretrained(
F'''{USER}/test-dynamic-image-processor''' , trust_remote_code=__UpperCamelCase )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , "CustomImageProcessor" )
| 684 | 0 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"""speechbrain/m-ctc-t-large""": """https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json""",
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class lowerCamelCase__ ( _A):
"""simple docstring"""
a__ : List[Any] = "mctct"
def __init__( self : Optional[Any] , __lowerCAmelCase : Dict=80_65 , __lowerCAmelCase : Any=15_36 , __lowerCAmelCase : Optional[int]=36 , __lowerCAmelCase : Dict=61_44 , __lowerCAmelCase : Union[str, Any]=4 , __lowerCAmelCase : Union[str, Any]=3_84 , __lowerCAmelCase : Dict=9_20 , __lowerCAmelCase : Optional[Any]=1E-5 , __lowerCAmelCase : Optional[int]=0.3 , __lowerCAmelCase : Any="relu" , __lowerCAmelCase : str=0.02 , __lowerCAmelCase : Optional[Any]=0.3 , __lowerCAmelCase : int=0.3 , __lowerCAmelCase : Optional[int]=1 , __lowerCAmelCase : Optional[int]=0 , __lowerCAmelCase : Any=2 , __lowerCAmelCase : List[Any]=1 , __lowerCAmelCase : Tuple=0.3 , __lowerCAmelCase : Union[str, Any]=1 , __lowerCAmelCase : List[str]=(7,) , __lowerCAmelCase : Any=(3,) , __lowerCAmelCase : List[Any]=80 , __lowerCAmelCase : List[str]=1 , __lowerCAmelCase : Optional[int]=None , __lowerCAmelCase : str="sum" , __lowerCAmelCase : Tuple=False , **__lowerCAmelCase : int , ) -> Tuple:
super().__init__(**__lowerCAmelCase , pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase )
_A = vocab_size
_A = hidden_size
_A = num_hidden_layers
_A = intermediate_size
_A = num_attention_heads
_A = attention_head_dim
_A = max_position_embeddings
_A = layer_norm_eps
_A = layerdrop
_A = hidden_act
_A = initializer_range
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = pad_token_id
_A = bos_token_id
_A = eos_token_id
_A = conv_glu_dim
_A = conv_dropout
_A = num_conv_layers
_A = input_feat_per_channel
_A = input_channels
_A = conv_channels
_A = ctc_loss_reduction
_A = ctc_zero_infinity
# prevents config testing fail with exporting to json
_A = list(__lowerCAmelCase )
_A = list(__lowerCAmelCase )
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
'''Configuration for convolutional module is incorrect. '''
'''It is required that `len(config.conv_kernel)` == `config.num_conv_layers` '''
f'''but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, '''
f'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
| 2 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def __lowerCamelCase ( _lowerCAmelCase ) -> List[str]:
return getitem, k
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]:
return setitem, k, v
def __lowerCamelCase ( _lowerCAmelCase ) -> str:
return delitem, k
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , *_lowerCAmelCase ) -> Optional[int]:
try:
return fun(_lowerCAmelCase , *_lowerCAmelCase ), None
except Exception as e:
return None, e
__lowerCAmelCase = (
_set("key_a", "val_a"),
_set("key_b", "val_b"),
)
__lowerCAmelCase = [
_set("key_a", "val_a"),
_set("key_a", "val_b"),
]
__lowerCAmelCase = [
_set("key_a", "val_a"),
_set("key_b", "val_b"),
_del("key_a"),
_del("key_b"),
_set("key_a", "val_a"),
_del("key_a"),
]
__lowerCAmelCase = [
_get("key_a"),
_del("key_a"),
_set("key_a", "val_a"),
_del("key_a"),
_del("key_a"),
_get("key_a"),
]
__lowerCAmelCase = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
__lowerCAmelCase = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("key_a", "val_b"),
]
@pytest.mark.parametrize(
"operations" , (
pytest.param(_add_items , id="add items" ),
pytest.param(_overwrite_items , id="overwrite items" ),
pytest.param(_delete_items , id="delete items" ),
pytest.param(_access_absent_items , id="access absent items" ),
pytest.param(_add_with_resize_up , id="add with resize up" ),
pytest.param(_add_with_resize_down , id="add with resize down" ),
) , )
def __lowerCamelCase ( _lowerCAmelCase ) -> List[str]:
_UpperCAmelCase = HashMap(initial_block_size=4 )
_UpperCAmelCase = {}
for _, (fun, *args) in enumerate(_lowerCAmelCase ):
_UpperCAmelCase , _UpperCAmelCase = _run_operation(_lowerCAmelCase , _lowerCAmelCase , *_lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase = _run_operation(_lowerCAmelCase , _lowerCAmelCase , *_lowerCAmelCase )
assert my_res == py_res
assert str(_lowerCAmelCase ) == str(_lowerCAmelCase )
assert set(_lowerCAmelCase ) == set(_lowerCAmelCase )
assert len(_lowerCAmelCase ) == len(_lowerCAmelCase )
assert set(my.items() ) == set(py.items() )
def __lowerCamelCase ( ) -> List[Any]:
def is_public(_lowerCAmelCase ) -> bool:
return not name.startswith("_" )
_UpperCAmelCase = {name for name in dir({} ) if is_public(_lowerCAmelCase )}
_UpperCAmelCase = {name for name in dir(HashMap() ) if is_public(_lowerCAmelCase )}
assert dict_public_names > hash_public_names
| 684 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
lowerCAmelCase : Optional[int] = TypeVar('T')
class SCREAMING_SNAKE_CASE__ ( Generic[T]):
def __init__( self , A_ )-> Dict:
'''simple docstring'''
UpperCamelCase = data
UpperCamelCase = None
def __str__( self )-> str:
'''simple docstring'''
return F'''{self.data}'''
class SCREAMING_SNAKE_CASE__ ( Generic[T]):
def __init__( self )-> None:
'''simple docstring'''
UpperCamelCase = None
def __iter__( self )-> Iterator[T]:
'''simple docstring'''
UpperCamelCase = self.top
while node:
yield node.data
UpperCamelCase = node.next
def __str__( self )-> str:
'''simple docstring'''
return "->".join([str(A_ ) for item in self] )
def __len__( self )-> int:
'''simple docstring'''
return len(tuple(iter(self ) ) )
def UpperCAmelCase_ ( self )-> bool:
'''simple docstring'''
return self.top is None
def UpperCAmelCase_ ( self , A_ )-> None:
'''simple docstring'''
UpperCamelCase = Node(A_ )
if not self.is_empty():
UpperCamelCase = self.top
UpperCamelCase = node
def UpperCAmelCase_ ( self )-> T:
'''simple docstring'''
if self.is_empty():
raise IndexError('pop from empty stack' )
assert isinstance(self.top , A_ )
UpperCamelCase = self.top
UpperCamelCase = self.top.next
return pop_node.data
def UpperCAmelCase_ ( self )-> T:
'''simple docstring'''
if self.is_empty():
raise IndexError('peek from empty stack' )
assert self.top is not None
return self.top.data
def UpperCAmelCase_ ( self )-> None:
'''simple docstring'''
UpperCamelCase = None
if __name__ == "__main__":
from doctest import testmod
testmod()
| 3 |
def __lowerCamelCase ( _lowerCAmelCase ) -> list:
_UpperCAmelCase = len(_lowerCAmelCase )
for i in range(1 , _lowerCAmelCase ):
_UpperCAmelCase = collection[i]
_UpperCAmelCase = 0
_UpperCAmelCase = i - 1
while low <= high:
_UpperCAmelCase = (low + high) // 2
if val < collection[mid]:
_UpperCAmelCase = mid - 1
else:
_UpperCAmelCase = mid + 1
for j in range(_lowerCAmelCase , _lowerCAmelCase , -1 ):
_UpperCAmelCase = collection[j - 1]
_UpperCAmelCase = val
return collection
if __name__ == "__main__":
__lowerCAmelCase = input("Enter numbers separated by a comma:\n").strip()
__lowerCAmelCase = [int(item) for item in user_input.split(",")]
print(binary_insertion_sort(unsorted))
| 684 | 0 |
"""simple docstring"""
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
__UpperCamelCase : Any = logging.get_logger(__name__)
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[str] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Union[str, Any] ):
return [
int(1000 * (box[0] / width) ),
int(1000 * (box[1] / height) ),
int(1000 * (box[2] / width) ),
int(1000 * (box[3] / height) ),
]
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : np.ndarray , _UpperCAmelCase : Optional[str] , _UpperCAmelCase : Optional[str] = None ):
lowerCAmelCase = tesseract_config if tesseract_config is not None else ''
# apply OCR
lowerCAmelCase = to_pil_image(_UpperCAmelCase )
lowerCAmelCase ,lowerCAmelCase = pil_image.size
lowerCAmelCase = pytesseract.image_to_data(_UpperCAmelCase , lang=_UpperCAmelCase , output_type='dict' , config=_UpperCAmelCase )
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = data['text'], data['left'], data['top'], data['width'], data['height']
# filter empty words and corresponding coordinates
lowerCAmelCase = [idx for idx, word in enumerate(_UpperCAmelCase ) if not word.strip()]
lowerCAmelCase = [word for idx, word in enumerate(_UpperCAmelCase ) if idx not in irrelevant_indices]
lowerCAmelCase = [coord for idx, coord in enumerate(_UpperCAmelCase ) if idx not in irrelevant_indices]
lowerCAmelCase = [coord for idx, coord in enumerate(_UpperCAmelCase ) if idx not in irrelevant_indices]
lowerCAmelCase = [coord for idx, coord in enumerate(_UpperCAmelCase ) if idx not in irrelevant_indices]
lowerCAmelCase = [coord for idx, coord in enumerate(_UpperCAmelCase ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
lowerCAmelCase = []
for x, y, w, h in zip(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowerCAmelCase = [x, y, x + w, y + h]
actual_boxes.append(_UpperCAmelCase )
# finally, normalize the bounding boxes
lowerCAmelCase = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) )
assert len(_UpperCAmelCase ) == len(_UpperCAmelCase ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class a ( a__ ):
snake_case__ = ['''pixel_values''']
def __init__( self , _snake_case = True , _snake_case = None , _snake_case = PILImageResampling.BILINEAR , _snake_case = True , _snake_case = None , _snake_case = "" , **_snake_case , ):
"""simple docstring"""
super().__init__(**_snake_case )
lowerCAmelCase = size if size is not None else {'height': 2_24, 'width': 2_24}
lowerCAmelCase = get_size_dict(_snake_case )
lowerCAmelCase = do_resize
lowerCAmelCase = size
lowerCAmelCase = resample
lowerCAmelCase = apply_ocr
lowerCAmelCase = ocr_lang
lowerCAmelCase = tesseract_config
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case = PILImageResampling.BILINEAR , _snake_case = None , **_snake_case , ):
"""simple docstring"""
lowerCAmelCase = get_size_dict(_snake_case )
if "height" not in size or "width" not in size:
raise ValueError(F'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}' )
lowerCAmelCase = (size['height'], size['width'])
return resize(_snake_case , size=_snake_case , resample=_snake_case , data_format=_snake_case , **_snake_case )
def UpperCamelCase__ ( self , _snake_case , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = ChannelDimension.FIRST , **_snake_case , ):
"""simple docstring"""
lowerCAmelCase = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase = size if size is not None else self.size
lowerCAmelCase = get_size_dict(_snake_case )
lowerCAmelCase = resample if resample is not None else self.resample
lowerCAmelCase = apply_ocr if apply_ocr is not None else self.apply_ocr
lowerCAmelCase = ocr_lang if ocr_lang is not None else self.ocr_lang
lowerCAmelCase = tesseract_config if tesseract_config is not None else self.tesseract_config
lowerCAmelCase = make_list_of_images(_snake_case )
if not valid_images(_snake_case ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
# All transformations expect numpy arrays.
lowerCAmelCase = [to_numpy_array(_snake_case ) for image in images]
if apply_ocr:
requires_backends(self , 'pytesseract' )
lowerCAmelCase = []
lowerCAmelCase = []
for image in images:
lowerCAmelCase ,lowerCAmelCase = apply_tesseract(_snake_case , _snake_case , _snake_case )
words_batch.append(_snake_case )
boxes_batch.append(_snake_case )
if do_resize:
lowerCAmelCase = [self.resize(image=_snake_case , size=_snake_case , resample=_snake_case ) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
lowerCAmelCase = [flip_channel_order(_snake_case ) for image in images]
lowerCAmelCase = [to_channel_dimension_format(_snake_case , _snake_case ) for image in images]
lowerCAmelCase = BatchFeature(data={'pixel_values': images} , tensor_type=_snake_case )
if apply_ocr:
lowerCAmelCase = words_batch
lowerCAmelCase = boxes_batch
return data
| 4 |
__lowerCAmelCase = 2_5_6
# Modulus to hash a string
__lowerCAmelCase = 1_0_0_0_0_0_3
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> bool:
_UpperCAmelCase = len(_lowerCAmelCase )
_UpperCAmelCase = len(_lowerCAmelCase )
if p_len > t_len:
return False
_UpperCAmelCase = 0
_UpperCAmelCase = 0
_UpperCAmelCase = 1
# Calculating the hash of pattern and substring of text
for i in range(_lowerCAmelCase ):
_UpperCAmelCase = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
_UpperCAmelCase = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
_UpperCAmelCase = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
_UpperCAmelCase = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
def __lowerCamelCase ( ) -> None:
_UpperCAmelCase = "abc1abc12"
_UpperCAmelCase = "alskfjaldsabc1abc1abc12k23adsfabcabc"
_UpperCAmelCase = "alskfjaldsk23adsfabcabc"
assert rabin_karp(_lowerCAmelCase , _lowerCAmelCase ) and not rabin_karp(_lowerCAmelCase , _lowerCAmelCase )
# Test 2)
_UpperCAmelCase = "ABABX"
_UpperCAmelCase = "ABABZABABYABABX"
assert rabin_karp(_lowerCAmelCase , _lowerCAmelCase )
# Test 3)
_UpperCAmelCase = "AAAB"
_UpperCAmelCase = "ABAAAAAB"
assert rabin_karp(_lowerCAmelCase , _lowerCAmelCase )
# Test 4)
_UpperCAmelCase = "abcdabcy"
_UpperCAmelCase = "abcxabcdabxabcdabcdabcy"
assert rabin_karp(_lowerCAmelCase , _lowerCAmelCase )
# Test 5)
_UpperCAmelCase = "Lü"
_UpperCAmelCase = "Lüsai"
assert rabin_karp(_lowerCAmelCase , _lowerCAmelCase )
_UpperCAmelCase = "Lue"
assert not rabin_karp(_lowerCAmelCase , _lowerCAmelCase )
print("Success." )
if __name__ == "__main__":
test_rabin_karp()
| 684 | 0 |
'''simple docstring'''
from collections import defaultdict
def A (__lowerCamelCase :str , __lowerCamelCase :str ):
_lowerCAmelCase = first_str.lower().strip()
_lowerCAmelCase = second_str.lower().strip()
# Remove whitespace
_lowerCAmelCase = first_str.replace(""" """ , """""" )
_lowerCAmelCase = second_str.replace(""" """ , """""" )
# Strings of different lengths are not anagrams
if len(__lowerCamelCase ) != len(__lowerCamelCase ):
return False
# Default values for count should be 0
_lowerCAmelCase = defaultdict(__lowerCamelCase )
# For each character in input strings,
# increment count in the corresponding
for i in range(len(__lowerCamelCase ) ):
count[first_str[i]] += 1
count[second_str[i]] -= 1
return all(_count == 0 for _count in count.values() )
if __name__ == "__main__":
from doctest import testmod
testmod()
_lowercase = input("""Enter the first string """).strip()
_lowercase = input("""Enter the second string """).strip()
_lowercase = check_anagrams(input_a, input_b)
print(F"""{input_a} and {input_b} are {'' if status else 'not '}anagrams.""")
| 5 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
__lowerCAmelCase = random.Random()
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase=1.0 , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> List[str]:
if rng is None:
_UpperCAmelCase = global_rng
_UpperCAmelCase = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
def __init__( self : Optional[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : List[Any]=7 , __UpperCamelCase : Union[str, Any]=400 , __UpperCamelCase : List[Any]=2_000 , __UpperCamelCase : Optional[Any]=10 , __UpperCamelCase : Optional[int]=160 , __UpperCamelCase : Any=8 , __UpperCamelCase : List[Any]=0.0 , __UpperCamelCase : Dict=4_000 , __UpperCamelCase : Optional[int]=False , __UpperCamelCase : Tuple=True , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = min_seq_length
_UpperCAmelCase = max_seq_length
_UpperCAmelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_UpperCAmelCase = padding_value
_UpperCAmelCase = sampling_rate
_UpperCAmelCase = return_attention_mask
_UpperCAmelCase = do_normalize
_UpperCAmelCase = feature_size
_UpperCAmelCase = chunk_length
_UpperCAmelCase = hop_length
def UpperCAmelCase__ ( self : Optional[Any] ):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : Tuple=False , __UpperCamelCase : Dict=False ):
def _flatten(__UpperCamelCase : Any ):
return list(itertools.chain(*__UpperCamelCase ) )
if equal_length:
_UpperCAmelCase = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_UpperCAmelCase = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_UpperCAmelCase = [np.asarray(__UpperCamelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __SCREAMING_SNAKE_CASE ( lowercase , unittest.TestCase):
__SCREAMING_SNAKE_CASE : str = WhisperFeatureExtractor if is_speech_available() else None
def UpperCAmelCase__ ( self : str ):
_UpperCAmelCase = WhisperFeatureExtractionTester(self )
def UpperCAmelCase__ ( self : str ):
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = feat_extract_first.save_pretrained(__UpperCamelCase )[0]
check_json_file_has_correct_format(__UpperCamelCase )
_UpperCAmelCase = self.feature_extraction_class.from_pretrained(__UpperCamelCase )
_UpperCAmelCase = feat_extract_first.to_dict()
_UpperCAmelCase = feat_extract_second.to_dict()
_UpperCAmelCase = feat_extract_first.mel_filters
_UpperCAmelCase = feat_extract_second.mel_filters
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase ) )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def UpperCAmelCase__ ( self : Union[str, Any] ):
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = os.path.join(__UpperCamelCase , "feat_extract.json" )
feat_extract_first.to_json_file(__UpperCamelCase )
_UpperCAmelCase = self.feature_extraction_class.from_json_file(__UpperCamelCase )
_UpperCAmelCase = feat_extract_first.to_dict()
_UpperCAmelCase = feat_extract_second.to_dict()
_UpperCAmelCase = feat_extract_first.mel_filters
_UpperCAmelCase = feat_extract_second.mel_filters
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase ) )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def UpperCAmelCase__ ( self : int ):
# Tests that all call wrap to encode_plus and batch_encode_plus
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_UpperCAmelCase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
_UpperCAmelCase = [np.asarray(__UpperCamelCase ) for speech_input in speech_inputs]
# Test feature size
_UpperCAmelCase = feature_extractor(__UpperCamelCase , padding="max_length" , return_tensors="np" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
_UpperCAmelCase = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_features
_UpperCAmelCase = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_features
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
# Test batched
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
_UpperCAmelCase = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_UpperCAmelCase = np.asarray(__UpperCamelCase )
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
# Test truncation required
_UpperCAmelCase = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
_UpperCAmelCase = [np.asarray(__UpperCamelCase ) for speech_input in speech_inputs]
_UpperCAmelCase = [x[: feature_extractor.n_samples] for x in speech_inputs]
_UpperCAmelCase = [np.asarray(__UpperCamelCase ) for speech_input in speech_inputs_truncated]
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
def UpperCAmelCase__ ( self : Union[str, Any] ):
import torch
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCAmelCase = np.random.rand(100 , 32 ).astype(np.floataa )
_UpperCAmelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_UpperCAmelCase = feature_extractor.pad([{"input_features": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
_UpperCAmelCase = feature_extractor.pad([{"input_features": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : Tuple ):
_UpperCAmelCase = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
_UpperCAmelCase = ds.sort("id" ).select(range(__UpperCamelCase ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def UpperCAmelCase__ ( self : Tuple ):
# fmt: off
_UpperCAmelCase = torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
_UpperCAmelCase = self._load_datasamples(1 )
_UpperCAmelCase = WhisperFeatureExtractor()
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="pt" ).input_features
self.assertEqual(input_features.shape , (1, 80, 3_000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , __UpperCamelCase , atol=1e-4 ) )
def UpperCAmelCase__ ( self : Optional[Any] ):
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCAmelCase = self._load_datasamples(1 )[0]
_UpperCAmelCase = ((audio - audio.min()) / (audio.max() - audio.min())) * 65_535 # Rescale to [0, 65535] to show issue
_UpperCAmelCase = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=__UpperCamelCase )[0]
self.assertTrue(np.all(np.mean(__UpperCamelCase ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(__UpperCamelCase ) - 1 ) < 1e-3 ) )
| 684 | 0 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
_lowerCamelCase = logging.get_logger(__name__)
@dataclass
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = [
"no_inference",
"no_cuda",
"no_tpu",
"no_speed",
"no_memory",
"no_env_print",
"no_multi_process",
]
def __init__( self :Optional[int] , **__A :Tuple ) -> Union[str, Any]:
"""simple docstring"""
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
SCREAMING_SNAKE_CASE__ = deprecated_arg[3:]
SCREAMING_SNAKE_CASE__ = not kwargs.pop(__A )
logger.warning(
f'''{deprecated_arg} is depreciated. Please use --no-{positive_arg} or'''
f''' {positive_arg}={kwargs[positive_arg]}''' )
SCREAMING_SNAKE_CASE__ = kwargs.pop("""tpu_name""" , self.tpu_name )
SCREAMING_SNAKE_CASE__ = kwargs.pop("""device_idx""" , self.device_idx )
SCREAMING_SNAKE_CASE__ = kwargs.pop("""eager_mode""" , self.eager_mode )
SCREAMING_SNAKE_CASE__ = kwargs.pop("""use_xla""" , self.use_xla )
super().__init__(**__A )
lowerCamelCase_ = field(
default=UpperCamelCase__ , metadata={"help": "Name of TPU"} , )
lowerCamelCase_ = field(
default=0 , metadata={"help": "CPU / GPU device index. Defaults to 0."} , )
lowerCamelCase_ = field(default=UpperCamelCase__ , metadata={"help": "Benchmark models in eager model."} )
lowerCamelCase_ = field(
default=UpperCamelCase__ , metadata={
"help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
} , )
@cached_property
def _snake_case ( self :Any ) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
"""simple docstring"""
requires_backends(self , ["""tf"""] )
SCREAMING_SNAKE_CASE__ = None
if self.tpu:
try:
if self.tpu_name:
SCREAMING_SNAKE_CASE__ = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
else:
SCREAMING_SNAKE_CASE__ = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
SCREAMING_SNAKE_CASE__ = None
return tpu
@cached_property
def _snake_case ( self :Dict ) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
"""simple docstring"""
requires_backends(self , ["""tf"""] )
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
SCREAMING_SNAKE_CASE__ = tf.distribute.TPUStrategy(self._setup_tpu )
else:
# currently no multi gpu is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] , """GPU""" )
SCREAMING_SNAKE_CASE__ = tf.distribute.OneDeviceStrategy(device=f'''/gpu:{self.device_idx}''' )
else:
tf.config.set_visible_devices([] , """GPU""" ) # disable GPU
SCREAMING_SNAKE_CASE__ = tf.distribute.OneDeviceStrategy(device=f'''/cpu:{self.device_idx}''' )
return strategy
@property
def _snake_case ( self :int ) -> bool:
"""simple docstring"""
requires_backends(self , ["""tf"""] )
return self._setup_tpu is not None
@property
def _snake_case ( self :List[Any] ) -> "tf.distribute.Strategy":
"""simple docstring"""
requires_backends(self , ["""tf"""] )
return self._setup_strategy
@property
def _snake_case ( self :Union[str, Any] ) -> Dict:
"""simple docstring"""
requires_backends(self , ["""tf"""] )
return tf.config.list_physical_devices("""GPU""" )
@property
def _snake_case ( self :int ) -> int:
"""simple docstring"""
requires_backends(self , ["""tf"""] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
def _snake_case ( self :Optional[int] ) -> bool:
"""simple docstring"""
return self.n_gpu > 0 | 6 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
__lowerCAmelCase = "\nHuman: <<task>>\n\nAssistant: "
__lowerCAmelCase = "huggingface-tools/default-prompts"
__lowerCAmelCase = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase="run" ) -> Union[str, Any]:
if prompt_or_repo_id is None:
_UpperCAmelCase = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search("\\s" , _lowerCAmelCase ) is not None:
return prompt_or_repo_id
_UpperCAmelCase = cached_file(
_lowerCAmelCase , PROMPT_FILES[mode] , repo_type="dataset" , user_agent={"agent": agent_name} )
with open(_lowerCAmelCase , "r" , encoding="utf-8" ) as f:
return f.read()
| 684 | 0 |
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
a = logging.get_logger(__name__)
a = {'''vocab_file''': '''vocab.txt'''}
a = {
'''vocab_file''': {
'''facebook/esm2_t6_8M_UR50D''': '''https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt''',
'''facebook/esm2_t12_35M_UR50D''': '''https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt''',
},
}
a = {
'''facebook/esm2_t6_8M_UR50D''': 1_024,
'''facebook/esm2_t12_35M_UR50D''': 1_024,
}
def _snake_case ( _snake_case : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
with open(_snake_case , 'r' ) as f:
_A = f.read().splitlines()
return [l.strip() for l in lines]
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : str = VOCAB_FILES_NAMES
UpperCAmelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase : int = ['''input_ids''', '''attention_mask''']
def __init__( self : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Any="<unk>" , _UpperCAmelCase : int="<cls>" , _UpperCAmelCase : Optional[Any]="<pad>" , _UpperCAmelCase : List[str]="<mask>" , _UpperCAmelCase : List[str]="<eos>" , **_UpperCAmelCase : Dict , ):
super().__init__(**_UpperCAmelCase )
_A = load_vocab_file(_UpperCAmelCase )
_A = dict(enumerate(self.all_tokens ) )
_A = {tok: ind for ind, tok in enumerate(self.all_tokens )}
_A = unk_token
_A = cls_token
_A = pad_token
_A = mask_token
_A = eos_token
_A = self.all_tokens
self._create_trie(self.unique_no_split_tokens )
def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : int ):
return self._id_to_token.get(_UpperCAmelCase , self.unk_token )
def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : str ):
return self._token_to_id.get(_UpperCAmelCase , self._token_to_id.get(self.unk_token ) )
def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : int , **_UpperCAmelCase : List[str] ):
return text.split()
def lowerCAmelCase_ ( self : int , _UpperCAmelCase : List[str]=False ):
return len(self._id_to_token )
def lowerCAmelCase_ ( self : Optional[int] ):
return {token: i for i, token in enumerate(self.all_tokens )}
def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : str ):
return self._token_to_id.get(_UpperCAmelCase , self._token_to_id.get(self.unk_token ) )
def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : int ):
return self._id_to_token.get(_UpperCAmelCase , self.unk_token )
def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ):
_A = [self.cls_token_id]
_A = [self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError('Cannot tokenize multiple sequences when EOS token is not set!' )
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
def lowerCAmelCase_ ( self : int , _UpperCAmelCase : List , _UpperCAmelCase : Optional[List] = None , _UpperCAmelCase : bool = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
_A = [1] + ([0] * len(_UpperCAmelCase )) + [1]
if token_ids_a is not None:
mask += [0] * len(_UpperCAmelCase ) + [1]
return mask
def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : str , _UpperCAmelCase : Any ):
_A = os.path.join(_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + 'vocab.txt' )
with open(_UpperCAmelCase , 'w' ) as f:
f.write('\n'.join(self.all_tokens ) )
return (vocab_file,)
@property
def lowerCAmelCase_ ( self : int ):
return self.get_vocab_size(with_added_tokens=_UpperCAmelCase )
def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : Union[List[str], List[AddedToken]] , _UpperCAmelCase : bool = False ):
return super()._add_tokens(_UpperCAmelCase , special_tokens=_UpperCAmelCase )
| 7 |
from itertools import permutations
def __lowerCamelCase ( _lowerCAmelCase ) -> bool:
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
_UpperCAmelCase = [7, 11, 13, 17]
for i, test in enumerate(_lowerCAmelCase ):
if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def __lowerCamelCase ( _lowerCAmelCase = 10 ) -> int:
return sum(
int("".join(map(_lowerCAmelCase , _lowerCAmelCase ) ) )
for num in permutations(range(_lowerCAmelCase ) )
if is_substring_divisible(_lowerCAmelCase ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 684 | 0 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=7 , _UpperCAmelCase=3 , _UpperCAmelCase=30 , _UpperCAmelCase=400 , _UpperCAmelCase=True , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase=[0.5, 0.5, 0.5] , _UpperCAmelCase=[0.5, 0.5, 0.5] , _UpperCAmelCase=True , _UpperCAmelCase=1 / 255 , _UpperCAmelCase=True , ):
'''simple docstring'''
__A : int = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
__A : List[Any] = parent
__A : Optional[Any] = batch_size
__A : Tuple = num_channels
__A : str = min_resolution
__A : List[Any] = max_resolution
__A : int = do_resize
__A : Any = size
__A : int = do_normalize
__A : Union[str, Any] = image_mean
__A : List[str] = image_std
__A : Dict = do_rescale
__A : List[str] = rescale_factor
__A : Union[str, Any] = do_pad
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase=False):
'''simple docstring'''
if not batched:
__A : str = image_inputs[0]
if isinstance(_UpperCAmelCase , Image.Image):
__A ,__A : Tuple = image.size
else:
__A ,__A : int = image.shape[1], image.shape[2]
if w < h:
__A : Optional[int] = int(self.size['shortest_edge'] * h / w)
__A : int = self.size['shortest_edge']
elif w > h:
__A : List[Any] = self.size['shortest_edge']
__A : Dict = int(self.size['shortest_edge'] * w / h)
else:
__A : Tuple = self.size['shortest_edge']
__A : List[Any] = self.size['shortest_edge']
else:
__A : List[Any] = []
for image in image_inputs:
__A ,__A : Union[str, Any] = self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
__A : Union[str, Any] = max(_UpperCAmelCase , key=lambda _UpperCAmelCase: item[0])[0]
__A : Optional[int] = max(_UpperCAmelCase , key=lambda _UpperCAmelCase: item[1])[1]
return expected_height, expected_width
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE (a__ , unittest.TestCase ):
lowerCAmelCase = YolosImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = YolosImageProcessingTester(self)
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(_UpperCAmelCase , 'image_mean'))
self.assertTrue(hasattr(_UpperCAmelCase , 'image_std'))
self.assertTrue(hasattr(_UpperCAmelCase , 'do_normalize'))
self.assertTrue(hasattr(_UpperCAmelCase , 'do_resize'))
self.assertTrue(hasattr(_UpperCAmelCase , 'size'))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333})
self.assertEqual(image_processor.do_pad , _UpperCAmelCase)
__A : Any = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_UpperCAmelCase)
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84})
self.assertEqual(image_processor.do_pad , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
__A : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase)
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image)
# Test not batched input
__A : Dict = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
__A ,__A : List[Any] = self.image_processor_tester.get_expected_values(_UpperCAmelCase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A ,__A : str = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase)
__A : List[Any] = image_processing(_UpperCAmelCase , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
__A : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase)
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray)
# Test not batched input
__A : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
__A ,__A : Tuple = self.image_processor_tester.get_expected_values(_UpperCAmelCase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A : Optional[Any] = image_processing(_UpperCAmelCase , return_tensors='pt').pixel_values
__A ,__A : List[Any] = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
__A : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase)
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor)
# Test not batched input
__A : Any = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
__A ,__A : int = self.image_processor_tester.get_expected_values(_UpperCAmelCase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A : Any = image_processing(_UpperCAmelCase , return_tensors='pt').pixel_values
__A ,__A : Optional[Any] = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = self.image_processing_class(**self.image_processor_dict)
__A : Dict = self.image_processing_class(do_resize=_UpperCAmelCase , do_normalize=_UpperCAmelCase , do_rescale=_UpperCAmelCase)
# create random PyTorch tensors
__A : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase)
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor)
# Test whether the method "pad" and calling the image processor return the same tensors
__A : Any = image_processing_a.pad(_UpperCAmelCase , return_tensors='pt')
__A : Optional[int] = image_processing_a(_UpperCAmelCase , return_tensors='pt')
self.assertTrue(
torch.allclose(encoded_images_with_method['pixel_values'] , encoded_images['pixel_values'] , atol=1e-4))
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r') as f:
__A : int = json.loads(f.read())
__A : Any = {'image_id': 3_9769, 'annotations': target}
# encode them
__A : int = YolosImageProcessor.from_pretrained('hustvl/yolos-small')
__A : str = image_processing(images=_UpperCAmelCase , annotations=_UpperCAmelCase , return_tensors='pt')
# verify pixel values
__A : List[Any] = torch.Size([1, 3, 800, 1066])
self.assertEqual(encoding['pixel_values'].shape , _UpperCAmelCase)
__A : List[str] = torch.tensor([0.2796, 0.3138, 0.3481])
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _UpperCAmelCase , atol=1e-4))
# verify area
__A : Union[str, Any] = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _UpperCAmelCase))
# verify boxes
__A : Tuple = torch.Size([6, 4])
self.assertEqual(encoding['labels'][0]['boxes'].shape , _UpperCAmelCase)
__A : Optional[int] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _UpperCAmelCase , atol=1e-3))
# verify image_id
__A : Union[str, Any] = torch.tensor([3_9769])
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _UpperCAmelCase))
# verify is_crowd
__A : List[str] = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _UpperCAmelCase))
# verify class_labels
__A : int = torch.tensor([75, 75, 63, 65, 17, 17])
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _UpperCAmelCase))
# verify orig_size
__A : Union[str, Any] = torch.tensor([480, 640])
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _UpperCAmelCase))
# verify size
__A : List[str] = torch.tensor([800, 1066])
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _UpperCAmelCase))
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r') as f:
__A : Any = json.loads(f.read())
__A : Optional[int] = {'file_name': '000000039769.png', 'image_id': 3_9769, 'segments_info': target}
__A : Dict = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic')
# encode them
__A : List[Any] = YolosImageProcessor(format='coco_panoptic')
__A : Optional[int] = image_processing(images=_UpperCAmelCase , annotations=_UpperCAmelCase , masks_path=_UpperCAmelCase , return_tensors='pt')
# verify pixel values
__A : str = torch.Size([1, 3, 800, 1066])
self.assertEqual(encoding['pixel_values'].shape , _UpperCAmelCase)
__A : int = torch.tensor([0.2796, 0.3138, 0.3481])
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _UpperCAmelCase , atol=1e-4))
# verify area
__A : Union[str, Any] = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _UpperCAmelCase))
# verify boxes
__A : Optional[Any] = torch.Size([6, 4])
self.assertEqual(encoding['labels'][0]['boxes'].shape , _UpperCAmelCase)
__A : Union[str, Any] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _UpperCAmelCase , atol=1e-3))
# verify image_id
__A : Union[str, Any] = torch.tensor([3_9769])
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _UpperCAmelCase))
# verify is_crowd
__A : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _UpperCAmelCase))
# verify class_labels
__A : Optional[int] = torch.tensor([17, 17, 63, 75, 75, 93])
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _UpperCAmelCase))
# verify masks
__A : Tuple = 82_2873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , _UpperCAmelCase)
# verify orig_size
__A : str = torch.tensor([480, 640])
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _UpperCAmelCase))
# verify size
__A : int = torch.tensor([800, 1066])
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _UpperCAmelCase)) | 8 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
__lowerCAmelCase = {
"vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
"merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
"tokenizer_config_file": {
"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
},
}
__lowerCAmelCase = {"facebook/blenderbot-3B": 1_2_8}
class __SCREAMING_SNAKE_CASE ( lowercase):
__SCREAMING_SNAKE_CASE : Optional[int] = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : str = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : List[Any] = ["""input_ids""", """attention_mask"""]
__SCREAMING_SNAKE_CASE : List[str] = BlenderbotTokenizer
def __init__( self : Tuple , __UpperCamelCase : List[str]=None , __UpperCamelCase : Union[str, Any]=None , __UpperCamelCase : List[str]=None , __UpperCamelCase : Union[str, Any]="replace" , __UpperCamelCase : Tuple="<s>" , __UpperCamelCase : str="</s>" , __UpperCamelCase : Dict="</s>" , __UpperCamelCase : Union[str, Any]="<s>" , __UpperCamelCase : Union[str, Any]="<unk>" , __UpperCamelCase : Tuple="<pad>" , __UpperCamelCase : Optional[int]="<mask>" , __UpperCamelCase : Union[str, Any]=False , __UpperCamelCase : List[str]=True , **__UpperCamelCase : int , ):
super().__init__(
__UpperCamelCase , __UpperCamelCase , tokenizer_file=__UpperCamelCase , errors=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , sep_token=__UpperCamelCase , cls_token=__UpperCamelCase , unk_token=__UpperCamelCase , pad_token=__UpperCamelCase , mask_token=__UpperCamelCase , add_prefix_space=__UpperCamelCase , trim_offsets=__UpperCamelCase , **__UpperCamelCase , )
_UpperCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , __UpperCamelCase ) != add_prefix_space:
_UpperCAmelCase = getattr(__UpperCamelCase , pre_tok_state.pop("type" ) )
_UpperCAmelCase = add_prefix_space
_UpperCAmelCase = pre_tok_class(**__UpperCamelCase )
_UpperCAmelCase = add_prefix_space
_UpperCAmelCase = "post_processor"
_UpperCAmelCase = getattr(self.backend_tokenizer , __UpperCamelCase , __UpperCamelCase )
if tokenizer_component_instance:
_UpperCAmelCase = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_UpperCAmelCase = tuple(state["sep"] )
if "cls" in state:
_UpperCAmelCase = tuple(state["cls"] )
_UpperCAmelCase = False
if state.get("add_prefix_space" , __UpperCamelCase ) != add_prefix_space:
_UpperCAmelCase = add_prefix_space
_UpperCAmelCase = True
if state.get("trim_offsets" , __UpperCamelCase ) != trim_offsets:
_UpperCAmelCase = trim_offsets
_UpperCAmelCase = True
if changes_to_apply:
_UpperCAmelCase = getattr(__UpperCamelCase , state.pop("type" ) )
_UpperCAmelCase = component_class(**__UpperCamelCase )
setattr(self.backend_tokenizer , __UpperCamelCase , __UpperCamelCase )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def UpperCAmelCase__ ( self : Union[str, Any] ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : Union[str, Any] ):
_UpperCAmelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else value
_UpperCAmelCase = value
def UpperCAmelCase__ ( self : int , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : List[Any] ):
_UpperCAmelCase = kwargs.get("is_split_into_words" , __UpperCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__UpperCamelCase , **__UpperCamelCase )
def UpperCAmelCase__ ( self : Tuple , *__UpperCamelCase : int , **__UpperCamelCase : Union[str, Any] ):
_UpperCAmelCase = kwargs.get("is_split_into_words" , __UpperCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__UpperCamelCase , **__UpperCamelCase )
def UpperCAmelCase__ ( self : str , __UpperCamelCase : str , __UpperCamelCase : Optional[str] = None ):
_UpperCAmelCase = self._tokenizer.model.save(__UpperCamelCase , name=__UpperCamelCase )
return tuple(__UpperCamelCase )
def UpperCAmelCase__ ( self : Optional[Any] , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ):
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ):
return token_ids_a + [self.eos_token_id]
def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : "Conversation" ):
_UpperCAmelCase = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(" " + text )
else:
# Generated responses should contain them already.
inputs.append(__UpperCamelCase )
_UpperCAmelCase = " ".join(__UpperCamelCase )
_UpperCAmelCase = self.encode(__UpperCamelCase )
if len(__UpperCamelCase ) > self.model_max_length:
_UpperCAmelCase = input_ids[-self.model_max_length :]
logger.warning(F'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
| 684 | 0 |
SCREAMING_SNAKE_CASE__ = '''0.18.2'''
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 9 |
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
__lowerCAmelCase = logging.get_logger(__name__)
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
_UpperCAmelCase = WavaVecaForSequenceClassification.from_pretrained(_lowerCAmelCase , config=_lowerCAmelCase )
_UpperCAmelCase = downstream_dict["projector.weight"]
_UpperCAmelCase = downstream_dict["projector.bias"]
_UpperCAmelCase = downstream_dict["model.post_net.linear.weight"]
_UpperCAmelCase = downstream_dict["model.post_net.linear.bias"]
return model
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> str:
_UpperCAmelCase = WavaVecaForAudioFrameClassification.from_pretrained(_lowerCAmelCase , config=_lowerCAmelCase )
_UpperCAmelCase = downstream_dict["model.linear.weight"]
_UpperCAmelCase = downstream_dict["model.linear.bias"]
return model
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
_UpperCAmelCase = WavaVecaForXVector.from_pretrained(_lowerCAmelCase , config=_lowerCAmelCase )
_UpperCAmelCase = downstream_dict["connector.weight"]
_UpperCAmelCase = downstream_dict["connector.bias"]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
_UpperCAmelCase = downstream_dict[
F'''model.framelevel_feature_extractor.module.{i}.kernel.weight'''
]
_UpperCAmelCase = downstream_dict[F'''model.framelevel_feature_extractor.module.{i}.kernel.bias''']
_UpperCAmelCase = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
_UpperCAmelCase = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
_UpperCAmelCase = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
_UpperCAmelCase = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
_UpperCAmelCase = downstream_dict["objective.W"]
return model
@torch.no_grad()
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
_UpperCAmelCase = torch.load(_lowerCAmelCase , map_location="cpu" )
_UpperCAmelCase = checkpoint["Downstream"]
_UpperCAmelCase = WavaVecaConfig.from_pretrained(_lowerCAmelCase )
_UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(
_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , do_normalize=_lowerCAmelCase )
_UpperCAmelCase = hf_config.architectures[0]
if arch.endswith("ForSequenceClassification" ):
_UpperCAmelCase = convert_classification(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
elif arch.endswith("ForAudioFrameClassification" ):
_UpperCAmelCase = convert_diarization(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
elif arch.endswith("ForXVector" ):
_UpperCAmelCase = convert_xvector(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
else:
raise NotImplementedError(F'''S3PRL weights conversion is not supported for {arch}''' )
if hf_config.use_weighted_layer_sum:
_UpperCAmelCase = checkpoint["Featurizer"]["weights"]
hf_feature_extractor.save_pretrained(_lowerCAmelCase )
hf_model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
)
parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
__lowerCAmelCase = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 684 | 0 |
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class lowerCAmelCase_ ( unittest.TestCase ):
def UpperCamelCase_ ( self : str ):
_UpperCamelCase = torch.nn.Linear(10 , 10 )
_UpperCamelCase = torch.optim.SGD(model.parameters() , 0.1 )
_UpperCamelCase = Accelerator()
_UpperCamelCase = accelerator.prepare(_A )
try:
pickle.loads(pickle.dumps(_A ) )
except Exception as e:
self.fail(F"""Accelerated optimizer pickling failed with {e}""" )
AcceleratorState._reset_state()
| 10 |
def __lowerCamelCase ( _lowerCAmelCase ) -> str:
_UpperCAmelCase = []
_UpperCAmelCase = set({"(", "[", "{"} )
_UpperCAmelCase = set({")", "]", "}"} )
_UpperCAmelCase = {"{": "}", "[": "]", "(": ")"}
for i in range(len(_lowerCAmelCase ) ):
if s[i] in open_brackets:
stack.append(s[i] )
elif s[i] in closed_brackets and (
len(_lowerCAmelCase ) == 0 or (len(_lowerCAmelCase ) > 0 and open_to_closed[stack.pop()] != s[i])
):
return False
return len(_lowerCAmelCase ) == 0
def __lowerCamelCase ( ) -> str:
_UpperCAmelCase = input("Enter sequence of brackets: " )
if is_balanced(_lowerCAmelCase ):
print(_lowerCAmelCase , "is balanced" )
else:
print(_lowerCAmelCase , "is not balanced" )
if __name__ == "__main__":
main()
| 684 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowercase_ = {
"configuration_efficientformer": [
"EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientFormerConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientFormerForImageClassification",
"EfficientFormerForImageClassificationWithTeacher",
"EfficientFormerModel",
"EfficientFormerPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFEfficientFormerForImageClassification",
"TFEfficientFormerForImageClassificationWithTeacher",
"TFEfficientFormerModel",
"TFEfficientFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 11 |
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> tuple[float, float]:
# Check if the input is valid
if not len(_lowerCAmelCase ) == len(_lowerCAmelCase ) == 3:
raise ValueError("Please enter a valid equation." )
if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0:
raise ValueError("Both a & b of two equations can't be zero." )
# Extract the coefficients
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = equationa
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = equationa
# Calculate the determinants of the matrices
_UpperCAmelCase = aa * ba - aa * ba
_UpperCAmelCase = ca * ba - ca * ba
_UpperCAmelCase = aa * ca - aa * ca
# Check if the system of linear equations has a solution (using Cramer's rule)
if determinant == 0:
if determinant_x == determinant_y == 0:
raise ValueError("Infinite solutions. (Consistent system)" )
else:
raise ValueError("No solution. (Inconsistent system)" )
else:
if determinant_x == determinant_y == 0:
# Trivial solution (Inconsistent system)
return (0.0, 0.0)
else:
_UpperCAmelCase = determinant_x / determinant
_UpperCAmelCase = determinant_y / determinant
# Non-Trivial Solution (Consistent system)
return (x, y)
| 684 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCamelCase__ : List[str] = {
"""configuration_roberta_prelayernorm""": [
"""ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""RobertaPreLayerNormConfig""",
"""RobertaPreLayerNormOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Optional[int] = [
"""ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaPreLayerNormForCausalLM""",
"""RobertaPreLayerNormForMaskedLM""",
"""RobertaPreLayerNormForMultipleChoice""",
"""RobertaPreLayerNormForQuestionAnswering""",
"""RobertaPreLayerNormForSequenceClassification""",
"""RobertaPreLayerNormForTokenClassification""",
"""RobertaPreLayerNormModel""",
"""RobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : List[str] = [
"""TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaPreLayerNormForCausalLM""",
"""TFRobertaPreLayerNormForMaskedLM""",
"""TFRobertaPreLayerNormForMultipleChoice""",
"""TFRobertaPreLayerNormForQuestionAnswering""",
"""TFRobertaPreLayerNormForSequenceClassification""",
"""TFRobertaPreLayerNormForTokenClassification""",
"""TFRobertaPreLayerNormMainLayer""",
"""TFRobertaPreLayerNormModel""",
"""TFRobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : List[Any] = [
"""FlaxRobertaPreLayerNormForCausalLM""",
"""FlaxRobertaPreLayerNormForMaskedLM""",
"""FlaxRobertaPreLayerNormForMultipleChoice""",
"""FlaxRobertaPreLayerNormForQuestionAnswering""",
"""FlaxRobertaPreLayerNormForSequenceClassification""",
"""FlaxRobertaPreLayerNormForTokenClassification""",
"""FlaxRobertaPreLayerNormModel""",
"""FlaxRobertaPreLayerNormPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
lowerCamelCase__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 12 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
# Initialise PyTorch model
_UpperCAmelCase = RemBertConfig.from_json_file(_lowerCAmelCase )
print("Building PyTorch model from configuration: {}".format(str(_lowerCAmelCase ) ) )
_UpperCAmelCase = RemBertModel(_lowerCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# Save pytorch-model
print("Save PyTorch model to {}".format(_lowerCAmelCase ) )
torch.save(model.state_dict() , _lowerCAmelCase )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--rembert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained RemBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__lowerCAmelCase = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 684 | 0 |
'''simple docstring'''
A__ : str = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def UpperCAmelCase__ ( UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : Dict ) -> int:
# Return True if there is node that has not iterated.
__lowerCamelCase : Optional[int] = [False] * len(UpperCAmelCase_ )
__lowerCamelCase : List[Any] = [s]
__lowerCamelCase : str = True
while queue:
__lowerCamelCase : Dict = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(UpperCAmelCase_ )
__lowerCamelCase : Tuple = True
__lowerCamelCase : List[str] = u
return visited[t]
def UpperCAmelCase__ ( UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any] ) -> List[Any]:
__lowerCamelCase : str = [-1] * (len(UpperCAmelCase_ ))
__lowerCamelCase : Union[str, Any] = 0
__lowerCamelCase : int = []
__lowerCamelCase : str = [i[:] for i in graph] # Record original cut, copy.
while bfs(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
__lowerCamelCase : Tuple = float('Inf' )
__lowerCamelCase : List[str] = sink
while s != source:
# Find the minimum value in select path
__lowerCamelCase : Union[str, Any] = min(UpperCAmelCase_ , graph[parent[s]][s] )
__lowerCamelCase : Union[str, Any] = parent[s]
max_flow += path_flow
__lowerCamelCase : List[Any] = sink
while v != source:
__lowerCamelCase : Tuple = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
__lowerCamelCase : List[Any] = parent[v]
for i in range(len(UpperCAmelCase_ ) ):
for j in range(len(graph[0] ) ):
if graph[i][j] == 0 and temp[i][j] > 0:
res.append((i, j) )
return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
| 13 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __SCREAMING_SNAKE_CASE :
@staticmethod
def UpperCAmelCase__ ( *__UpperCamelCase : Dict , **__UpperCamelCase : Optional[int] ):
pass
@is_pipeline_test
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
__SCREAMING_SNAKE_CASE : List[Any] = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : Any , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] ):
_UpperCAmelCase = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" )
_UpperCAmelCase = [
{
"image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"question": "How many cats are there?",
},
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"question": "How many cats are there?",
},
]
return vqa_pipeline, examples
def UpperCAmelCase__ ( self : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[int] ):
_UpperCAmelCase = vqa_pipeline(__UpperCamelCase , top_k=1 )
self.assertEqual(
__UpperCamelCase , [
[{"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}],
[{"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}],
] , )
@require_torch
def UpperCAmelCase__ ( self : List[Any] ):
_UpperCAmelCase = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" )
_UpperCAmelCase = "./tests/fixtures/tests_samples/COCO/000000039769.png"
_UpperCAmelCase = "How many cats are there?"
_UpperCAmelCase = vqa_pipeline(image=__UpperCamelCase , question="How many cats are there?" , top_k=2 )
self.assertEqual(
__UpperCamelCase , [{"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}, {"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}] )
_UpperCAmelCase = vqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
__UpperCamelCase , [{"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}, {"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}] )
@slow
@require_torch
def UpperCAmelCase__ ( self : List[str] ):
_UpperCAmelCase = pipeline("visual-question-answering" , model="dandelin/vilt-b32-finetuned-vqa" )
_UpperCAmelCase = "./tests/fixtures/tests_samples/COCO/000000039769.png"
_UpperCAmelCase = "How many cats are there?"
_UpperCAmelCase = vqa_pipeline(image=__UpperCamelCase , question=__UpperCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}] )
_UpperCAmelCase = vqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}] )
_UpperCAmelCase = vqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2 , )
@require_tf
@unittest.skip("Visual question answering not implemented in TF" )
def UpperCAmelCase__ ( self : Optional[int] ):
pass
| 684 | 0 |
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
a__ = '''__DUMMY_TRANSFORMERS_USER__'''
a__ = '''Dummy User'''
a__ = '''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'''
a__ = '''https://hub-ci.huggingface.co'''
a__ = CI_HUB_ENDPOINT + '''/datasets/{repo_id}/resolve/{revision}/{path}'''
a__ = CI_HUB_ENDPOINT + '''/{repo_id}/resolve/{revision}/{filename}'''
a__ = Path('''~/.huggingface/hub_ci_token''').expanduser()
@pytest.fixture
def __UpperCAmelCase ( __a : Tuple ) -> Dict:
"""simple docstring"""
monkeypatch.setattr(
'''huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE''' ,__a )
@pytest.fixture
def __UpperCAmelCase ( __a : Dict ) -> List[str]:
"""simple docstring"""
monkeypatch.setattr('''datasets.config.HF_ENDPOINT''' ,__a )
monkeypatch.setattr('''datasets.config.HUB_DATASETS_URL''' ,__a )
@pytest.fixture
def __UpperCAmelCase ( __a : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
monkeypatch.setattr('''huggingface_hub.hf_api.HfFolder.path_token''' ,__a )
@pytest.fixture
def __UpperCAmelCase ( __a : Optional[int] ,__a : Dict ) -> Union[str, Any]:
"""simple docstring"""
HfFolder.save_token(__a )
yield
HfFolder.delete_token()
@pytest.fixture(scope='''session''' )
def __UpperCAmelCase ( ) -> str:
"""simple docstring"""
return HfApi(endpoint=__a )
@pytest.fixture(scope='''session''' )
def __UpperCAmelCase ( __a : HfApi ) -> List[Any]:
"""simple docstring"""
_a : Optional[int] = HfFolder.get_token()
HfFolder.save_token(__a )
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(__a )
@pytest.fixture
def __UpperCAmelCase ( __a : Optional[int] ) -> Dict:
"""simple docstring"""
def _cleanup_repo(__a : int ):
hf_api.delete_repo(__a ,token=__a ,repo_type='''dataset''' )
return _cleanup_repo
@pytest.fixture
def __UpperCAmelCase ( __a : List[str] ) -> int:
"""simple docstring"""
@contextmanager
def _temporary_repo(__a : Optional[Any] ):
try:
yield repo_id
finally:
cleanup_repo(__a )
return _temporary_repo
@pytest.fixture(scope='''session''' )
def __UpperCAmelCase ( __a : HfApi ,__a : str ,__a : int ) -> Dict:
"""simple docstring"""
_a : List[str] = F"""repo_txt_data-{int(time.time() * 10E3 )}"""
_a : List[Any] = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(__a ,token=__a ,repo_type='''dataset''' ,private=__a )
hf_api.upload_file(
token=__a ,path_or_fileobj=str(__a ) ,path_in_repo='''data/text_data.txt''' ,repo_id=__a ,repo_type='''dataset''' ,)
yield repo_id
try:
hf_api.delete_repo(__a ,token=__a ,repo_type='''dataset''' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __UpperCAmelCase ( __a : int ,__a : List[Any] ,__a : Dict ) -> List[Any]:
"""simple docstring"""
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope='''session''' )
def __UpperCAmelCase ( __a : HfApi ,__a : str ,__a : Tuple ) -> Dict:
"""simple docstring"""
_a : List[str] = F"""repo_zipped_txt_data-{int(time.time() * 10E3 )}"""
_a : Optional[int] = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(__a ,token=__a ,repo_type='''dataset''' ,private=__a )
hf_api.upload_file(
token=__a ,path_or_fileobj=str(__a ) ,path_in_repo='''data.zip''' ,repo_id=__a ,repo_type='''dataset''' ,)
yield repo_id
try:
hf_api.delete_repo(__a ,token=__a ,repo_type='''dataset''' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __UpperCAmelCase ( __a : Optional[Any] ,__a : Tuple ,__a : Union[str, Any] ) -> Tuple:
"""simple docstring"""
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope='''session''' )
def __UpperCAmelCase ( __a : HfApi ,__a : Any ,__a : Dict ) -> str:
"""simple docstring"""
_a : Dict = F"""repo_zipped_img_data-{int(time.time() * 10E3 )}"""
_a : str = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(__a ,token=__a ,repo_type='''dataset''' ,private=__a )
hf_api.upload_file(
token=__a ,path_or_fileobj=str(__a ) ,path_in_repo='''data.zip''' ,repo_id=__a ,repo_type='''dataset''' ,)
yield repo_id
try:
hf_api.delete_repo(__a ,token=__a ,repo_type='''dataset''' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __UpperCAmelCase ( __a : List[Any] ,__a : Optional[Any] ,__a : List[str] ) -> Optional[int]:
"""simple docstring"""
return hf_private_dataset_repo_zipped_img_data_
| 14 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 684 | 0 |
from __future__ import annotations
def UpperCamelCase ( __magic_name__ : list[int] ) -> list[int]: # This function is recursive
"""simple docstring"""
lowercase__ = len(__magic_name__ )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
lowercase__ = array[0]
lowercase__ = False
lowercase__ = 1
lowercase__ = []
while not is_found and i < array_length:
if array[i] < pivot:
lowercase__ = True
lowercase__ = [element for element in array[i:] if element >= array[i]]
lowercase__ = longest_subsequence(__magic_name__ )
if len(__magic_name__ ) > len(__magic_name__ ):
lowercase__ = temp_array
else:
i += 1
lowercase__ = [element for element in array[1:] if element >= pivot]
lowercase__ = [pivot, *longest_subsequence(__magic_name__ )]
if len(__magic_name__ ) > len(__magic_name__ ):
return temp_array
else:
return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
| 15 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __SCREAMING_SNAKE_CASE ( lowercase):
__SCREAMING_SNAKE_CASE : str = (UniPCMultistepScheduler,)
__SCREAMING_SNAKE_CASE : Dict = (("""num_inference_steps""", 25),)
def UpperCAmelCase__ ( self : str , **__UpperCamelCase : Any ):
_UpperCAmelCase = {
"num_train_timesteps": 1_000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"solver_order": 2,
"solver_type": "bh2",
}
config.update(**__UpperCamelCase )
return config
def UpperCAmelCase__ ( self : int , __UpperCamelCase : Any=0 , **__UpperCamelCase : Any ):
_UpperCAmelCase = dict(self.forward_default_kwargs )
_UpperCAmelCase = kwargs.pop("num_inference_steps" , __UpperCamelCase )
_UpperCAmelCase = self.dummy_sample
_UpperCAmelCase = 0.1 * sample
_UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase )
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
scheduler.set_timesteps(__UpperCamelCase )
# copy over dummy past residuals
_UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__UpperCamelCase )
_UpperCAmelCase = scheduler_class.from_pretrained(__UpperCamelCase )
new_scheduler.set_timesteps(__UpperCamelCase )
# copy over dummy past residuals
_UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
_UpperCAmelCase , _UpperCAmelCase = sample, sample
for t in range(__UpperCamelCase , time_step + scheduler.config.solver_order + 1 ):
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
_UpperCAmelCase = new_scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : Union[str, Any]=0 , **__UpperCamelCase : List[Any] ):
_UpperCAmelCase = dict(self.forward_default_kwargs )
_UpperCAmelCase = kwargs.pop("num_inference_steps" , __UpperCamelCase )
_UpperCAmelCase = self.dummy_sample
_UpperCAmelCase = 0.1 * sample
_UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase = self.get_scheduler_config()
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
scheduler.set_timesteps(__UpperCamelCase )
# copy over dummy past residuals (must be after setting timesteps)
_UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__UpperCamelCase )
_UpperCAmelCase = scheduler_class.from_pretrained(__UpperCamelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(__UpperCamelCase )
# copy over dummy past residual (must be after setting timesteps)
_UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
_UpperCAmelCase = new_scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : Dict=None , **__UpperCamelCase : Optional[Any] ):
if scheduler is None:
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase )
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase )
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
_UpperCAmelCase = 10
_UpperCAmelCase = self.dummy_model()
_UpperCAmelCase = self.dummy_sample_deter
scheduler.set_timesteps(__UpperCamelCase )
for i, t in enumerate(scheduler.timesteps ):
_UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample
return sample
def UpperCAmelCase__ ( self : List[str] ):
_UpperCAmelCase = dict(self.forward_default_kwargs )
_UpperCAmelCase = kwargs.pop("num_inference_steps" , __UpperCamelCase )
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase = self.get_scheduler_config()
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
_UpperCAmelCase = self.dummy_sample
_UpperCAmelCase = 0.1 * sample
if num_inference_steps is not None and hasattr(__UpperCamelCase , "set_timesteps" ):
scheduler.set_timesteps(__UpperCamelCase )
elif num_inference_steps is not None and not hasattr(__UpperCamelCase , "set_timesteps" ):
_UpperCAmelCase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
_UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
_UpperCAmelCase = scheduler.timesteps[5]
_UpperCAmelCase = scheduler.timesteps[6]
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCAmelCase__ ( self : Union[str, Any] ):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
_UpperCAmelCase = UniPCMultistepScheduler(**self.get_scheduler_config() )
_UpperCAmelCase = self.full_loop(scheduler=__UpperCamelCase )
_UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_mean.item() - 0.2464 ) < 1e-3
_UpperCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config )
_UpperCAmelCase = DEISMultistepScheduler.from_config(scheduler.config )
_UpperCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config )
_UpperCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config )
_UpperCAmelCase = self.full_loop(scheduler=__UpperCamelCase )
_UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_mean.item() - 0.2464 ) < 1e-3
def UpperCAmelCase__ ( self : str ):
for timesteps in [25, 50, 100, 999, 1_000]:
self.check_over_configs(num_train_timesteps=__UpperCamelCase )
def UpperCAmelCase__ ( self : int ):
self.check_over_configs(thresholding=__UpperCamelCase )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=__UpperCamelCase , prediction_type=__UpperCamelCase , sample_max_value=__UpperCamelCase , solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , )
def UpperCAmelCase__ ( self : int ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__UpperCamelCase )
def UpperCAmelCase__ ( self : int ):
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , prediction_type=__UpperCamelCase , )
_UpperCAmelCase = self.full_loop(
solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , prediction_type=__UpperCamelCase , )
assert not torch.isnan(__UpperCamelCase ).any(), "Samples have nan numbers"
def UpperCAmelCase__ ( self : Optional[int] ):
self.check_over_configs(lower_order_final=__UpperCamelCase )
self.check_over_configs(lower_order_final=__UpperCamelCase )
def UpperCAmelCase__ ( self : Optional[Any] ):
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]:
self.check_over_forward(num_inference_steps=__UpperCamelCase , time_step=0 )
def UpperCAmelCase__ ( self : List[str] ):
_UpperCAmelCase = self.full_loop()
_UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_mean.item() - 0.2464 ) < 1e-3
def UpperCAmelCase__ ( self : Optional[Any] ):
_UpperCAmelCase = self.full_loop(prediction_type="v_prediction" )
_UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_mean.item() - 0.1014 ) < 1e-3
def UpperCAmelCase__ ( self : Tuple ):
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config(thresholding=__UpperCamelCase , dynamic_thresholding_ratio=0 )
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
_UpperCAmelCase = 10
_UpperCAmelCase = self.dummy_model()
_UpperCAmelCase = self.dummy_sample_deter.half()
scheduler.set_timesteps(__UpperCamelCase )
for i, t in enumerate(scheduler.timesteps ):
_UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample
assert sample.dtype == torch.floataa
def UpperCAmelCase__ ( self : str , **__UpperCamelCase : Optional[Any] ):
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase )
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
| 684 | 0 |
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
__A : Tuple = (
'4S 3H 2C 7S 5H',
'9D 8H 2C 6S 7H',
'2D 6D 9D TH 7D',
'TC 8C 2S JH 6C',
'JH 8S TH AH QH',
'TS KS 5S 9S AC',
'KD 6S 9D TH AD',
'KS 8D 4D 9S 4S', # pair
'8C 4S KH JS 4D', # pair
'QH 8H KD JH 8S', # pair
'KC 4H KS 2H 8D', # pair
'KD 4S KC 3H 8S', # pair
'AH 8S AS KC JH', # pair
'3H 4C 4H 3S 2H', # 2 pairs
'5S 5D 2C KH KH', # 2 pairs
'3C KH 5D 5S KH', # 2 pairs
'AS 3C KH AD KH', # 2 pairs
'7C 7S 3S 7H 5S', # 3 of a kind
'7C 7S KH 2H 7H', # 3 of a kind
'AC KH QH AH AS', # 3 of a kind
'2H 4D 3C AS 5S', # straight (low ace)
'3C 5C 4C 2C 6H', # straight
'6S 8S 7S 5H 9H', # straight
'JS QS 9H TS KH', # straight
'QC KH TS JS AH', # straight (high ace)
'8C 9C 5C 3C TC', # flush
'3S 8S 9S 5S KS', # flush
'4C 5C 9C 8C KC', # flush
'JH 8H AH KH QH', # flush
'3D 2H 3H 2C 2D', # full house
'2H 2C 3S 3H 3D', # full house
'KH KC 3S 3H 3D', # full house
'JC 6H JS JD JH', # 4 of a kind
'JC 7H JS JD JH', # 4 of a kind
'JC KH JS JD JH', # 4 of a kind
'2S AS 4S 5S 3S', # straight flush (low ace)
'2D 6D 3D 4D 5D', # straight flush
'5C 6C 3C 7C 4C', # straight flush
'JH 9H TH KH QH', # straight flush
'JH AH TH KH QH', # royal flush (high ace straight flush)
)
__A : Union[str, Any] = (
('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'),
('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'),
('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'),
('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'),
('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'),
('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'),
('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'),
('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'),
('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'),
('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'),
('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'),
('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'),
('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'),
('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'),
('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'),
('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'),
('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'),
('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'),
('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'),
('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'),
('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'),
('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'),
('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'),
('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'),
('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'),
('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'),
('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'),
('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'),
('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'),
('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'),
('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'),
)
__A : List[Any] = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', True),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', False),
('AS 3S 4S 8S 2S', True),
)
__A : int = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', False),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', True),
)
__A : Any = (
('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 1_4]),
('2H 5D 3C AS 5S', False, [1_4, 5, 5, 3, 2]),
('JH QD KC AS TS', False, [1_4, 1_3, 1_2, 1_1, 1_0]),
('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]),
)
__A : List[Any] = (
('JH AH TH KH QH', 0),
('JH 9H TH KH QH', 0),
('JC KH JS JD JH', 7),
('KH KC 3S 3H 3D', 6),
('8C 9C 5C 3C TC', 0),
('JS QS 9H TS KH', 0),
('7C 7S KH 2H 7H', 3),
('3C KH 5D 5S KH', 2),
('QH 8H KD JH 8S', 1),
('2D 6D 9D TH 7D', 0),
)
__A : str = (
('JH AH TH KH QH', 2_3),
('JH 9H TH KH QH', 2_2),
('JC KH JS JD JH', 2_1),
('KH KC 3S 3H 3D', 2_0),
('8C 9C 5C 3C TC', 1_9),
('JS QS 9H TS KH', 1_8),
('7C 7S KH 2H 7H', 1_7),
('3C KH 5D 5S KH', 1_6),
('QH 8H KD JH 8S', 1_5),
('2D 6D 9D TH 7D', 1_4),
)
def __a ( ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = randrange(len(A__ ) ), randrange(len(A__ ) )
SCREAMING_SNAKE_CASE = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def __a ( A__ : int = 100 ):
return (generate_random_hand() for _ in range(A__ ))
@pytest.mark.parametrize("hand, expected" , A__ )
def __a ( A__ : str , A__ : List[Any] ):
assert PokerHand(A__ )._is_flush() == expected
@pytest.mark.parametrize("hand, expected" , A__ )
def __a ( A__ : Optional[Any] , A__ : int ):
assert PokerHand(A__ )._is_straight() == expected
@pytest.mark.parametrize("hand, expected, card_values" , A__ )
def __a ( A__ : Any , A__ : List[str] , A__ : Any ):
SCREAMING_SNAKE_CASE = PokerHand(A__ )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize("hand, expected" , A__ )
def __a ( A__ : Optional[int] , A__ : str ):
assert PokerHand(A__ )._is_same_kind() == expected
@pytest.mark.parametrize("hand, expected" , A__ )
def __a ( A__ : str , A__ : Optional[int] ):
assert PokerHand(A__ )._hand_type == expected
@pytest.mark.parametrize("hand, other, expected" , A__ )
def __a ( A__ : Union[str, Any] , A__ : str , A__ : Optional[Any] ):
assert PokerHand(A__ ).compare_with(PokerHand(A__ ) ) == expected
@pytest.mark.parametrize("hand, other, expected" , generate_random_hands() )
def __a ( A__ : List[Any] , A__ : Optional[Any] , A__ : List[Any] ):
assert PokerHand(A__ ).compare_with(PokerHand(A__ ) ) == expected
def __a ( ):
SCREAMING_SNAKE_CASE = [PokerHand(A__ ) for hand in SORTED_HANDS]
SCREAMING_SNAKE_CASE = poker_hands.copy()
shuffle(A__ )
SCREAMING_SNAKE_CASE = chain(sorted(A__ ) )
for index, hand in enumerate(A__ ):
assert hand == poker_hands[index]
def __a ( ):
# Test that five high straights are compared correctly.
SCREAMING_SNAKE_CASE = [PokerHand("2D AC 3H 4H 5S" ), PokerHand("2S 3H 4H 5S 6C" )]
pokerhands.sort(reverse=A__ )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def __a ( ):
# Multiple calls to five_high_straight function should still return True
# and shouldn't mutate the list in every call other than the first.
SCREAMING_SNAKE_CASE = PokerHand("2C 4S AS 3D 5C" )
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = [5, 4, 3, 2, 14]
for _ in range(10 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def __a ( ):
# Problem number 54 from Project Euler
# Testing from poker_hands.txt file
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = os.path.abspath(os.path.dirname(A__ ) )
SCREAMING_SNAKE_CASE = os.path.join(A__ , "poker_hands.txt" )
with open(A__ ) as file_hand:
for line in file_hand:
SCREAMING_SNAKE_CASE = line[:14].strip()
SCREAMING_SNAKE_CASE = line[15:].strip()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = PokerHand(A__ ), PokerHand(A__ )
SCREAMING_SNAKE_CASE = player.compare_with(A__ )
if output == "Win":
answer += 1
assert answer == 376 | 16 |
import math
class __SCREAMING_SNAKE_CASE :
def __init__( self : Union[str, Any] , __UpperCamelCase : List[Any]=0 ): # a graph with Node 0,1,...,N-1
_UpperCAmelCase = n
_UpperCAmelCase = [
[math.inf for j in range(0 , __UpperCamelCase )] for i in range(0 , __UpperCamelCase )
] # adjacency matrix for weight
_UpperCAmelCase = [
[math.inf for j in range(0 , __UpperCamelCase )] for i in range(0 , __UpperCamelCase )
] # dp[i][j] stores minimum distance from i to j
def UpperCAmelCase__ ( self : str , __UpperCamelCase : Tuple , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[str] ):
_UpperCAmelCase = w
def UpperCAmelCase__ ( self : Dict ):
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
_UpperCAmelCase = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def UpperCAmelCase__ ( self : List[str] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Any ):
return self.dp[u][v]
if __name__ == "__main__":
__lowerCAmelCase = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 1_0)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 1_0)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 684 | 0 |
from __future__ import annotations
def __SCREAMING_SNAKE_CASE ( a__ : list[float] ,a__ : list[float] ) -> float:
__A : str = sorted(numsa + numsa )
__A , __A : Any = divmod(len(a__ ) ,2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ : Dict = [float(x) for x in input('''Enter the elements of first array: ''').split()]
UpperCAmelCase_ : List[str] = [float(x) for x in input('''Enter the elements of second array: ''').split()]
print(f"""The median of two arrays is: {median_of_two_arrays(array_a, array_a)}""")
| 17 |
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( lowercase , lowercase , unittest.TestCase):
__SCREAMING_SNAKE_CASE : Dict = VQModel
__SCREAMING_SNAKE_CASE : Optional[int] = """sample"""
@property
def UpperCAmelCase__ ( self : List[str] , __UpperCamelCase : Optional[int]=(32, 32) ):
_UpperCAmelCase = 4
_UpperCAmelCase = 3
_UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__UpperCamelCase )
return {"sample": image}
@property
def UpperCAmelCase__ ( self : Tuple ):
return (3, 32, 32)
@property
def UpperCAmelCase__ ( self : str ):
return (3, 32, 32)
def UpperCAmelCase__ ( self : Dict ):
_UpperCAmelCase = {
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 3,
}
_UpperCAmelCase = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Dict ):
pass
def UpperCAmelCase__ ( self : str ):
pass
def UpperCAmelCase__ ( self : List[str] ):
_UpperCAmelCase , _UpperCAmelCase = VQModel.from_pretrained("fusing/vqgan-dummy" , output_loading_info=__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(__UpperCamelCase )
_UpperCAmelCase = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def UpperCAmelCase__ ( self : List[Any] ):
_UpperCAmelCase = VQModel.from_pretrained("fusing/vqgan-dummy" )
model.to(__UpperCamelCase ).eval()
torch.manual_seed(0 )
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0 )
_UpperCAmelCase = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
_UpperCAmelCase = image.to(__UpperCamelCase )
with torch.no_grad():
_UpperCAmelCase = model(__UpperCamelCase ).sample
_UpperCAmelCase = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
_UpperCAmelCase = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143] )
# fmt: on
self.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
| 684 | 0 |
'''simple docstring'''
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def __a(SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Tuple=None ):
'''simple docstring'''
_lowerCAmelCase = None
if token is not None:
_lowerCAmelCase = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}
_lowerCAmelCase = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
_lowerCAmelCase = requests.get(SCREAMING_SNAKE_CASE_ , headers=SCREAMING_SNAKE_CASE_ ).json()
_lowerCAmelCase = {}
try:
job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} )
_lowerCAmelCase = math.ceil((result["total_count"] - 100) / 100 )
for i in range(SCREAMING_SNAKE_CASE_ ):
_lowerCAmelCase = requests.get(url + F'''&page={i + 2}''' , headers=SCREAMING_SNAKE_CASE_ ).json()
job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} )
return job_links
except Exception:
print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def __a(SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Dict=None ):
'''simple docstring'''
_lowerCAmelCase = None
if token is not None:
_lowerCAmelCase = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}
_lowerCAmelCase = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100'''
_lowerCAmelCase = requests.get(SCREAMING_SNAKE_CASE_ , headers=SCREAMING_SNAKE_CASE_ ).json()
_lowerCAmelCase = {}
try:
artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} )
_lowerCAmelCase = math.ceil((result["total_count"] - 100) / 100 )
for i in range(SCREAMING_SNAKE_CASE_ ):
_lowerCAmelCase = requests.get(url + F'''&page={i + 2}''' , headers=SCREAMING_SNAKE_CASE_ ).json()
artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} )
return artifacts
except Exception:
print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def __a(SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Dict ):
'''simple docstring'''
_lowerCAmelCase = None
if token is not None:
_lowerCAmelCase = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}
_lowerCAmelCase = requests.get(SCREAMING_SNAKE_CASE_ , headers=SCREAMING_SNAKE_CASE_ , allow_redirects=SCREAMING_SNAKE_CASE_ )
_lowerCAmelCase = result.headers["Location"]
_lowerCAmelCase = requests.get(SCREAMING_SNAKE_CASE_ , allow_redirects=SCREAMING_SNAKE_CASE_ )
_lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , F'''{artifact_name}.zip''' )
with open(SCREAMING_SNAKE_CASE_ , "wb" ) as fp:
fp.write(response.content )
def __a(SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any]=None ):
'''simple docstring'''
_lowerCAmelCase = []
_lowerCAmelCase = []
_lowerCAmelCase = None
with zipfile.ZipFile(SCREAMING_SNAKE_CASE_ ) as z:
for filename in z.namelist():
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(SCREAMING_SNAKE_CASE_ ) as f:
for line in f:
_lowerCAmelCase = line.decode("UTF-8" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
_lowerCAmelCase = line[: line.index(": " )]
_lowerCAmelCase = line[line.index(": " ) + len(": " ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("FAILED " ):
# `test` is the test method that failed
_lowerCAmelCase = line[len("FAILED " ) :]
failed_tests.append(SCREAMING_SNAKE_CASE_ )
elif filename == "job_name.txt":
_lowerCAmelCase = line
if len(SCREAMING_SNAKE_CASE_ ) != len(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
F'''`errors` and `failed_tests` should have the same number of elements. Got {len(SCREAMING_SNAKE_CASE_ )} for `errors` '''
F'''and {len(SCREAMING_SNAKE_CASE_ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'''
" problem." )
_lowerCAmelCase = None
if job_name and job_links:
_lowerCAmelCase = job_links.get(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# A list with elements of the form (line of error, error, failed test)
_lowerCAmelCase = [x + [y] + [job_link] for x, y in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )]
return result
def __a(SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Tuple=None ):
'''simple docstring'''
_lowerCAmelCase = []
_lowerCAmelCase = [os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for p in os.listdir(SCREAMING_SNAKE_CASE_ ) if p.endswith(".zip" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(SCREAMING_SNAKE_CASE_ , job_links=SCREAMING_SNAKE_CASE_ ) )
return errors
def __a(SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : str=None ):
'''simple docstring'''
_lowerCAmelCase = Counter()
counter.update([x[1] for x in logs] )
_lowerCAmelCase = counter.most_common()
_lowerCAmelCase = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
_lowerCAmelCase = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}
_lowerCAmelCase = dict(sorted(r.items() , key=lambda SCREAMING_SNAKE_CASE_ : item[1]["count"] , reverse=SCREAMING_SNAKE_CASE_ ) )
return r
def __a(SCREAMING_SNAKE_CASE_ : List[str] ):
'''simple docstring'''
_lowerCAmelCase = test.split("::" )[0]
if test.startswith("tests/models/" ):
_lowerCAmelCase = test.split("/" )[2]
else:
_lowerCAmelCase = None
return test
def __a(SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Tuple=None ):
'''simple docstring'''
_lowerCAmelCase = [(x[0], x[1], get_model(x[2] )) for x in logs]
_lowerCAmelCase = [x for x in logs if x[2] is not None]
_lowerCAmelCase = {x[2] for x in logs}
_lowerCAmelCase = {}
for test in tests:
_lowerCAmelCase = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
_lowerCAmelCase = counter.most_common()
_lowerCAmelCase = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
_lowerCAmelCase = sum(error_counts.values() )
if n_errors > 0:
_lowerCAmelCase = {"count": n_errors, "errors": error_counts}
_lowerCAmelCase = dict(sorted(r.items() , key=lambda SCREAMING_SNAKE_CASE_ : item[1]["count"] , reverse=SCREAMING_SNAKE_CASE_ ) )
return r
def __a(SCREAMING_SNAKE_CASE_ : Optional[int] ):
'''simple docstring'''
_lowerCAmelCase = "| no. | error | status |"
_lowerCAmelCase = "|-:|:-|:-|"
_lowerCAmelCase = [header, sep]
for error in reduced_by_error:
_lowerCAmelCase = reduced_by_error[error]["count"]
_lowerCAmelCase = F'''| {count} | {error[:100]} | |'''
lines.append(SCREAMING_SNAKE_CASE_ )
return "\n".join(SCREAMING_SNAKE_CASE_ )
def __a(SCREAMING_SNAKE_CASE_ : Optional[int] ):
'''simple docstring'''
_lowerCAmelCase = "| model | no. of errors | major error | count |"
_lowerCAmelCase = "|-:|-:|-:|-:|"
_lowerCAmelCase = [header, sep]
for model in reduced_by_model:
_lowerCAmelCase = reduced_by_model[model]["count"]
_lowerCAmelCase , _lowerCAmelCase = list(reduced_by_model[model]["errors"].items() )[0]
_lowerCAmelCase = F'''| {model} | {count} | {error[:60]} | {_count} |'''
lines.append(SCREAMING_SNAKE_CASE_ )
return "\n".join(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
_SCREAMING_SNAKE_CASE = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
_SCREAMING_SNAKE_CASE = get_job_links(args.workflow_run_id, token=args.token)
_SCREAMING_SNAKE_CASE = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
_SCREAMING_SNAKE_CASE = k.find(" / ")
_SCREAMING_SNAKE_CASE = k[index + len(" / ") :]
_SCREAMING_SNAKE_CASE = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
_SCREAMING_SNAKE_CASE = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
_SCREAMING_SNAKE_CASE = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
_SCREAMING_SNAKE_CASE = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
_SCREAMING_SNAKE_CASE = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
_SCREAMING_SNAKE_CASE = reduce_by_error(errors)
_SCREAMING_SNAKE_CASE = reduce_by_model(errors)
_SCREAMING_SNAKE_CASE = make_github_table(reduced_by_error)
_SCREAMING_SNAKE_CASE = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
| 18 |
import requests
__lowerCAmelCase = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="
def __lowerCamelCase ( _lowerCAmelCase ) -> None:
# fetching a list of articles in json format
_UpperCAmelCase = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page["articles"] , 1 ):
print(F'''{i}.) {article["title"]}''' )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
| 684 | 0 |
"""simple docstring"""
def lowerCamelCase__ ( __snake_case, __snake_case ) -> int:
"""simple docstring"""
_UpperCamelCase = ''''''
for i in table:
res += inp[i - 1]
return res
def lowerCamelCase__ ( __snake_case ) -> str:
"""simple docstring"""
return data[1:] + data[0]
def lowerCamelCase__ ( __snake_case, __snake_case ) -> Tuple:
"""simple docstring"""
_UpperCamelCase = ''''''
for i in range(len(__snake_case ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def lowerCamelCase__ ( __snake_case, __snake_case ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = int('''0b''' + data[0] + data[-1], 2 )
_UpperCamelCase = int('''0b''' + data[1:3], 2 )
return bin(s[row][col] )[2:]
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case, __snake_case ) -> Dict:
"""simple docstring"""
_UpperCamelCase = message[:4]
_UpperCamelCase = message[4:]
_UpperCamelCase = apply_table(__snake_case, __snake_case )
_UpperCamelCase = xor(__snake_case, __snake_case )
_UpperCamelCase = apply_sbox(__snake_case, temp[:4] ) # noqa: E741
_UpperCamelCase = apply_sbox(__snake_case, temp[4:] )
_UpperCamelCase = '''0''' * (2 - len(__snake_case )) + l # noqa: E741
_UpperCamelCase = '''0''' * (2 - len(__snake_case )) + r
_UpperCamelCase = apply_table(l + r, __snake_case )
_UpperCamelCase = xor(__snake_case, __snake_case )
return temp + right
if __name__ == "__main__":
_a = input("""Enter 10 bit key: """)
_a = input("""Enter 8 bit message: """)
_a = [6, 3, 7, 4, 8, 5, 10, 9]
_a = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
_a = [2, 4, 3, 1]
_a = [2, 6, 3, 1, 4, 8, 5, 7]
_a = [4, 1, 3, 5, 7, 2, 8, 6]
_a = [4, 1, 2, 3, 2, 3, 4, 1]
_a = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
_a = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
_a = apply_table(key, paa_table)
_a = temp[:5]
_a = temp[5:]
_a = left_shift(left)
_a = left_shift(right)
_a = apply_table(left + right, pa_table)
_a = left_shift(left)
_a = left_shift(right)
_a = left_shift(left)
_a = left_shift(right)
_a = apply_table(left + right, pa_table)
# encryption
_a = apply_table(message, IP)
_a = function(expansion, sa, sa, keya, temp)
_a = temp[4:] + temp[:4]
_a = function(expansion, sa, sa, keya, temp)
_a = apply_table(temp, IP_inv)
print("""Cipher text is:""", CT)
# decryption
_a = apply_table(CT, IP)
_a = function(expansion, sa, sa, keya, temp)
_a = temp[4:] + temp[:4]
_a = function(expansion, sa, sa, keya, temp)
_a = apply_table(temp, IP_inv)
print("""Plain text after decypting is:""", PT)
| 19 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
def UpperCAmelCase__ ( self : Any ):
_UpperCAmelCase = 10
def UpperCAmelCase__ ( self : Optional[int] ):
_UpperCAmelCase = [1, 2, 3, 4]
_UpperCAmelCase = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(__UpperCamelCase , self.block_size , 0 ) , __UpperCamelCase )
def UpperCAmelCase__ ( self : List[Any] ):
_UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
_UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(__UpperCamelCase , self.block_size , 0 ) , __UpperCamelCase )
def UpperCAmelCase__ ( self : int ):
_UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
_UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(__UpperCamelCase , self.block_size , 0 ) , __UpperCamelCase )
def UpperCAmelCase__ ( self : Union[str, Any] ):
_UpperCAmelCase = "It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this."
_UpperCAmelCase , _UpperCAmelCase = process_story(__UpperCamelCase )
self.assertEqual(__UpperCamelCase , [] )
def UpperCAmelCase__ ( self : str ):
_UpperCAmelCase = ""
_UpperCAmelCase , _UpperCAmelCase = process_story(__UpperCamelCase )
self.assertEqual(__UpperCamelCase , [] )
self.assertEqual(__UpperCamelCase , [] )
def UpperCAmelCase__ ( self : Optional[Any] ):
_UpperCAmelCase = (
"It was the year of Our Lord one thousand seven hundred and "
"seventy-five\n\nSpiritual revelations were conceded to England "
"at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
)
_UpperCAmelCase , _UpperCAmelCase = process_story(__UpperCamelCase )
_UpperCAmelCase = [
"It was the year of Our Lord one thousand seven hundred and seventy-five.",
"Spiritual revelations were conceded to England at that favoured period, as at this.",
]
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = ["It was the best of times."]
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def UpperCAmelCase__ ( self : Dict ):
_UpperCAmelCase = torch.tensor([1, 2, 3, 4] )
_UpperCAmelCase = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(__UpperCamelCase , 0 ).numpy() , expected.numpy() )
def UpperCAmelCase__ ( self : Optional[Any] ):
_UpperCAmelCase = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
_UpperCAmelCase = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(__UpperCamelCase , 23 ).numpy() , expected.numpy() )
def UpperCAmelCase__ ( self : Optional[int] ):
_UpperCAmelCase = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
_UpperCAmelCase = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(__UpperCamelCase , 1 ).numpy() , expected.numpy() )
def UpperCAmelCase__ ( self : Dict ):
_UpperCAmelCase = 101
_UpperCAmelCase = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
_UpperCAmelCase = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
_UpperCAmelCase = compute_token_type_ids(__UpperCamelCase , __UpperCamelCase )
np.testing.assert_array_equal(__UpperCamelCase , __UpperCamelCase )
| 684 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.