| code (string, length 82 to 54.1k) | code_codestyle (int64, 0 to 699) | style_context (string, length 111 to 35.6k) | style_context_codestyle (int64, 0 to 699) | label (int64, 0 to 1) |
|---|---|---|---|---|
'''simple docstring'''
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
lowerCAmelCase__ = [
# (stable-diffusion, HF Diffusers)
('''time_embed.0.weight''', '''time_em... | 41 |
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument('''--dump_path''', default=None... | 1 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {}
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = 'llama'
SCREAMING_SNAKE_CASE_ ... | 42 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__snake_case = {
'''configuration_autoformer''': [
'''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AutoformerC... | 1 | 0 |
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokeniza... | 43 |
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
__snake_case = '''src/diffusers'''
# Matches is_xxx_available()
__snake_case = re.compile(r'''is\_(... | 1 | 0 |
'''simple docstring'''
import pickle
import numpy as np
from matplotlib import pyplot as plt
class UpperCAmelCase__ :
def __init__( self : int,__A : List[str],__A : Tuple,__A : str,__A : str,__A : List[str],__A : int=0.2,__A : Li... | 44 |
import string
def _A ( _lowercase ) -> None:
"""simple docstring"""
for key in range(len(string.ascii_uppercase ) ):
__UpperCamelCase = ''
for symbol in message:
if symbol in string.ascii_uppercase:
__UpperCamelC... | 1 | 0 |
from __future__ import annotations
def A ( lowercase__ : int ) -> list[int]:
UpperCamelCase__ :Union[str, Any] = [True] * limit
UpperCamelCase__ :int = False
UpperCamelCase__ :Optional[Any] = False
UpperCamelCase__ :str = True
for i in range(3 , int... | 45 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_en... | 1 | 0 |
"""simple docstring"""
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing... | 46 |
from typing import Any
class __lowerCamelCase :
def __init__( self: int,A_: Any ):
'''simple docstring'''
__UpperCamelCase = data
__UpperCamelCase = None
def __repr__( self: Any ):
... | 1 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ ... | 47 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__snake_case = {'''configuration_unispeech''': ['''UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''UniSpeech... | 1 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase__ : List[Any] = {
"tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.j... | 48 |
__snake_case = {
'''a''': '''AAAAA''',
'''b''': '''AAAAB''',
'''c''': '''AAABA''',
'''d''': '''AAABB''',
'''e''': '''AABAA''',
'''f''': '''AABAB''',
'''g''': '''AABBA''',
'''h''': '''AABBB''',
'''i''': '''ABAAA''',
'''j''': '''BBBAA''',
'''k''':... | 1 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_lowercase : Union[str, Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
... | 49 |
from collections.abc import Generator
from math import sin
def _A ( _lowercase ) -> bytes:
"""simple docstring"""
if len(_lowercase ) != 32:
raise ValueError('Input must be of length 32' )
__UpperCamelCase = B''
for i in [3, 2, 1, 0]:
... | 1 | 0 |
'''simple docstring'''
import math
UpperCamelCase : Union[str, Any] = 10
UpperCamelCase : Optional[Any] = 7
UpperCamelCase : str = BALLS_PER_COLOUR * NUM_COLOURS
def A__ ( __lowerCAmelCase : int = 20 ):
lowerCamelCase__ = math.comb... | 50 |
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
__snake_case = 0
__snake_case = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0,... | 1 | 0 |
'''simple docstring'''
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask... | 51 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TO... | 1 | 0 |
"""simple docstring"""
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def __A ( a_ :str) -> int: # p... | 52 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from acceler... | 1 | 0 |
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_confi... | 53 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class __lowerCamelCase... | 1 | 0 |
from __future__ import annotations
import os
from collections.abc import Mapping
__lowercase : List[str] =tuple[int, int]
class A :
def __init__( self: Any , _lowerCAmelCase: set[int] , _lowerCAmelCase: Mapping[EdgeT, int] ) -> None:
... | 54 |
def _A ( _lowercase = 1_00 ) -> int:
"""simple docstring"""
__UpperCamelCase = 0
__UpperCamelCase = 0
for i in range(1 , n + 1 ):
sum_of_squares += i**2
sum_of_ints += i
return sum_of_ints**2 - sum_of_squares
if __name__ == "__main_... | 1 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE :Optional[Any] = {
'configuration_lxmert': ['LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Lxme... | 55 |
def _A ( _lowercase , _lowercase ) -> int:
"""simple docstring"""
return (pointa[0] - pointa[0]) ** 2 + (pointa[1] - pointa[1]) ** 2
def _A ( _lowercase , _lowercase=0 ) -> Dict:
"""simple docstring"""
return sorted(_lowercase , k... | 1 | 0 |
'''simple docstring'''
from collections import deque
def _a (lowercase__ : Union[str, Any] ) -> int:
"""simple docstring"""
__snake_case = len(lowercase__ )
__snake_case = deque()
__snake_case = [False for _ in range... | 56 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''bert-base-uncased''': '''htt... | 1 | 0 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
A_ : str = logging.get_logger(__name__)
A_ : Tuple = {
'post_... | 57 |
def _A ( _lowercase ) -> int:
"""simple docstring"""
assert column_title.isupper()
__UpperCamelCase = 0
__UpperCamelCase = len(_lowercase ) - 1
__UpperCamelCase = 0
while index >= 0:
__UpperCamelCase = (ord(column_title[index] ) - 64) * pow(... | 1 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attentio... | 58 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
... | 1 | 0 |
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from dataset... | 59 |
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner impo... | 1 | 0 |
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say tha... | 60 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distrib... | 1 | 0 |
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __lowerCamelCase ( ... | 61 |
import pytest
import datasets
# Import fixture modules as plugins
__snake_case = ['''tests.fixtures.files''', '''tests.fixtures.hub''', '''tests.fixtures.fsspec''']
def _A ( _lowercase , _lowercase ) -> Tuple:
"""simple docstring"""
for item in ... | 1 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""naver-clova-ix/donut-base""": """https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json""",
# See all Donut models at http... | 62 |
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xfo... | 1 | 0 |
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_conf... | 63 |
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument('''--dump_path''', default=None... | 1 | 0 |
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class _lowerCamelCase ( UpperCamelCase_ ):
__a = "MCTCTFeatureExtractor"
__a = "AutoTokenizer"
def __init__( self , lowerCAmelCase , lowerCAmelCase ) -> Tuple... | 64 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__snake_case = {
'''configuration_autoformer''': [
'''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AutoformerC... | 1 | 0 |
"""simple docstring"""
from __future__ import annotations
def lowerCAmelCase ( __UpperCamelCase ):
'''simple docstring'''
return [ord(__UpperCamelCase ) - 96 for elem in plain]
def lowerCAmelCase ( __UpperCamelCase ):
'''simple docstring'''
return "".... | 65 |
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
__snake_case = '''src/diffusers'''
# Matches is_xxx_available()
__snake_case = re.compile(r'''is\_(... | 1 | 0 |
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> int:
_lowercase : int = args.pruning_method
_lowercase : Optional[in... | 66 |
import string
def _A ( _lowercase ) -> None:
"""simple docstring"""
for key in range(len(string.ascii_uppercase ) ):
__UpperCamelCase = ''
for symbol in message:
if symbol in string.ascii_uppercase:
__UpperCamelC... | 1 | 0 |
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
... | 67 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_en... | 1 | 0 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffus... | 68 |
from typing import Any
class __lowerCamelCase :
def __init__( self: int,A_: Any ):
'''simple docstring'''
__UpperCamelCase = data
__UpperCamelCase = None
def __repr__( self: Any ):
... | 1 | 0 |
'''simple docstring'''
def __UpperCAmelCase ( _UpperCAmelCase : str ) -> list:
__snake_case = [0] * len(_UpperCAmelCase )
for i in range(1 , len(_UpperCAmelCase ) ):
# use last results for better performance - dynamic programming
__snake_cas... | 69 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__snake_case = {'''configuration_unispeech''': ['''UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''UniSpeech... | 1 | 0 |
import argparse
import json
import subprocess
def _SCREAMING_SNAKE_CASE ( lowercase : Dict , lowercase : List[str] ):
'''simple docstring'''
lowerCamelCase_ = []
lowerCamelCase_ = (
f"""curl -H \"Accept:... | 70 |
__snake_case = {
'''a''': '''AAAAA''',
'''b''': '''AAAAB''',
'''c''': '''AAABA''',
'''d''': '''AAABB''',
'''e''': '''AABAA''',
'''f''': '''AABAB''',
'''g''': '''AABBA''',
'''h''': '''AABBB''',
'''i''': '''ABAAA''',
'''j''': '''BBBAA''',
'''k''':... | 1 | 0 |
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {
"""t5-small""": """https:... | 71 |
from collections.abc import Generator
from math import sin
def _A ( _lowercase ) -> bytes:
"""simple docstring"""
if len(_lowercase ) != 32:
raise ValueError('Input must be of length 32' )
__UpperCamelCase = B''
for i in [3, 2, 1, 0]:
... | 1 | 0 |
'''simple docstring'''
from __future__ import annotations
def UpperCamelCase ( lowercase_ : list[float] ) -> float:
'''simple docstring'''
lowercase =0.0_0
lowercase =0
for resistor in resistors:
if resistor <= 0:
lowercase =f'Resistor at index {i... | 72 |
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
__snake_case = 0
__snake_case = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0,... | 1 | 0 |
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class _snake_case ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
SCREA... | 73 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TO... | 1 | 0 |
from __future__ import annotations
def a__ ( snake_case , snake_case ):
"""simple docstring"""
if nth_term == "":
return [""]
__SCREAMING_SNAKE_CASE : int = int(snake_case )
__SCREAMING_SNAKE_CASE : Tuple = int(snake_case )
__SCREAMING... | 74 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from acceler... | 1 | 0 |
'''simple docstring'''
import math
def a__ ( ) -> None:
UpperCAmelCase__ : List[str] = input('''Enter message: ''' )
UpperCAmelCase__ : Any = int(input(F"""Enter key [2-{len(lowerCAmelCase__ ) - 1}]: """ ) )
UpperCAmelCase__ ... | 75 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class __lowerCamelCase... | 1 | 0 |
"""simple docstring"""
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
print('''\nThe shortest path matrix using Floyd Warshall algorithm\n''' )
for i in range(__UpperCamelCase ):
for j in range(__UpperCamelCase ):
if dist[i][j] != float('''inf''' ):... | 76 |
def _A ( _lowercase = 1_00 ) -> int:
"""simple docstring"""
__UpperCamelCase = 0
__UpperCamelCase = 0
for i in range(1 , n + 1 ):
sum_of_squares += i**2
sum_of_ints += i
return sum_of_ints**2 - sum_of_squares
if __name__ == "__main_... | 1 | 0 |
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def _UpperCamelCase ( UpperCamelCase ) -> int:
"""simple docstring"""
def wrapper(*UpperCamelCase , ... | 77 |
def _A ( _lowercase , _lowercase ) -> int:
"""simple docstring"""
return (pointa[0] - pointa[0]) ** 2 + (pointa[1] - pointa[1]) ** 2
def _A ( _lowercase , _lowercase=0 ) -> Dict:
"""simple docstring"""
return sorted(_lowercase , k... | 1 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProc... | 78 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''bert-base-uncased''': '''htt... | 1 | 0 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
SCREAMING_SNAKE_CASE__ : List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Any = {
"""Visual-Attention-Network/van-base""": (
"""https://huggingface.co/Visu... | 79 |
def _A ( _lowercase ) -> int:
"""simple docstring"""
assert column_title.isupper()
__UpperCamelCase = 0
__UpperCamelCase = len(_lowercase ) - 1
__UpperCamelCase = 0
while index >= 0:
__UpperCamelCase = (ord(column_title[index] ) - 64) * pow(... | 1 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
... | 80 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
... | 1 | 0 |
from __future__ import annotations
_snake_case : Any = "Muhammad Umer Farooq"
_snake_case : List[str] = "MIT"
_snake_case : Tuple = "1.0.0"
_snake_case : Any = "Muhammad Umer Farooq"
_snake_case : Optional[Any] = "contact... | 81 |
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner impo... | 1 | 0 |
"""simple docstring"""
from __future__ import annotations
def a__ ( lowerCAmelCase__ ):
return len(set(lowerCAmelCase__ ) ) == len(lowerCAmelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 82 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distrib... | 1 | 0 |
"""simple docstring"""
import math
import unittest
def snake_case_ ( A_ : int ):
'''simple docstring'''
assert isinstance(A_, A_ ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 ... | 83 |
import pytest
import datasets
# Import fixture modules as plugins
__snake_case = ['''tests.fixtures.files''', '''tests.fixtures.hub''', '''tests.fixtures.fsspec''']
def _A ( _lowercase , _lowercase ) -> Tuple:
"""simple docstring"""
for item in ... | 1 | 0 |
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) or number < 0:
raise ValueError('Input must be a non-negative integer' )
lowercase = 0
while number:
# This way we arrive at next set bit (next 1) instead of... | 84 |
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xfo... | 1 | 0 |
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import o... | 85 |
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument('''--dump_path''', default=None... | 1 | 0 |
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.... | 86 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__snake_case = {
'''configuration_autoformer''': [
'''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AutoformerC... | 1 | 0 |
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = (DDPMScheduler,)
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ... | 87 |
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
__snake_case = '''src/diffusers'''
# Matches is_xxx_available()
__snake_case = re.compile(r'''is\_(... | 1 | 0 |
"""simple docstring"""
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
... | 88 |
import string
def _A ( _lowercase ) -> None:
"""simple docstring"""
for key in range(len(string.ascii_uppercase ) ):
__UpperCamelCase = ''
for symbol in message:
if symbol in string.ascii_uppercase:
__UpperCamelC... | 1 | 0 |
import re
def UpperCamelCase_( lowerCamelCase_ ) -> bool:
_lowercase : List[Any] = re.compile(
R'^(?:0|94|\+94|0{2}94)' R'7(0|1|2|4|5|6|7|8)' R'(-| |)' R'\d{7}$' )
return bool(re.search(lowerCamelCase_ , lowerCamelCase_ ) )
if __name__ == "__... | 89 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_en... | 1 | 0 |
'''simple docstring'''
import numpy
# List of input, output pairs
__UpperCAmelCase = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
__UpperCAmelCase = (((515, 22, 13), 555), ((61, 35, 49), 15... | 90 |
from typing import Any
class __lowerCamelCase :
def __init__( self: int,A_: Any ):
'''simple docstring'''
__UpperCamelCase = data
__UpperCamelCase = None
def __repr__( self: Any ):
... | 1 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_lowercase = {
'''configuration_groupvit''': [
'''GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''GroupViTConfig''',
'''Gr... | 91 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__snake_case = {'''configuration_unispeech''': ['''UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''UniSpeech... | 1 | 0 |
'''simple docstring'''
from cva import destroyAllWindows, imread, imshow, waitKey
def _lowerCAmelCase ( __magic_name__ : Any ) -> Optional[int]:
# getting number of pixels in the image
lowercase , lowercase : List[Any] =img.shape[0], img.shape[1]
# c... | 92 |
__snake_case = {
'''a''': '''AAAAA''',
'''b''': '''AAAAB''',
'''c''': '''AAABA''',
'''d''': '''AAABB''',
'''e''': '''AABAA''',
'''f''': '''AABAB''',
'''g''': '''AABBA''',
'''h''': '''AABBB''',
'''i''': '''ABAAA''',
'''j''': '''BBBAA''',
'''k''':... | 1 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
__magic_name__ :int
__magic_name__ :int
class _... | 93 |
from collections.abc import Generator
from math import sin
def _A ( _lowercase ) -> bytes:
"""simple docstring"""
if len(_lowercase ) != 32:
raise ValueError('Input must be of length 32' )
__UpperCamelCase = B''
for i in [3, 2, 1, 0]:
... | 1 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILIm... | 94 |
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
__snake_case = 0
__snake_case = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0,... | 1 | 0 |
"""simple docstring"""
class UpperCamelCase_ (__A ):
pass
class UpperCamelCase_ (__A ):
pass
class UpperCamelCase_ :
def __init__( self : List[str] ) -> Tuple:
UpperCAmelCase_ : int = [
[],
[],
[],
]
def _SCR... | 95 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TO... | 1 | 0 |
"""simple docstring"""
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/c... | 96 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from acceler... | 1 | 0 |
from __future__ import annotations
def a ( snake_case__: list[list[int]] ):
'''simple docstring'''
# preprocessing the first row
for i in range(1 , len(matrix[0] ) ):
matrix[0][i] += matrix[0][i - 1]
# preprocessing the first column
for i in rang... | 97 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class __lowerCamelCase... | 1 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase__ : Tuple = {'configurat... | 98 |
def _A ( _lowercase = 1_00 ) -> int:
"""simple docstring"""
__UpperCamelCase = 0
__UpperCamelCase = 0
for i in range(1 , n + 1 ):
sum_of_squares += i**2
sum_of_ints += i
return sum_of_ints**2 - sum_of_squares
if __name__ == "__main_... | 1 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE = {
'configuration_upernet': ['UperNetConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except Opt... | 99 |
def _A ( _lowercase , _lowercase ) -> int:
"""simple docstring"""
return (pointa[0] - pointa[0]) ** 2 + (pointa[1] - pointa[1]) ** 2
def _A ( _lowercase , _lowercase=0 ) -> Dict:
"""simple docstring"""
return sorted(_lowercase , k... | 1 | 0 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils im... | 100 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''bert-base-uncased''': '''htt... | 1 | 0 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
... | 101 |
def _A ( _lowercase ) -> int:
"""simple docstring"""
assert column_title.isupper()
__UpperCamelCase = 0
__UpperCamelCase = len(_lowercase ) - 1
__UpperCamelCase = 0
while index >= 0:
__UpperCamelCase = (ord(column_title[index] ) - 64) * pow(... | 1 | 0 |
"""simple docstring"""
from __future__ import annotations
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False , ):
UpperCamelCase : int = cipher_alpha... | 102 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
... | 1 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch... | 103 |
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner impo... | 1 | 0 |
"""simple docstring"""
class UpperCamelCase__ ( _lowerCAmelCase ):
"""simple docstring"""
pass
class UpperCamelCase__ ( _lowerCAmelCase ):
"""simple docstring"""
pass
class UpperCamelCase__ :
"""simp... | 104 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distrib... | 1 | 0 |
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ... | 105 |
import pytest
import datasets
# Import fixture modules as plugins
__snake_case = ['''tests.fixtures.files''', '''tests.fixtures.hub''', '''tests.fixtures.fsspec''']
def _A ( _lowercase , _lowercase ) -> Tuple:
"""simple docstring"""
for item in ... | 1 | 0 |
__snake_case :Optional[Any] ={
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
def lowerCamelCase_ ( lowerCAmelCase__ : dict , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : ... | 106 |
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xfo... | 1 | 0 |
'''simple docstring'''
class lowercase_ :
"""simple docstring"""
def __init__( self : Optional[int] ) -> None:
_A = {} # Mapping from char to TrieNode
_A = False
def __UpperCAmelCase ( self : List[str], UpperCamelCase__ : ... | 107 |
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument('''--dump_path''', default=None... | 1 | 0 |
def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case ) -> float:
if mass < 0:
raise ValueError("""The mass of a body cannot be negative""" )
return 0.5 * mass * abs(__snake_case ) * abs(__snake_case )
if __name__ == "__main__":
import doctest
docte... | 108 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__snake_case = {
'''configuration_autoformer''': [
'''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AutoformerC... | 1 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __a ( _snake_case, unittest.TestCase... | 109 |
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
__snake_case = '''src/diffusers'''
# Matches is_xxx_available()
__snake_case = re.compile(r'''is\_(... | 1 | 0 |
"""simple docstring"""
import unittest
from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bsa_available():
from transformers import MarkupLMFeatureExtractor
class a ... | 110 |
import string
def _A ( _lowercase ) -> None:
"""simple docstring"""
for key in range(len(string.ascii_uppercase ) ):
__UpperCamelCase = ''
for symbol in message:
if symbol in string.ascii_uppercase:
__UpperCamelC... | 1 | 0 |
from statistics import mean, stdev
def lowerCamelCase_ ( __UpperCamelCase , __UpperCamelCase = 3 ):
A_ = min(_lowercase )
A_ = max(_lowercase )
# normalize data
return [round((x - x_min) / (x_max - x_min) , _lowercase ) for x in data]
def ... | 141 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_en... | 1 | 0 |
'''simple docstring'''
def lowerCAmelCase_ ( __A : List[str] = "The quick brown fox jumps over the lazy dog" , ):
'''simple docstring'''
snake_case: Union[str, Any] = set()
# Replace all the whitespace in our sentence
snake_case: Dict ... | 329 |
from typing import Any
class __lowerCamelCase :
def __init__( self: int,A_: Any ):
'''simple docstring'''
__UpperCamelCase = data
__UpperCamelCase = None
def __repr__( self: Any ):
... | 1 | 0 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import Regr... | 319 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__snake_case = {'''configuration_unispeech''': ['''UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''UniSpeech... | 1 | 0 |
'''simple docstring'''
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class a... | 538 |
__snake_case = {
'''a''': '''AAAAA''',
'''b''': '''AAAAB''',
'''c''': '''AAABA''',
'''d''': '''AAABB''',
'''e''': '''AABAA''',
'''f''': '''AABAB''',
'''g''': '''AABBA''',
'''h''': '''AABBB''',
'''i''': '''ABAAA''',
'''j''': '''BBBAA''',
'''k''':... | 1 | 0 |
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_... | 133 |
from collections.abc import Generator
from math import sin
def _A ( _lowercase ) -> bytes:
"""simple docstring"""
if len(_lowercase ) != 32:
raise ValueError('Input must be of length 32' )
__UpperCamelCase = B''
for i in [3, 2, 1, 0]:
... | 1 | 0 |
"""simple docstring"""
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: t... | 293 |
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
__snake_case = 0
__snake_case = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0,... | 1 | 0 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import To... | 67 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TO... | 1 | 0 |
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = '''▁'''
_snake_case = {'''vocab_file''... | 340 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from acceler... | 1 | 0 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, ... | 297 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class __lowerCamelCase... | 1 | 0 |
'''simple docstring'''
from functools import reduce
__A = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950... | 325 |
def _A ( _lowercase = 1_00 ) -> int:
"""simple docstring"""
__UpperCamelCase = 0
__UpperCamelCase = 0
for i in range(1 , n + 1 ):
sum_of_squares += i**2
sum_of_ints += i
return sum_of_ints**2 - sum_of_squares
if __name__ == "__main_... | 1 | 0 |
"""simple docstring"""
def lowerCamelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> int:
'''simple docstring'''
def count_of_possible_combinations(__lowerCAmelCase ) -> int:
if target < 0:
return 0
... | 530 |
def _A ( _lowercase , _lowercase ) -> int:
"""simple docstring"""
return (pointa[0] - pointa[0]) ** 2 + (pointa[1] - pointa[1]) ** 2
def _A ( _lowercase , _lowercase=0 ) -> Dict:
"""simple docstring"""
return sorted(_lowercase , k... | 1 | 0 |
import string
def lowerCamelCase_ ( __UpperCamelCase ):
for key in range(len(string.ascii_uppercase ) ):
A_ = ''''''
for symbol in message:
if symbol in string.ascii_uppercase:
A_ = string.ascii_uppercase.find(_lowercase... | 141 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''bert-base-uncased''': '''htt... | 1 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"junnyu/roformer_chin... | 329 |
def _A ( _lowercase ) -> int:
"""simple docstring"""
assert column_title.isupper()
__UpperCamelCase = 0
__UpperCamelCase = len(_lowercase ) - 1
__UpperCamelCase = 0
while index >= 0:
__UpperCamelCase = (ord(column_title[index] ) - 64) * pow(... | 1 | 0 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__a = '''\
@misc{wu2016googles,
title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster ... | 319 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
... | 1 | 0 |
'''simple docstring'''
def a ( UpperCamelCase_ : Union[str, Any] = 4000000 ) -> int:
snake_case__ =[]
snake_case__ , snake_case__ =0, 1
while b <= n:
if b % 2 == 0:
even_fibs.append(_lowercase )
snake_case__ , snake_case__ =b, a + b
return sum(_... | 538 |
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner impo... | 1 | 0 |
'''simple docstring'''
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
__snake_case =argparse.ArgumentParser()
parser.add_argument(... | 133 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distrib... | 1 | 0 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
Wav... | 293 |
import pytest
import datasets
# Import fixture modules as plugins
__snake_case = ['''tests.fixtures.files''', '''tests.fixtures.hub''', '''tests.fixtures.fsspec''']
def _A ( _lowercase , _lowercase ) -> Tuple:
"""simple docstring"""
for item in ... | 1 | 0 |
import pickle
import numpy as np
from matplotlib import pyplot as plt
class A_ :
"""simple docstring"""
def __init__( self : str ,__A : Optional[Any] ,__A : Any ,__A : Optional[Any] ,__A ... | 67 |
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xfo... | 1 | 0 |
def lowercase_( SCREAMING_SNAKE_CASE_ = 1000000 ):
'''simple docstring'''
lowerCamelCase : Union[str, Any] = set(range(3 , _lowercase , 2 ) )
primes.add(2 )
for p in range(3 , _lowercase , 2 ):
if p not in primes:
continue
... | 340 |
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument('''--dump_path''', default=None... | 1 | 0 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parrameters
__lowerCamelCase : Union[str, Any] = (720, 1280) # Height, Width
__lowerCamelCase : Optional[Any] = (0.4, 0.6) # if height or width lower than this scale, drop it.
__lowe... | 297 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__snake_case = {
'''configuration_autoformer''': [
'''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AutoformerC... | 1 | 0 |
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {"vocab_file": "vocab.json"}
__A = {
"vocab_file": {
"mgp-str"... | 325 |
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
__snake_case = '''src/diffusers'''
# Matches is_xxx_available()
__snake_case = re.compile(r'''is\_(... | 1 | 0 |
"""simple docstring"""
def lowerCamelCase_ ( __lowerCAmelCase ) -> bool:
'''simple docstring'''
return credit_card_number.startswith(("34", "35", "37", "4", "5", "6") )
def lowerCamelCase_ ( __lowerCAmelCase ) -> bool:
'''simple docst... | 530 |
import string
def _A ( _lowercase ) -> None:
"""simple docstring"""
for key in range(len(string.ascii_uppercase ) ):
__UpperCamelCase = ''
for symbol in message:
if symbol in string.ascii_uppercase:
__UpperCamelC... | 1 | 0 |
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.co... | 141 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_en... | 1 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__UpperCAmelCase = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpe... | 329 |
from typing import Any
class __lowerCamelCase :
def __init__( self: int,A_: Any ):
'''simple docstring'''
__UpperCamelCase = data
__UpperCamelCase = None
def __repr__( self: Any ):
... | 1 | 0 |
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAG... | 319 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__snake_case = {'''configuration_unispeech''': ['''UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''UniSpeech... | 1 | 0 |
'''simple docstring'''
def a ( UpperCamelCase_ : Any = 1000 ) -> int:
snake_case__ =3
snake_case__ =0
while a < n:
if a % 3 == 0 or a % 5 == 0:
result += a
elif a % 15 == 0:
result -= a
a += 1
return result
if __name__ == "__main__":
print(f"""{... | 538 |
__snake_case = {
'''a''': '''AAAAA''',
'''b''': '''AAAAB''',
'''c''': '''AAABA''',
'''d''': '''AAABB''',
'''e''': '''AABAA''',
'''f''': '''AABAB''',
'''g''': '''AABBA''',
'''h''': '''AABBB''',
'''i''': '''ABAAA''',
'''j''': '''BBBAA''',
'''k''':... | 1 | 0 |
'''simple docstring'''
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImage... | 133 |
from collections.abc import Generator
from math import sin
def _A ( _lowercase ) -> bytes:
"""simple docstring"""
if len(_lowercase ) != 32:
raise ValueError('Input must be of length 32' )
__UpperCamelCase = B''
for i in [3, 2, 1, 0]:
... | 1 | 0 |
"""simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from toke... | 293 |
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
__snake_case = 0
__snake_case = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0,... | 1 | 0 |
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Optional[Any] , snake_case__ :Optional[Any] , snake_case__ :Dict ) -> float:
_lowercase = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
# formula for sum of series
return total
def ... | 67 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TO... | 1 | 0 |
from __future__ import annotations
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
lowerCamelCase : int = []
lowerCamelCase : Dict = []
lowerCamelCase : str = 0
lowerCamel... | 340 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from acceler... | 1 | 0 |
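The rows above are truncated previews from a dataset-viewer rendering of the table whose schema is given in the header: a `code` string, a `style_context` string, two integer style ids in 0 to 699, and a binary `label`. As a minimal sketch of how rows with this schema could be inspected, assuming the table is published as a Hugging Face dataset (the dataset id does not appear on this page, so `DATASET_ID` below is a placeholder):

```python
# Minimal sketch: iterate rows of a dataset with the schema shown above.
# "DATASET_ID" is a placeholder -- the real dataset id is not given on this page.
from datasets import load_dataset

ds = load_dataset("DATASET_ID", split="train")  # hypothetical id

for row in ds.select(range(3)):
    # Each row pairs a code string with a style_context string, two integer
    # style ids (0-699), and a binary label (0 or 1).
    print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
    print(row["code"][:80], "...")  # code strings run from 82 to ~54.1k chars
```

Since the page does not document what `label` encodes, the sketch only prints the raw fields without interpreting them.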