| input (string, lengths 33–5k) | output (string, lengths 32–5k) |
|---|---|
import os
from typing import Any, Callable, List, Optional, Tuple
import torch.utils.data as data
from ..utils import _log_api_usage_once
class VisionDataset(data.Dataset):
"""
Base Class For making datasets which are compatible with torchvision.
It is necessary to override the ``__getitem__`` and ``__l... |
import os
from typing import Any, Callable, List, Optional, Tuple
import torch.utils.data as data
from ..utils import _log_api_usage_once
class VisionDataset(data.Dataset):
"""
Base Class For making datasets which are compatible with torchvision.
It is necessary to override the ``__getitem__`` and ``__l... |
"""Chains and utils related to evaluating question answering functionality."""
from langchain.evaluation.qa.eval_chain import (
ContextQAEvalChain,
CotQAEvalChain,
QAEvalChain,
)
from langchain.evaluation.qa.generate_chain import QAGenerateChain
__all__ = ["ContextQAEvalChain", "CotQAEvalChain", "QAEvalCh... | """Chains and utils related to evaluating question answering functionality."""
from langchain.evaluation.qa.eval_chain import (
ContextQAEvalChain,
CotQAEvalChain,
QAEvalChain,
)
from langchain.evaluation.qa.generate_chain import QAGenerateChain
__all__ = ["QAEvalChain", "QAGenerateChain", "ContextQAEvalC... |
# Copyright (c) OpenMMLab. All rights reserved.
import math
import torch
from torch.utils.data import DistributedSampler as _DistributedSampler
from mmdet.core.utils import sync_random_seed
from mmdet.utils import get_device
class DistributedSampler(_DistributedSampler):
def __init__(self,
                 dat... |
# Copyright (c) OpenMMLab. All rights reserved.
import math
import torch
from torch.utils.data import DistributedSampler as _DistributedSampler
from mmdet.core.utils import sync_random_seed
class DistributedSampler(_DistributedSampler):
def __init__(self,
dataset,
num_replicas... |
from .DenoisingAutoEncoderDataset import DenoisingAutoEncoderDataset
from .NoDuplicatesDataLoader import NoDuplicatesDataLoader
from .ParallelSentencesDataset import ParallelSentencesDataset
from .SentenceLabelDataset import SentenceLabelDataset
from .SentencesDataset import SentencesDataset
__all__ = [
"Denoising... | from .DenoisingAutoEncoderDataset import DenoisingAutoEncoderDataset
from .NoDuplicatesDataLoader import NoDuplicatesDataLoader
from .ParallelSentencesDataset import ParallelSentencesDataset
from .SentencesDataset import SentencesDataset
from .SentenceLabelDataset import SentenceLabelDataset
__all__ = [
"Denoising... |
from langchain_core.prompts.prompt import PromptTemplate
_template = """Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language.
Chat History:
{chat_history}
Follow Up Input: {question}
Standalone question:""" # noqa: E501
CONDE... |
# flake8: noqa
from langchain_core.prompts.prompt import PromptTemplate
_template = """Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language.
Chat History:
{chat_history}
Follow Up Input: {question}
Standalone question:"""
COND... |
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Iterable, List, Dict
import numpy as np
from jina import DocumentArray, Executor, requests
from jina.logging.logger import JinaLogger
from jina_commons.batching import get_docs_batch_generator... |
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Iterable, List, Dict
import numpy as np
from jina import DocumentArray, Executor, requests
from jina.logging.logger import JinaLogger
from jina_commons.batching import get_docs_batch_generator... |
from torchvision.transforms import InterpolationMode # usort: skip
from ._utils import is_simple_tensor, register_kernel # usort: skip
from ._meta import (
clamp_bounding_boxes,
convert_format_bounding_boxes,
get_dimensions_image_tensor,
get_dimensions_image_pil,
get_dimensions_video,
    get_di... |
from torchvision.transforms import InterpolationMode # usort: skip
from ._utils import is_simple_tensor # usort: skip
from ._meta import (
clamp_bounding_boxes,
convert_format_bounding_boxes,
get_dimensions_image_tensor,
get_dimensions_image_pil,
get_dimensions,
get_num_frames_video,
get... |
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config, load_dataset_builder
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset ... |
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config, load_dataset_builder
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset ... |
_base_ = './gfl_r50_fpn_1x_coco.py'
max_epochs = 24
# learning policy
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
    ... |
_base_ = './gfl_r50_fpn_1x_coco.py'
max_epochs = 24
# learning policy
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
... |
import datetime
from typing import List
import prisma.enums
import pydantic
from backend.server.model import Pagination
class MyAgent(pydantic.BaseModel):
agent_id: str
agent_version: int
agent_name: str
agent_image: str | None = None
description: str
last_edited: datetime.datetime
class M... |
import datetime
from typing import List
import prisma.enums
import pydantic
class Pagination(pydantic.BaseModel):
total_items: int = pydantic.Field(
description="Total number of items.", examples=[42]
)
total_pages: int = pydantic.Field(
description="Total number of pages.", examples=[97]... |
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.backend.common.global_state import clear_session as clear_session
from keras.src.backend.common.keras_tensor import (
is_keras_tensor as is_keras_tensor,
)
from keras.src.backend.... |
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api.utils import bounding_boxes
from keras.api.utils import legacy
from keras.src.backend.common.global_state import clear_session
from keras.src.backend.common.keras_tensor import is_ker... |
import asyncio
import random
import pytest
from jina import Document, DocumentArray
from jina.helper import Namespace, random_identity
from jina.serve.stream import RequestStreamer
from jina.types.request.data import DataRequest
class RequestStreamerWrapper:
    def __init__(self, num_requests, prefetch, iterate_sy... |
import asyncio
import random
import pytest
from jina import Document, DocumentArray
from jina.helper import Namespace, random_identity
from jina.serve.stream import RequestStreamer
from jina.types.request.data import DataRequest
class RequestStreamerWrapper:
def __init__(self, num_requests, prefetch):
s... |
import pathlib
from argparse import ArgumentParser
import sentencepiece as spm
from lightning import ConformerRNNTModule
from pytorch_lightning import seed_everything, Trainer
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
from pytorch_lightning.strategies import DDPStrategy
from transfo... |
import pathlib
from argparse import ArgumentParser
import sentencepiece as spm
from lightning import ConformerRNNTModule
from pytorch_lightning import seed_everything, Trainer
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
from pytorch_lightning.plugins import DDPPlugin
from transforms i... |
_base_ = './retinanet_r50_fpn_1x_coco_v1.py'
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
# use caffe img_norm
mean=[102.9801, 115.9465, 122.7717],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=32),
backbone=dict(
        norm_cfg=di... |
_base_ = './retinanet_r50_fpn_1x_coco_v1.py'
model = dict(
backbone=dict(
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron/resnet50_caffe')))
# use caffe img_norm
img_norm_c... |
import logging
from typing import Annotated
from autogpt_libs.auth.middleware import APIKeyValidator
from fastapi import APIRouter, Body, Depends, HTTPException, Query
from fastapi.responses import JSONResponse
from backend.data.user import (
get_user_by_email,
set_user_email_verification,
    unsubscribe_use... |
import logging
from typing import Annotated
from autogpt_libs.auth.middleware import APIKeyValidator
from fastapi import APIRouter, Body, Depends, HTTPException, Query
from fastapi.responses import JSONResponse
from backend.data.user import (
get_user_by_email,
set_user_email_verification,
unsubscribe_use... |
import os
from pathlib import Path
import pytest
from jina import Flow
from jina.excepts import RuntimeFailToStart
from jina.orchestrate.deployments import Deployment
from jina.parsers import set_deployment_parser
from jina.serve.executors import BaseExecutor
cur_dir = os.path.dirname(os.path.abspath(__file__))
de... |
import os
from pathlib import Path
import pytest
from jina import Flow
from jina.excepts import RuntimeFailToStart
from jina.orchestrate.deployments import Deployment
from jina.parsers import set_deployment_parser
from jina.serve.executors import BaseExecutor
cur_dir = os.path.dirname(os.path.abspath(__file__))
de... |
import pathlib
from typing import Any, Dict, List, Union
import torch
from torchdata.datapipes.iter import CSVDictParser, IterDataPipe, Mapper
from torchvision.prototype.datapoints import Image, Label
from torchvision.prototype.datasets.utils import Dataset, KaggleDownloadResource, OnlineResource
from torchvision.prot... |
import pathlib
from typing import Any, Dict, List, Union
import torch
from torchdata.datapipes.iter import CSVDictParser, IterDataPipe, Mapper
from torchvision.prototype.datasets.utils import Dataset, KaggleDownloadResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_... |
import math
from keras.src import backend
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
@keras_export("keras.layers.Flatten")
class Flatte... |
import math
from keras.src import backend
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
@keras_export("keras.layers.Flatten")
class Flatte... |
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Dict, Iterable, Optional
import numpy as np
import paddlehub as hub
from jina import DocumentArray, Executor, requests
from jina_commons.batching import get_docs_batch_generator
class TextPa... |
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Dict, Iterable, Optional
import numpy as np
import paddlehub as hub
from jina import DocumentArray, Executor, requests
from jina_commons.batching import get_docs_batch_generator
class TextPa... |
_base_ = './faster-rcnn_regnetx-3.2GF_fpn_1x_coco.py'
# learning policy
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
    ... |
_base_ = './faster_rcnn_regnetx-3.2GF_fpn_1x_coco.py'
# learning policy
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
... |
"""
Separation of concerns:
DataAdapter:
- x, y
- sample_weight
- class_weight
- shuffle
- batch_size
- steps, as it relates to batch_size for array data
EpochIterator:
- whether to yield numpy or tf data
- steps
- most argument validation
Trainer:
- steps_per_execution
... |
"""
Separation of concerns:
DataAdapter:
- x, y
- sample_weight
- class_weight
- shuffle
- batch_size
- steps, as it relates to batch_size for array data
EpochIterator:
- whether to yield numpy or tf data
- steps
- most argument validation
Trainer:
- steps_per_execution
... |
# Copyright (c) OpenMMLab. All rights reserved.
from .ade20k import ADE20KPanopticDataset
from .base_det_dataset import BaseDetDataset
from .base_video_dataset import BaseVideoDataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .coco_panoptic import CocoPanopticDataset
from .crowdhuman ... |
# Copyright (c) OpenMMLab. All rights reserved.
from .base_det_dataset import BaseDetDataset
from .base_video_dataset import BaseVideoDataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .coco_panoptic import CocoPanopticDataset
from .crowdhuman import CrowdHumanDataset
from .dataset_wra... |
from __future__ import annotations
from .CrossEncoder import CrossEncoder
from .model_card import CrossEncoderModelCardData
from .trainer import CrossEncoderTrainer
from .training_args import CrossEncoderTrainingArguments
__all__ = [
"CrossEncoder",
"CrossEncoderTrainer",
"CrossEncoderTrainingArguments",
    ... |
from __future__ import annotations
from .CrossEncoder import CrossEncoder
__all__ = ["CrossEncoder"]
|
from __future__ import annotations
from typing import Any, Dict, Optional
from docarray import BaseDoc, DocList
from docarray.typing import AnyEmbedding, AnyTensor
class LegacyDocument(BaseDoc):
"""
This Document is the LegacyDocument. It follows the same schema as in DocArray <=0.21.
    It can be useful t... |
from __future__ import annotations
from typing import Any, Dict, Optional
from docarray import BaseDoc, DocList
from docarray.typing import AnyEmbedding, AnyTensor
class LegacyDocument(BaseDoc):
"""
This Document is the LegacyDocument. It follows the same schema as in DocArray v1.
It can be useful to st... |
"""Utils for LLM Compiler."""
import ast
import re
from typing import Any, Dict, List, Sequence, Tuple, Union
from llama_index.core.tools.function_tool import FunctionTool
from llama_index.core.tools.types import BaseTool, adapt_to_async_tool
from .schema import (
LLMCompilerParseResult,
LLMCompilerTask,
)
... |
"""Utils for LLM Compiler."""
import ast
import re
from typing import Any, Dict, List, Sequence, Tuple, Union
from llama_index.core.tools.function_tool import FunctionTool
from llama_index.core.tools.types import BaseTool, adapt_to_async_tool
from .schema import (
LLMCompilerParseResult,
LLMCompilerTask,
)
#... |
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_document import BaseDocument
from docarray.typing import AnyEmbedding, AnyTensor, PointCloud3DUrl
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils.misc import is_torch_available
torch_ava... |
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_document import BaseDocument
from docarray.typing import AnyEmbedding, AnyTensor, PointCloud3DUrl
from docarray.typing.tensor.abstract_tensor import AbstractTensor
try:
import torch
torch_available = True
except Imp... |
import importlib
import os
import re
from pathlib import Path
from typing import TYPE_CHECKING, TypeVar
if TYPE_CHECKING:
from backend.data.block import Block
T = TypeVar("T")
_AVAILABLE_BLOCKS: dict[str, type["Block"]] = {}
def load_all_blocks() -> dict[str, type["Block"]]:
    from backend.data.block import... |
import importlib
import os
import re
from pathlib import Path
from typing import Type, TypeVar
from backend.data.block import Block
# Dynamically load all modules under backend.blocks
AVAILABLE_MODULES = []
current_dir = Path(__file__).parent
modules = [
str(f.relative_to(current_dir))[:-3].replace(os.path.sep, "... |
_base_ = [
'../_base_/models/faster-rcnn_r50-caffe-c4.py',
'../_base_/schedules/schedule_1x.py', '../_base_/datasets/voc0712.py',
'../_base_/default_runtime.py'
]
model = dict(roi_head=dict(bbox_head=dict(num_classes=20)))
# dataset settings
train_pipeline = [
    dict(type='LoadImageFromFile', backend_arg... |
_base_ = [
'../_base_/models/faster-rcnn_r50-caffe-c4.py',
'../_base_/schedules/schedule_1x.py', '../_base_/datasets/voc0712.py',
'../_base_/default_runtime.py'
]
model = dict(roi_head=dict(bbox_head=dict(num_classes=20)))
# dataset settings
train_pipeline = [
dict(
type='LoadImageFromFile',
... |
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Iterator, List, Optional, Sequence, Union
from mmengine.data import BaseDataElement
from ..registry.root import METRICS
from .metric import BaseMetric
class Evaluator:
"""Wrapper class to compose multiple :class:`BaseMetric` instances.
    Args:... |
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Any, Iterator, List, Optional, Sequence, Tuple, Union
from mmengine.data import BaseDataElement
from ..registry.root import METRICS
from .metric import BaseMetric
class Evaluator:
"""Wrapper class to compose multiple :class:`BaseMetric` instances... |
"""
To better manage tools, we introduce a class called ToolImporter, which is used for importing and managing tool usage in SecGPT. Moreover, we also define some tool helper functions for spoke definition.
"""
from llama_index.core.tools import FunctionTool
class ToolImporter:
"""
    A class to manage the impo... |
"""
To better manage tools, we introduce a class called ToolImporter, which is used for importing and managing tool usage in SecGPT. Moreover, we also define some tool helper functions for spoke definition.
"""
from llama_index.core.tools import FunctionTool
class ToolImporter:
"""
A class to manage the impo... |
# Copyright (c) OpenMMLab. All rights reserved.
import os
import pytest
import torch
import torch.nn as nn
from torch.distributed import destroy_process_group, init_process_group
from torch.nn.parallel import DataParallel, DistributedDataParallel
from mmengine.model import (MMDistributedDataParallel,
    ... |
# Copyright (c) OpenMMLab. All rights reserved.
import os
import pytest
import torch
import torch.nn as nn
from torch.distributed import destroy_process_group, init_process_group
from torch.nn.parallel import DataParallel, DistributedDataParallel
from mmengine.model import (MMDistributedDataParallel,
... |
from __future__ import annotations
import os
import platform
import struct
from itertools import chain
from typing import cast, TYPE_CHECKING
if TYPE_CHECKING:
from collections.abc import Iterable
CMAKE_MINIMUM_VERSION_STRING = "3.27"
IS_WINDOWS = platform.system() == "Windows"
IS_DARWIN = platform.system() =... |
from __future__ import annotations
import os
import platform
import struct
from itertools import chain
from typing import cast, TYPE_CHECKING
if TYPE_CHECKING:
from collections.abc import Iterable
IS_WINDOWS = platform.system() == "Windows"
IS_DARWIN = platform.system() == "Darwin"
IS_LINUX = platform.system()... |
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa
model = dict(
    ty... |
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
pretrained = 'https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_tiny_224_b16x64_300e_imagenet_20210616_090925-66df6be6.pth... |
from keras.src.api_export import keras_export
from keras.src.layers.pooling.base_pooling import BasePooling
@keras_export(["keras.layers.AveragePooling1D", "keras.layers.AvgPool1D"])
class AveragePooling1D(BasePooling):
"""Average pooling for temporal data.
    Downsamples the input representation by taking the ... |
from keras.src.api_export import keras_export
from keras.src.layers.pooling.base_pooling import BasePooling
@keras_export(["keras.layers.AveragePooling1D", "keras.layers.AvgPool1D"])
class AveragePooling1D(BasePooling):
"""Average pooling for temporal data.
Downsamples the input representation by taking the ... |
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Callable
import numpy as np
from sentence_transformers.evaluation.NanoBEIREvaluator import NanoBEIREvaluator
from sentence_transformers.sparse_encoder.evaluation.SparseInformationRetrievalEvaluator import (
    SparseInformationRetri... |
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Callable
import numpy as np
from sentence_transformers.evaluation.NanoBEIREvaluator import NanoBEIREvaluator
from sentence_transformers.sparse_encoder.evaluation.SparseInformationRetrievalEvaluator import (
SparseInformationRetri... |
import logging
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseNanoBEIREvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
datasets = ["QuoraRetrieval... | import logging
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SparseNanoBEIREvaluator,
SpladePooling,
)
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# Initialize the SPLADE model
model_name = "naver/splade-... |
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from mmcv.runner import BaseModule
class BaseDenseHead(BaseModule, metaclass=ABCMeta):
"""Base class for DenseHeads."""
def __init__(self, init_cfg=None):
super(BaseDenseHead, self).__init__(init_cfg)
    @abstr... |
from abc import ABCMeta, abstractmethod
from mmcv.runner import BaseModule
class BaseDenseHead(BaseModule, metaclass=ABCMeta):
"""Base class for DenseHeads."""
def __init__(self, init_cfg=None):
super(BaseDenseHead, self).__init__(init_cfg)
@abstractmethod
def loss(self, **kwargs):
... |
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseEmbeddingSimilarityEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser... | import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseEmbeddingSimilarityEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser... |
"""Argparser module for WorkerRuntime"""
from jina import __default_host__, helper
from jina.parsers.helper import KVAppendAction, add_arg_group
from jina.parsers.orchestrate.runtimes.runtime import mixin_base_runtime_parser
def mixin_worker_runtime_parser(parser):
"""Mixing in arguments required by :class:`Worke... | """Argparser module for WorkerRuntime"""
from jina import __default_host__, helper
from jina.parsers.helper import KVAppendAction, add_arg_group
def mixin_worker_runtime_parser(parser):
"""Mixing in arguments required by :class:`WorkerRuntime` into the given parser.
:param parser: the parser instance to which... |
"""Top-level imports for LlamaIndex."""
__version__ = "0.12.43"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_in... |
"""Top-level imports for LlamaIndex."""
__version__ = "0.12.42"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_in... |
"""
This example starts multiple processes (1 per GPU), which encode
sentences in parallel. This gives a near linear speed-up
when encoding large text collections.
"""
import logging
from sentence_transformers import LoggingHandler, SentenceTransformer
logging.basicConfig(
format="%(asctime)s - %(message)s", dat... | """
This example starts multiple processes (1 per GPU), which encode
sentences in parallel. This gives a near linear speed-up
when encoding large text collections.
"""
import logging
from sentence_transformers import LoggingHandler, SentenceTransformer
logging.basicConfig(
format="%(asctime)s - %(message)s", dat... |
_base_ = './lsj-100e_coco-instance.py'
# 8x25=200e
train_dataloader = dict(dataset=dict(times=8))
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.067, by_epoch=False, begin=0,
end=1000),
dict(
type='MultiStepLR',
begin=0,
end=25,
        by_epo... |
_base_ = './lsj_100e_coco_instance.py'
# 8x25=200e
train_dataloader = dict(dataset=dict(times=8))
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.067, by_epoch=False, begin=0,
end=1000),
dict(
type='MultiStepLR',
begin=0,
end=25,
by_epo... |