content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def generate_run_base_dir(
result_dir: str, timestamp: int = None, tag: str = None, sub_dirs: List[str] = None
) -> str:
"""
Generate a base directory for each experiment run.
Looks like this: result_dir/date_tag/sub_dir_1/.../sub_dir_n
Args:
result_dir (str): Experiment output directory.
... | 5,356,300 |
def mulaw_to_value(mudata):
    """Convert a mu-law encoded value to linear."""
    # Upper nibble selects the segment (bit position), lower nibble the step.
    shift = ((mudata & 0xF0) >> 4) + 5
    mantissa = (mudata & 0x0F) << (shift - 4)
    rounding = 1 << (shift - 5)
    # The constant 33 is the mu-law bias removed after decoding.
    return ((1 << shift) | mantissa | rounding) - 33
def init_exclusion_regexes(paths_ignore: Iterable[str]) -> Set[re.Pattern]:
"""
filter_set creates a set of paths of the ignored
entries from 3 sources:
.gitguardian.yaml
files in .git
files ignore in .gitignore
"""
res = set()
for path in paths_ignore:
if not is_pattern_vali... | 5,356,302 |
def make_payments():
"""Pay payments based on credits"""
# i.e. [ { uid, addr_type, amount, address }, ... ]
payments = []
now = database.walltime_to_db_time(time())
users = get_balances_and_thresholds()
total_matured = 0
total_pending = 0
log.message('Building list of payments')
... | 5,356,303 |
def validate_watch(value):
    """Validate "watch" parameter."""
    # Falsy input (None, empty string, empty list) normalizes to None.
    if not value:
        return None
    # A multi-line string becomes the list of its non-empty lines.
    if isinstance(value, str):
        return [line for line in value.split("\n") if line]
    return value
def diatomic_unitary(a, b, c):
    """
    Unitary decomposed as a diatomic gate of the form
    Ztheta + X90 + Ztheta + X90 + Ztheta
    """
    # Fixed pi/2 rotation about X, interleaved with three Z rotations.
    x90 = expm(-0.25j * np.pi * pX)
    def rz(angle):
        return expm(-0.5j * angle * pZ)
    return rz(a) @ x90 @ rz(b) @ x90 @ rz(c)
def is_x_y_in_hidden_zone_all_targets(room_representation, camera_id, x, y):
"""
:description
Extend the function is_x_y_in_hidden_zone_one_target,
1.for every target in the room
:param
1. (RoomRepresentation) -- room description of the target... | 5,356,306 |
def run_RIB_IN_capacity_test(cvg_api,
duthost,
tgen_ports,
multipath,
start_value,
step_value,
route_type,
port_speed... | 5,356,307 |
def _create_pure_mcts_player(
game: polygames.Game, mcts_option: mcts.MctsOption, num_actor: int
) -> mcts.MctsPlayer:
"""a player that uses only mcts + random rollout, no neural net"""
player = mcts.MctsPlayer(mcts_option)
for _ in range(num_actor):
actor = polygames.Actor(
None, ga... | 5,356,308 |
def get_fans_users():
"""
获取用户的粉丝
:return:
"""
user_id = request.argget.all("user_id")
page = str_to_num(request.argget.all("page", 1))
pre = str_to_num(request.argget.all("pre", 20))
s, r = arg_verify(reqargs=[("user id", user_id)], required=True)
if not s:
return r
data... | 5,356,309 |
def test_invalid_patterns(list, pattern):
"""
Function to facilitate the tests in MyRegExTest class
:param list: list with strings of invalid cases
:param pattern: a regular expression
:return: list with the result of all matches which should be a list of None
"""
newList = []
for item i... | 5,356,310 |
def args_parse():
"""Parse the input args."""
parser = argparse.ArgumentParser(description='Certificate import')
parser.add_argument("--cert", default="./kmc/config/crt/sever.cert", type=str,
help="The path of certificate file")
parser.add_argument("--key", default='./kmc/config/... | 5,356,311 |
def remove_constant_features(sfm):
"""
Remove features that are constant across all samples
"""
# boolean matrix of whether x == first column (feature)
x_not_equal_to_1st_row = sfm._x != sfm._x[0]
non_const_f_bool_ind = x_not_equal_to_1st_row.sum(axis=0) >= 1
return sfm.ind_x(selected_f_inds... | 5,356,312 |
def flatten(x):
""" Flatten list an array.
Parameters
----------
x: list of ndarray or ndarray
the input dataset.
Returns
-------
y: ndarray 1D
the flatten input list of array.
shape: list of uplet
the input list of array structure.
"""
# ... | 5,356,313 |
def open_window():
"""Open the logging window"""
log_window = stager.utils.BUILDER.get_object(LOG_VIEW_WINDOW_ID)
log_window.show_all()
textview = stager.utils.BUILDER.get_object(LOG_VIEW_TEXTVIEW_ID)
buffer = textview.get_buffer()
try:
with open(LOG_FILE, encoding="utf-8") as log_fil... | 5,356,314 |
def sample_automaton():
"""
Creates a sample automaton and returns it.
"""
# The states are a python Set. Name them whatever you want.
states = {"0","1","2"}
# Choose one of the states to be the initial state. You need to give this a Set, but that Set usually only contains one state.
init_state = {"0"}
... | 5,356,315 |
def mae(data, data_truth):
    """Computes mean absolute error (MAE)
    :param data: Predicted time series values (n_timesteps, n_timeseries)
    :type data: numpy array
    :param data_truth: Ground truth time series values
    :type data_truth: numpy array
    """
    abs_errors = np.abs(data - data_truth)
    return abs_errors.mean()
def _find_ntc_family(guide_id):
"""Return a String of the NTC family
"""
guide_id_list = guide_id.split('_')
return '_'.join(guide_id_list[0:2]) | 5,356,317 |
def main(unused_argv):
"""训练入口"""
global total_feature_columns, label_feature_columns
dense_feature_columns, category_feature_columns, label_feature_columns = create_feature_columns()
total_feature_columns = dense_feature_columns + category_feature_columns
params = {
"category_feature_... | 5,356,318 |
def CreateFilletCurves(curve0, point0, curve1, point1, radius, join, trim, arcExtension, tolerance, angleTolerance, multiple=False):
"""
Creates a tangent arc between two curves and trims or extends the curves to the arc.
Args:
curve0 (Curve): The first curve to fillet.
point0 (Point3d): A ... | 5,356,319 |
def select_ads() -> jsonify:
"""
select ads
"""
try:
if INSERTED_FILES_NUM == 0 or INSERTED_FILES_NUM != PROCESSED_FILES_NUM:
raise Exception('server is not ready')
weights: List[Tuple[str, int]] = json.loads(request.get_json())
selected_ads: List[Dict[str, int or f... | 5,356,320 |
def test_or() -> None:
    """Test ``Or`` condition. """
    @argcomb(Or("a", "b"))
    def func(a: Any = None, b: Any = None) -> None:
        ...
    # any call supplying at least one of the two arguments is accepted
    func(a=1)
    func(b=2)
    func(a=1, b=2)
    # supplying neither argument must raise
    with pytest.raises(InvalidArgumentCombination):
        func()
def _xpath_find(data: Dict, xparts: List, create_if_missing: bool = False) -> Any:
"""
Descend into a data dictionary.
:arg data:
The dictionary where to look for `xparts`.
:arg xparts:
Elements of an Xpath split with xpath_split()
:arg bool create_if_missing:
If elements ar... | 5,356,322 |
def r_mediate(y, t, m, x, interaction=False):
"""
This function calls the R function mediate from the package mediation
(https://cran.r-project.org/package=mediation)
y array-like, shape (n_samples)
outcome value for each unit, continuous
t array-like, shape (n_samples)
... | 5,356,323 |
def default_props(reset=False, **kwargs):
"""Return current default properties
Parameters
----------
reset : bool
if True, reset properties and return
default: False
"""
global _DEFAULT_PROPS
if _DEFAULT_PROPS is None or reset:
reset_default_props(**kwargs)
... | 5,356,324 |
def ReadFlatFileNGA(xlsfile):
"""
Generate NGA flatfile dictionary for generate usage
"""
# read in excel flatfile
book = xlrd.open_workbook(xlsfile)
sh = book.sheet_by_index(0) # 'Flatfile' sheet name
keys = sh.row_values(0)
for itmp in range( len(keys) ):
keys[itmp] = keys[i... | 5,356,325 |
def _chr_ord(x):
"""
This is a private utility function for getBytesIOString to return
chr(ord(x))
"""
return chr(ord(x)) | 5,356,326 |
def vkToWchar (m):
""" Mapping from virtual key to character """
ret = []
retTbl = ['/* table of virtual key to wchar mapping tables */',
'static VK_TO_WCHAR_TABLE aVkToWcharTable[] = {']
def generate (n, g, defPrefix=''):
defname = f'aVkToWch{defPrefix}{n}'
ret.extend ([f'... | 5,356,327 |
def load_glove_vectors(glove_file="/home/yaguang/pretrained_models/glove.6B.50d.txt"):
"""Load the glove word vectors"""
word_vectors = {}
with open(glove_file) as f:
for line in f:
split = line.split()
word_vectors[split[0]] = [float(x) for x in split[1:]]
return word_ve... | 5,356,328 |
def _ParseFileVersion(file_version):
"""Convert the string file_version in event.proto into a float.
Args:
file_version: String file_version from event.proto
Returns:
Version number as a float.
"""
tokens = file_version.split("brain.Event:")
try:
return float(tokens[-1])
... | 5,356,329 |
def gen_imgs(samples, batch_size, slide, shuffle=False):
"""This function returns a generator that
yields tuples of (
X: tensor, float - [batch_size, 224, 224, 3]
y: tensor, int32 - [batch_size, 224, 224, NUM_CLASSES]
)
input: samples: samples dataframe
input: batch_size: The number... | 5,356,330 |
def get_file_format(input_files):
"""
Takes all input files and checks their first character to assess
the file format. Returns one of the following strings; fasta, fastq,
other or mixed. fasta and fastq indicates that all input files are
of the same format, either fasta or fastq. other indiates tha... | 5,356,331 |
def get_nominal_hour(train_num):
"""Get the nominal hour for a train num (most frequent)"""
res = database.get().query("""
SELECT count(*) as count, substr(date, 12, 5) as hour
FROM results WHERE num = '%s'
GROUP BY hour ORDER BY count DESC LIMIT 1;
""" % train_num)
return next(r... | 5,356,332 |
def prepare_aggregation_data(group_name: str) -> List[PlotValues]:
"""Constructs and returns learning rate curves
Args:
group_name (str): group name for which to construct the curves
Returns:
A list of `PlotValues`.
"""
group_dir = os.path.join(FLAGS.results_dir, group_name)
# ... | 5,356,333 |
def compute_cgan_metrics(img_y, img_g, i = 0):
"""
Computes accuracy, precision, recall, f1, iou_score for passed image, return None in case of div 0
img_y: ground truth building footprint semantic map
img_g: generated image
i: 0 for entire image, 1 for inner (excluding border)
N... | 5,356,334 |
def learning(spiking_neurons, spike_times, taup, taum, Ap, Am, wmax, w_init):
"""
Takes a spiking group of neurons, connects the neurons sparsely with each other, and learns the weight 'pattern' via STDP:
exponential STDP: f(s) = A_p * exp(-s/tau_p) (if s > 0), where s=tpost_{spike}-tpre_{spike}
:param ... | 5,356,335 |
def deploy(version_tag=None):
"""deploys a updated version of the site
version_tag: a git tag, defaults to HEAD
"""
supervised_process = SITE_SETTINGS['supervised_process']
#dust()
stop(supervised_process)
update(commit=version_tag)
setup()
collectstatic()
start(supervised_proc... | 5,356,336 |
def acq2vhdr(
# Paths
output_file: Path,
data_file: str,
# Channels
channels: list[Channel],
ch_names: list = None,
ch_scales: list = None,
ch_units: list = None,
channel_indexes: list[int] = None,
# Raw data
samples_per_second: float = 2000.0,
# Markers
marker_file: ... | 5,356,337 |
def test_disequilibrium5(n):
    """
    Test that peaked ScalarDistributions have non-zero disequilibrium.
    """
    # All probability mass on the first outcome; remaining n-1 are zero.
    peaked = ScalarDistribution([1] + [0] * (n - 1))
    assert disequilibrium(peaked) >= 0
def bdc_check_build(build_file: str, verbose: bool = False) -> NoReturn:
"""
:param build_file:
:param verbose:
:return:
"""
init_verbosity(verbose)
_ = load_and_validate(build_file)
if errors == 0:
print("\nNo errors.")
else:
# Error messages already printed.
... | 5,356,339 |
def main():
"""
NAME
aarm_magic.py
DESCRIPTION
Converts AARM data to best-fit tensor (6 elements plus sigma)
Original program ARMcrunch written to accomodate ARM anisotropy data
collected from 6 axial directions (+X,+Y,+Z,-X,-Y,-Z) using the
off-axis remanence ... | 5,356,340 |
def register_config_callbacks():
"""
Registers callback function to fire whenever a Maya file is opened or
created.
:return: None
"""
# Make sure there are no callbacks that we've registered and never
# de-registered... this could happen if Mimic crashed.
de_register_callbacks()
c... | 5,356,341 |
def build_arg_parser2():
"""
Build an argument parser using optparse. Use it when python version is 2.5 or 2.6.
"""
usage_str = "Smatch table calculator -- arguments"
parser = optparse.OptionParser(usage=usage_str)
parser.add_option("--fl", dest="fl", type="string", help='AMR ID list file')
... | 5,356,342 |
def _write_log(path, lines):
"""
:param path: log file path
:param lines: content
:return status:Bool
"""
try:
with open(path, 'w') as file:
logi('open file {log_path} for writting'.format(log_path=path))
file.writelines(lines)
except Exception as e:
l... | 5,356,343 |
def test_branch_and_ring_at_beginning_of_branch():
"""Test SELFIES that have a branch and ring immediately at the start
of a branch.
"""
# CC1CCCS((Br)1Cl)F
assert is_eq(sf.decoder("[C][C][C][C][C][S][Branch1_2][Branch1_3]"
"[Branch1_1][C][Br]"
... | 5,356,344 |
def update_meal_plan(plan_id: str, meal_plan: MealPlan):
""" Updates a meal plan based off ID """
meal_plan.process_meals()
meal_plan.update(plan_id)
# try:
# meal_plan.process_meals()
# meal_plan.update(plan_id)
# except:
# raise HTTPException(
# status_code=404,... | 5,356,345 |
def clean_data(df):
"""
INPUT:
df - Panda DataFrame - A data frame that contains the data
OUTPUT:
df - Panda DataFrame - A Cleaned Panda Data frame
"""
#split categories into a data frame and take the first row
cat = df.categories.str.split(';', expand=True)
row = cat.iloc[... | 5,356,346 |
def adjust_learning_rate(optimizer, iteration_count, args):
    """Imitating the original implementation"""
    # Inverse-time decay of the base learning rate.
    new_lr = args.lr / (1.0 + args.lr_decay * iteration_count)
    for group in optimizer.param_groups:
        group['lr'] = new_lr
def vec2transform(v):
"""Convert a pose from 7D vector format ( x y z qx qy qz qw) to transformation matrix form
Args: v pose in 7D vector format
Returns:
T 4x4 transformation matrix
$ rosrun tf tf_echo base os_lidar
- Translation: [-0.084, -0.025, 0.050]
- Rotation: in Quaternion [... | 5,356,348 |
def cleanup_name_customregex(cname, customregex=None, returnmatches=False):
"""Cleanup the input name given a custom dictionary of regular expressions (format of customregex: a dict like {'regex-pattern': 'replacement'}"""
if customregex is None:
customregex = {'_': ' ',
'repos': ... | 5,356,349 |
def test_query_vaults_details_non_premium(rotkehlchen_api_server):
"""Check querying the vaults details endpoint without premium does not work"""
response = requests.get(api_url_for(
rotkehlchen_api_server,
"makerdaovaultdetailsresource",
))
assert_error_response(
response=respon... | 5,356,350 |
def compute_recommended_batch_size_for_trustworthy_experiments(C: int, H: int, W: int, safety_val: float) -> int:
"""
Based on inequality with safety_val=s:
N' >= s*D'
the recommended batch size is, assuming N'=B*H*W and D'=C (so considering neurons as filter, patches as data):
B*H*W >= s*C
... | 5,356,351 |
def filter_gradient(t, h_, n_std=3):
"""Filter outliers by evaluating the derivative.
Take derivative and evaluate outliers in derivative.
"""
h = h_.copy()
# NOTE: This must be a separate step
# dh/dt = 0 -> invalid
dhdt = np.gradient(h)
invalid = np.round(dhdt, 6) == 0.0
dhdt[inv... | 5,356,352 |
def network_driver_create_endpoint():
"""Creates new Neutron Subnets and a Port with the given EndpointID.
This function takes the following JSON data and delegates the actual
endpoint creation to the Neutron client mapping it into Subnet and Port. ::
{
"NetworkID": string,
... | 5,356,353 |
def save_pkl(fp, obj):
    """Saves an object to pickle file."""
    with open(fp, "wb") as out_file:
        pickle.dump(obj, out_file)
def generate_valve_from_great_vessel(
label_great_vessel,
label_ventricle,
valve_thickness_mm=8,
):
"""
Generates a geometrically-defined valve.
This function is suitable for the pulmonic and aortic valves.
Args:
label_great_vessel (SimpleITK.Image): The binary mask for the great ve... | 5,356,355 |
def current_decay(dataframe,two_components=False):
"""
Fits 95% peak to:
A(t) = A*exp(-t/Taufast) + B*exp(-t/Tauslow) +Iss
Parameters
----------
dataframe : A pandas dataframe
Should be baselined
two_components : True/False
When False, a single exponential component ... | 5,356,356 |
def get_n_mode_follow(p_state, idx_image=-1, idx_chain=-1):
    """Returns the index of the mode which to follow."""
    # Marshal the indices to C ints for the shared-library call.
    image = ctypes.c_int(idx_image)
    chain = ctypes.c_int(idx_chain)
    return int(_MMF_Get_N_Mode_Follow(p_state, image, chain))
def get_possible_centroid_nodes_from_partial_preprocessing(nw_name):
""" this function returns a list of partially preprocessed nodes to used them as zone systems
(for fast routing) """
nw_path = os.path.join(MAIN_DIR, "data", "networks", nw_name)
ppf = os.path.join(nw_path, "base", "tt_matrix.npy")
... | 5,356,358 |
def dismiss_recommendation(title):
    """Dismiss the movie matching the specified criteria from showing up in
    recommendations.
    """
    # Yield the API path for the slugified title.
    slug = slugify(str(title))
    yield 'recommendations/movies/{title}'.format(title=slug)
def divide(lhs, rhs):
"""Division with auto-broadcasting
Parameters
----------
lhs : tvm.Tensor or Expr
The left operand
rhs : tvm.Tensor or Expr
The right operand
Returns
-------
ret : tvm.Tensor or Expr
Returns Expr if both operands are Expr.
Otherwise... | 5,356,360 |
def load(filename):
    """Loads a compressed object from disk.

    Args:
        filename: Path to a gzip-compressed pickle file.

    Returns:
        The unpickled object.
    """
    # Context manager guarantees the file handle is closed even if reading
    # or decompression raises (the original leaked it on error and also
    # shadowed the builtins `file`, `buffer` and `object`).
    with gzip.GzipFile(filename, 'rb') as fh:
        data = fh.read()
    # NOTE(review): pickle.loads is unsafe on untrusted input — only load
    # files this application wrote itself.
    return pickle.loads(data)
def ordered_ltrunc():
"""
Iterates through the left truncatable prime https://youtu.be/azL5ehbw_24
A left truncatable prime is still prime when the left most digit is
dropped. For example 317 -> 17 -> 7 are prime so 317 is a left
truncatable prime
returns an iterator that goes through them in increasing order
... | 5,356,362 |
def on_receive_best5(best5_entry):
"""
處理最佳五檔事件
"""
print('[%s %s] 最佳五檔' % (best5_entry['id'], best5_entry['name']))
for i in range(0, 5):
print('%5d %.2f | %.2f %5d' % (
best5_entry['best'][i]['bidQty'],
best5_entry['best'][i]['bid'],
best5_entry['best'][... | 5,356,363 |
def connect_message_queue(name, url=None, maxsize=0):
"""
create connection to message queue
name:
name of message queue
rabbitmq:
amqp://username:password@host:5672/%2F
Refer: https://www.rabbitmq.com/uri-spec.html
beanstalk:
beanstalk://host:11300/
redis:
... | 5,356,364 |
def load_document(filepath):
"""
Description:Opens and loads the file specified by filepath as a raw txt string; assumes valid text file format.
Input: String -> filepath of file from current directory
Output: Entire contents of text file as a string
"""
#assert(filepath.endswith(".txt")), "Function: Load ... | 5,356,365 |
def get_user(request, uid):
    """
    GET /user/1/
    """
    # Only user id 1 exists in this stub endpoint.
    if uid != 1:
        return JsonResponse({"code": 10101, "message": "user id null"})
    payload = {"age": 22, "id": 1, "name": "tom"}
    return JsonResponse({"code": 10200, "data": payload, "message": "success"})
def write_json_test_results(category, # type: ResultType
name, # type: str
content, # type: t.Union[t.List[t.Any], t.Dict[str, t.Any]]
formatted=True, # type: bool
encoder=None, # type: t.Optional[t.Call... | 5,356,367 |
def count_ngrams(lines, min_length=1, max_length=3):
"""
Iterate through given lines iterator (file object or list of
lines) and return n-gram frequencies. The return value is a dict
mapping the length of the n-gram to a collections.Counter
object of n-gram tuple and number of times that n-gram occu... | 5,356,368 |
def notas(*n, sit = False):
"""
-> Função para analisar notas e situação de vários alunos.
:param n: notas (uma ou mais)
:param sit: situação (valor opcional)
:return: dicionário com várias informaçoes sobre o aluno.
"""
r = {}
r['total'] = len(n)
r['maior'] = max(n)
r['menor'] =... | 5,356,369 |
def main(
filepath: str = "ice_thickness_01-01-2020.csv",
rescaling_factor: int = 2,
grid_size: float = 0.1,
robot_radius: float = 0.01,
):
"""Loads the ice thickness data and plans a route over safe ice."""
df = pd.read_csv(filepath)
df_rescaled = df.iloc[::rescaling_factor, :]
gx, gy,... | 5,356,370 |
def validate_metrics(metrics):
"""
Checks if specified metrics are valid. Returns None if check passes,
else raises ValueError.
"""
if any(m not in METRICS for m in metrics):
bad_metrics = [m for m in metrics if m not in METRICS]
raise ValueError('Unknown metrics: {}'.format(bad_metr... | 5,356,371 |
def accuracy(output, labels_test):
    """How many correct predictions?"""
    # Sign of the raw output is the predicted label.
    TP, TN, FP, FN = confusionMatrix(labels_test, numpy.sign(output))
    correct = TP + TN
    total = TP + TN + FP + FN
    return float(correct) / total
def test_tk_import():
    """Test `tqdm.tk` import"""
    # Skips the test when the optional Tk backend is unavailable.
    importorskip('tqdm.tk')
def metric_pairs(request):
    """Pairs of (dask-ml, sklearn) accuracy metrics.
    * accuracy_score
    """
    # Look up the same metric name in both libraries.
    name = request.param
    dask_metric = getattr(dask_ml.metrics, name)
    sklearn_metric = getattr(sklearn.metrics, name)
    return (dask_metric, sklearn_metric)
def MyDuration(duration, initial_time=None):
"""
Usecase:
a timestamp is provided as when an access token expires,
then add it to the current time, then showing it as a human-readable
future time.
Alternatively specify a *initial_time* as manual now value.
Args
... | 5,356,375 |
def test_fpn():
"""Tests fpn."""
s = 64
in_channels = [8, 16, 32, 64]
feat_sizes = [s // 2**i for i in range(4)] # [64, 32, 16, 8]
out_channels = 8
# `num_outs` is not equal to len(in_channels) - start_level
with pytest.raises(AssertionError):
FPN(in_channels=in_channels,
... | 5,356,376 |
def orthogonal_init(shape, gain=1.0):
"""Generating orthogonal matrix"""
# Check the shape
if len(shape) < 2:
raise ValueError("The tensor to initialize must be "
"at least two-dimensional")
# Flatten the input shape with the last dimension remaining
# its original s... | 5,356,377 |
def application(env, start_response):
"""The callable function per the WSGI spec; PEP 333"""
headers = {x[5:].replace('_', '-'):y for x, y in env.items() if x.startswith('HTTP_')}
if env.get('CONTENT_TYPE', None):
headers['Content-Type'] = env['CONTENT_TYPE']
if env.get('CONTENT_LENGTH', None):
... | 5,356,378 |
def loadModule():
"""
"""
load_config_data()
registe_madmin()
from gatenodeapp import * | 5,356,379 |
def angle_normalize(x):
    """
    Normalize angles to the range [-pi, pi).

    Note: the formula wraps into the half-open interval [-pi, pi),
    not 0-2*pi as previously documented. Works elementwise on arrays.
    """
    return ((x + np.pi) % (2 * np.pi)) - np.pi
async def hook_factory() -> HookFactory:
    """Factory creation fixture.

    Yields a fresh HookFactory; stops all hooks after the test.
    """
    factory = HookFactory()
    yield factory
    factory.stop_all()
    # Brief pause so stopped hooks can finish shutting down.
    await asyncio.sleep(0.01)
def load_data(BASE_DIR, DATA_DIR):
"""
Loads data necessary for project
Arguments:
BASE_DIR (str) -- path to working dir
DATA_DIR (str) -- path to KEGG data
Returns:
tla_to_mod_to_kos (defaultdict of dicts) -- maps tla to series of dicts, keys are KEGG modules and values are lists of KOs in that module (e... | 5,356,382 |
def disable_app(app, base_url=DEFAULT_BASE_URL):
"""Disable App.
Disable an app to effectively remove it from your Cytoscape session without having to uninstall it.
Args:
app (str): Name of app
base_url (str): Ignore unless you need to specify a custom domain,
port or version t... | 5,356,383 |
def wrangle_adni():
"""This function returns three dataframes.
Unpack the dataframes when calling the function.
"""
# ensure pandas availability for the function
if 'pd' not in globals():
import pandas as pd
# read in the data to a pandas dataframe
adni_full = pd.read_csv('ADNIMER... | 5,356,384 |
def test_remove_state_no_key():
"""Test ability of remove_state function to work with input of the state."""
v = get_vapordome()
state_3 = State("water", T=500 * units.kelvin, v=1 * units.m ** 3 / units.kg)
v.add_state(state_3) # test of repr(state)
v.remove_state(state_3)
# assert v.states[rep... | 5,356,385 |
def readByte(file):
    """Read a single byte from *file* and return it as an int."""
    data = file.read(1)
    return ord(data)
def run_main():
"""
这是主函数
"""
input_dir = os.path.abspath("./input")
groundtruth_dir = os.path.join(input_dir, "ground-truth")
if not os.path.exists(groundtruth_dir):
os.makedirs(groundtruth_dir)
# 初始化测试数据集txt文件
dataset_dir = os.path.abspath(args.dataset_dir)
test_txt_pat... | 5,356,387 |
def qa_tfserving(data_input, url):
    """Full tf-serving round trip: encode the input, POST it to the
    serving endpoint, and post-process the JSON response."""
    bert_input = covert_text_to_id(data_input)
    payload = json.dumps(bert_input)
    response = requests.post(url, payload)
    response_json = json.loads(response.text)
    return postprocess(response_json)
def contract_creation_exceptions():
    """
    Return create exceptions.
    These elements depend on the networksegments table which was renamed
    in the contract branch.
    """
    exceptions = {}
    exceptions[sa.Table] = ['segmenthostmappings']
    exceptions[sa.Index] = ['segmenthostmappings']
    return exceptions
def _column_sel_dispatch(columns_to_select, df): # noqa: F811
"""
Base function for column selection.
Applies only to slices.
The start slice value must be a string or None;
same goes for the stop slice value.
The step slice value should be an integer or None.
A slice, if passed correctly i... | 5,356,390 |
def create_or_update(*, db_session, monitor_in: MonitorCreate) -> Monitor:
"""Creates or updates a monitor."""
monitor = get_by_weblink(db_session=db_session, weblink=monitor_in.weblink)
if monitor:
monitor = update(db_session=db_session, monitor=monitor, monitor_in=monitor_in)
else:
mon... | 5,356,391 |
def get_record_map(index_array, true_false_ratio):
"""Get record map.
:param index_array: the indexes of the images
:type index_array: numpy array
:param true_false_ratio: the number of occurrences of true cases over the number of occurrences of false cases
:type true_false_ratio: int or float
... | 5,356,392 |
def sdfGetMolBlock(mol):
    """Return the MOL block stored in the molecule record."""
    molblock = mol["molblock"]
    return molblock
def changelog(ctx, base, head, jira):
"""Get changelog between base branch and head branch"""
log = git.log(base, head, merges=True)
logger.debug(log)
# Git changelog
click.echo('\nGit changelog:\n')
ticket_ids, changelog = git.changelog(log, ticket_ids=True)
click.echo(changelog)
# JI... | 5,356,394 |
def get_methods(klass):
"""Get all methods, include regular, static, class method.
"""
methods = list()
attributes = get_attributes(klass)
for key, value in inspect.getmembers(MyClass):
if (not (key.startswith("__") and key.endswith("__"))) and \
(key not in attributes):
... | 5,356,395 |
def get_sample_column(table_file_name, sample_name, sex='U'):
"""
Get a VCF column as a Pandas Series for one sample.
:param table_file_name: Name of genotyped features table file output by the genotyper after applying the genotyping
model and annotating the genotype for no-call variants.
:para... | 5,356,396 |
def find_plugin_models():
"""
Find custom models
"""
# List of plugin objects
plugins_dir = find_plugins_dir()
# Go through files in plug-in directory
if not os.path.isdir(plugins_dir):
msg = "SasView couldn't locate Model plugin folder %r." % plugins_dir
logger.warning(msg)
... | 5,356,397 |
def write_to_variable(tensor, fail_if_exists=True):
"""Saves a tensor for later retrieval on CPU."""
if not isinstance(tensor, tf.Tensor):
raise ValueError('Expected tf.Tensor but got {}'.format(type(tensor)))
# Only relevant for debugging.
debug_name = 'tpu_util__' + tensor.name.split(':')[0]
reuse = F... | 5,356,398 |
def evaluate_sb_policy_against_gym_env(sb_algorithm, policy_path, gym_env_name, episodes):
"""CLI command for stable baselines policy evaluation against ("real") gym env."""
sb_cls = get_sb_class_for_algo(sb_algorithm.upper())
policy_path = os.path.abspath(policy_path)
model = sb_cls.load(policy_path)
... | 5,356,399 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.