content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
import sqlite3
def prob8(cur: sqlite3.Cursor) -> pd.DataFrame:
"""Give a list of the services which connect the stops 'Craiglockhart' and
'Tollcross'.
Parameters
----------
cur (sqlite3.Cursor) : The cursor for the database we're accessing.
Returns
-------
(pd.DataFrame) ... | 14e8bbb04befc1116f969ca977d83bc27890664c | 3,655,300 |
def get_command(name):
    """Look up *name* in the COMMANDS registry and return a new instance of it.

    Raises KeyError if no command is registered under *name*.
    """
    command_factory = COMMANDS[name]
    return command_factory()
def _str_struct(a):
    """Convert a pytree structure to a string for logging purposes.

    Each leaf is rendered as its ``(shape, dtype)`` pair after conversion
    to a JAX array, so arbitrary nested containers of arrays log compactly.
    """
    def shape_dtype(x):
        arr = jnp.asarray(x)
        return arr.shape, str(arr.dtype)

    # jax.tree_map was deprecated and later removed; jax.tree_util.tree_map
    # is the stable, long-supported spelling of the same operation.
    return str(jax.tree_util.tree_map(shape_dtype, a))
from typing import Set
import os
def get_moved_files(dir_path: str) -> Set:
"""
获取要移动的文件(夹),包括:
- 文件夹
- 损坏的图片
- 非图像文件
- 重复的图片
"""
removed_files = set()
file_map = {}
for file in os.listdir(dir_path):
file_path = os.path.join(dir_path, file)
# 过滤文... | 52520d8e8cd41343945e45bb46da32177175ad34 | 3,655,303 |
def delete_interface_address(
api_client, interface_id, address_id, **kwargs
): # noqa: E501
"""delete_interface_address # noqa: E501
Delete interface address details # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_re... | 19d04ef0783988c8eb86983d589d9f07e82ba3b8 | 3,655,304 |
import types
async def set_promo(message: types.Message, state: FSMContext):
"""
Команда /setpromo
"""
arg = message.get_args()
if not arg:
return await message.answer(_("Укажите аргумент: промокод. Например: <pre>/set_promo my-promo-code</pre>"),
parse... | 9a15dd1bea20c3da6dd31eee5e2a723ddd110ba2 | 3,655,305 |
def plot_waterfall(*sigObjs, step=10, xLim:list=None,
Pmin=20, Pmax=None, tmin=0, tmax=None, azim=-72, elev=14,
cmap='jet', winPlot=False, waterfallPlot=True, fill=True,
lines=False, alpha=1, figsize=(20, 8), winAlpha=0,
removeGridLines=False, ... | 85888e49a938a5e4faac90c52b2df7fa7036610c | 3,655,306 |
import csv
import re
def indices(input_file):
"""
Parse the index file or target file and return a list of values.
:return:
"""
index_list = []
line_num = 0
index_file = list(csv.reader(open(input_file), delimiter='\t'))
for line in index_file:
line_num += 1
col_count ... | ea07d6f2bc8f3d23cf2ae59cb2df6c19158752fc | 3,655,307 |
import argparse
def parse_arguments():
"""
Parse the arguments from the user
"""
parser = argparse.ArgumentParser(
description="omeClust visualization script.\n",
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
"adist",
help="the input file D*... | aaa649b34cdb6819f9a56e7e0d547ccc88bff139 | 3,655,308 |
import math
from functools import reduce
def checkSequences(numl, rowlen, seqlen):
"""In a square of numbers, represented by the list with
the given row length, look for the top product with length
of seqlen"""
listl=len(numl)
collen=math.ceil(listl/rowlen)
seqind=seqlen-1
log.debug("List l... | ec37575a088098fb57dc2a58603ccea6fcc9b5a9 | 3,655,309 |
def has_same_facts(ruler_intervals1, ruler_intervals2, D):
"""
Check whether the two same-pattern ruler lists have the same facts at each corresponding ruler-interval
Args:
ruler_intervals1: a list of ruler-intervals
ruler_intervals2: a list of ruler-intervals
D: contain all relation... | 210540bd2c2062f3150a34c5911017ec49b5603f | 3,655,310 |
def main():
""" """
undet = argument_parse()
print 'Start\t|\tCheck incorrect index'
fq_list = split_fastq(undet)
print 'Process\t|\tAnalysis undetermined data'
combined_df = multi_process(fq_list)
sorted_combined_df = combined_df.sort_values(
by='count',
ascending=F... | e20f65e172f49ce2f184b32344135ccadb550253 | 3,655,311 |
def ruleset_delete(p_engine, p_username, rulesetname, envname):
"""
Delete ruleset from Masking engine
param1: p_engine: engine name from configuration
param2: rulesetname: ruleset name
return 0 if added, non 0 for error
"""
return ruleset_worker(p_engine=p_engine, p_username=p_username, ru... | 470e2d104a6d10737bba975a0cb15a4768238244 | 3,655,312 |
def config_from_file(file_name):
    """Read the JSON configuration stored at *file_name* and return it."""
    with open(file_name) as handle:
        # ujson parses the file into native Python objects.
        parsed = ujson.load(handle)
    return parsed
def build_yaml_object(
dataset_id: str,
table_id: str,
config: dict,
schema: dict,
metadata: dict = dict(),
columns_schema: dict = dict(),
partition_columns: list = list(),
):
"""Build a dataset_config.yaml or table_config.yaml
Args:
dataset_id (str): The dataset id.
... | 8fa7d3acac0e9636fda923d9a38e9a82f904afae | 3,655,314 |
import os
def read_candidate_data_list(file, path=IEDC_paths.candidates):
"""
Will read a candidate file and return its data.
:param file: Filename of the file to process
:param path: Path of the file
:return: Dictionary of dataframes for metadata, classifications, and data
"""
# make it ... | acf3ed6c93f4e57797b8451bd6b239a4d6564dff | 3,655,315 |
from pathlib import Path
def make_cumulative(frame, filedate, unit):
"""Create a cumulative graph of cases over time"""
gb = frame.groupby("Accurate_Episode_Date").agg(patients=("Row_ID", "count"))
gb = gb.resample("D").last().fillna(0).reset_index()
max_date = gb["Accurate_Episode_Date"].max().strfti... | 44a2a1b3af68c293a86af97b11edf8cca562e6b8 | 3,655,316 |
def most_common(l):
""" Helper function.
:l: List of strings.
:returns: most common string.
"""
# another way to get max of list?
#from collections import Counter
#data = Counter(your_list_in_here)
#data.most_common() # Returns all unique items and their counts
#data.most_... | 5010e4e26b00099c287f8597d8dc5881a67c4034 | 3,655,317 |
def reduce_avg(reduce_target, lengths, dim):
"""
Args:
reduce_target : shape(d_0, d_1,..,d_dim, .., d_k)
lengths : shape(d0, .., d_(dim-1))
dim : which dimension to average, should be a python number
"""
shape_of_lengths = lengths.get_shape()
shape_of_target = reduce_target.g... | 3bba229f448d393019857d89d16820076732e932 | 3,655,318 |
def _near_mod_2pi(e, t, atol=_DEFAULT_ATOL):
    """Check whether *e*, translated by *t*, is congruent to 0 modulo 2*pi."""
    full_turn = 2 * np.pi
    return _near_mod_n(e, t, n=full_turn, atol=atol)
def create_returns_similarity(strategy: QFSeries, benchmark: QFSeries, mean_normalization: bool = True,
std_normalization: bool = True, frequency: Frequency = None) -> KDEChart:
"""
Creates a new returns similarity chart. The frequency is determined by the specified returns series.... | a83a7d2171ee488c1ac9ede80f39778658a4538f | 3,655,320 |
import matplotlib.cm as mpl_color_map
from PIL import Image
import copy
def apply_colormap_on_image(org_im, activation, colormap_name='viridis', alpha=.4, thresh=30):
"""
Apply heatmap on image
Args:
org_img (PIL img): Original image
activation_map (numpy arr): Activation map (grayscal... | d6f89cb06e8ec489e9bd38cb52f5d04628037b70 | 3,655,321 |
def _cli():
"""
command line interface
:return:
"""
parser = generate_parser()
args = parser.parse_args()
return interface(args.bids_dir,
args.output_dir,
args.aseg,
args.subject_list,
args.session_list,
... | 0b37b2eab79c5f50d5f18b5d6b435e3b97682a36 | 3,655,322 |
def statusize():
"""Posts a status from the web."""
db = get_session(current_app)
user_id = session.get('user_id')
if not user_id:
return forbidden('You must be logged in to statusize!')
user = db.query(User).get(user_id)
message = request.form.get('message', '')
if not message:
... | b06f711dfbf73b9c75ba2303478799dcc678a28c | 3,655,323 |
import base64
def urlsafe_b64decode_nopadding(val):
    """Decode urlsafe base64 that may be missing its '=' padding.

    Args:
        val: the encoded payload, as ``str`` or ``bytes``. (The original
            implementation called ``str(val)``, which silently corrupted
            ``bytes`` input into ``"b'...'"``; bytes are now decoded
            properly.)

    Returns:
        bytes: the decoded payload.
    """
    if isinstance(val, bytes):
        text = val.decode('ascii')
    else:
        # Preserve the old behavior of coercing other inputs via str().
        text = str(val)
    # Append exactly the padding needed to reach a multiple of 4 characters.
    return base64.urlsafe_b64decode(text + '=' * (-len(text) % 4))
def _subimg_bbox(img, subimage, xc, yc):
"""
Find the x/y bounding-box pixel coordinates in ``img`` needed to
add ``subimage``, centered at ``(xc, yc)``, to ``img``. Returns
``None`` if the ``subimage`` would extend past the ``img``
boundary.
"""
ys, xs = subimage.shape
y, x = img.shap... | b299a6b3726ced525b538b4fea45b235fc0bd56e | 3,655,325 |
from datetime import datetime
def _ToDatetimeObject(date_str):
"""Converts a string into datetime object.
Args:
date_str: (str) A date and optional time for the oldest article
allowed. This should be in ISO 8601 format. (yyyy-mm-dd)
Returns:
datetime.datetime Object.
Raises:
... | df675cb5391456122bb350a126e0b4a4ed31fc49 | 3,655,326 |
def select_most_uncertain_patch(x_image_pl, y_label_pl, fb_pred, ed_pred, fb_prob_mean_bald, kernel_window, stride_size,
already_select_image_index, previously_selected_binary_mask, num_most_uncert_patch,
method):
"""This function is used to acquire th... | 21f40e34b1436d91eca041998cb927800cc10f7b | 3,655,327 |
import requests
import json
def submit_extraction(connector, host, key, datasetid, extractorname):
"""Submit dataset for extraction by given extractor.
Keyword arguments:
connector -- connector information, used to get missing parameters and send status updates
host -- the clowder host, including htt... | 449fc6c3c37ef8a5206a7ebe18b367885ae319a8 | 3,655,328 |
import math
def fcmp(x, y, precision):
    """fcmp(x, y, precision) -> -1, 0, or 1

    Three-way float comparison: 0 when *x* and *y* differ by less than
    *precision*, otherwise -1 if x < y and 1 if x > y.
    """
    if math.fabs(x - y) < precision:
        return 0
    return -1 if x < y else 1
def parse_vcf_line(line):
"""
Args:
line (str): line in VCF file obj.
Returns:
parsed_line_lst (lst): with tuple elem (chr, pos, ref, alt)
Example:
deletion
pos 123456789012
reference ATTAGTAGATGT
deletion ATTA---GATGT
VCF:
... | 705c3bfe2ed3a0d4552dcbd18e8c08b73b84b40b | 3,655,330 |
def fuzzy_lookup_item(name_or_id, lst):
"""Lookup an item by either name or id.
Looking up by id is exact match. Looking up by name is by containment, and
if the term is entirely lowercase then it's also case-insensitive.
Multiple matches will throw an exception, unless one of them was an exact
mat... | 604b3879d0f97822d5a36db6dcf468ef8eefaac9 | 3,655,331 |
import os
def _make_output_dirs(root_output_dir, experiment_name):
"""Get directories for outputs. Create if not exist."""
tf.io.gfile.makedirs(root_output_dir)
checkpoint_dir = os.path.join(root_output_dir, 'checkpoints', experiment_name)
tf.io.gfile.makedirs(checkpoint_dir)
results_dir = os.path.join(ro... | df9f6774b74dfb4156414e5c7761a595fb8b6cb3 | 3,655,332 |
def fantasy_pros_ecr_scrape(league_dict=config.sean):
"""Scrape Fantasy Pros ECR given a league scoring format
:param league_dict: league dict in config.py used to determine whether to pull PPR/standard/half-ppr
"""
scoring = league_dict.get('scoring')
if scoring == 'ppr':
url = 'https:... | c20ae9542f9fea096510681bcf3c430b23cbdf29 | 3,655,333 |
def lda(X, y, nr_components=2):
"""
Linear discrimindant analysis
:param X: Input vectors
:param y: Input classes
:param nr_components: Dimension of output co-ordinates
:return: Output co-ordinates
"""
print("Computing Linear Discriminant Analysis projection")
X2 = X.copy()
X2.fl... | c9db65d494304246cf518833c1ae5c6ed22f3fa6 | 3,655,334 |
def _flatten_value_to_list(batch_values):
    """Converts an N-D dense or sparse batch to a 1-D list.

    Expects *batch_values* to contain exactly one array-like element;
    raises ValueError otherwise (via unpacking).
    """
    (batch_value,) = batch_values
    # ravel flattens in one pass; tolist yields native Python scalars,
    # which are cheaper for downstream processing.
    flat = batch_value.ravel()
    return flat.tolist()
def sun_position(time):
"""
Computes the sun's position in longitude and colatitude at a given time
(mjd2000).
It is accurate for years 1901 through 2099, to within 0.006 deg.
Input shape is preserved.
Parameters
----------
time : ndarray, shape (...)
Time given as modified Jul... | d5465044fbbe650580f4e9afaa13cf83e2cad758 | 3,655,336 |
import json
def get_assay_description(assay_id, summary=True, attempts=10):
""" Get the description of an assay in JSON format.
Parameters
----------
assay_id : int
The id of the bioassay.
summary : bool, optional
If true returns a summary of the d... | 13ff3620a1ef3e7aa1c12bd5a9b5aa88b2fb297f | 3,655,337 |
def acos(expr):
    """
    Arc cosine -- output in radians.

    Equivalent to the Moodle math function :code:`arccos`.
    """
    rendered = 'acos({})'.format(expr)
    return Expression(rendered)
from datetime import datetime
def annotate_genes(gene_df, annotation_gtf, lookup_df=None):
"""
Add gene and variant annotations (e.g., gene_name, rs_id, etc.) to gene-level output
gene_df: output from map_cis()
annotation_gtf: gene annotation in GTF format
lookup_df: DataFrame with va... | 562ef01380075a3e12eeaecdd6ab1e2285ddbc4f | 3,655,339 |
import torch
def y_gate():
    """Return the Pauli-Y gate as a 2x2 complex torch tensor."""
    matrix = [[0, -1j], [1j, 0]]
    # + 0j keeps the original's explicit complex promotion.
    return torch.tensor(matrix) + 0j
import tarfile
def extract_tarball(tarball, install_dir):
"""Extract tarball to a local path"""
if not tarball.path.is_file():
raise IOError(f"<info>{tarball.path}</info> is not a file!")
try:
with tarfile.open(tarball.path, "r:gz") as f_tarball:
extraction_dir = [
... | da9deeb71da36c7c01611f3be7965a8c4a22dc41 | 3,655,341 |
def compose_matrix(scale=None, shear=None, angles=None, translation=None, perspective=None):
"""Calculates a matrix from the components of scale, shear, euler_angles, translation and perspective.
Parameters
----------
scale : [float, float, float]
The 3 scale factors in x-, y-, and z-direction.... | a186919f8b6fc47637e7c20db30fbdd8e461e059 | 3,655,342 |
def dict_merge(set1, set2):
    """Return a new dict combining *set1* and *set2*.

    Keys present in both take *set2*'s value (same as the original
    list-concatenation behavior). Neither input is modified.
    """
    # PEP 448 unpacking replaces the dict(list(...) + list(...)) round trip.
    return {**set1, **set2}
def EucDistIntegral(a, b, x):
"""[summary]
Calculate Integrated Euclidean distance.
Args:
a (float): a value
b (float): b value
x (float): x value
Returns:
val: Integration result
"""
asq = a * a
bsq = b * b
xsq = x * x
dn = (6 * (1 + asq)**(3 ... | 3da541356636e8be7f9264d9d59a29dd003c082b | 3,655,344 |
def _VarintSize(value):
"""Compute the size of a varint value."""
if value <= 0x7f: return 1
if value <= 0x3fff: return 2
if value <= 0x1fffff: return 3
if value <= 0xfffffff: return 4
if value <= 0x7ffffffff: return 5
if value <= 0x3ffffffffff: return 6
if value <= 0x1ffffffffffff: return 7
if value ... | 4bd9b1c8d362f5e72e97f9f2c8e0d5711065291f | 3,655,345 |
import requests
def send_to_hipchat(
message,
token=settings.HIPCHAT_API_TOKEN,
room=settings.HIPCHAT_ROOM_ID,
sender="Trello",
color="yellow",
notify=False): # noqa
"""
Send a message to HipChat.
Returns the status code of the request. Should be 200.
"""
payload = {
... | 138abbf59f561a4c5d21aea9976856dbd7a581ca | 3,655,346 |
from cStringIO import StringIO
import cgi
def input(*requireds, **defaults):
"""
Returns a `storage` object with the GET and POST arguments.
See `storify` for how `requireds` and `defaults` work.
"""
def dictify(fs): return dict([(k, fs[k]) for k in fs.keys()])
_method = defaults.pop('_m... | 0b3fcd9142dbcd3309b80837c6fc53abdf4aaad6 | 3,655,347 |
def nodes_and_edges_valid(dev, num_nodes, node_names, rep):
"""Asserts that nodes in a device ``dev`` are properly initialized, when there
are ``num_nodes`` nodes expected, with names ``node_names``, using representation ``rep``."""
if not set(dev._nodes.keys()) == {"state"}:
return False
if not... | ad6dbfdfd92114c9b041617a91ad30dbe8a8189f | 3,655,348 |
def is_android(builder_cfg):
    """Determine whether the given builder is an Android builder.

    True when 'Android' appears in the builder's extra_config or when
    its os is exactly 'Android'.
    """
    in_extra_config = 'Android' in builder_cfg.get('extra_config', '')
    is_android_os = builder_cfg.get('os') == 'Android'
    return in_extra_config or is_android_os
def twoSum(self, numbers, target): # ! 这个方法可行
"""
:type numbers: List[int]
:type target: int
:rtype: List[int]
"""
numbers_dict = {}
for idn, v in enumerate(numbers):
if target - v in numbers_dict:
return [numbers_dict[target - v] + 1, idn + 1]
numbers_dict[v] = ... | e2b93828b5db7256b9a1e90e7e21adad1ce0b4de | 3,655,350 |
def not_after(cert):
    """
    Return the naive datetime of the certificate's 'not_after' field.

    This field denotes the last point in time at which the given
    certificate is valid.

    :return: Datetime
    """
    return cert.not_valid_after
from datetime import datetime
def parse_time_string(time_str: str) -> datetime.time:
"""Parses a string recognizable by TIME_REGEXP into a datetime.time object. If
the string has an invalid format, a ValueError is raised."""
match = TIME_REGEXP.match(time_str)
if match is None:
raise ValueErr... | 3238abcc6edb5a37c4a3d615b71e9dde6344f0ac | 3,655,352 |
import os
def dig(start, outdir, depth=2, max_duration=360):
"""
Crawls YouTube for source material (as mp3s).
Args:
- start: the starting YouTube url
- outdir: directory to save download tracks to
- depth: how many levels of related vids to look through
- max_duration: on... | 09cd6c98d117fc71f58373213ed432c1a126a6cf | 3,655,353 |
def get_roc_curve(y_true, y_score, title=None, with_plot=True):
"""
Plot the [Receiver Operating Characteristic][roc] curve of the given
true labels and confidence scores.
[roc]: http://en.wikipedia.org/wiki/Receiver_operating_characteristic
"""
fpr, tpr, thresholds = sklearn.metrics.roc_curve(... | 7635af1705c6bdaccce1e1c5e99719645026d436 | 3,655,354 |
from datetime import datetime
def read_err_songs():
""" read song data from xml file to a list of dictionaries """
songfile = open('/home/gabe/python/selfishmusic/errors.xml')
soup = BS.BeautifulSoup(songfile.read())
songsxml = soup.findAll('song')
songs = []
for song in songsxml:
sd =... | 287c205c054045b3a88b74cf008e5a21037f9727 | 3,655,355 |
def word_value(word: str) -> int:
    """Returns the sum of the alphabetical positions of each letter in word.

    An empty string sums to 0.
    """
    return sum(alpha.letter_index_upper(letter) for letter in word)
def status():
    """
    Returns json response of api status

    Returns:
        JSON: json object
    """
    # Renamed local: the original bound the dict to `status`, shadowing
    # this function's own name inside its body.
    payload = {
        "status": "OK"
    }
    return jsonify(payload)
def celcius_to_farenheit(x):
    """Convert a temperature *x* from degrees Celsius to degrees Fahrenheit."""
    return (9 * x / 5) + 32
def CheckFlags(node_name, report_per_node, warnings, errors,
flags, warning_helper, error_helper):
"""Check the status flags in each node and bookkeep the results.
Args:
node_name: Short name of the node.
report_per_node: Structure to record warning/error messages per node.
Its type ... | 63bac7bfa4e3fa9c3cc462f5400d68116dfb898d | 3,655,359 |
def EnrollmentTransaction():
    """Return the single-byte marker (0x20) identifying an enrollment
    transaction.

    :return: bytes
    """
    return b'\x20'
def sentinel_id(vocabulary, return_value=None):
"""Token ID to use as a sentinel.
By default, we use the last token in the vocabulary.
Args:
vocabulary: a t5.data.vocabularies.Vocabulary
return_value: an optional integer
Returns:
an integer
"""
if return_value is not None:
return return_va... | 08ad1116b7f41ba7070359675a0133f14b9917bd | 3,655,361 |
from datetime import datetime
import urllib
import hmac
import hashlib
import base64
def create_signature(api_key, method, host, path, secret_key, get_params=None):
"""
创建签名
:param get_params: dict 使用GET方法时附带的额外参数(urlparams)
:return:
"""
sorted_params = [
("AccessKeyId", api_key),
... | 3e38bc883da9f5ebb311e1498f8cc73d1754c38b | 3,655,362 |
def measure_fwhm(image, plot=True, printout=True):
"""
Find the 2D FWHM of a background/continuum subtracted cutout image of a target.
The target should be centered and cropped in the cutout.
Use lcbg.utils.cutout for cropping targets.
FWHM is estimated using the sigmas from a 2D gaussian fit of the... | c2fdb3a10ffa575ffe6fdeb9e86a47ffaefea5c2 | 3,655,363 |
from .mappia_publisher import MappiaPublisherPlugin
def classFactory(iface):  # pylint: disable=invalid-name
    """Entry point QGIS calls to instantiate the MappiaPublisher plugin.

    :param iface: A QGIS interface instance.
    :type iface: QgsInterface
    """
    # The interface handle is not needed by this plugin's constructor.
    return MappiaPublisherPlugin()
def mongo_insert_canary(mongo, db_name, coll_name, doc):
""" Inserts a canary document with 'j' True. Returns 0 if successful. """
LOGGER.info("Inserting canary document %s to DB %s Collection %s", doc, db_name, coll_name)
coll = mongo[db_name][coll_name].with_options(
write_concern=pymongo.write_co... | d82fe021db76972be19394688a07e9426bff82b7 | 3,655,365 |
from typing import Type
def is_dict_specifier(value):
# type: (object) -> bool
""" Check if value is a supported dictionary.
Check if a parameter of the task decorator is a dictionary that specifies
at least Type (and therefore can include things like Prefix, see binary
decorator test for some exa... | e18ad83a1b79a8150dfda1c65f4ab7e72cc8c8c8 | 3,655,366 |
def parse_star_count(stars_str):
    """Parse strings like '40.3k' and return the number of stars as an int.

    Plain numeric strings (no 'k' suffix) are converted directly.
    """
    cleaned = stars_str.strip()
    if cleaned[-1] == 'k':
        # 'k' suffix means thousands; e.g. '40.3k' -> 40300.
        return int(float(cleaned[:-1]) * 1000)
    return int(cleaned)
import shutil
def is_libreoffice_sdk_available() -> bool:
    """Check whether the LibreOffice SDK's `idlc` tool is on the current PATH."""
    idlc_path = shutil.which("idlc")
    return idlc_path is not None
def set_metrics_file(filenames, metric_type):
"""Create metrics from data read from a file.
Args:
filenames (list of str):
Paths to files containing one json string per line (potentially base64
encoded)
metric_type (ts_mon.Metric): any class deriving from ts_mon.Metric.
For ex. ts_mon.Gau... | 372ec1fcb4b50711b35e40936e63839d75689dee | 3,655,369 |
def sortino_ratio_nb(returns, ann_factor, required_return_arr):
"""2-dim version of `sortino_ratio_1d_nb`.
`required_return_arr` should be an array of shape `returns.shape[1]`."""
result = np.empty(returns.shape[1], dtype=np.float_)
for col in range(returns.shape[1]):
result[col] = sortino_rati... | 2dfd6be1b7d3747c87484b22eb0cc0b0271c93a6 | 3,655,370 |
import re
def format_env_var(name: str, value: str) -> str:
"""
Formats environment variable value.
Formatter is chosen according to the kind of variable.
:param name: name of environment variable
:param value: value of environment variable
:return: string representation of value in appropria... | 030b16b897f2222d8465143b462f99ba344ba1eb | 3,655,371 |
from typing import Counter
def evenly_divisible(n):
""" Idea:
- Find factors of numbers 1 to n. Use DP to cache results bottom up.
- Amongst all factors, we have to include max counts of prime factors.
- For example, in in 1 .. 10, 2 has to be included 3 times since 8 = 2 ^ 3
"""
max_count... | 68301a33751c2f3863092450235ca5c24b28379e | 3,655,372 |
def gradients(vals, func, releps=1e-3, abseps=None, mineps=1e-9, reltol=1,
epsscale=0.5):
"""
Calculate the partial derivatives of a function at a set of values. The
derivatives are calculated using the central difference, using an iterative
method to check that the values converge as step... | a3dcc4e0bb9402bd2d4c6b14b37c13647200f1a8 | 3,655,373 |
def do_open(user_input):
    """Best-effort file read (io.open semantics, as in PY3).

    Returns the file's contents as a string, or None if the file cannot
    be opened or read for any reason.
    """
    try:
        with open(user_input) as handle:
            return handle.read()
    except Exception:
        # Deliberately broad: any failure yields None rather than raising.
        return None
def likely_solution(players):
    """Return (card, count) tuples, where count is the number of players
    who don't have that card, sorted with the highest counts first.
    """
    counts = likely_solution_nums(players)
    pairs = [(ALLCARDS[idx], cnt) for idx, cnt in counts]
    return sorted(pairs, key=lambda pair: pair[1], reverse=True)
def cns_extended_inp(mtf_infile, pdb_outfile):
"""
Create CNS iput script (.inp) to create extended PDB file
from molecular topology file (.mtf)
Parameters
----------
mtf_infile : str
Path to .mtf topology file
pdb_outfile : str
Path where extended .pdb file will be stored
... | c850137db9a22fd48559228e3032bcd510c9d69b | 3,655,376 |
def index(request, response_format='html'):
"""Sales index page"""
query = Q(status__hidden=False)
if request.GET:
if 'status' in request.GET and request.GET['status']:
query = _get_filter_query(request.GET)
else:
query = query & _get_filter_query(request.GET)
or... | afb47a5c9094c9ff125c05c3588712d1875c69f3 | 3,655,377 |
import os
def getDroppableFilename(mime_data):
"""
Returns the filename of a file dropped into the canvas (if it was
accepted via @see isDroppableMimeType).
"""
if mime_data.hasUrls():
# Return the first locally existing file
for url in mime_data.urls():
fpath = url.toL... | c49370abf2b56f1cb3ded02c5edfab121a728096 | 3,655,378 |
def team_points_leaders(num_results=None, round_name=None):
"""Returns the team points leaders across all groups, as a dictionary profile__team__name
and points.
"""
size = team_normalize_size()
if size:
entries = score_mgr.team_points_leaders(round_name=round_name)
else:
entries... | 56b72b28f74f94e428b668b785b3dbd5b0c7c378 | 3,655,379 |
def with_color(text, color, bold=False):
"""
Return a ZSH color-formatted string.
Arguments
---------
text: str
text to be colored
color: str
ZSH color code
bold: bool
whether or not to make the text bold
Returns
-------
str
string with ZSH color... | 40c194d9de76ab504a25592cfb13407cb089da0a | 3,655,380 |
def sample_test():
    """Load and return the bundled sample test fixture (test.json)."""
    return get_sample_json("test.json")
import warnings
from io import StringIO
def dataframe_from_inp(inp_path, section, additional_cols=None, quote_replace=' ', **kwargs):
"""
create a dataframe from a section of an INP file
:param inp_path:
:param section:
:param additional_cols:
:param skip_headers:
:param quote_replace:
... | 8eaefdc08c7de3991f5a85cfe5001a6dcd0aaf7b | 3,655,382 |
def compositional_stratified_splitting(dataset, perc_train):
"""Given the dataset and the percentage of data you want to extract from it, method will
apply stratified sampling where X is the dataset and Y is are the category values for each datapoint.
In the case each structure contains 2 types of atoms, th... | b57a0b7d651e6f9be4182fec8c918438dcae9b7a | 3,655,383 |
def is_inside_line_segment(x, y, x0, y0, x1, y1):
"""Return True if the (x, y) lies inside the line segment defined by
(x0, y0) and (x1, y1)."""
# Create two vectors.
v0 = np.array([ x0-x, y0-y ]).reshape((2,1))
v1 = np.array([ x1-x, y1-y ]).reshape((2,1))
# Inner product.
prod = v0.transp... | b653c542d3d573857199d90257e9e36e6c45ccdc | 3,655,384 |
def transition_soil_carbon(area_final, carbon_final, depth_final,
transition_rate, year, area_initial,
carbon_initial, depth_initial):
"""This is the formula for calculating the transition of soil carbon
.. math:: (af * cf * df) - \
\\frac{1}{(1 ... | bfbf83f201eb8b8b0be0ec6a8722e850f6084e95 | 3,655,385 |
import json
import os
def make_global_config():
"""load & augment experiment configuration, then add it to global variables"""
parser = ArgumentParser(description='Evaluate TRE model.', formatter_class=ArgumentDefaultsHelpFormatter)
# parser.add_argument('--config_path', type=str, default="1d_gauss/20200... | 3e4e1a035220ae216b1beb22a27764c833e98566 | 3,655,386 |
import sys
def dijkstra(graph, source):
"""Find the shortest path from the source node to every other node in the given graph"""
# Declare and initialize result, unvisited, and path
result = {i: sys.maxsize if i != source else 0 for i in graph.nodes} # placeholder, by default set distance to maxsize
... | 4c3fda4922795b8a47e7b94bf3a09016f5eb2551 | 3,655,387 |
def _snr_approx(array, source_xy, fwhm, centery, centerx):
"""
array - frame convolved with top hat kernel
"""
sourcex, sourcey = source_xy
rad = dist(centery, centerx, sourcey, sourcex)
ind_aper = draw.circle(sourcey, sourcex, fwhm/2.)
# noise : STDDEV in convolved array of 1px wide annulus... | 6f055444163c03d0bcc61107db2045b968f06b52 | 3,655,388 |
def create_pipeline(training_set, validation_set, test_set):
"""
Create a pipeline for the training, validation and testing set
Parameters: training_set: Training data set
validation_set: Validation data set
test_set: Test data set
Returns: bat... | a6af6ff83180a0a11bfc3bacefd6a2e2261aaeed | 3,655,389 |
from typing import Tuple
from typing import Any
def invalid_request() -> Tuple[Any, int]:
    """Build the standard invalid-request API response: JSON body plus 400."""
    body = jsonify({API.Response.KEY_INFO: API.Response.VAL_INVALID_REQUEST})
    return body, 400
from pathlib import Path
from typing import List
from typing import Dict
from typing import Any
def _add_hyperparameters(
ranges_path: Path, defaults_path: Path
) -> List[Dict[str, Any]]:
"""Returns a list of hyperparameters in a format that is compatible with the json
reader of the ConfigSpace API.
... | 9609d3f31ffaee69148360966b1040f1970399b3 | 3,655,391 |
def setup(app):
"""Setup extension."""
app.add_domain(StuffDomain)
app.connect("builder-inited", generate_latex_preamble)
app.connect("config-inited", init_numfig_format)
app.add_css_file("stuff.css")
app.add_enumerable_node(
StuffNode,
"stuff",
html=(html_visit_stuff... | 3c7a5d36c835e7339876cdf88673d79e5f76b590 | 3,655,392 |
from pathlib import Path
import typing
def has_checksum(path: Path, csum: str,
                 csum_fun: typing.Optional[Checksum] = None) -> bool:
    """
    Compute the checksum of the file at *path* (with *csum_fun*, if given)
    and report whether it matches *csum*.

    :return: True if the file at the path `path` has given checksum
    """
    actual = get_checksum(path, csum_fun=csum_fun)
    return actual == csum
import numpy as np
def remove_outliers(column):
"""
:param column: list of numbers
:return:
"""
if len(column) < 1:
return []
clean_column = []
q1 = np.percentile(column, 25)
q3 = np.percentile(column, 75)
#k = 1.5
k = 2
# [Q1 - k(Q3 - Q1), Q3 + k(Q3 - Q1)]
lowe... | 04c1e736e27ffeaef528f25fd303d0f27c3a94ac | 3,655,394 |
import json
from datetime import datetime
import pytz
def lambda_handler(event, context):
"""
Lambda entry-point
"""
news_link = "https://news.ok.ubc.ca/feed/"
news_items = []
filtered_news_items = []
response_items = get_news_items_from_web(news_link)
if len(response_items) == 0:
... | cd188a0179ee74335202b865edb8b5216dbf50b8 | 3,655,395 |
def read_photons(photonfile, ra0, dec0, tranges, radius, verbose=0,
colnames=['t', 'x', 'y', 'xa', 'ya', 'q', 'xi', 'eta', 'ra',
'dec', 'flags']):
"""
Read a photon list file and return a python dict() with the expected
format.
:param photonfile: Name of ... | c83958f8ae541e5df564c5ce53dd40593c9dfc3e | 3,655,396 |
def normElec(surf, electrode, normdist, NaN_as_zeros=True):
"""
Notes
-----
When `normway` is a scalar, it takes the normal of the points of the mesh which are closer
than `normway`. However, some points have a normal of (0, 0, 0) (default assigned
if the vertex does not belong to any triangle).... | d449f4518c589a2a68b64ca812d964cb6249694e | 3,655,397 |
def filter_sources(sources, release):
"""Check if a source has already been consumed. If has not then add it to
sources dict.
"""
source, version, dist, arch = parse_release(release)
if source not in sources.keys():
sources[source] = {version: {dist: [arch]}}
return True
elif ver... | 661d379291170a4994c0813d24820007e47bd092 | 3,655,398 |
from typing import Union
from typing import Dict
from typing import Any
async def train(model, *args: Union[BaseSource, Record, Dict[str, Any]]):
"""
Train a machine learning model.
Provide records to the model to train it. The model should be already
instantiated.
Parameters
----------
... | 9a8e1648247a8eb3c8354c324ac2c48a52617899 | 3,655,399 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.