content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def str_to_bool(v):
    """Interpret the string *v* as a boolean.

    :type v: str
    :returns: True when v equals "true" or "1", case-insensitively.
    """
    normalized = v.lower()
    return normalized == "true" or normalized == "1"
def home_event_manager():
    """
    Route for alumni's home.

    :return: redirect to /events for a logged-in event manager (user type 2);
        otherwise clears the session and redirects to /login.
    """
    is_event_manager = "idUsers" in session and session["UserTypes_idUserTypes"] == 2
    if is_event_manager:
        return redirect("/events")
    session.clear()
    return redirect("/login")
def define_url_service(settings_dict) -> str:
    """Return the workflow service URL for the client.

    The environment variable named by defaults.SERVICE_URL_ENV takes
    precedence over the settings module; falls back to defaults.SERVICE_URL
    when neither is set.
    """
    env_url = os.environ.get(defaults.SERVICE_URL_ENV)
    return env_url or settings_dict.get("WORKFLOW_SERVICE", defaults.SERVICE_URL)
def make_analytics_slices(
record: Mapping[str, Any], key_value_map: Mapping[str, Any], start_date: str, end_date: str = None
) -> Iterable[Mapping[str, Any]]:
"""
We drive the ability to directly pass the prepared parameters inside the stream_slice.
The output of this method is ready slices for analyti... | 3,603 |
def test_check_file_relevance_and_format_path_ignored_non_pack_files(input_file_path):
"""
Given
- file path to validate
When
- file is not in Packs directory
Then
- return None, file is ignored
"""
validator_obj = ValidateManager(is_external_repo=True, chec... | 3,604 |
def md5_encode(text):
    """Return the uppercase hexadecimal MD5 digest of *text* (UTF-8 encoded)."""
    digest = hashlib.md5(text.encode('utf-8'))
    return digest.hexdigest().upper()
def _ngrams(segment, n):
"""Extracts n-grams from an input segment.
Parameters
----------
segment: list
Text segment from which n-grams will be extracted.
n: int
Order of n-gram.
Returns
-------
ngram_counts: Counter
Contain all the nth n-grams in segment with a... | 3,606 |
def process_file_scanexpr (container, filename, data):
"""
Process a single file
:param container: str, path and filename of container if the file is within
a zip archive, None otherwise.
:param filename: str, path and filename of file on disk, or within the container.
:param data: bytes, conte... | 3,607 |
def p_ProtocolDefn(p):
"""ProtocolDefn : OptionalSendSemanticsQual PROTOCOL ID '{' ProtocolBody '}' ';'"""
protocol = p[5]
protocol.loc = locFromTok(p, 2)
protocol.name = p[3]
protocol.sendSemantics = p[1]
p[0] = protocol
if Parser.current.type == 'header':
_error(protocol.loc, 'can... | 3,608 |
def batch(ctx, batches, batches_files, tuning_search_dict, tuning_search_file, no_wait, list_contexts, list_output_dirs, list_inputs, runner, local_concurrency, lsf_threads, lsf_memory, lsf_queue, lsf_fast_queue, lsf_resources, lsf_priority, action_on_existing, action_on_pending, prefix_outputs_path, forwarded_args):
... | 3,609 |
def build_varint(val):
    """Build a protobuf varint (little-endian base-128) for the given value."""
    encoded = bytearray()
    # Emit 7 bits per byte, setting the continuation bit on all but the last.
    while val > 0x7F:
        encoded.append((val & 0x7F) | 0x80)
        val >>= 7
    encoded.append(val)
    return bytes(encoded)
def show_user_workspace():
    """Shows the path of the user's workspace."""
    active = dict_workspace["active_workspace"]
    click.echo(f'current workspace: {active}')
def save_plot_values(temp_arrays, temp_names, out_dir, isParallel=True, saveInTextFormat=True, isDebug=True):
""" Saves arrays provided in the list in npy format """
# Return if not master process
# if isParallel:
# if not du.is_master_proc():
# return
for i in range(len(te... | 3,612 |
def _register(plugin_identity, type_identity):
"""
Registers a plug-in as a specific plug-in type.
This registers the plug-in here in the plug-ins module, and then calls the
register function of the plug-in type plug-in in case that plug-in wants to
do additional work when registering a new plug-in.
:param plugi... | 3,613 |
def python_safe_name(s):
"""
Return a name derived from string `s` safe to use as a Python function name.
For example:
>>> s = "not `\\a /`good` -safe name ??"
>>> assert python_safe_name(s) == 'not_good_safe_name'
"""
no_punctuation = re.compile(r'[\W_]', re.MULTILINE).sub
s = s.lower(... | 3,614 |
def hash_bytes(hash_type: SupportedHashes, bytes_param: bytes) -> bytes:
"""Hash arbitrary bytes using a supported algo of your choice.
Args:
hash_type: SupportedHashes enum type
bytes_param: bytes to be hashed
Returns:
hashed bytes
"""
hasher = get_hash_obj(hash_type)
ha... | 3,615 |
def analyze(osi, num_inc=1, dt=None, dt_min=None, dt_max=None, jd=None):
"""
Performs an analysis step.
Returns 0 if successful, and <0 if fail
Parameters
----------
osi
num_inc
dt
dt_min
dt_max
jd
Returns
-------
"""
op_type = 'analyze'
if dt is None:... | 3,616 |
def load_class(path: str) -> Any:
"""
Load a class at the provided location. Path is a string of the form: path.to.module.class and conform to the python
import conventions.
:param path: string pointing to the class to load
:return: the requested class object
"""
try:
log.info('load... | 3,617 |
def dcg_at_k(r, k, method=0):
"""Score is discounted cumulative gain (dcg)
Relevance is positive real values. Can use binary
as the previous methods.
Example from
http://www.stanford.edu/class/cs276/handouts/EvaluationNew-handout-6-per.pdf
# >>> r = [3, 2, 3, 0, 0, 1, 2, 2, 3, 0]
# >>> dcg_... | 3,618 |
def main():
"""
Boggle Game - print out the words in sequences of adjacent letters.
"""
start = time.time()
####################
boggle_dict = {}
# User input 4 rows of letters 4*4
row_1 = input('1 row of letters: ')
if len(row_1) > 7:
print("Illegal Input.")
else:
row_1 = row_1.lower()
boggle_dict['0']... | 3,619 |
def metric_section(data_model, metric, level) -> str:
"""Return the metric as Markdown section."""
markdown = markdown_header(metric["name"], level=level, index=True)
markdown += markdown_paragraph(metric["description"])
markdown += definition_list("Default target", metric_target(metric))
markdown +... | 3,620 |
def make_argument_parser():
"""
Creates an ArgumentParser to read the options for this script from
sys.argv
"""
parser = argparse.ArgumentParser()
parser.add_argument("nifti", help="Nifti file to be processed.")
parser.add_argument("--out", default=None, help="Output pickle file of roi dict.... | 3,621 |
def get_pkgs(rpmdir):
"""scan a dir of rpms and generate a pkgs structure. first try parsing
the filename. if that fails, try parsing the rpm headers.
"""
pkgs = {}
"""
pkgs structure:
* pkgs is a dict of package name, rpmblob list pairs:
pkgs = {name:[rpmblob,rpmblob...], name:[rpmblob,rpmblob...... | 3,622 |
def MAKEFOURCC(ch0: str, ch1: str, ch2: str, ch3: str) -> int:
"""Implementation of Window's `MAKEFOURCC`.
This is simply just returning the bytes of the joined characters.
`MAKEFOURCC(*"DX10")` can also be implemented by `Bytes(b"DX10")`.
Args:
ch0 (str): First char
ch1 (str): Second ... | 3,623 |
def upload(serial_port, path, eeprom=False, run=True, gpio_pin=-1, progress=do_nothing, terminal=False):
"""Upload file on given serial port and call the progress handler when done.
Arguments:
serial_port -- Serial port name in a PySerial compatible format, eg: /dev/ttyUSB0
path -- File path to Propell... | 3,624 |
def sort_configs(configs): # pylint: disable=R0912
"""Sort configs by global/package/node, then by package name, then by node name
Attributes:
configs (list): List of config dicts
"""
result = []
# Find all unique keys and sort alphabetically
_keys = []
for config in configs:
... | 3,625 |
def read_user_config():
"""Returns keys in lowercase of xlwings.conf in the user's home directory"""
config = {}
if Path(xlwings.USER_CONFIG_FILE).is_file():
with open(xlwings.USER_CONFIG_FILE, "r") as f:
for line in f:
values = re.findall(r'"[^"]*"', line)
... | 3,626 |
def MakeTableData(
visible_results, starred_items, lower_columns, lower_group_by,
users_by_id, cell_factories, id_accessor, related_issues, config,
context_for_all_issues=None):
"""Return a list of list row objects for display by EZT.
Args:
visible_results: list of artifacts to display on one pagin... | 3,627 |
def test_atomic_hex_binary_min_length_nistxml_sv_iv_atomic_hex_binary_min_length_1_1(mode, save_output, output_format):
"""
Type atomic/hexBinary is restricted by facet minLength with value 1.
"""
assert_bindings(
schema="nistData/atomic/hexBinary/Schema+Instance/NISTSchema-SV-IV-atomic-hexBinar... | 3,628 |
def _add_student_submit(behave_sensibly):
"""Allow addition of new students
Handle both "good" and "bad" versions (to keep code DRY)
"""
try:
if behave_sensibly:
do_add_student_good(
first_name=request.forms.first_name,
last_name=request.forms.last_n... | 3,629 |
def test_py2dict():
"""Test UNTL Elements are converted to a dictionary."""
title = us.Title(qualifier='serialtitle', content='The Bronco')
name = us.Name(content='Case, J.')
creator = us.Creator(qualifier='aut')
creator.add_child(name)
elements = us.Metadata()
elements.add_child(title)
... | 3,630 |
def test_stockwell_api():
"""Test stockwell functions"""
epochs = Epochs(raw, events, # XXX pick 2 has epochs of zeros.
event_id, tmin, tmax, picks=[0, 1, 3], baseline=(None, 0))
for fmin, fmax in [(None, 50), (5, 50), (5, None)]:
with warnings.catch_warnings(record=True): # ze... | 3,631 |
def _get_table_reference(self, table_id):
    """Build a reference to a table that lives in this dataset.

    Args:
        table_id (str): The ID of the table.

    Returns:
        google.cloud.bigquery.table.TableReference:
            A table reference for a table in this dataset.
    """
    reference = TableReference(self, table_id)
    return reference
def request_item_js( request ):
""" Returns modified javascript file for development.
Hit by a `dev_josiah_request_item.js` url; production hits the apache-served js file. """
js_unicode = u''
current_directory = os.path.dirname(os.path.abspath(__file__))
js_path = u'%s/lib/josiah_request_item.j... | 3,633 |
def get_present_types(robots):
    """Get unique set of type characters present in the given robot list."""
    present = set()
    for robot in robots:
        present.update(robot.type_chars)
    return present
def _deserialize_row(params, mask):
"""
This is for stochastic vectors where some elements are forced to zero.
Such a vector is defined by a number of parameters equal to
the length of the vector minus one and minus the number of elements
forced to zero.
@param params: an array of statistical pa... | 3,635 |
def ruru_old_log_checker(s):
"""
古いログ形式ならTrue、そうでないならFalseを返す
:param s:
:return:
"""
time_data_regex = r'[0-9]{4}\/[0-9]{2}\/[0-9]{2}\s[0-9]{2}:[0-9]{2}:[0-9]{2}'
# るる鯖新ログ形式なら1つ目のdiv:d12150で時刻が取得可能。そうでないなら取得不可
time_data = re.search(time_data_regex, str(s.find('div', class_='d12150')))
... | 3,636 |
def read(file_name, code='utf-8'):
"""
async generator to read file in with special delimeter
:param file_name: the way to the file
:param code: encoding of file (utf-8)
:return: generator with all parts of file
"""
with open(file_name, 'r', encoding=code) as file:
for line in file:
... | 3,637 |
def get_user_map_best(beatmap_id, user, enabled_mods=0):
"""
gets users best play on map
:param beatmap_id: beatmap id
:param user: username
:param enabled_mods: mods used
:return: list of plays
"""
response = OSU_API.get('/get_scores', {"b": beatmap_id, "u": user, "mods": enabled_mods}... | 3,638 |
def preprocess(sc,inputDir,file_format,outputDir):
"""
this method just reads the offer file and creates vertexrdd and edgerdd required for graphx
vertexrdd will be node uri and type
edgesrdd will be node a,node b,edge type
:param inputDir:
:param file_format:
:return:
"""
fileUtil =... | 3,639 |
def scale_z_by_atom(z, scale, copy=True):
"""
Parameters
----------
z_ : array, shape (n_trials, n_atoms, n_times - n_times_atom + 1)
Can also be a list of n_trials LIL-sparse matrix of shape
(n_atoms, n_times - n_times_atom + 1)
The sparse activation matrix.
scale : arra... | 3,640 |
def test_handle_time_limits(generate_workchain_base, generate_remote_data, generate_retrieved_data):
"""Test `FleurBaseWorkChain._handle_time_limits`."""
from aiida.common import LinkType
process = generate_workchain_base(exit_code=FleurCalculation.exit_codes.ERROR_TIME_LIMIT)
process.setup()
proce... | 3,641 |
def index():
"""
example action using the internationalization operator T and flash
rendered by views/default/index.html or views/generic.html
if you need a simple wiki simply replace the two lines below with:
return auth.wiki()
"""
if auth.is_logged_in():
# # if newly registered us... | 3,642 |
def test_ct_i026_ct_i026_v(mode, save_output, output_format):
"""
TEST :Syntax Checking for top level complexType Declaration : schema
with finalDefault = 'restriction extension' and final='restriction' ,
derived complexType by extension
"""
assert_bindings(
schema="msData/complexType/ct... | 3,643 |
def compute_classification_metrics_at_ks(is_match, num_predictions, num_trgs, k_list=[5,10], meng_rui_precision=False):
"""
:param is_match: a boolean np array with size [num_predictions]
:param predicted_list:
:param true_list:
:param topk:
:return: {'precision@%d' % topk: precision_k, 'recall@... | 3,644 |
def coordinatesOfPosition(shape, distance):
"""Compute the point at a given distance from the beginning of a shape.
The shape is a list of points. A point is a sequence of two floats.
The returned point is the x- and y-coordinate of the point that has
the given distance along the line of the shape... | 3,645 |
def compute_dose_median_scores(null_dist_medians, dose_list):
"""
Align median scores per dose, this function return a dictionary,
with keys as dose numbers and values as all median scores for each dose
"""
median_scores_per_dose = {}
for dose in dose_list:
median_list = []
for ... | 3,646 |
def generate_converter(name, taskdep, **options) :
"""
taskdep 是执行该程序之前应该执行的任务
task_html_generator 表示的是能够生成html的任务,我们需要从这个任务中提取result
taskname是生成的任务名
"""
converter = options.get('converter',
Pandoc("-f", "html", "-t", "markdown", "--wrap=none"))
flowdep = options.g... | 3,647 |
def log_to_tensorboard(writer, step, prefix, loss):
    """
    Log the loss metric to Tensorboard via the generic logging helper.
    """
    metric_name = "loss"
    log_generic_to_tensorboard(writer, step, prefix, metric_name, loss)
def save_model_all(model, save_dir, model_name, epoch):
"""
:param model:
:param save_dir:
:param model_name:
:param epoch:
:return:
"""
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
save_prefix = os.path.join(save_dir, model_name)
save_path = '{}_epoch_{}.pt'.for... | 3,649 |
def symmetric_product(tensor):
    """
    Symmetric outer product of tensor: the average of the tensor and its
    transpose over the last two dimensions.
    """
    # Build a permutation that swaps only the final two axes.
    perm = list(range(tensor.dim()))
    perm[-2], perm[-1] = perm[-1], perm[-2]
    transposed = tensor.permute(*perm)
    return 0.5 * (tensor + transposed)
def prep_image(img, inp_dim):
"""
Function:
Prepare image for inputting to the neural network.
Arguments:
img -- image it self
inp_dim -- dimension for resize the image (input dimension)
Return:
img -- image after preparing
"""
img = (letterbo... | 3,651 |
def display_similarity_matches(img, segm, patchSize, nbBins, classifier, axis_in_degrees=None):
"""Display the map of similar and non similar matches over the original image thanks to respectively green and red
circles.
# Arguments :
im: The image whose textures symmetry has been evaluate... | 3,652 |
async def auth_check(request):
    """
    No-op view whose only purpose is to set the session cookie; used by the
    websocket flow because the "Set-Cookie" header doesn't work with a
    101 upgrade response.
    """
    response = json_response(status='ok')
    return response
def check_constraint(term_freq,top_terms,top_terms_test_freq):
"""
Check the constraint 12%-30% for the test set
term_freq is the dictionnary of all term frequencies
top_terms is the list of terms we care about (first 300?)
top_terms_freq is an array of frequency of top terms in test set.
RETURN... | 3,654 |
def create_LSTM_model(patient_idx, time_steps, save_model=False, plot_loss=False):
"""
Trains an LSTM model over a patient
@param patient_idx: number
@param time_steps: number of concatenated heartbeats per datapoint
@param save_model: whether to save the model to h5 file
@param plot_loss: wheth... | 3,655 |
def get_supported_providers() -> list[str]:
    """
    Return the list of supported discussion providers.
    TODO: Load this from entry points?
    """
    return [
        'legacy',
        'piazza',
    ]
def predict_split(history, prediction_length=7*24, hyperparameters={}):
"""
This function predicts a time series of gas prices by splitting it into a
tren and a residual and then applying a feature pipeline and predicting
each of them individually.
Keyword arguments:
history -- the time series ... | 3,657 |
def SFRfromLFIR(LFIR):
    """
    Kennicutt 1998 star-formation-rate calibration from LFIR (8-1000um).

    LFIR in erg s-1
    SFR in Msun / year
    """
    return 4.5E-44 * LFIR
def test_valid_remote_your_discr_1(control, valid_data, mocker): # noqa: F811
"""Inject a valid packet and monitor the log"""
valid_data['your_discr'] = control.sessions[0].local_discr
packet = bitstring.pack(PACKET_FORMAT, **valid_data)
mocker.patch('aiobfd.control.log')
control.process_packet(pac... | 3,659 |
def save_element_as_file(element, filename, height, width):
"""
Saves any element as an image file. Element needs to have an underlyiong Widget available (almost if not all of them do)
:param element: The element to save
:param filename: The filename to save to. The extension of the filename determines... | 3,660 |
def inc_group_layers(n_list, d_list, c_list):
"""
Helper function for inc_tmm. Groups and sorts layer information.
See coh_tmm for definitions of n_list, d_list.
c_list is "coherency list". Each entry should be 'i' for incoherent or 'c'
for 'coherent'.
A "stack" is a group of one or more cons... | 3,661 |
def get_heater_device_json():
    """ returns information about the heater in json """
    state = _pretty_state_identifier(brew_logic.heater_state)
    overridden = str(brew_logic.heater_override).lower()
    return '{\n "state" : "' + state + '",\n "overridden" : "' + overridden + '"\n }'
def rfc_deploy():
"""This function trains a Random Forest classifier and outputs the
out-of-sample performance from the validation and test sets
"""
df = pd.DataFrame()
for pair in pairs:
# retrieving the data and preparing the features
dataset = gen_feat(pair)
dataset.drop(['Open', 'High', '... | 3,663 |
def test_log_two_tasks():
""" Test tailing a single file on two separate tasks """
returncode, stdout, stderr = exec_command(
['dcos', 'task', 'log', 'test-app'])
assert returncode == 0
assert stderr == b''
lines = stdout.decode('utf-8').split('\n')
assert len(lines) == 11
assert r... | 3,664 |
def get_block(block_name):
    """Get block from BLOCK_REGISTRY based on block_name.

    Raises:
        Exception: when ``block_name`` is not registered; the message lists
            the known block names.
    """
    # `x not in y` is the idiomatic form of the original `not x in y`.
    if block_name not in BLOCK_REGISTRY:
        raise Exception(NO_BLOCK_ERR.format(
            block_name, BLOCK_REGISTRY.keys()))
    return BLOCK_REGISTRY[block_name]
def task4_a2():
    """
    Copy the input to the output, replacing each run of one or more
    blanks with a single blank.
    """
    text = input('Enter your input: ')
    collapsed = ' '.join(text.split())
    print(collapsed)
def _determine_role_name(var_file: Path) -> str:
"""
Lookup role name from directory or galaxy_info.
"""
if var_file.is_file():
role_path: Path = var_file.parent / ".."
name = str(role_path.resolve().name)
meta_path: Path = role_path / 'meta' / 'main.yml'
if (meta_path.is... | 3,667 |
def load_table(file_path, metadata_ext='.pklmetadata'):
"""
Loads a pickled DataFrame from a file along with its metadata.
This function loads a DataFrame from a file stored in pickle format.
Further, this function looks for a metadata file with the same file name
but with an extension given by th... | 3,668 |
def main() -> int:
"""Ensure runtime environment is ready, and start the server."""
app.utils.setup_runtime_environment()
for safety_check in (
app.utils.ensure_supported_platform, # linux only at the moment
app.utils.ensure_local_services_are_running, # mysql (if local)
app.utils... | 3,669 |
def coco17_category_info(with_background=True):
"""
Get class id to category id map and category id
to category name map of COCO2017 dataset
Args:
with_background (bool, default True):
whether load background as class 0.
"""
clsid2catid = {
1: 1,
2: 2,
... | 3,670 |
def test_app_version(testing_defaults):
    """Test app_version default"""
    expected_version = '4.2.6'
    assert testing_defaults.info["app_version"] == expected_version
def _parse_parameters(paramdoc):
"""Parse parameters and return list of (name, full_doc_string)
It is needed to remove multiple entries for the same parameter
like it could be with adding parameters from the parent class
It assumes that previously parameters were unwrapped, so their
documentation ... | 3,672 |
def example_miller_set(example_crystal):
    """Generate an example miller set."""
    # Eight copies of (1,1,1) plus a single (2,2,2) reflection.
    index_list = [(1, 1, 1)] * 8 + [(2, 2, 2)]
    return miller.set(
        crystal_symmetry=example_crystal.get_crystal_symmetry(),
        indices=flex.miller_index(index_list),
        anomalous_flag=False,
    )
async def get(req):
"""
Get a complete analysis document.
"""
db = req.app["db"]
analysis_id = req.match_info["analysis_id"]
document = await db.analyses.find_one(analysis_id)
if document is None:
return not_found()
sample = await db.samples.find_one({"_id": document["sample... | 3,674 |
def get_cube_point_indexes(cube: xr.Dataset,
points: Union[xr.Dataset, pd.DataFrame, Mapping[str, Any]],
dim_name_mapping: Mapping[str, str] = None,
index_name_pattern: str = DEFAULT_INDEX_NAME_PATTERN,
index_dty... | 3,675 |
def auto_add():
"""
自动添加
1 查找所有amis文件
2 更新记录
3 记录按照app组织,生成dict
4 为每个app生成auto_urls.py
:return:
"""
amis_json_file_list = get_amis_files()
cnt = update_rcd(amis_json_file_list)
aml_app_dict = get_rcd_by_app_name()
add_needed_auto_urls(aml_app_dict)
add_urls_needed(... | 3,676 |
def _get_configs(cli_args: CLIArgs, project_root: Path) -> Configs:
"""
Deal with extra configs for 3rd party tool.
Parameters
----------
cli_args
Commandline arguments passed to nbqa
project_root
Root of repository, where .git / .hg / .nbqa.ini file is.
Returns
-------... | 3,677 |
def run_gx_test(dataset_path, output_dir, dist_types, ex_config, mp_args):
"""
The start and end parameter together make an interval that contains the datasets to be included in this experiment
:param mp_args: the configuration of the multiprocess backend,
go to this site https://docs.aws.amazon... | 3,678 |
def main():
"""
This is main function used to call other function
:return: nothing
"""
image_path = input("Enter path of image:")
# call read function to read an image
img = Read(image_path)
# call flip function to sharp the image
sharp_image = Sharp(img)
# call show function to ... | 3,679 |
def _load_lib():
"""Load libary in build/lib."""
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
lib_path = os.path.join(curr_path, '../../build/lib/')
path_to_so_file = os.path.join(lib_path, "libc_runtime_api.so")
lib = ctypes.CDLL(path_to_so_file, ctypes.RTLD_GLOBAL)
... | 3,680 |
def robust_standardize(df: pd.DataFrame, excluded_colnames: list = None) -> pd.DataFrame:
"""
Applies the RobustScaler from the module sklearn.preprocessing by
removing the median and scaling the data according to the quantile
range (IQR). This transformation is robust to outliers.
Note: In case mu... | 3,681 |
def meanStdDev( valueList, scale ):
"""Compute the mean and standard deviation of a *non-empty* list of numbers."""
numElements = len(valueList)
if numElements == 0:
return(None, 0.0)
mean = float(sum(valueList)) / numElements
variance = 0
for value in valueList:
variance += math.pow( value - mean, 2 )
var... | 3,682 |
def audience_filter(digest, audience):
"""Check whether the current audience level should include that digest."""
return get_split(
digest,
[
{
"key": "audience_{}".format(idx),
"size": 1.0
} for idx in range(0, 100)
]
) < audie... | 3,683 |
def request_specific_data2num(batch_data):
"""
input: next_batch_requestable request_specific_data[slot].
change the data into processable type for tensorflow
:param batch_data: 一个 batch 的训练数据
:return: 直接输入request-specific tracker 模型计算的数据
"""
batchsize_request = len(batch_data)
x_usr = ... | 3,684 |
def model_pred(auto_model=True, model_date=None): # 模型预测
"""
:param auto_model: 是否自动获取最新的模型,默认为True
:param model_date: 如果auto_model = False, 手动指定对应日期的模型
:return:
"""
if auto_model:
model_pred_machine = pump_model_pred(station_name, obj_type)
else:
model_pred_machine... | 3,685 |
def points_from_x0y0x1y1(xyxy):
    """
    Constructs a polygon representation from a rectangle described as a list [x0, y0, x1, y1]
    """
    x0, y0, x1, y1 = xyxy
    # Corners listed clockwise starting from the top-left.
    corners = [(x0, y0), (x1, y0), (x1, y1), (x0, y1)]
    return " ".join("%s,%s" % corner for corner in corners)
def get_columns(invoice_list, additional_table_columns):
"""return columns based on filters"""
columns = [
_("Invoice") + ":Link/Sales Invoice:120", _("Posting Date") + ":Date:80", _("Status") + "::80",
_("Customer") + ":Link/Customer:120", _("Sales Person") + ":Link/Sales Person:100",
_("AR Status") + "::75", ... | 3,687 |
def one_norm(a):
"""
Return the one-norm of the matrix.
References:
[0] https://www.mathworks.com/help/dsp/ref/matrix1norm.html
Arguments:
a :: ndarray(N x N) - The matrix to compute the one norm of.
Returns:
one_norm_a :: float - The one norm of a.
"""
return anp.max(anp.... | 3,688 |
def test_degree():
"""Tests for nodes of the given degree."""
os.chdir(os.path.dirname(__file__) + '/data')
proc = subprocess.Popen(['swc', 'find', 'pass_simple_branch.swc',
'-g', '2'],
stdout=subprocess.PIPE,
stderr=subpro... | 3,689 |
def remove_nan_inf(df, reindex=True):
"""
Removes all rows that have NaN, inf or -inf as a value, and then optionally
reindexes the dataframe.
Parameters
----------
df : pd.DataFrame
Dataframe to remove NaNs and Infs from.
reindex : bool, optional
Reindex the dataframe ... | 3,690 |
async def action(**kwargs):
"""
[infinity]
admin_channel_id =
admin_id =
recruiter_id =
"""
message = kwargs['message']
config = kwargs['config']
client = kwargs['client']
split_message = message.content.split()
admin_channel_id = config.getint('infinity', 'admin_channel_id'... | 3,691 |
def plot_insert_len(insert_len_filename,
settings_filename,
output_dir):
"""
Plot insert length distribution.
"""
if not os.path.isfile(settings_filename):
print "Error: settings filename %s not found." %(settings_filename)
sys.exit(1)
plot_nam... | 3,692 |
def contingency_table(seg, gt, ignore_seg=[0], ignore_gt=[0], norm=True):
"""Return the contingency table for all regions in matched segmentations.
Parameters
----------
seg : np.ndarray, int type, arbitrary shape
A candidate segmentation.
gt : np.ndarray, int type, same shape as `seg`
... | 3,693 |
def process_swissmodel(output_dir):
"""
Identify swissmodel models in a directory and copy them to output_dir
"""
proteins = [Path(i).stem for i in os.listdir('data/swissmodel') if i.endswith('.models')]
for protein in proteins:
config = pd.read_csv(f'data/swissmodel/{protein}.models', sep='... | 3,694 |
def LengthOfStayRangeAt24Hours(patient, enc):
"""Generate length of stay range labels at 24 hours after admission.
Args:
patient: patient proto, needed for label proto.
enc: encounter, caller needs to do the proper cohort filterings.
Yields:
(label_name, value, label_time) tuple.
"""
label_time ... | 3,695 |
def update_credit_status(blk_id, status):
"""Change a credit status"""
try:
database.execute("UPDATE credits SET status=%s WHERE blk_id = %s", (status, blk_id))
log.message('Changed credit status on block_id %d status to %s' % (blk_id, status))
except database.psycopg2.Error as e:
ra... | 3,696 |
def get_basic_track_info(track):
"""
Given a track object, return a dictionary of track name, artist name,
album name, track uri, and track id.
"""
# Remember that artist and album artist have different entries in the
# spotify track object.
name = track["name"]
artist = track['artists'... | 3,697 |
def demander_nombre(mini: int = None, maxi: int = None) -> int:
"""
Demande un nombre à l'utilisateur, situé entre min et max.
:param mini: le minimum
:param maxi: le maximum
:return: le nombre entrée par l'utilisateur
"""
message = 'Veuillez rentrer un nombre:'
if mini is not None and m... | 3,698 |
def ordered_load(stream, merge_duplicate_keys=False):
"""
Parse the first YAML document in a stream and produce the corresponding
Python object, using OrderedDicts instead of dicts.
If merge_duplicate_keys is True, merge the values of duplicate mapping keys
into a list, as the uWSGI "dumb" YAML par... | 3,699 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.