content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def get_depth(da, errors="raise"):
"""Get or compute the depth coordinate
If a depth variable cannot be found, it tries to compute either
from sigma-like coordinates or from layer thinknesses.
Parameters
----------
{errors}
Return
------
xarray.DataArray or None
See also
... | 5,356,100 |
def route53_scan(assets, record_value, record):
"""
Scan Route53
"""
for i, asset in enumerate(assets):
asset_type = asset.get_type()
if asset_type == 'EC2' and record_value in (asset.public_ip, asset.private_ip):
assets[i].dns_record = record['Name'].replace('\\052', '*')
... | 5,356,101 |
def process_language(text):
"""
Fetch from language processing API (cloud function)
:param text:
:return:
"""
# The language processing seems to fail without acsii decoding, ie remove emoji and chinese characters
request = {
"text": text.encode("ascii", errors="ignore").decode()
... | 5,356,102 |
def main() -> None:
"""
Plot the total burn and creep together with the creep categories for a certain sprint.
"""
root_path = Path(__file__).parents[1].resolve()
charts_dir = root_path.joinpath("charts")
sheet_dir = root_path.joinpath("data")
sprint_tasks_path = sheet_dir.joinpath("sprint_t... | 5,356,103 |
def to_int(text):
    """Convert *text* to an integer.

    Returns the empty string when *text* is not a valid integer literal;
    callers rely on this '' fallback (rather than None or an exception),
    so it is preserved.
    """
    try:
        value = int(text)
    except ValueError:
        return ''
    return value
def random_sample_datamap(datamap, size):
"""
returns a generator that samples from a datamap along the first dimension
"""
total_size = len(datamap.values()[0])
for v in datamap.values():
assert len(v) == total_size
while True:
res = {k: [] for k in datamap}
for _ in ran... | 5,356,105 |
def get_speakerproposal_picture_upload_path():
    """Dead stub kept only because old migrations reference it.

    Safe to delete once the historical migrations are squashed.
    Always returns None.
    """
    return None
def render_items(s_items: dict, all_items: Dict[str, Dict[str, str]], ostream: StringIO):
"""
args:
s_items (dict): the dictionary of capabilities belonging to the sample.
all_items (dict): the dictionary of all the reference capabilities (e.g. ATT&CK or MBC).
ostream (StringIO): the out... | 5,356,107 |
def parse_commonsense_reasoning_test(test_data_name):
"""Read JSON test data."""
with tf.gfile.Open(os.path.join(
FLAGS.data_dir, 'commonsense_test',
'{}.json'.format(test_data_name)), 'r') as f:
data = json.load(f)
question_ids = [d['question_id'] for d in data]
sentenc... | 5,356,108 |
def flooding(loss, b):
    """Flooding regularizer: reflect *loss* about the flood level *b*.

    Computes |loss - b| + b, which keeps the loss at or above b.
    """
    distance_from_flood = (loss - b).abs()
    return distance_from_flood + b
def CreateConditions(p,avec,bvec,indexgenerator=CreateLyndonIndices):
"""This creates the set of equations using by default the Lyndon Basis elements.
Parameters
----------
p : the considered order
avec: The set of symbols to use for the first operator.
bvec: The set of symbols to use for the s... | 5,356,110 |
def with_conf_blddir(conf, name, body, func):
"""'Context manager' to execute a series of tasks into code-specific build
directory.
func must be a callable taking no arguments
"""
old_root, new_root = create_conf_blddir(conf, name, body)
try:
conf.bld_root = new_root
conf.bl... | 5,356,111 |
def many_to_one(clsname, **kw):
"""Use an event to build a many-to-one relationship on a class.
This makes use of the :meth:`.References._reference_table` method
to generate a full foreign key relationship to the remote table.
"""
@declared_attr
def m2o(cls):
cls._references((cls.__nam... | 5,356,112 |
def fullImport(arg):
    """ Import only for design evaluation """
    # Expose the lazily-imported symbol at module scope for later use.
    global evalDesignInfo
    if not arg.onlyTable:
        # Deferred import: evaluateSBCDesign is only needed for a full
        # evaluation, not when just the table is requested.
        # Assumes the CODE env var points at the checkout that contains
        # sbml2doe -- TODO confirm; os.getenv returns None if unset and
        # os.path.join would then raise.
        sys.path.append(os.path.join(os.getenv('CODE'),'sbml2doe'))
        from stress_test_scripts.evaluateSBCDesign import evalDesignInfo
def _get_dictionary_paths(d, main_list, tmp_list):
"""
Private method for setting the given main_list with lists containing all the key-paths of a dictionary.
For example: The key-paths of this list {a{b:{c:1, d:2}, e:3}} are a,b,c; a,b,d; a,e
:param d: the dictionary for gaining all the depth path
... | 5,356,114 |
def get_workflow_by_id(obj, pretty_print, beep,
id,
headers):
"""Returns a workflow specified by id.
"""
spinner = init_spinner(beep=beep)
start_spinner(spinner)
try:
if headers is not None:
headers = json.loads(headers)
resul... | 5,356,115 |
def get_cnx(dbname=None, write=False):
"""Return a new connection to the database by the given name.
If 'dbname' is None, return a connection to the system database.
If the database file does not exist, it will be created.
The OS-level file permissions are set in DbSaver.
"""
if dbname is None:
... | 5,356,116 |
def nir_mean(msarr,nir_band=7):
"""
Calculate the mean of the (unmasked) values of the NIR (near infrared) band
of an image array. The default `nir_band` value of 7 selects the NIR2 band
in WorldView-2 imagery. If you're working with a different type of imagery,
you will need figure out the appropri... | 5,356,117 |
def decode(invoice) -> LightningInvoice:
"""
@invoice: is a str, bolt11.
"""
client = CreateLightningClient()
try:
decode_response = client.call("decode", invoice)
assert decode_response.get("error") is None
result = decode_response["result"]
assert result["valid"], ... | 5,356,118 |
def main(args):
"""Scan through PDF and split PDF and images."""
filename = args[0]
split_path = args[1]
qr_prefix = args[2]
qr_suffix = args[3]
try:
os.chdir(split_path)
pdfPages = PdfFileReader(filename)
pdf_writer = PdfFileWriter()
i = cover_index = id_index = ... | 5,356,119 |
def get_wharton_sessionid(public=False):
""" Try to get a GSR session id. """
sessionid = request.args.get("sessionid")
cache_key = "studyspaces:gsr:sessionid"
if sessionid:
return sessionid
if public:
if db.exists(cache_key):
return db.get(cache_key).decode("utf8")
... | 5,356,120 |
def timing ( name = '' , logger = None ) :
"""Simple context manager to measure the clock counts
>>> with timing () :
... whatever action is here
at the exit it prints the clock counts
>>> with timing () as c :
... whatever action is here
at the exit it prints the clock coun... | 5,356,121 |
def plotErrorEllipse(x, y, prob=[.68, .95, .997], **kwargs):
"""Compute and plot error ellipses around the mean of a 2d distribution.
Given two arrays, ``x`` and ``y`` where the values of each are drawn
from a Gaussian distribution, but the values of ``y`` are correlated with
the values of ``x``, compu... | 5,356,122 |
def abs(x):
    """
    Complex-step safe version of numpy.abs function.

    Parameters
    ----------
    x : ndarray, float, or complex
        Value(s) to take the absolute value of.

    Returns
    -------
    ndarray, float, or complex
        Sign-flipped value(s); for a complex input with negative real
        part the whole number is negated (complex-step convention).
    """
    if isinstance(x, np.ndarray):
        # Elementwise: multiplying by the sign flips negatives.
        return np.sign(x) * x
    if x.real < 0.0:
        return -x
    return x
def downgrade():
"""Downgrade database."""
op.drop_column("accounts_user", "preferences")
op.drop_column("accounts_user", "profile")
op.drop_constraint(
op.f("uq_accounts_user_username"), "accounts_user", type_="unique"
)
op.drop_column("accounts_user", "displayname")
op.drop_column(... | 5,356,124 |
async def add_comm_post(request):
# return json.dumps(current_id, title, link, proc_id)
"""current_id это id ветки"""
# ip = request.environ.get('REMOTE_ADDR')
data = await request.post(); ip = None
print('data->', data)
#get ip address client
peername = request.transport.get_extra_info('peername'); host=None
if... | 5,356,125 |
def reachable_from_node(node, language=None, include_aliases=True):
"""Returns a tuple of strings containing html <ul> lists of the Nodes and
pages that are children of "node" and any MetaPages associated with these
items.
:params node: node to find reachables for
:params language: if None, retur... | 5,356,126 |
def init_data():
"""
setup all kinds of constants here, just to make it cleaner :)
"""
if args.dataset=='imagenet32':
mean = (0.4811, 0.4575, 0.4078)
std = (0.2605 , 0.2533, 0.2683)
num_classes = 1000
else:
raise NotImplementedError
if args.whiten_image==0:
... | 5,356,127 |
def make_status_craft():
""" Cria alguns status de pedido de fabricação"""
if Statusfabricacao.objects.count() == 0:
status1 = Statusfabricacao(order=0, status='Pedido Criado')
status2 = Statusfabricacao(order=1, status='Maturação')
status3 = Statusfabricacao(order=2, status='Finalizaçã... | 5,356,128 |
def _call_rest_api(url, input_data, request_type):
"""Calls the other rest api's"""
try:
if request_type == 'post':
req = requests.post(url, params=input_data, json=input_data, timeout=30)
else:
req = requests.get(url, params=input_data, timeout=30)
respons... | 5,356,129 |
def compute_tso_threshold(arr, min_td=0.1, max_td=0.5, perc=10, factor=15.0):
"""
Computes the daily threshold value separating rest periods from active periods
for the TSO detection algorithm.
Parameters
----------
arr : array
Array of the absolute difference of the z-angle.
min_td... | 5,356,130 |
def test_graceful_squeezing(loss):
"""Test that reshaped raw_prediction gives same results."""
y_true, raw_prediction = random_y_true_raw_prediction(
loss=loss,
n_samples=20,
y_bound=(-100, 100),
raw_bound=(-10, 10),
seed=42,
)
if raw_prediction.ndim == 1:
... | 5,356,131 |
def test_gen_date_14():
    """max-date must be a Datetime type."""
    lower_bound = datetime.datetime.now()
    # A string max_date must be rejected with ValueError.
    with pytest.raises(ValueError):
        gen_datetime(min_date=lower_bound, max_date='foo')
def run_generator(conversation_name):
"""
Input:
conversation_name: name of conversation to analyze
Output:
username of next speaker, message for that speaker to send next
"""
state = settings.DISCORD_CONVERSATION_STATES.get(conversation_name, {})
(
next_speaker_username... | 5,356,133 |
def count_uniques(row):
    """Return len(unique(row)) - 1.

    The -1 compensates for NaN counting as one extra unique value
    (per the original intent); for NaN-free input the result is simply
    one less than the number of distinct values.
    """
    distinct = np.unique(row)
    return distinct.size - 1
def gmres_dot(X, surf_array, field_array, ind0, param, timing, kernel):
"""
It computes the matrix-vector product in the GMRES.
Arguments
----------
X : array, initial vector guess.
surf_array : array, contains the surface classes of each region on the
surface.
... | 5,356,135 |
def test_compile_3():
""" test_compile_3 """
# test for Graph mode
# has bias
context.set_context(mode=context.GRAPH_MODE)
net = Net(128, 10)
input_data = Tensor(np.random.randint(0, 255, [128, 128]).astype(np.float32))
_executor.compile(net, input_data)
# training
net_train = Net(1... | 5,356,136 |
def update_pris(traj, td_loss, indices, alpha=0.6, epsilon=1e-6, update_epi_pris=False, seq_length=None, eta=0.9):
"""
Update priorities specified in indices.
Parameters
----------
traj : Traj
td_loss : torch.Tensor
indices : torch.Tensor ot List of int
alpha : float
epsilon : float... | 5,356,137 |
def get_library_version(performer_prefix: str, schemas: Sequence[Schema]) -> str:
"""Generates the library's version string.
The version string is of the form "{performer_prefix}_{latest_creation_date}_{library_hash}".
Args:
performer_prefix: Performer prefix for context.
schemas: YAML sch... | 5,356,138 |
def revert(revision):
    """
    Reverts application to selected revision.

    Usage: fab prod revert:ae7b9acb96c3fea00ab855952071570279b5d978

    :param revision: git commit-ish to check out on the remote host.
    """
    # Run inside the project's virtualenv on the remote host
    # (presumably a Fabric context manager -- TODO confirm).
    with virtualenv():
        run('git checkout {}'.format(revision))
        # Sync submodules to the checked-out revision.
        run('git submodule update')
def test_for_user_bootcamp_run(lines_fulfilled):
    """
    Test for the for_user_bootcamp_run classmethod

    :param lines_fulfilled: pytest fixture -- presumably yields
        (fulfilled line, another line, user); TODO confirm in conftest.
    """
    line_fulfilled_1, _, user = lines_fulfilled
    # Only the fulfilled line belonging to this user's bootcamp run
    # should be returned by the classmethod.
    assert list(Line.for_user_bootcamp_run(user, line_fulfilled_1.bootcamp_run)) == [
        line_fulfilled_1
    ]
def test_raise_fe():
    """Test the raise of KeyError in fe."""
    grid = np.linspace(0, 1, num=5)
    frame = pd.DataFrame({"x": grid, "natoms_a": np.full(5, 8)})
    # Missing required columns must surface as KeyError.
    with pytest.raises(KeyError):
        exma.electrochemistry.formation_energy(frame, -1.0, -0.5)
def sizeFromString(sizeStr, relativeSize):
"""
Converts from a size string to a float size.
sizeStr: The string representation of the size.
relativeSize: The size to use in case of percentages.
"""
if not sizeStr:
raise Exception("Size not specified")
dpi = 96.0
cm = 2.54
if len(sizeStr) > 2 and sizeStr[-2:... | 5,356,142 |
def plot_r2(
model: mofa_model,
x="Group",
y="Factor",
factors: Union[int, List[int], str, List[str]] = None,
groups_df: pd.DataFrame = None,
group_label: str = None,
views=None,
groups=None,
cmap="Blues",
vmin=None,
vmax=None,
**kwargs,
):
"""
Plot R2 values for ... | 5,356,143 |
def all(x: Union[ivy.Array, ivy.NativeArray],
axis: Optional[Union[int, Tuple[int], List[int]]] = None,
keepdims: bool = False)\
-> ivy.Array:
"""
Tests whether all input array elements evaluate to ``True`` along a specified axis.
.. note::
Positive infinity, negative infinit... | 5,356,144 |
def compilePy(target):
    """Byte-compile all .py files under *target*, then delete the sources.

    Uses the legacy layout (module.pyc written next to module.py) so that
    after the .py files are removed only the compiled artifacts remain.

    :param target: root directory of the tree to process.
    """
    import compileall
    from pathlib import Path

    compileall.compile_dir(target, force=True, legacy=True)
    # Portable replacement for `os.system('find ... -delete')`: works on
    # Windows too and does not shell out with an unquoted path.
    for source in Path(target).rglob('*.py'):
        if source.is_file():
            source.unlink()
def verify_time_format(time_str):
"""
This method is to verify time str format, which is in the format of 'hour:minute', both can be either one or two
characters.
Hour must be greater or equal 0 and smaller than 24, minute must be greater or equal 0 and smaller than 60
:param time_str: time s... | 5,356,146 |
def TestTags(client, get_fn, add_fn, delete_fn, *args):
""" Tests whether tagging works.
@type client: C{GanetiRapiClientWrapper}
@param client: The client wrapper.
@type get_fn: function
@param get_fn: A Get*Tags function of the client.
@type add_fn: function
@param add_fn: An Add*Tags function of the c... | 5,356,147 |
def test_export_overwrite(tmpdir, data):
"""Overwrites existing file"""
inputfile = str(data.join('RGB.byte.tif'))
output = tmpdir.join('export.mbtiles')
output.write("lolwut")
outputfile = str(output)
runner = CliRunner()
result = runner.invoke(main_group, ['mbtiles', '--overwrite', inputfi... | 5,356,148 |
def extract_region_df(region_code="11"):
"""
Extracts dataframes that describes regional-level vaccines data for a single region, making some analysis on it.
:rtype: Dataframe
"""
df = RAW_DF
df = df.loc[df['codice_regione_ISTAT'] == region_code]
df = df.sort_values('data_somministrazione... | 5,356,149 |
def get_title(offer_markup):
""" Searches for offer title on offer page
:param offer_markup: Class "offerbody" from offer page markup
:type offer_markup: str
:return: Title of offer
:rtype: str, None
"""
html_parser = BeautifulSoup(offer_markup, "html.parser")
return html_parser.h1.text... | 5,356,150 |
def genome(request):
"""Create a test genome and location"""
name = "ce10" # Use fake name for blacklist test
fafile = "tests/data/small_genome.fa.gz"
genomes_dir = os.path.join(os.getcwd(), ".genomepy_plugin_tests")
if os.path.exists(genomes_dir):
shutil.rmtree(genomes_dir)
genome_dir... | 5,356,151 |
def get_twinboundary_shear_structure(twinboundary_relax_structure,
shear_strain_ratio,
previous_relax_structure=None,
**additional_relax_structures,
):
"""
If lates... | 5,356,152 |
def search(source_num, bin_path, chrome_args):
"""
Scrape proxies from the web
"""
chrome_args = chrome_args.split(',')
_args = []
for arg in chrome_args:
if len(arg) > 0:
if not arg.startswith('--'):
arg = '--{}'.format(arg)
_args.append(arg)
... | 5,356,153 |
def _map_spectrum_weight(map, spectrum=None):
"""Weight a map with a spectrum.
This requires map to have an "energy" axis.
The weights are normalised so that they sum to 1.
The mean and unit of the output image is the same as of the input cube.
At the moment this is used to get a weighted exposure... | 5,356,154 |
def fetch_all_db_as_df(allow_cached=False):
"""Converts list of dicts returned by `fetch_all_db` to DataFrame with ID removed
Actual job is done in `_worker`. When `allow_cached`, attempt to retrieve timed cached from
`_fetch_all_db_as_df_cache`; ignore cache and call `_work` if cache expires or `allow_cach... | 5,356,155 |
def tool_proxy_from_persistent_representation(persisted_tool, strict_cwl_validation=True, tool_directory=None):
"""Load a ToolProxy from a previously persisted representation."""
ensure_cwltool_available()
return ToolProxy.from_persistent_representation(
persisted_tool, strict_cwl_validation=strict_... | 5,356,156 |
def has_space_element(source):
"""
判断对象中的元素,如果存在 None 或空字符串,则返回 True, 否则返回 False, 支持字典、列表和元组
:param:
* source: (list, set, dict) 需要检查的对象
:return:
* result: (bool) 存在 None 或空字符串或空格字符串返回 True, 否则返回 False
举例如下::
print('--- has_space_element demo---')
print(has_space_... | 5,356,157 |
def return_latest_psm_is(df, id_col, file_col, instr_col, psm_col):
""" Extracts info on PSM number, search ID and Instrument from the last row in DB
"""
last_row = df.iloc[-1]
search_id = last_row[id_col]
instr = last_row[instr_col]
psm = last_row[psm_col]
psm_string = str(psm) + ' P... | 5,356,158 |
async def check_find_settings(settings_collection: SettingsCollection, test_data: dict):
"""
Check that the find settings in the database collection returns the expected result.
:param settings_collection: MongoDB collection.
:type settings_collection: SettingsCollection
:param test_data: Database ... | 5,356,159 |
def add_parser_arguments_misc(parser):
"""
Adds the options that the command line parser will search for, some miscellaneous parameters, like use of gpu,
timing, etc.
:param parser: the argument parser
:return: the same parser, but with the added options.
"""
parser.add_argument('--use_gpu',... | 5,356,160 |
def colo_model_tensor_clone(t: Union[StatefulTensor, torch.Tensor], target_device: torch.device) -> torch.Tensor:
"""
Clone a model data tensor
Args:
t (Union[StatefulTensor, torch.Tensor]): a model data tensor
target_device (torch.device): the target device
Returns:
torch.Tensor... | 5,356,161 |
def plugin_init(config):
"""Registers HTTP Listener handler to accept sensor readings
Args:
config: JSON configuration document for the South device configuration category
Returns:
handle: JSON object to be used in future calls to the plugin
Raises:
"""
handle = config
retur... | 5,356,162 |
def search(query,page):
"""Scrapes the search query page and returns the results in json format.
Parameters
------------
query: The query you want to search for.
page: The page number for which you want the results.
Every page returns 11 results.
... | 5,356,163 |
def get_jobs(job_filename):
"""Reads jobs from a known job file location
"""
jobs = list()
if job_filename and os.path.isfile(job_filename):
with open(job_filename, 'r') as input_fd:
data = input_fd.read()
job_dict = json.loads(data)
del data
for job in jo... | 5,356,164 |
def split_train_eval_data(origin_file: str, train_file: str,
eval_file: str, fraction: float = .2) -> None:
"""
从原始手动标注数据中分离训练和测试集
:param origin_file: 原始数据路径
:param train_file: 保存的训练数据文件路径
:param eval_file: 保存的测试文件路径
:param fraction: 分离的测试数据比例,默认总体的20%
:return:
... | 5,356,165 |
def quote_with_backticks_definer(definer):
"""Quote the given definer clause with backticks.
This functions quotes the given definer clause with backticks, converting
backticks (`) in the string with the correct escape sequence (``).
definer[in] definer clause to quote.
Returns string with th... | 5,356,166 |
def cvimg_to_b64(img):
    """
    Image conversion helper: encode a binary (OpenCV) image as base64.
    """
    try:
        image = cv2.imencode('.jpg', img)[1]  # Encode the image into an in-memory JPEG byte stream
        base64_data = str(base64.b64encode(image))[2:-1]  # Base64-encode, then slice off the b'...' wrapper from the repr
        return base64_data  # Return the encoded string
    except Exception as e:
        # Any failure (bad input, encode error) collapses to the literal
        # string "error" -- callers presumably test for it; TODO confirm.
        return "error"
def draw_label(label, img, n_class, label_titles, bg_label=0):
"""Convert label to rgb with label titles.
@param label_title: label title for each labels.
@type label_title: dict
"""
from PIL import Image
from scipy.misc import fromimage
from skimage.color import label2rgb
from skimage.... | 5,356,168 |
def inspect(template_dir, display_type=None):
"""Generates a some string representation of all undefined variables
in templates.
Args:
template_dir (str): all files within are treated as templates
display_type (str): tabulate.tabulate tablefmt or 'terse'.
Examples:
Yields an ov... | 5,356,169 |
def calc_fitness_all(chromosomes, video_list, video_data):
"""Calculates fitness for all chromosomes
Parameters
----------
chromosomes : np.ndarrray
List of chromosomes
video_list : np.ndarray
List of all video titles (in this case number identifiers)
video_data : pd dataframe
... | 5,356,170 |
def grouping_is_valid(
proposed_grouping: List[Set[str]],
past_groups: List[Set[str]],
max_intersection_size: int,
) -> bool:
"""Returns true if no group in the proposed grouping intersects with any
past group with intersection size strictly greater than
`max_intersection_size`.
"""
for ... | 5,356,171 |
def spyder_light(event):
    """Launch spyder in "light" mode.

    Temporarily rewrites ``sys.argv`` so spyder sees the ``--light`` flag,
    then restores the original argv.

    :param event: launch event forwarded to ``spyder_launch``.
    """
    oldarg = sys.argv
    sys.argv = ['spyder', '--light']
    try:
        spyder_launch(event)
    finally:
        # Restore argv even if spyder_launch raises, so the host process
        # is never left with a clobbered sys.argv (the original leaked it
        # on exception).
        sys.argv = oldarg
def next_wire_in_dimension(wire1, tile1, wire2, tile2, tiles, x_wires, y_wires,
wire_map, wires_in_node):
""" next_wire_in_dimension returns true if tile1 and tile2 are in the same
row and column, and must be adjcent.
"""
tile1_info = tiles[tile1]
tile2_info = tiles[tile2]
... | 5,356,173 |
def get(*, db_session, report_id: int) -> Optional[Report]:
    """
    Get a report by id.

    :param db_session: active SQLAlchemy session used for the query.
    :param report_id: primary key of the report to fetch.
    :return: the matching ``Report``, or ``None`` when no row matches
        (``one_or_none`` raises if more than one row matches).
    """
    return db_session.query(Report).filter(Report.id == report_id).one_or_none()
def host_from_path(path):
    """Return the network-location (host[:port]) component of *path*."""
    parsed = urllib.parse.urlparse(path)
    return parsed.netloc
def sampleM(a0, bk, njk, m_cap=20):
"""produces sample from distribution over M using normalized log probabilities parameterizing a
categorical dist."""
raise DeprecationWarning()
wts = np.empty((m_cap,))
sum = 0
for m in range(m_cap):
wts[m] = gammaln(a0*bk) - gammaln(a0*bk+njk) + log(... | 5,356,176 |
def or_default(none_or_value, default):
    """Return *none_or_value* unless it is None, else *default*.

    inputs:
        none_or_value: variable to test
        default: value to return if none_or_value is None

    Note: only None triggers the fallback -- falsy values such as 0,
    '' or False are returned as-is.
    """
    if none_or_value is None:
        return default
    return none_or_value
def find_optimal_components_subset(contours, edges):
"""Find a crop which strikes a good balance of coverage/compactness.
Returns an (x1, y1, x2, y2) tuple.
"""
c_info = props_for_contours(contours, edges)
c_info.sort(key=lambda x: -x['sum'])
total = np.sum(edges) / 255
area = edges.shape[0... | 5,356,178 |
def fetch_align_sex(rerun, run, camcol, field,
bands=None, reference_band='r', remove=True):
"""
Run fetch, align, and sex in a single field.
"""
if bands is None:
bands = [b for b in "ugriz"]
registered_images = fetch_align(rerun, run, camcol, field, remove=remove)
reference_image... | 5,356,179 |
def scrape(html):
"""정규표현식으로 도서 정보 추출"""
books = []
for partial_html in re.findall(r'<td class="left">Ma.*?</td>', html, re.DOTALL):
#도서의 URL 추출
url = re.search(r'<a href="(.*?)">', partial_html).group(1)
url = 'http://www.hanbit.co.kr' + url
#태그를 제거해 도서의 제목 추출
title... | 5,356,180 |
def get_time_zone_offset(area_code):
    """Return the integer offset for a known area code, else None.

    Accepts either a string or anything str()-convertible (e.g. int).
    """
    key = area_code if isinstance(area_code, str) else str(area_code)
    if key in area_code_mapping:
        # Mapping values are tuples; index 1 holds the offset.
        return area_code_mapping[key][1]
def true_false_counts(series: pd.Series):
    """
    Count the True and False values in a boolean series.

    :param series: boolean ``pd.Series`` to tally.
    :return: two-item list ``[num_true, num_false]``.
    """
    counts = series.value_counts()
    # value_counts() omits a category that never occurs, so the original
    # sort_index().tolist() returned a single-element list for an
    # all-True or all-False series. Fetch each key explicitly so the
    # documented two-value result always holds.
    return [int(counts.get(True, 0)), int(counts.get(False, 0))]
def phyutility(DIR,alignment,min_col_occup,seqtype,min_chr=10):
"""
remove columns with occupancy lower than MIN_COLUMN_OCCUPANCY
remove seqs shorter than MIN_CHR after filter columns
"""
if DIR[-1] != "/": DIR += "/"
cleaned = alignment+"-cln"
if os.path.exists(DIR+cleaned): return cleaned
assert alignment.end... | 5,356,183 |
def configProject(projectName):
""" read in config file"""
if projectName==None:return
filename=os.path.join(projectsfolder,unicode(projectName),u"project.cfg" ).encode("utf-8")
if projectName not in projects:
print 'Content-type: text/plain\n\n',"error in projects:",type(projectName),"projectName:",[projectName... | 5,356,184 |
def bitwise_not(rasters, extent_type="FirstOf", cellsize_type="FirstOf", astype=None):
"""
The BitwiseNot operation
The arguments for this function are as follows:
:param rasters: array of rasters. If a scalar is needed for the operation, the scalar can be a double or string
:param extent_type: on... | 5,356,185 |
def keyclean(key):
    """
    Normalize a table header into a safe dictionary key.

    Collapses internal whitespace runs to single underscores, then
    strips every remaining non-word character.
    """
    underscored = re.sub(r'\s+', '_', key.strip())
    return re.sub(r'[^\w]', '', underscored)
def test_compute_ts_map_downsampled(input_dataset):
"""Minimal test of compute_ts_image"""
spatial_model = GaussianSpatialModel(sigma="0.11 deg")
spectral_model = PowerLawSpectralModel(index=2)
model = SkyModel(spatial_model=spatial_model, spectral_model=spectral_model)
ts_estimator = TSMapEstimato... | 5,356,187 |
def get_rfactors_for_each(lpin):
"""
R-FACTORS FOR INTENSITIES OF DATA SET /isilon/users/target/target/Iwata/_proc_ox2r/150415-hirata/1010/06/DS/multi011_1-5/XDS_ASCII_fullres.HKL
RESOLUTION R-FACTOR R-FACTOR COMPARED
LIMIT observed expected
5.84 60.4% 50.1% 174
4.13... | 5,356,188 |
def value_left(self, right):
    """
    Return the operand value to use when this instance sits on the
    left of a binary operator: unwrap ``right.value`` if *right* is
    the same type as ``self``, otherwise pass *right* through.
    """
    if isinstance(right, self.__class__):
        return right.value
    return right
def correct_throughput(inspec, spFile='BT-Settl_Asplund2009.fits', quiet=False):
"""
Main function
Inputs:
inspec - list of input spectra, each list item should
be a 3xN array of wavelenghts (in microns),
flux, and variance. One list item for each
... | 5,356,190 |
def convert_list(
items,
ids,
parent,
attr_type,
):
"""Converts a list into an XML string."""
LOG.info('Inside convert_list()')
output = []
addline = output.append
if ids:
this_id = get_unique_id(parent)
for (i, item) in enumerate(items):
LOG.in... | 5,356,191 |
def ReduceDureeEtat(id_individu):
    """Decrease by one day the remaining duration of an individual's state."""
    # `pop_cur` is a module-level DB cursor (presumably sqlite3 -- TODO
    # confirm); the UPDATE decrements duree_etat for the given individual.
    pop_cur.execute("UPDATE etat SET duree_etat = duree_etat - 1 WHERE id_individu = ?", (int(id_individu), ))
def get_mid_surface(in_surfaces):
"""get_mid_surface gives the mid surface when dealing with the 7 different surfaces
Args:
(list of strings) in_surfaces : List of path to the 7 different surfaces generated by mris_expand
Returns:
(string) Path to the mid surface
"""
return in_surf... | 5,356,193 |
def parse_type(msg_type):
"""
Parse ROS message field type
:param msg_type: ROS field type, ``str``
:returns: base_type, is_array, array_length, ``(str, bool, int)``
:raises: :exc:`ValueError` If *msg_type* cannot be parsed
"""
if not msg_type:
raise ValueError("Invalid empty type")
... | 5,356,194 |
def preprocess(feature_modules: List, queries: List[Query],
prefix: Optional[str] = None,
process_count: Optional[int] = None):
"""
Args:
feature_modules: the feature modules used to generate features, each must implement the add_features function
queries: all the ... | 5,356,195 |
def to_distance(maybe_distance_function):
"""
Parameters
----------
maybe_distance_function: either a Callable, which takes two arguments, or
a DistanceFunction instance.
Returns
-------
"""
if maybe_distance_function is None:
return NoDistance()
if isinstance(maybe_... | 5,356,196 |
def echo(text):
    """Identity function: give *text* back unchanged."""
    return text
def test_issue3972(en_vocab):
"""Test that the PhraseMatcher returns duplicates for duplicate match IDs.
"""
matcher = PhraseMatcher(en_vocab)
matcher.add("A", None, Doc(en_vocab, words=["New", "York"]))
matcher.add("B", None, Doc(en_vocab, words=["New", "York"]))
doc = Doc(en_vocab, words=["I",... | 5,356,198 |
def include_package(config):
"""Pyramid package include"""
# add translations
config.add_translation_dirs('pyams_portal:locales')
# register permissions
config.register_permission({
'id': MANAGE_TEMPLATE_PERMISSION,
'title': _("Manage presentation templates")
})
# register... | 5,356,199 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.