hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ae59008d75b0339efe1290aefcf9a7e20a285167 | 146 | py | Python | 1-multiple.py | svamja/learning_assignments | fa1b2279250a6a7f9dbfee86fdacfe8609e6ad7d | [
"MIT"
] | null | null | null | 1-multiple.py | svamja/learning_assignments | fa1b2279250a6a7f9dbfee86fdacfe8609e6ad7d | [
"MIT"
] | null | null | null | 1-multiple.py | svamja/learning_assignments | fa1b2279250a6a7f9dbfee86fdacfe8609e6ad7d | [
"MIT"
] | null | null | null | nums = [3, 5]
# Sum every natural number below `maximum` divisible by 3 or 5
# (Project Euler problem 1).
maximum = 1000
result = sum(i for i in range(0, maximum) if i % 3 == 0 or i % 5 == 0)
print(result)
| 14.6 | 33 | 0.486301 |
3de581e4bbb7ee5c98dec1aae357378061eb2597 | 3,806 | py | Python | __init__.py | humairan/speech-rate-skill | 47ca7bff4c928776d906f6b1c5fd9d9df0f65798 | [
"Apache-2.0"
] | null | null | null | __init__.py | humairan/speech-rate-skill | 47ca7bff4c928776d906f6b1c5fd9d9df0f65798 | [
"Apache-2.0"
] | null | null | null | __init__.py | humairan/speech-rate-skill | 47ca7bff4c928776d906f6b1c5fd9d9df0f65798 | [
"Apache-2.0"
] | null | null | null | from mycroft import MycroftSkill, intent_file_handler
from mycroft.util.log import LOG
from mycroft.messagebus.message import Message
class SpeechRate(MycroftSkill):
    """Mycroft skill that adjusts the TTS speech rate.

    The rate is controlled through mimic's ``duration_stretch`` setting
    in the user config: a LOWER stretch value means FASTER speech, so
    "increase rate" subtracts from the stretch and "decrease rate" adds
    to it.  Valid stretch values span 0.1 (fastest) to 2.0 (slowest).
    """

    def __init__(self):
        MycroftSkill.__init__(self)
        # Named presets, kept as strings because they are written
        # directly into the config file.
        self.fastest = '0.1'
        self.fast = "0.5"
        self.default = self.normal = '1.0'
        self.slow = '1.5'
        self.slowest = '2.0'
        # Currently active duration_stretch, tracked as a float.
        self.curr_level = 1.0

    @intent_file_handler('increase.rate.intent')
    def handle_level_increase(self, message):
        """Speak faster: step duration_stretch down by 0.1."""
        from mycroft.configuration.config import (
            LocalConf, USER_CONFIG
        )
        # round() fixes binary-float drift accumulating over repeated
        # 0.1 steps (e.g. 0.8 - 0.1 -> 0.7000000000000001, which the
        # original code wrote verbatim into the user config).
        new_rate = round(self.curr_level - 0.10, 2)
        if new_rate < 2.0 and new_rate >= 0.1:
            new_config = {
                "tts": {
                    "mimic": {
                        "duration_stretch": str(new_rate)
                    }
                }
            }
            user_config = LocalConf(USER_CONFIG)
            user_config.merge(new_config)
            user_config.store()
            self.curr_level = new_rate
            self.bus.emit(Message('configuration.updated'))
            self.speak_dialog('updatedLevel', {'directionChange': 'increased'})
        else:
            self.speak_dialog("invalidLevel")

    @intent_file_handler('decrease.rate.intent')
    def handle_level_decrease(self, message):
        """Speak slower: step duration_stretch up by 0.1."""
        from mycroft.configuration.config import (
            LocalConf, USER_CONFIG
        )
        # See handle_level_increase for why round() is needed.
        new_rate = round(self.curr_level + 0.10, 2)
        if new_rate <= 2.0 and new_rate > 0.1:
            new_config = {
                "tts": {
                    "mimic": {
                        "duration_stretch": str(new_rate)
                    }
                }
            }
            user_config = LocalConf(USER_CONFIG)
            user_config.merge(new_config)
            user_config.store()
            self.curr_level = new_rate
            self.bus.emit(Message('configuration.updated'))
            self.speak_dialog('updatedLevel', {'directionChange': 'decreased'})
        else:
            self.speak_dialog("invalidLevel")

    @intent_file_handler('rate.speech.intent')
    def handle_rate_speech(self, message):
        """Set the rate to a named preset (fastest/fast/normal/slow/slowest)."""
        from mycroft.configuration.config import (
            LocalConf, Configuration, USER_CONFIG
        )
        user_config = LocalConf(USER_CONFIG)
        LOG.info(str(user_config))
        user_level = message.data.get('ratelevel')
        LOG.info(user_level)
        rate_var = self.get_rate_var(user_level)
        if rate_var == "error":
            self.speak_dialog("invalidInput", {'userLevel': user_level})
        else:
            new_config = {
                "tts": {
                    "mimic": {
                        "duration_stretch": str(rate_var)
                    }
                }
            }
            user_config.merge(new_config)
            user_config.store()
            LOG.info(str(LocalConf(USER_CONFIG)))
            self.bus.emit(Message('configuration.updated'))
            self.speak_dialog("rate.speech", {'rateLevel': user_level})

    def get_rate_var(self, level):
        """Map a spoken level name to its duration_stretch string.

        Updates ``self.curr_level`` on success.  Returns the sentinel
        string "error" for unknown names (callers compare against it).
        """
        presets = {
            "fastest": self.fastest,
            "fast": self.fast,
            "normal": self.normal,
            "default": self.normal,
            "slow": self.slow,
            "slowest": self.slowest,
        }
        rate = presets.get(level)
        if rate is None:
            return "error"
        self.curr_level = float(rate)
        return rate
def create_skill():
    """Entry point used by the Mycroft skill loader."""
    skill_instance = SpeechRate()
    return skill_instance
| 31.716667 | 80 | 0.541513 |
fc95a3a3322bf0cce537cb77bc70271da5081299 | 50,356 | py | Python | src/vnsw/provisioning/contrail_vrouter_provisioning/common.py | jnpr-pranav/contrail-controller | 428eee37c28c31830fd764315794e1a6e52720c1 | [
"Apache-2.0"
] | 37 | 2020-09-21T10:42:26.000Z | 2022-01-09T10:16:40.000Z | src/vnsw/provisioning/contrail_vrouter_provisioning/common.py | jnpr-pranav/contrail-controller | 428eee37c28c31830fd764315794e1a6e52720c1 | [
"Apache-2.0"
] | null | null | null | src/vnsw/provisioning/contrail_vrouter_provisioning/common.py | jnpr-pranav/contrail-controller | 428eee37c28c31830fd764315794e1a6e52720c1 | [
"Apache-2.0"
] | 21 | 2020-08-25T12:48:42.000Z | 2022-03-22T04:32:18.000Z | #!/usr/bin/env python
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
from __future__ import print_function
from __future__ import division
from builtins import str
from builtins import range
from past.utils import old_div
import os
import socket
import netaddr
import logging
import netifaces
import tempfile
from contrail_vrouter_provisioning import local
from contrail_vrouter_provisioning.base import ContrailSetup
from contrail_vrouter_provisioning.network import ComputeNetworkSetup
log = logging.getLogger('contrail_vrouter_provisioning.common')
def insert_line_to_file(line, file_name, pattern=None):
    """Append *line* to *file_name*; when *pattern* is given, delete any
    existing lines matching it first (sed), making the insert idempotent."""
    if pattern:
        delete_cmd = "sed -i '/%s/d' %s" % (pattern, file_name)
        local(delete_cmd, warn_only=True)
    append_cmd = 'printf "%s\n" >> %s' % (line, file_name)
    local(append_cmd)
class CommonComputeSetup(ContrailSetup, ComputeNetworkSetup):
def __init__(self, args):
    """Cache provisioning args and discover the vhost device.

    Picks vhost_ip (the non-mgmt IP wins in multi-network setups) and
    the device carrying it.  During re-provision vhost0 already exists
    and is used as the device so netmask/gateway can be read from it.
    """
    super(CommonComputeSetup, self).__init__()
    self._args = args

    # Using keystone admin password for nova/neutron if not supplied
    if not self._args.neutron_password:
        self._args.neutron_password = self._args.keystone_admin_password

    self.multi_net = False
    if self._args.non_mgmt_ip:
        self.multi_net = True
        self.vhost_ip = self._args.non_mgmt_ip
    else:
        self.vhost_ip = self._args.self_ip

    self.dev = None  # Will be physical device
    if self._args.physical_interface:
        # During re-provision/upgrade vhost0 will be present
        # so vhost0 should be treated as dev,
        # which is used to get netmask/gateway
        if 'vhost0' in netifaces.interfaces():
            self.dev = 'vhost0'
            # Fix: self.reprov was never set on this branch although it
            # is read unconditionally later (fixup_contrail_vrouter_agent)
            # -- vhost0 present means this is a re-provision.
            self.reprov = True
        # During intial provision actual interface should be treated as dev
        # which is used to get netmask/gateway
        elif self._args.physical_interface in netifaces.interfaces():
            self.dev = self._args.physical_interface
            self.reprov = False
        else:
            # Fix: the original message read "in present" although this
            # branch means the interface was NOT found.
            raise KeyError('Interface %s not present' %
                           self._args.physical_interface)
    else:
        # Get the physical device and provision status
        # if reprov is False, it means fresh install
        # True, it means reprovision
        (self.dev, self.reprov) = self.get_device_info(self.vhost_ip)
def fixup_config_files(self):
    """Generate/patch all compute-node config files.

    Order matters: add_qos_config() edits
    /etc/contrail/contrail-vrouter-agent.conf, which
    fixup_contrail_vrouter_agent() populates.
    """
    self.add_dev_tun_in_cgroup_device_acl()
    self.fixup_contrail_vrouter_agent()
    self.add_qos_config()
    self.fixup_contrail_vrouter_nodemgr()
    self.fixup_contrail_lbaas()
def setup_lbaas_prereq(self):
    """LBaaS prerequisites on RPM distros: ensure the 'nogroup' group
    exists and disable 'requiretty' in sudoers."""
    if self.pdist not in ['centos', 'redhat']:
        return
    local('sudo groupadd -f nogroup')
    sed_cmd = ("sudo sed -i s/'Defaults requiretty'/'#Defaults "
               "requiretty'/g /etc/sudoers")
    local(sed_cmd)
def add_dev_tun_in_cgroup_device_acl(self):
    """Allow qemu access to /dev/net/tun (and friends) via libvirt's
    cgroup_device_acl; needed for type=ethernet interfaces."""
    # add /dev/net/tun in cgroup_device_acl needed
    # for type=ethernet interfaces
    fl = "/etc/libvirt/qemu.conf"
    ret = local("sudo grep -q '^cgroup_device_acl' %s" % fl,
                warn_only=True)
    # Only append the ACL once: skip everything if an entry already exists.
    if ret.failed:
        if self.pdist in ['centos', 'redhat']:
            # RPM distros additionally need qemu to run as root with
            # capabilities cleared for the ACL to take effect here.
            local('sudo echo "clear_emulator_capabilities = 1" >> %s' % fl,
                  warn_only=True)
            local('sudo echo \'user = "root"\' >> %s' % fl, warn_only=True)
            local('sudo echo \'group = "root"\' >> %s' % fl,
                  warn_only=True)
        # Append the whole cgroup_device_acl list, one shell echo per line.
        cmds = ['echo \'cgroup_device_acl = [\' >> %s' % fl,
                'echo \'    "/dev/null", "/dev/full", "/dev/zero",\''
                + ' >> %s' % fl,
                'echo \'    "/dev/random", "/dev/urandom",\''
                + ' >> %s' % fl,
                'echo \'    "/dev/ptmx", "/dev/kvm", "/dev/kqemu",\''
                + ' >> %s' % fl,
                'echo \'    "/dev/rtc", "/dev/hpet", "/dev/net/tun",\''
                + ' >> %s' % fl,
                'echo \']\' >> %s' % fl]
        for cmd in cmds:
            local('sudo ' + cmd, warn_only=True)
        # Remember that qemu.conf was modified (read elsewhere to decide
        # whether libvirt needs a restart -- not visible in this file).
        self._fixed_qemu_conf = True

    # add "alias bridge off" in /etc/modprobe.conf for Centos
    if self.pdist in ['centos', 'redhat']:
        local('sudo echo "alias bridge off" > /etc/modprobe.conf',
              warn_only=True)
def fixup_contrail_vrouter_nodemgr(self):
    """Write collector and sandesh SSL settings for the vrouter nodemgr."""
    # Workaround https://bugs.launchpad.net/juniperopenstack/+bug/1681172
    cfgfile = '/etc/contrail/contrail-vrouter-nodemgr.conf'
    if not os.path.isfile(cfgfile):
        local('sudo touch %s' % cfgfile)
    collector_list = ' '.join('%s:%s' % (server, '8086')
                              for server in self._args.collectors)
    settings = [
        ('COLLECTOR', 'server_list', collector_list),
        ('SANDESH', 'sandesh_ssl_enable', self._args.sandesh_ssl_enable),
        ('SANDESH', 'introspect_ssl_enable',
         self._args.introspect_ssl_enable),
    ]
    for section, option, value in settings:
        self.set_config(cfgfile, section, option, value)
def setup_hugepages_node(self, dpdk_args):
    """Setup hugepages on one or list of nodes

    Reserves dpdk_args['huge_pages'] percent of total memory as
    hugepages (via vm.nr_hugepages), bumps vm.max_map_count
    accordingly and makes sure a hugetlbfs is mounted on /hugepages.
    """
    # How many times DPDK inits hugepages (rte_eal_init())
    # See function map_all_hugepages() in DPDK
    DPDK_HUGEPAGES_INIT_TIMES = 2

    # get required size of hugetlbfs
    factor = int(dpdk_args['huge_pages'])

    print(dpdk_args)

    if factor == 0:
        factor = 1

    # set number of huge pages
    # All three values are read from /proc/meminfo (kB / page counts).
    memsize = local("sudo grep MemTotal /proc/meminfo |"
                    " tr -s ' ' | cut -d' ' -f 2 | tr -d '\n'",
                    capture=True, warn_only=True)
    pagesize = local("sudo grep Hugepagesize /proc/meminfo"
                     " | tr -s ' 'i | cut -d' ' -f 2 | tr -d '\n'",
                     capture=True, warn_only=True)
    reserved = local("sudo grep HugePages_Total /proc/meminfo"
                     " | tr -s ' 'i | cut -d' ' -f 2 | tr -d '\n'",
                     capture=True, warn_only=True)

    if (reserved == ""):
        reserved = "0"

    # requested = (MemTotal * percent / 100) / Hugepagesize, integer math
    requested = old_div((old_div((int(memsize) * factor), 100)), int(pagesize))

    # Never shrink an existing reservation, only grow it.
    if (requested > int(reserved)):
        pattern = "^vm.nr_hugepages ="
        line = "vm.nr_hugepages = %d" % requested
        insert_line_to_file(pattern=pattern, line=line,
                            file_name='/etc/sysctl.conf')

    # NOTE(review): no capture=True here, unlike the grep calls above --
    # confirm the local() wrapper still returns the sysctl output.
    current_max_map_count = local("sudo sysctl -n "
                                  "vm.max_map_count")
    if current_max_map_count == "":
        current_max_map_count = 0

    current_huge_pages = max(int(requested), int(reserved))

    # DPDK maps every hugepage twice during init, hence the factor.
    requested_max_map_count = (DPDK_HUGEPAGES_INIT_TIMES
                               * int(current_huge_pages))

    if int(requested_max_map_count) > int(current_max_map_count):
        pattern = "^vm.max_map_count ="
        line = "vm.max_map_count = %d" % requested_max_map_count
        insert_line_to_file(pattern=pattern, line=line,
                            file_name='/etc/sysctl.conf')
    local('sudo sysctl -p', warn_only=True)

    # Mount hugetlbfs (and persist it in fstab) unless already mounted.
    mounted = local("sudo mount | grep hugetlbfs | cut -d' ' -f 3",
                    capture=True, warn_only=False)
    if (mounted != ""):
        print("hugepages already mounted on %s" % mounted)
    else:
        local("sudo mkdir -p /hugepages", warn_only=False)
        pattern = "^hugetlbfs"
        line = "hugetlbfs    "\
               "/hugepages    hugetlbfs defaults      0       0"
        insert_line_to_file(pattern=pattern, line=line,
                            file_name='/etc/fstab')
        local("sudo mount -t hugetlbfs hugetlbfs /hugepages",
              warn_only=False)
def search_and_replace(self, lvalue, rvalue, position, vrouter_file):
    """Search and replace strings in the format <key>=<filepath> <args>
        - 'position' determines where the <rvalue> needs to be inserted
        - If it is "Begin", the string becomes:
            <key>=<rvalue> <filepath> <args>
        - If it is "End", the string becomes:
            <key>=<filepath> <args> <rvalue>
        - If <rvalue> already exists in <args>, it deletes it first
        - If <rvalue> already preceeds <filepath> it deletes it first

        Input:
        - lvalue = <key>
        - rvalue = <arg> to be searched and replaced
        - position = Begin/End
        - vrouter_file = path of vrouter file
    """
    if position == "Begin":
        # Strip any existing prefix between "<key>=" and the first
        # absolute path, then prepend the new rvalue before the path.
        # sed -i.bak keeps a backup of the edited file.
        regexp_del = r"'s/\(^ *%s *=\)\(.*\)\( \/.*\)/\1\3/'" % (lvalue)
        regexp_add = r"'s/\(^%s=\)\(.*\)/\1%s \2/'" % (lvalue, rvalue)
        regexp = "sed -i.bak -e %s -e %s %s" \
                 % (regexp_del, regexp_add, vrouter_file)
        local(regexp, warn_only=False)
    elif position == "End":
        # Delete any existing "<flag> <value>" occurrence of the same
        # flag (first word of rvalue), then append the new rvalue.
        regexp_del = r"'s/\(^ *%s *=.*\) \(%s [^ ]*\)\(.*\) *$/\1\3/'" \
                     % (lvalue, rvalue.split(' ')[0])
        regexp_add = r"'s/\(^ *%s *=.*\)/\1 %s/'" % (lvalue, rvalue)
        regexp = "sed -i.bak -e %s -e %s %s" \
                 % (regexp_del, regexp_add, vrouter_file)
        local(regexp, warn_only=False)
def setup_coremask_node(self, dpdk_args):
    """Setup core mask on one or list of nodes

    Validates dpdk_args['coremask'] by running it through taskset on
    a dummy command, then pins the vrouter service command line to it
    by prepending "taskset <mask>" in the service file.
    """
    try:
        coremask = dpdk_args['coremask']
    except KeyError:
        raise RuntimeError("Core mask for host %s is not defined."
                           % (dpdk_args))

    if not coremask:
        raise RuntimeError("Core mask for host %s is not defined."
                           % dpdk_args)

    # if a list of cpus is provided, -c flag must be passed to taskset
    if (',' in coremask) or ('-' in coremask):
        taskset_param = ' -c'
    else:
        taskset_param = ''

    # supported coremask format: hex: (0x3f); list: (0,3-5), (0,1,2,3,4,5)
    # try taskset on a dummy command
    if local('sudo taskset%s %s true' % (taskset_param, coremask),
             capture=True, warn_only=False).succeeded:
        # Slashes are pre-escaped because the value is substituted into
        # a sed expression by search_and_replace().
        self.search_and_replace(self.command_key, '\/usr\/bin\/taskset ' + coremask,
                                "Begin", self.vrouter_file)
    else:
        raise RuntimeError("Error: Core mask %s for host %s is invalid."
                           % (coremask, dpdk_args))
def setup_vm_coremask_node(self, q_coremask, dpdk_args):
    """
    Setup CPU affinity for QEMU processes based on
    vRouter/DPDK core affinity on a given node.

    Supported core mask format:
        vRouter/DPDK:   hex (0x3f), list (0,1,2,3,4,5), range (0,3-5)
        QEMU/nova.conf: list (0,1,2,3,4,5), range (0,3-5),
                        exclusion (0-5,^4)

    QEMU needs to be pinned to different cores than vRouter. Because of
    different core mask formats, it is not possible to just set QEMU to
    <not vRouter cores>. This function takes vRouter core mask from
    testbed, changes it to list of cores and removes them from list
    of all possible cores (generated as a list from 0 to N-1, where
    N = number of cores). This is changed back to string and passed to
    openstack-config.
    """
    try:
        vr_coremask = dpdk_args['coremask']
    except KeyError:
        raise RuntimeError("vRouter core mask for "
                           "host %s is not defined." % (dpdk_args))

    if not vr_coremask:
        raise RuntimeError("vRouter core mask for host "
                           "%s is not defined." % dpdk_args)

    # Only compute a QEMU mask when the caller did not supply one.
    if not q_coremask:
        try:
            cpu_count = int(local(
                'sudo grep -c processor /proc/cpuinfo',
                capture=True))
        except ValueError:
            log.info("Cannot count CPUs on host %s. VM core "
                     "mask cannot be computed." % (dpdk_args))
            raise

        if not cpu_count or cpu_count == -1:
            raise ValueError("Cannot count CPUs on host %s. "
                             "VM core mask cannot be computed."
                             % (dpdk_args))

        all_cores = [x for x in range(cpu_count)]

        if 'x' in vr_coremask:  # String containing hexadecimal mask.
            vr_coremask = int(vr_coremask, 16)

            """
            Convert hexmask to a string with numbers of cores to be
            used, eg.
            0x19 -> 11001 -> 10011 -> [(0,1), (1,0), (2,0),
            (3,1), (4,1)] -> '0,3,4'
            """
            vr_coremask = [
                x[0] for x in enumerate(reversed(bin(vr_coremask)[2:]))
                if x[1] == '1']
        # Range or list of cores.
        elif (',' in vr_coremask) or ('-' in vr_coremask):
            # Get list of core numbers and/or core ranges.
            vr_coremask = vr_coremask.split(',')

            # Expand ranges like 0-4 to 0, 1, 2, 3, 4.
            vr_coremask_expanded = []
            for rng in vr_coremask:
                if '-' in rng:  # If it's a range - expand it.
                    a, b = rng.split('-')
                    vr_coremask_expanded += list(range(int(a), int(b) + 1))
                else:  # If not, just add to the list.
                    vr_coremask_expanded.append(int(rng))

            vr_coremask = vr_coremask_expanded
        else:  # A single core.
            try:
                single_core = int(vr_coremask)
            except ValueError:
                log.error("vRouter core mask %s for host %s is invalid."
                          % (vr_coremask, dpdk_args))
                raise

            vr_coremask = []
            vr_coremask.append(single_core)

        # From list of all cores remove list of vRouter cores
        # and stringify.
        diff = set(all_cores) - set(vr_coremask)
        q_coremask = ','.join(str(x) for x in diff)

        # If we have no spare cores for VMs
        if not q_coremask:
            raise RuntimeError("Setting QEMU core mask for host %s "
                               "failed - empty string."
                               % (dpdk_args))

    # This can fail eg. because openstack-config is not present.
    # There's no sanity check in openstack-config.
    if local("sudo crudini --set /etc/nova/nova.conf "
             "DEFAULT vcpu_pin_set %s"
             % q_coremask, capture=True, warn_only=False).succeeded:
        log.info("QEMU coremask on host %s set to %s."
                 % (dpdk_args, q_coremask))
    else:
        raise RuntimeError("Error: setting QEMU core mask %s for "
                           "host %s failed." % (vr_coremask, dpdk_args))
def setup_uio_driver(self, dpdk_args):
    """Setup UIO driver to use for DPDK
    (igb_uio, uio_pci_generic or vfio-pci)
    """
    vrouter_agent_file = '/etc/contrail/contrail-vrouter-agent.conf'

    if 'uio_driver' in dpdk_args:
        uio_driver = dpdk_args['uio_driver']
        if uio_driver == "vfio-pci":
            # NOTE(review): presumably adjusts kernel boot params
            # (iommu) needed by vfio -- setup_sriov_grub() is defined
            # elsewhere; confirm.
            self.setup_sriov_grub(uio_driver)
    else:
        print("No UIO driver defined for host, skipping...")
        return

    # Only record the driver in the agent config if it actually loads.
    if local('sudo modprobe %s'
             % (uio_driver), capture=True, warn_only=False).succeeded:
        log.info("Setting UIO driver to %s for host..." % uio_driver)
        local('sudo contrail-config --set %s DEFAULT '\
              'physical_uio_driver %s' % (vrouter_agent_file, uio_driver))
    else:
        raise RuntimeError("Error: invalid UIO driver %s for host"
                           % (uio_driver))
def dpdk_increase_vrouter_limit(self,
                                vrouter_module_params_args):
    """Increase the maximum number of mpls label
    and nexthop on tsn node.

    Each limit below is appended to the vrouter service command line
    as "--vr_<name> <value>"; defaults can be overridden per top-level
    key through vrouter_module_params_args.
    """
    vr_params = {
        'flow_entries': '524288',
        'oflow_entries': '3000',
        'mpls_labels': '5120',
        'nexthops': '65536',
        'vrfs': '5120',
        'macs': {'bridge_entries': '262144'},
    }
    for param, default in vr_params.items():
        if isinstance(default, dict):
            # Grouped limits: the override key is the group name but the
            # command-line flag uses the inner name.  Fix: apply EVERY
            # inner entry -- the old code appended outside this loop, so
            # only the last inner entry ever reached the command line.
            for param_name, inner_default in default.items():
                param_val = vrouter_module_params_args.setdefault(
                    param, inner_default)
                self.search_and_replace(
                    self.command_key,
                    "--vr_" + param_name + " " + param_val,
                    "End", self.vrouter_file)
        else:
            param_val = vrouter_module_params_args.setdefault(
                param, default)
            self.search_and_replace(
                self.command_key,
                "--vr_" + param + " " + param_val,
                "End", self.vrouter_file)
def fixup_contrail_vrouter_agent(self):
    """Write /etc/contrail/contrail-vrouter-agent.conf and agent_param.

    Gathers interface facts (mac/netmask/gateway/cidr) from the live
    device on fresh install, or back from the existing agent config on
    re-provision; renders agent_param from its template; handles DPDK
    specifics; writes the agent config sections; finally plumbs vhost0.
    """
    compute_ip = self._args.self_ip
    non_mgmt_gw = self._args.non_mgmt_gw
    vgw_public_subnet = self._args.vgw_public_subnet
    vgw_public_vn_name = self._args.vgw_public_vn_name
    vgw_intf_list = self._args.vgw_intf_list
    vgw_gateway_routes = self._args.vgw_gateway_routes
    compute_as_gateway = self._args.compute_as_gateway
    flow_thread_count = self._args.flow_thread_count

    self.mac = None
    # Fresh install
    if self.dev and not self.reprov:
        self.mac = netifaces.ifaddresses(self.dev)[netifaces.AF_LINK][0][
            'addr']
        if not self.mac:
            raise KeyError('Interface %s Mac %s' % (str(self.dev),
                                                    str(self.mac)))
        self.netmask = netifaces.ifaddresses(self.dev)[
            netifaces.AF_INET][0]['netmask']
        if self.multi_net:
            self.gateway = non_mgmt_gw
        else:
            self.gateway = self.find_gateway(self.dev)
        self.cidr = netaddr.IPNetwork('%s/%s' % (self.vhost_ip,
                                                 self.netmask))
    elif self.dev:
        # Reprovision: read the facts back from the existing config
        cfg_file = "/etc/contrail/contrail-vrouter-agent.conf"
        section = "DEFAULT"
        key = "physical_interface_mac"
        self.mac = self.get_config(cfg_file, section, key).strip()
        section = "VIRTUAL-HOST-INTERFACE"
        key = "ip"
        self.cidr = netaddr.IPNetwork(self.get_config(cfg_file, section, key).strip())
        section = "VIRTUAL-HOST-INTERFACE"
        key = "gateway"
        self.gateway = self.get_config(cfg_file, section, key).strip()
        # NOTE(review): netmask hard-coded on reprovision; the real
        # prefix length lives in self.cidr -- confirm downstream users.
        self.netmask = "255.255.255.0"

    if self.dev:
        # Render agent_param from its template, substituting the device
        # (and VGW details when a public subnet is configured).
        if vgw_public_subnet:
            os.chdir(self._temp_dir_name)
            # Manipulating the string to use in agent_param
            vgw_public_subnet_str = []
            for i in vgw_public_subnet[1:-1].split(";"):
                j = i[1:-1].split(",")
                j = ";".join(j)
                vgw_public_subnet_str.append(j)
            vgw_public_subnet_str = str(tuple(
                vgw_public_subnet_str)).replace("'", "")
            vgw_public_subnet_str = vgw_public_subnet_str.replace(" ", "")
            vgw_intf_list_str = str(tuple(
                vgw_intf_list[1:-1].split(";"))).replace(" ", "")

            cmds = ["sudo sed 's@dev=.*@dev=%s@g;" % self.dev,
                    "s@vgw_subnet_ip=.*@vgw_subnet_ip=%s@g;" %
                    vgw_public_subnet_str,
                    "s@vgw_intf=.*@vgw_intf=%s@g'" % vgw_intf_list_str,
                    " /etc/contrail/agent_param.tmpl > agent_param.new"]
            local(' '.join(cmds))
            local("sudo mv agent_param.new /etc/contrail/agent_param")
        else:
            os.chdir(self._temp_dir_name)
            cmds = ["sudo sed 's/dev=.*/dev=%s/g' " % self.dev,
                    "/etc/contrail/agent_param.tmpl > agent_param.new"]
            local(''.join(cmds))
            local("sudo mv agent_param.new /etc/contrail/agent_param")

        vmware_dev = None
        gateway_mode = None
        if (self._args.mode == 'vcenter' or
                self._args.hypervisor == 'vmware'):
            vmware_dev = self.get_secondary_device(self.dev)
        if compute_as_gateway == 'server':
            gateway_mode = "server"

        # Set template options for DPDK mode
        pci_dev = ""
        platform_mode = "default"
        if self._args.dpdk:
            # self._args.dpdk is a "k1=v1,k2=v2,..." string
            dpdk_args = dict(
                u.split("=") for u in self._args.dpdk.split(","))
            log.info(dpdk_args)
            platform_mode = "dpdk"

            # Locate the vrouter service file (supervisord or systemd);
            # self.vrouter_file/self.command_key are used by the
            # search_and_replace/coremask helpers.
            supervisor_vrouter_file = ('/etc/contrail/' +
                                       'supervisord_vrouter_files/' +
                                       'contrail-vrouter-dpdk.ini')
            systemd_vrouter_file = ('/lib/systemd/system/' +
                                    'contrail-vrouter-dpdk.service')

            if os.path.isfile(supervisor_vrouter_file):
                self.vrouter_file = supervisor_vrouter_file
                self.command_key = "command"
            elif os.path.isfile(systemd_vrouter_file):
                self.vrouter_file = systemd_vrouter_file
                self.command_key = "ExecStart"
            else:
                raise RuntimeError("Vrouter Supervisor/Systemd not found.")

            self.setup_hugepages_node(dpdk_args)
            self.setup_coremask_node(dpdk_args)
            self.setup_vm_coremask_node(False, dpdk_args)
            self.setup_uio_driver(dpdk_args)

        if self._args.dpdk and not self.reprov:
            # Resolve the PCI address of the (physical) DPDK interface.
            iface = self.dev
            if self.is_interface_vlan(self.dev):
                iface = self.get_physical_interface_of_vlan(self.dev)
            local("ls /opt/contrail/bin/dpdk_nic_bind.py", warn_only=False)
            cmd = "sudo /opt/contrail/bin/dpdk_nic_bind.py --status | "
            cmd += "sudo grep -w %s | cut -d' ' -f 1" % iface.strip()
            pci_dev = local(cmd, capture=True, warn_only=False)
            # If there is no PCI address, the device is a bond.
            # Bond interface in DPDK has zero PCI address.
            if not pci_dev:
                pci_dev = "0000:00:00.0"
        elif self._args.dpdk and self.reprov:
            # Reprovision: PCI address is already in the agent config.
            cfg_file = "/etc/contrail/contrail-vrouter-agent.conf"
            section = "DEFAULT"
            key = "physical_interface_address"
            pci_dev = self.get_config(cfg_file, section, key).strip()

        if self.pdist == 'Ubuntu':
            # Fix /dev/vhost-net permissions. It is required for
            # multiqueue operation
            local('sudo echo \'KERNEL=="vhost-net", '
                  'GROUP="kvm", MODE="0660"\' > '
                  '/etc/udev/rules.d/vhost-net.rules', warn_only=True)
            # The vhost-net module has to be loaded at startup to
            # ensure the correct permissions while the qemu is being
            # launched
            pattern = "vhost-net"
            line = "vhost-net"
            insert_line_to_file(pattern=pattern, line=line,
                                file_name='/etc/modules')

        if not self._args.dpdk:
            self.setup_vrouter_kmod_hugepages()

        # Collect pre-allocated kernel-module hugepage files, if any.
        vrouter_kmod_1G_page = ''
        vrouter_kmod_2M_page = ''
        if self._args.vrouter_1G_hugepages != '0':
            if (os.path.isfile('/mnt/hugepage_1G/vrouter_1G_mem_0')):
                vrouter_kmod_1G_page = '/mnt/hugepage_1G/vrouter_1G_mem_0'
            if (os.path.isfile('/mnt/hugepage_1G/vrouter_1G_mem_1')):
                vrouter_kmod_1G_page = vrouter_kmod_1G_page + ' /mnt/hugepage_1G/vrouter_1G_mem_1'
        if self._args.vrouter_2M_hugepages != '0':
            if (os.path.isfile('/mnt/hugepage_2M/vrouter_2M_mem_0')):
                vrouter_kmod_2M_page = '/mnt/hugepage_2M/vrouter_2M_mem_0'
            if (os.path.isfile('/mnt/hugepage_2M/vrouter_2M_mem_1')):
                vrouter_kmod_2M_page = vrouter_kmod_2M_page + ' /mnt/hugepage_2M/vrouter_2M_mem_1'

        # "ip:port" server lists for the agent config sections below.
        control_servers = ' '.join('%s:%s' % (server, '5269')
                                   for server in self._args.control_nodes)
        dns_servers = ' '.join('%s:%s' % (server, '53')
                               for server in self._args.control_nodes)
        collector_servers = ' '.join('%s:%s' % (server, '8086')
                                     for server in self._args.collectors)
        if self._args.tsn_evpn_mode and self._args.tsn_servers:
            tsn_servers = ' '.join(self._args.tsn_servers)
        else:
            tsn_servers = ''
        # section -> {option: value} map written out at the end.
        configs = {
            'DEFAULT': {
                'platform': platform_mode,
                'gateway_mode': gateway_mode or '',
                'physical_interface_address': pci_dev,
                'physical_interface_mac': self.mac,
                'collectors': collector_servers,
                'xmpp_auth_enable': self._args.xmpp_auth_enable,
                'xmpp_dns_auth_enable': self._args.xmpp_dns_auth_enable,
                'tsn_servers': tsn_servers,
                'agent_mode': ''},
            'NETWORKS': {
                'control_network_ip': compute_ip},
            'VIRTUAL-HOST-INTERFACE': {
                'name': 'vhost0',
                'ip': str(self.cidr),
                'gateway': self.gateway,
                'physical_interface': self.dev},
            'HYPERVISOR': {
                'type': ('kvm' if self._args.hypervisor == 'libvirt'
                         else self._args.hypervisor),
                'vmware_mode': self._args.mode or '',
                'vmware_physical_interface': vmware_dev or ''},
            'CONTROL-NODE': {
                'servers': control_servers},
            'DNS': {
                'servers': dns_servers},
            'SANDESH': {
                'sandesh_ssl_enable': self._args.sandesh_ssl_enable,
                'introspect_ssl_enable':
                    self._args.introspect_ssl_enable},
            'FLOWS': {
                'thread_count': flow_thread_count},
            'METADATA': {
                'metadata_proxy_secret': self._args.metadata_secret,
                'metadata_use_ssl': self._args.metadata_use_ssl,
                'metadata_client_cert': ('/etc/contrail/ssl/certs/server.pem'
                                         if self._args.metadata_use_ssl else ''),
                # NOTE(review): 'metdata_client_cert_type' spelling
                # differs from its siblings; kept as-is since the agent
                # presumably reads this exact key -- confirm.
                'metdata_client_cert_type': ('PEM' if self._args.metadata_use_ssl
                                             else ''),
                'metadata_client_key': ('/etc/contrail/ssl/private/server-privkey.pem'
                                        if self._args.metadata_use_ssl else '')},
            'RESTART': {
                'huge_page_2M': vrouter_kmod_2M_page,
                'huge_page_1G': vrouter_kmod_1G_page,
                'backup_enable': (True
                                  if self._args.resource_backup_restore else False),
                'backup_dir': ('/var/lib/contrail/backup'),
                'backup_file_count': (self._args.backup_file_count),
                'backup_idle_timeout': (self._args.backup_idle_timeout),
                'restore_enable': (True
                                   if self._args.resource_backup_restore else False),
                'restore_audit_timeout': (self._args.restore_audit_timeout)},
        }

        # VGW configs
        # The vgw_* args arrive as bracketed ';'-separated strings;
        # strip the outer brackets, split, and emit one GATEWAY-<i>
        # section per public VN.
        if vgw_public_vn_name and vgw_public_subnet:
            vgw_public_vn_name = vgw_public_vn_name[1:-1].split(';')
            vgw_public_subnet = vgw_public_subnet[1:-1].split(';')
            vgw_intf_list = vgw_intf_list[1:-1].split(';')
            if vgw_gateway_routes is not None:
                vgw_gateway_routes = vgw_gateway_routes[1:-1].split(';')
            for i in range(len(vgw_public_vn_name)):
                ip_blocks = ''
                if vgw_public_subnet[i].find("[") != -1:
                    # Bracketed entry means multiple comma-separated blocks.
                    for ele in vgw_public_subnet[i][1:-1].split(","):
                        ip_blocks += ele[1:-1] + " "
                else:
                    ip_blocks += vgw_public_subnet[i]
                routes = ''
                if (vgw_gateway_routes is not None and
                        i < len(vgw_gateway_routes)):
                    if vgw_gateway_routes[i] != '[]':
                        if vgw_gateway_routes[i].find("[") != -1:
                            for ele in vgw_gateway_routes[i][1:-1].split(
                                    ","):
                                routes += ele[1:-1] + " "
                        else:
                            routes += vgw_gateway_routes[i]

                configs['GATEWAY-%s' % i] = {'interface': vgw_intf_list[i],
                                             'ip_blocks': ip_blocks,
                                             'routes': routes,
                                             'routing_instance': vgw_public_vn_name[i]}

        # Persist every section/option pair into the agent config.
        for section, key_vals in list(configs.items()):
            for key, val in list(key_vals.items()):
                self.set_config(
                    '/etc/contrail/contrail-vrouter-agent.conf',
                    section, key, val)

        if self.running_in_container:
            self.config_vhost0_interface_in_container()
        else:
            self.fixup_vhost0_interface_configs()
def config_vhost0_interface_in_container(self):
    """Create vhost0 inside a container and move the data IP onto it.

    Skipped entirely on re-provision because vhost0 already carries
    the address.
    """
    if self.reprov:
        log.info("vhost0 configuration already present")
        return
    # Insert vrouter and setup vrouter vifs
    insert_cmd = "source /opt/contrail/bin/vrouter-functions.sh && "
    insert_cmd += "insert_vrouter"
    local(insert_cmd, executable='/bin/bash')
    # Move ip address from vrouter physical device to vhost
    # (single shell command chained with && so a failed delete stops
    # the add/up steps)
    config_vhost0_cmd = "ip address delete %s/%s dev %s && " % (
        self.vhost_ip, self.cidr.prefixlen, self.dev)
    config_vhost0_cmd += "ip address add %s/%s dev vhost0 && " % (
        self.vhost_ip, self.cidr.prefixlen)
    config_vhost0_cmd += "ip link set dev vhost0 up"
    local(config_vhost0_cmd)
    # Add default gateway to new device as link local if /32 IP Address
    if self.cidr.prefixlen == 32:
        local("ip route add unicast %s dev vhost0 scope link" %
              self.gateway)
    if not self.multi_net:
        # Add default gateway to vhost
        local("ip route add default via %s dev vhost0" % self.gateway)
def fixup_contrail_lbaas(self):
    """Write barbican (keystone v2.0) auth settings for contrail-lbaas."""
    auth_url = ''.join([self._args.keystone_auth_protocol, '://',
                        self._args.keystone_ip, ':',
                        self._args.keystone_auth_port, '/', 'v2.0'])

    configs = {
        'BARBICAN': {
            'admin_tenant_name': 'service',
            'admin_user': 'neutron',
            'admin_password': self._args.neutron_password,
            'auth_url': auth_url,
            'region': 'RegionOne'}
    }

    # Workaround https://bugs.launchpad.net/juniperopenstack/+bug/1681172
    cfgfile = '/etc/contrail/contrail-lbaas-auth.conf'
    if not os.path.isfile(cfgfile):
        local('sudo touch %s' % cfgfile)
    for section, key_vals in list(configs.items()):
        for key, val in list(key_vals.items()):
            self.set_config(cfgfile, section, key, val)
def fixup_vhost0_interface_configs(self):
    """Write OS network scripts so vhost0 owns the data IP on boot.

    RPM distros get an ifcfg-vhost0 plus a rewritten ifcfg-<dev>;
    Ubuntu goes through _rewrite_net_interfaces_file().  No-op on
    re-provision.
    """
    if self.reprov:
        log.info("fixup_vhost0_interface_configs() not applicable")
        return

    if self.pdist in ['centos', 'fedora', 'redhat']:
        # make ifcfg-vhost0
        with open('%s/ifcfg-vhost0' % self._temp_dir_name, 'w') as f:
            f.write('''#Contrail vhost0
DEVICE=vhost0
ONBOOT=yes
BOOTPROTO=none
IPV6INIT=no
USERCTL=yes
IPADDR=%s
NETMASK=%s
NM_CONTROLLED=no
#NETWORK MANAGER BUG WORKAROUND
SUBCHANNELS=1,2,3
''' % (self.vhost_ip, self.netmask))
            # Don't set gateway and DNS on vhost0 if on non-mgmt network
            if not self.multi_net:
                if self.gateway:
                    f.write('GATEWAY=%s\n' % self.gateway)
                dns_list = self.get_dns_servers(self.dev)
                for i, dns in enumerate(dns_list):
                    f.write('DNS%d=%s\n' % (i + 1, dns))
                domain_list = self.get_domain_search_list()
                if domain_list:
                    f.write('DOMAIN="%s"\n' % domain_list)

            # Carry the physical device's MTU over to vhost0 and
            # remember it so the rewritten ifcfg-<dev> preserves it too.
            prsv_cfg = []
            mtu = self.get_if_mtu(self.dev)
            if mtu:
                dcfg = 'MTU=%s' % str(mtu)
                f.write(dcfg + '\n')
                prsv_cfg.append(dcfg)
            f.flush()
        if self.dev != 'vhost0':
            src = "%s/ifcfg-vhost0" % self._temp_dir_name
            dst = "/etc/sysconfig/network-scripts/ifcfg-vhost0"
            local("sudo mv %s %s" % (src, dst), warn_only=True)
            local("sudo sync", warn_only=True)
            # make ifcfg-$dev
            # Keep a one-time backup of the original device config.
            ifcfg = "/etc/sysconfig/network-scripts/ifcfg-%s" % self.dev
            ifcfg_bkp = "/etc/sysconfig/network-scripts/orig.ifcfg-%s.rpmsave"\
                % self.dev
            if not os.path.isfile(ifcfg_bkp):
                local("sudo cp %s %s" % (ifcfg, ifcfg_bkp), warn_only=True)
            ifcfg_tmp = '%s/ifcfg-%s' % (self._temp_dir_name, self.dev)
            self._rewrite_ifcfg_file(ifcfg_tmp, self.dev, prsv_cfg)

            if self.multi_net:
                self.migrate_routes(self.dev)

            local("sudo mv %s /etc/contrail/" % ifcfg_tmp, warn_only=True)

            if self.pdist not in ['Ubuntu']:
                local("sudo chkconfig network on", warn_only=True)
                local("sudo chkconfig supervisor-vrouter on",
                      warn_only=True)
    # end self.pdist == centos | fedora | redhat
    # setup lbaas prereqs
    self.setup_lbaas_prereq()

    if self.pdist in ['Ubuntu']:
        self._rewrite_net_interfaces_file(
            self.dev, self.mac, self.vhost_ip, self.netmask,
            self.gateway, self._args.vmware,
            self._args.vmware_vmpg_vswitch_mtu,
            self._args.vmware_datanic_mtu)
    # end self.pdist == ubuntu
def run_services(self):
    """Enable the vrouter supervisor on RPM distros and, when running
    inside a container, restart the agent and nodemgr services."""
    if self.pdist not in ['Ubuntu']:
        for svc in ['supervisor-vrouter']:
            local('sudo chkconfig %s on' % svc)
    if self.running_in_container:
        services_to_restart = ['contrail-vrouter-agent',
                               'contrail-vrouter-nodemgr']
        for svc in services_to_restart:
            local('sudo service %s restart' % svc)
def add_vnc_config(self):
    """Register this compute node with the API server by invoking
    provision_vrouter.py with the keystone admin credentials."""
    compute_hostname = socket.gethostname()
    compute_ip = self._args.self_ip
    # HTTPS on the API server implies SSL for the provision call.
    use_ssl = self._args.apiserver_auth_protocol == 'https'
    prov_args = ("--host_name %s --host_ip %s --api_server_ip %s "
                 "--oper add --admin_user %s --admin_password %s "
                 "--admin_tenant_name %s --openstack_ip %s "
                 "--api_server_use_ssl %s"
                 % (compute_hostname, compute_ip, self._args.cfgm_ip,
                    self._args.keystone_admin_user,
                    self._args.keystone_admin_password,
                    self._args.keystone_admin_tenant_name,
                    self._args.keystone_ip,
                    use_ssl))
    if self._args.dpdk:
        prov_args += " --dpdk_enabled"
    cmd = "sudo python /opt/contrail/utils/provision_vrouter.py "
    local(cmd + prov_args)
    def add_qos_config(self):
        """Rewrite the QoS-related sections of the vrouter agent config.

        Any pre-existing [QOS]/[QUEUE-*]/[QOS-NIANTIC]/[PG-*] sections are
        stripped first, then the sections requested via command-line args
        are written back, and finally the qosmap utility is applied to the
        physical interface(s).
        """
        qos_logical_queue = self._args.qos_logical_queue
        qos_queue_id_list = self._args.qos_queue_id
        default_hw_queue_qos = self._args.default_hw_queue_qos
        qos_priority_tagging = self._args.qos_priority_tagging
        priority_id_list = self._args.priority_id
        priority_scheduling = self._args.priority_scheduling
        priority_bandwidth = self._args.priority_bandwidth
        agent_conf = "/etc/contrail/contrail-vrouter-agent.conf"
        conf_file = "contrail-vrouter-agent.conf"
        configs = {}
        # Clean existing qos config: copy the file to a temp dir, strip the
        # QoS lines with sed, copy it back, and remove the temp dir.
        ltemp_dir = tempfile.mkdtemp()
        local("sudo cp %s %s/" % (agent_conf, ltemp_dir))
        local(
            "sudo sed -i -e '/^\[QOS\]/d' -e '/^\[QUEUE-/d' -e '/^logical_queue/d' -e '/^default_hw_queue/d' -e '/^priority_tagging/d' %s/%s" %
            (ltemp_dir, conf_file))
        local(
            "sudo sed -i -e '/^\[QOS-NIANTIC\]/d' -e '/^\[PG-/d' -e '/^scheduling/d' -e '/^bandwidth/d' %s/%s" %
            (ltemp_dir, conf_file))
        local("sudo cp %s/%s %s" % (ltemp_dir, conf_file, agent_conf))
        local('sudo rm -rf %s' % (ltemp_dir))
        # Set qos_enabled in agent_param to false; re-enabled below only if
        # some QoS config was actually requested.
        self.set_config(
            '/etc/contrail/agent_param',
            sec="''",
            var='qos_enabled',
            val='false')
        # QOS configs: one [QUEUE-<id>] section per hardware queue.
        if qos_queue_id_list is not None:
            self.set_config(
                agent_conf,
                'QOS',
                'priority_tagging',
                qos_priority_tagging)
            num_sections = len(qos_logical_queue)
            # When a default hardware queue is requested and every queue id
            # has a logical-queue list, the last entry is handled separately
            # below as the default queue.
            if(len(qos_logical_queue) == len(qos_queue_id_list) and
                    default_hw_queue_qos):
                num_sections = num_sections - 1
            for i in range(num_sections):
                configs['QUEUE-%s' % qos_queue_id_list[i]] = {
                    'logical_queue':
                    '[%s]' % qos_logical_queue[i].replace(",", ", ")}
            if (default_hw_queue_qos):
                if(len(qos_logical_queue) == len(qos_queue_id_list)):
                    logical_queue = '[%s]' %\
                        qos_logical_queue[-1].replace(",", ", ")
                else:
                    # No logical queues left over for the default queue.
                    logical_queue = '[ ]'
                configs['QUEUE-%s' % qos_queue_id_list[-1]] = {
                    'default_hw_queue': 'true',
                    'logical_queue': logical_queue}
            for section, key_vals in list(configs.items()):
                for key, val in list(key_vals.items()):
                    self.set_config(
                        agent_conf,
                        section, key, val)
        # Priority-group (Niantic NIC) configuration: one [PG-<id>] section
        # per priority group with its scheduling policy and bandwidth.
        if priority_id_list is not None:
            local(
                'sudo contrail-config --set /etc/contrail/contrail-vrouter-agent.conf QOS-NIANTIC')
            for i in range(len(priority_id_list)):
                configs['PG-%s' % priority_id_list[i]] = {
                    'scheduling': priority_scheduling[i],
                    'bandwidth': priority_bandwidth[i]}
            for section, key_vals in list(configs.items()):
                for key, val in list(key_vals.items()):
                    self.set_config(
                        agent_conf,
                        section, key, val)
        if (qos_queue_id_list or priority_id_list):
            # Set qos_enabled in agent_param
            self.set_config(
                '/etc/contrail/agent_param',
                sec="''",
                var='qos_enabled',
                val='true')
            # Run qosmap script on physical interface (on all members for bond
            # interface)
            physical_interface = local(
                "sudo openstack-config --get /etc/contrail/contrail-vrouter-agent.conf VIRTUAL-HOST-INTERFACE physical_interface")
            if os.path.isdir('/sys/class/net/%s/bonding' % physical_interface):
                # Bond device: apply qosmap to every slave interface.
                physical_interfaces_str = local(
                    "sudo cat /sys/class/net/%s/bonding/slaves | tr ' ' '\n' | sort | tr '\n' ' '" %
                    physical_interface)
            else:
                physical_interfaces_str = physical_interface
            local(
                "cd /opt/contrail/utils; python qosmap.py --interface_list %s " %
                physical_interfaces_str)
def disable_nova_compute(self):
# Check if nova-compute is present in nova service list
# Disable nova-compute on TSN node
if local("nova service-list | grep nova-compute", warn_only=True).succeeded:
# Stop the service
local("sudo service nova-compute stop", warn_only=True)
if self.pdist in ['Ubuntu']:
local('sudo echo "manual" >> /etc/init/nova-compute.override')
else:
local('sudo chkconfig nova-compute off')
    def add_tsn_vnc_config(self):
        """Register this host with the API server as a ToR service node.

        Uses the stock provision_vrouter.py helper with
        --router_type tor-service-node and no vhost VMI.
        """
        tsn_ip = self._args.self_ip
        # Remember the hostname for later use by other provisioning steps.
        self.tsn_hostname = socket.gethostname()
        prov_args = "--host_name %s --host_ip %s --api_server_ip %s --oper add "\
            "--admin_user %s --admin_password %s --admin_tenant_name %s "\
            "--openstack_ip %s --router_type tor-service-node --disable_vhost_vmi "\
            % (self.tsn_hostname, tsn_ip, self._args.cfgm_ip,
               self._args.keystone_admin_user,
               self._args.keystone_admin_password,
               self._args.keystone_admin_tenant_name, self._args.keystone_ip)
        # Talk TLS to the API server when keystone auth is https.
        if self._args.apiserver_auth_protocol == 'https':
            prov_args += " --api_server_use_ssl True"
        local(
            "python /opt/contrail/utils/provision_vrouter.py %s" %
            (prov_args))
def start_tsn_service(self):
nova_conf_file = '/etc/contrail/contrail-vrouter-agent.conf'
mode = 'tsn'
if self._args.tsn_evpn_mode:
mode = 'tsn-no-forwarding'
local(
"openstack-config --set %s DEFAULT agent_mode %s" %
(nova_conf_file, mode))
    def setup_tsn_node(self):
        """Full ToR-Service-Node provisioning sequence.

        Order matters: nova-compute is disabled first, then the node is
        registered as a tor-service-node, then the agent mode is set.
        """
        self.disable_nova_compute()
        self.add_tsn_vnc_config()
        self.start_tsn_service()
def increase_vrouter_limit(self):
"""Increase the maximum number of mpls label
and nexthop on tsn node"""
if self._args.vrouter_module_params:
vrouter_module_params = self._args.vrouter_module_params.rstrip(
',')
vrouter_module_params_args = dict(
u.split("=") for u in
vrouter_module_params.split(","))
if self._args.dpdk:
self.dpdk_increase_vrouter_limit(
vrouter_module_params_args)
else:
cmd = "options vrouter"
if 'mpls_labels' in list(vrouter_module_params_args.keys()):
cmd += " vr_mpls_labels=%s" % vrouter_module_params_args['mpls_labels']
if 'nexthops' in list(vrouter_module_params_args.keys()):
cmd += " vr_nexthops=%s" % vrouter_module_params_args['nexthops']
if 'vrfs' in list(vrouter_module_params_args.keys()):
cmd += " vr_vrfs=%s" % vrouter_module_params_args['vrfs']
if 'macs' in list(vrouter_module_params_args.keys()):
cmd += " vr_bridge_entries=%s" % vrouter_module_params_args['macs']
if 'flow_entries' in list(vrouter_module_params_args.keys()):
cmd += " vr_flow_entries=%s" % vrouter_module_params_args['flow_entries']
if 'oflow_entries' in list(vrouter_module_params_args.keys()):
cmd += " vr_oflow_entries=%s" % vrouter_module_params_args['oflow_entries']
if 'mac_oentries' in list(vrouter_module_params_args.keys()):
cmd += " vr_bridge_oentries=%s" % vrouter_module_params_args['mac_oentries']
if 'flow_hold_limit' in list(vrouter_module_params_args.keys()):
cmd += " vr_flow_hold_limit=%s" % vrouter_module_params_args['flow_hold_limit']
if 'max_interface_entries' in list(vrouter_module_params_args.keys()):
cmd += " vr_interfaces=%s" % vrouter_module_params_args['max_interface_entries']
if 'vrouter_dbg' in list(vrouter_module_params_args.keys()):
cmd += " vrouter_dbg=%s" % vrouter_module_params_args['vrouter_dbg']
if 'vr_memory_alloc_checks' in list(vrouter_module_params_args.keys()):
cmd += " vr_memory_alloc_checks=%s" % vrouter_module_params_args['vr_memory_alloc_checks']
local(
"echo %s > %s" %
(cmd, '/etc/modprobe.d/vrouter.conf'), warn_only=True)
    def setup_vrouter_kmod_hugepages(self):
        """Setup 1G and 2M hugepages for vrouter (kernel mode).

        First clears any previous hugepage configuration from /etc/fstab
        and /etc/contrail/agent_param, then mounts hugetlbfs and records
        the requested page counts when hugepages are enabled via
        --vrouter_1G_hugepages / --vrouter_2M_hugepages.
        """
        # Default page count used when the requested value is out of the
        # accepted 1..2 range.
        no_of_pages = 2
        # Update vrouter kernel mode hugepage config (GRUB cmdline).
        self.setup_vrouter_kmod_hugepage_grub()
        # Delete vrouter kernel mode 1G hugepage config: an empty "line"
        # makes insert_line_to_file drop matching lines.
        if os.path.isfile('/etc/fstab'):
            pattern = "hugepage_1G"
            line = ""
            insert_line_to_file(pattern=pattern, line=line,
                                file_name='/etc/fstab')
        pattern = "vrouter_kmod_1G_hugepages"
        line = "vrouter_kmod_1G_hugepages=0"
        insert_line_to_file(pattern=pattern, line=line,
                            file_name='/etc/contrail/agent_param')
        # Delete vrouter kernel mode 2M hugepage config
        if os.path.isfile('/etc/fstab'):
            pattern = "hugepage_2M"
            line = ""
            insert_line_to_file(pattern=pattern, line=line,
                                file_name='/etc/fstab')
        pattern = "vrouter_kmod_2M_hugepages"
        line = "vrouter_kmod_2M_hugepages=0"
        insert_line_to_file(pattern=pattern, line=line,
                            file_name='/etc/contrail/agent_param')
        # Configure vrouter kernel mode 1G hugepages
        if self._args.vrouter_1G_hugepages != '0':
            # Only 1 or 2 pages are accepted; anything else keeps the
            # default of 2 pages.
            if int(self._args.vrouter_1G_hugepages) > 0 and int(self._args.vrouter_1G_hugepages) <= 2:
                no_of_pages = int(self._args.vrouter_1G_hugepages)
            mounted = local("sudo mount | grep hugepage_1G | cut -d' ' -f 3",
                            capture=True, warn_only=False)
            if (mounted != ""):
                print("hugepages already mounted on %s" % mounted)
            else:
                local("sudo mkdir -p /mnt/hugepage_1G", warn_only=False)
                local("sudo mount -t hugetlbfs -o pagesize=1G none /mnt/hugepage_1G", warn_only=False)
            if os.path.isdir('/mnt/hugepage_1G'):
                # Pre-create the backing files and persist the mount plus
                # the page count for the agent init script.
                for i in range(no_of_pages):
                    local("sudo touch /mnt/hugepage_1G/vrouter_1G_mem_%s " % i, warn_only=False)
                pattern = "hugepage_1G"
                line = "hugetlbfs "\
                    "/mnt/hugepage_1G hugetlbfs defaults,pagesize=1G 0 0"
                insert_line_to_file(pattern=pattern, line=line,
                                    file_name='/etc/fstab')
                pattern = "vrouter_kmod_1G_hugepages"
                line = "vrouter_kmod_1G_hugepages=%s" % no_of_pages
                insert_line_to_file(pattern=pattern, line=line,
                                    file_name='/etc/contrail/agent_param')
        # Configure vrouter kernel mode 2M hugepages
        # NOTE(review): 2M pages are only configured when 1G pages are also
        # requested -- confirm this coupling is intended.
        if self._args.vrouter_2M_hugepages != '0' and self._args.vrouter_1G_hugepages != '0':
            if int(self._args.vrouter_2M_hugepages) >= 0 and int(self._args.vrouter_2M_hugepages) <= 2:
                no_of_pages = int(self._args.vrouter_2M_hugepages)
            mounted = local("sudo mount | grep hugepage_2M | cut -d' ' -f 3",
                            capture=True, warn_only=False)
            if (mounted != ""):
                print("hugepages already mounted on %s" % mounted)
            else:
                local("sudo mkdir -p /mnt/hugepage_2M", warn_only=False)
                local("sudo mount -t hugetlbfs -o pagesize=2M none /mnt/hugepage_2M", warn_only=False)
            if os.path.isdir('/mnt/hugepage_2M'):
                for i in range(no_of_pages):
                    local("sudo touch /mnt/hugepage_2M/vrouter_2M_mem_%s " % i, warn_only=False)
                pattern = "hugepage_2M"
                line = "hugetlbfs "\
                    "/mnt/hugepage_2M hugetlbfs defaults,pagesize=2M 0 0"
                insert_line_to_file(pattern=pattern, line=line,
                                    file_name='/etc/fstab')
                pattern = "vrouter_kmod_2M_hugepages"
                line = "vrouter_kmod_2M_hugepages=%s" % no_of_pages
                insert_line_to_file(pattern=pattern, line=line,
                                    file_name='/etc/contrail/agent_param')
    def setup(self):
        """Top-level provisioning entry point for this compute/TSN node."""
        self.disable_selinux()
        self.disable_iptables()
        self.setup_coredump()
        self.fixup_config_files()
        self.increase_vrouter_limit()
        self.setup_sriov_grub()
        if self._args.tsn_mode or self._args.tsn_evpn_mode:
            # ToR service node: registered as tor-service-node instead of a
            # regular vrouter, with nova-compute disabled.
            self.setup_tsn_node()
            self.run_services()
        else:
            self.run_services()
            # Register with the API server unless this is a re-provision.
            if self._args.register and not self.reprov:
                self.add_vnc_config()
| 46.113553 | 144 | 0.533422 |
13a907b30889762024bb9a3d3a5ea7e0e33036e1 | 3,567 | py | Python | tests/test_figures_formating.py | lexman/tuttle | dab07db4a1e3e18c876deb2897c07be3935acd60 | [
"MIT"
] | 26 | 2015-10-08T17:12:56.000Z | 2021-10-21T14:47:22.000Z | tests/test_figures_formating.py | Pandinosaurus/tuttle | dab07db4a1e3e18c876deb2897c07be3935acd60 | [
"MIT"
] | 11 | 2015-10-09T12:37:15.000Z | 2018-04-01T15:47:49.000Z | tests/test_figures_formating.py | Pandinosaurus/tuttle | dab07db4a1e3e18c876deb2897c07be3935acd60 | [
"MIT"
] | 3 | 2016-03-29T17:15:41.000Z | 2018-11-16T13:39:31.000Z | from tuttle.error import TuttleError
from tuttle.figures_formating import nice_size, nice_duration, parse_duration
class TestFileSizeFormating:
def test_nice_size_B(self):
""" A number below 1 000 B should be expressed in B"""
nice = nice_size(12)
assert nice == "12 B", nice
def test_nice_size_KB(self):
""" A number below 1 000 000 B should be expressed in KB"""
nice = nice_size(12034)
assert nice == "11.7 KB", nice
def test_nice_size_MB(self):
""" A number below 1 000 000 0000 B should be expressed in MB"""
nice = nice_size(12056000)
assert nice == "11.4 MB", nice
def test_nice_size_MB_after_dot(self):
""" A number below 1 000 000 0000 B should be expressed in MB"""
nice = nice_size(12506000)
assert nice == "11.9 MB", nice
def test_nice_size_GB(self):
""" A number below 1 000 000 0000 000 B should be expressed in GB"""
nice = nice_size(12049000000)
assert nice == "11.2 GB", nice
class TestDurationFormating:
    """nice_duration should render a duration with its two most
    significant time units, rounding away sub-second precision."""

    def test_nice_duration_s(self):
        """A duration below the minute should be expressed in seconds"""
        rendered = nice_duration(12)
        assert rendered == "12s", rendered

    def test_nice_duration_min(self):
        """A duration below the hour should be expressed in minutes and seconds"""
        rendered = nice_duration(64)
        assert rendered == "1min 4s", rendered

    def test_nice_size_hour(self):
        """A duration below the day should be expressed in hours and minutes"""
        rendered = nice_duration(10000)
        assert rendered == "2h 46min", rendered

    def test_nice_size_day(self):
        """A duration above the day should be expressed in days and hours"""
        rendered = nice_duration(1000000)
        assert rendered == "11d 13h", rendered

    def test_nice_duration_ms(self):
        """A duration must be rounded to seconds"""
        rendered = nice_duration(73.3)
        assert rendered == "1min 13s", rendered
class TestDurationParsing:
    """parse_duration should accept d/h/min/s components (with arbitrary
    spacing) and reject negative or non-duration expressions."""

    def _expect_value_error(self, expression):
        # Shared helper: calling parse_duration on the expression must
        # raise ValueError.
        raised = False
        try:
            parse_duration(expression)
        except ValueError:
            raised = True
        assert raised, "Should have raised"

    def test_parse_negative_value(self):
        """Should raise if the expression is negative because a duration can't be negative"""
        self._expect_value_error("-1")

    def test_parse_seconds(self):
        """should interpret s as seconds"""
        assert parse_duration("12s") == 12

    def test_parse_bad_expression(self):
        """Should raise if the expression isn't a duration"""
        self._expect_value_error("Not a number, Bro")

    def test_parse_minutes_secs(self):
        """A duration can have minutes and seconds"""
        assert parse_duration("14min 12s") == 14 * 60 + 12

    def test_parse_minutes(self):
        """A duration can have only minutes"""
        assert parse_duration("14min") == 14 * 60

    def test_parse_several_spaces(self):
        """Figures and units also parts of the duration can be separated by any number of spaces"""
        assert parse_duration("14 min 12 s") == 14 * 60 + 12

    def test_parse_hours(self):
        """A duration can have hours"""
        assert parse_duration("3 h 12s") == 3 * 3600 + 12

    def test_parse_days(self):
        """A duration can have days"""
        assert parse_duration("4d 12s") == 4 * 24 * 3600 + 12
| 33.027778 | 101 | 0.612279 |
3c381a3e6bb61cf2a3a4113d09220b1a47efe4ed | 700 | py | Python | tools/voc_eval_lib/utils/timer.py | Zhen-Dong/CoDeNet | 5f5cb78859a691db532c8318a38c1b124adfb104 | [
"MIT"
] | 15 | 2021-03-03T03:16:32.000Z | 2022-03-25T04:14:57.000Z | rcnn/utils/timer.py | truetqy/lesion_det_dual_att | 5a5a77dd7f3aa195a2f5b84169822eb32c396d65 | [
"Apache-2.0"
] | 3 | 2021-07-12T07:04:12.000Z | 2022-03-30T12:20:47.000Z | tools/voc_eval_lib/utils/timer.py | Zhen-Dong/CoDeNet | 5f5cb78859a691db532c8318a38c1b124adfb104 | [
"MIT"
] | 3 | 2021-03-04T14:50:49.000Z | 2021-12-16T08:38:56.000Z | import time
class Timer(object):
"""A simple timer."""
def __init__(self):
self.total_time = 0.
self.calls = 0
self.start_time = 0.
self.diff = 0.
self.average_time = 0.
def tic(self):
# using time.time instead of time.clock because time time.clock
# does not normalize for multithreading
self.start_time = time.time()
def toc(self, average=True):
self.diff = time.time() - self.start_time
self.total_time += self.diff
self.calls += 1
self.average_time = self.total_time / self.calls
if average:
return self.average_time
else:
return self.diff
| 25.925926 | 71 | 0.58 |
e782335932931acf3167a5d5c633ef68706a3e06 | 983 | py | Python | snakes_on_a_plane/djang_models/migrations/0002_passenger_seats.py | ravewillow6383/django_models | e7f6a641ac30ac347f0561f44f8c33f13df6f3b4 | [
"MIT"
] | null | null | null | snakes_on_a_plane/djang_models/migrations/0002_passenger_seats.py | ravewillow6383/django_models | e7f6a641ac30ac347f0561f44f8c33f13df6f3b4 | [
"MIT"
] | null | null | null | snakes_on_a_plane/djang_models/migrations/0002_passenger_seats.py | ravewillow6383/django_models | e7f6a641ac30ac347f0561f44f8c33f13df6f3b4 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.4 on 2019-08-13 22:37
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated by Django 2.2.4: creates the Passenger and Seats
    tables for the djang_models app."""

    # NOTE(review): initial=True together with a dependency on
    # '0001_initial' is unusual for a non-first migration -- confirm this
    # file was generated intentionally.
    initial = True

    dependencies = [
        ('djang_models', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Passenger',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=256)),
            ],
        ),
        migrations.CreateModel(
            name='Seats',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('seatnumber', models.CharField(max_length=3)),
                # Cascade-deleted with its passenger; default=1 presumably
                # targets an existing Passenger row -- verify before
                # applying to a populated database.
                ('passenger', models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='djang_models.Passenger')),
            ],
        ),
    ]
| 30.71875 | 134 | 0.585961 |
9994db7b05fe237b424990f9acd5d985e635d89a | 3,679 | py | Python | rpython/rlib/rsre/rsre_constants.py | nanjekyejoannah/pypy | e80079fe13c29eda7b2a6b4cd4557051f975a2d9 | [
"Apache-2.0",
"OpenSSL"
] | 333 | 2015-08-08T18:03:38.000Z | 2022-03-22T18:13:12.000Z | rpython/rlib/rsre/rsre_constants.py | nanjekyejoannah/pypy | e80079fe13c29eda7b2a6b4cd4557051f975a2d9 | [
"Apache-2.0",
"OpenSSL"
] | 7 | 2020-02-16T16:49:05.000Z | 2021-11-26T09:00:56.000Z | rpython/rlib/rsre/rsre_constants.py | nanjekyejoannah/pypy | e80079fe13c29eda7b2a6b4cd4557051f975a2d9 | [
"Apache-2.0",
"OpenSSL"
] | 55 | 2015-08-16T02:41:30.000Z | 2022-03-20T20:33:35.000Z | # Horrible import-time hack.
# Blame CPython for renumbering these OPCODE_* at some point.
from rpython.rlib.objectmodel import specialize
try:
import pypy.module.sys.version
V37 = pypy.module.sys.version.CPYTHON_VERSION >= (3, 7)
except ImportError:
raise ImportError("Cannot import pypy.module.sys.version. You can safely "
"remove this 'raise' line if you are not interested in "
"PyPy but only RPython.")
V37 = False
# Regex bytecode opcode numbers.  CPython renumbered these in 3.7, so many
# values below are selected at import time from the V37 flag computed
# above.  Names ending in _IGNORE are the case-insensitive variants; the
# OPCODE37_* names only exist under the 3.7 numbering and OPCODE27_* only
# under the older one.
OPCODE_FAILURE = 0
OPCODE_SUCCESS = 1
OPCODE_ANY = 2
OPCODE_ANY_ALL = 3
OPCODE_ASSERT = 4
OPCODE_ASSERT_NOT = 5
OPCODE_AT = 6
OPCODE_BRANCH = 7
OPCODE_CALL = 8        # not used
OPCODE_CATEGORY = 9
OPCODE_CHARSET = 10
OPCODE_BIGCHARSET = 11
OPCODE_GROUPREF = 12
OPCODE_GROUPREF_EXISTS = 13
OPCODE_GROUPREF_IGNORE = 28 if V37 else 14
OPCODE_IN = 14 if V37 else 15
OPCODE_IN_IGNORE = 29 if V37 else 16
OPCODE_INFO = 15 if V37 else 17
OPCODE_JUMP = 16 if V37 else 18
OPCODE_LITERAL = 17 if V37 else 19
OPCODE_LITERAL_IGNORE = 30 if V37 else 20
OPCODE_MARK = 18 if V37 else 21
OPCODE_MAX_UNTIL = 19 if V37 else 22
OPCODE_MIN_UNTIL = 20 if V37 else 23
OPCODE_NOT_LITERAL = 21 if V37 else 24
OPCODE_NOT_LITERAL_IGNORE = 31 if V37 else 25
OPCODE_NEGATE = 22 if V37 else 26
OPCODE_RANGE = 23 if V37 else 27
OPCODE_REPEAT = 24 if V37 else 28
OPCODE_REPEAT_ONE = 25 if V37 else 29
OPCODE_SUBPATTERN = 26 if V37 else 30   # not used
OPCODE_MIN_REPEAT_ONE = 27 if V37 else 31

# Opcodes that exist only in one of the two numberings; None marks "not
# available in this version" (see the eq() helper below).
OPCODE27_RANGE_IGNORE = None if V37 else 32

OPCODE37_GROUPREF_LOC_IGNORE = 32 if V37 else None
OPCODE37_IN_LOC_IGNORE = 33 if V37 else None
OPCODE37_LITERAL_LOC_IGNORE = 34 if V37 else None
OPCODE37_NOT_LITERAL_LOC_IGNORE = 35 if V37 else None
OPCODE37_GROUPREF_UNI_IGNORE = 36 if V37 else None
OPCODE37_IN_UNI_IGNORE = 37 if V37 else None
OPCODE37_LITERAL_UNI_IGNORE = 38 if V37 else None
OPCODE37_NOT_LITERAL_UNI_IGNORE = 39 if V37 else None
OPCODE37_RANGE_UNI_IGNORE = 40 if V37 else None

# not used by Python itself
OPCODE_UNICODE_GENERAL_CATEGORY = 70
@specialize.argtype(1)
def eq(op, const):
    """True when *const* exists in this opcode numbering and *op* equals it.

    Version-specific opcode constants are None when absent, so a None
    constant never matches.
    """
    if const is None:
        return False
    return op == const
# Anchor ("at") codes used by the AT opcode.
# NOTE(review): the same names appear to be re-bound with identical values
# by _makecodes(ATCODES) below; this explicit list looks kept for
# readability/reference -- confirm before relying on either copy alone.
AT_BEGINNING = 0
AT_BEGINNING_LINE = 1
AT_BEGINNING_STRING = 2
AT_BOUNDARY = 3
AT_NON_BOUNDARY = 4
AT_END = 5
AT_END_LINE = 6
AT_END_STRING = 7
AT_LOC_BOUNDARY = 8
AT_LOC_NON_BOUNDARY = 9
AT_UNI_BOUNDARY = 10
AT_UNI_NON_BOUNDARY = 11
def _makecodes(s):
d = {}
for i, name in enumerate(s.strip().split()):
d[name] = i
globals().update(d)
return d
ATCODES = _makecodes("""
AT_BEGINNING AT_BEGINNING_LINE AT_BEGINNING_STRING
AT_BOUNDARY AT_NON_BOUNDARY
AT_END AT_END_LINE AT_END_STRING
AT_LOC_BOUNDARY AT_LOC_NON_BOUNDARY
AT_UNI_BOUNDARY AT_UNI_NON_BOUNDARY
""")
# categories
CHCODES = _makecodes("""
CATEGORY_DIGIT CATEGORY_NOT_DIGIT
CATEGORY_SPACE CATEGORY_NOT_SPACE
CATEGORY_WORD CATEGORY_NOT_WORD
CATEGORY_LINEBREAK CATEGORY_NOT_LINEBREAK
CATEGORY_LOC_WORD CATEGORY_LOC_NOT_WORD
CATEGORY_UNI_DIGIT CATEGORY_UNI_NOT_DIGIT
CATEGORY_UNI_SPACE CATEGORY_UNI_NOT_SPACE
CATEGORY_UNI_WORD CATEGORY_UNI_NOT_WORD
CATEGORY_UNI_LINEBREAK CATEGORY_UNI_NOT_LINEBREAK
""")
SRE_INFO_PREFIX = 1
SRE_INFO_LITERAL = 2
SRE_INFO_CHARSET = 4
SRE_FLAG_LOCALE = 4 # honour system locale
SRE_FLAG_UNICODE = 32 # use unicode locale
| 32.848214 | 78 | 0.687687 |
ee07a36c87b9e3d7126ff66e8f91316ecb53b733 | 682 | py | Python | models/ConnectToSSH.py | GerardoLozano1423/AWS | 909e940fb194e6ade2b5df60adacaa74d2137ced | [
"Apache-2.0"
] | null | null | null | models/ConnectToSSH.py | GerardoLozano1423/AWS | 909e940fb194e6ade2b5df60adacaa74d2137ced | [
"Apache-2.0"
] | null | null | null | models/ConnectToSSH.py | GerardoLozano1423/AWS | 909e940fb194e6ade2b5df60adacaa74d2137ced | [
"Apache-2.0"
] | null | null | null | #ssh -i "key_pair.pem" ubuntu@ec2-35-164-15-203.us-west-2.compute.amazonaws.com
#ssh -i "key_pair.pem" ubuntu@35.164.15.203
import paramiko
class ConnectToSSH:
def __init__(self, ip = '', key = ''):
self.ip = ip
self.key = key
def Open(self):
self.client = paramiko.SSHClient()
self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.client.connect(hostname='35.164.15.203',port=22,username='ubuntu',key_filename='keys/key_pair.pem')
#comando2 = 'sudo apt-get install apache2'
comando2 = 'sudo apt-get upgrade -b -n1'
stdin, stdout, stder = self.client.exec_command(comando2)
#stdin.write('Y')
print(stdout.read())
def Close(self):
pass | 29.652174 | 106 | 0.71261 |
6e77eb5a931d9a79b32f4257a9dcb703e54011c9 | 132 | py | Python | configs/fcn/fcn_r50b-d8_769x769_80k_cityscapes.py | Xlinford/mmsegmentation | 8b444de5e6db2af2538a73a93ac75204f5c3bb2f | [
"Apache-2.0"
] | null | null | null | configs/fcn/fcn_r50b-d8_769x769_80k_cityscapes.py | Xlinford/mmsegmentation | 8b444de5e6db2af2538a73a93ac75204f5c3bb2f | [
"Apache-2.0"
] | null | null | null | configs/fcn/fcn_r50b-d8_769x769_80k_cityscapes.py | Xlinford/mmsegmentation | 8b444de5e6db2af2538a73a93ac75204f5c3bb2f | [
"Apache-2.0"
] | null | null | null | _base_ = './fcn_r50-d8_769x769_80k_cityscapes.py'
model = dict(pretrained='torchvision://resnet50', backbone=dict(type='ResNet'))
| 44 | 80 | 0.757576 |
f5221622ed405b966f205fc7ffb680217959e62f | 3,708 | py | Python | tests/nlu/classifiers/test_logistic_regression_classifier.py | mukulbalodi/rasa | 3126ef1148c165f2402f3c7203138d429e46c68c | [
"Apache-2.0"
] | 3,603 | 2017-05-21T18:34:55.000Z | 2019-04-16T11:58:09.000Z | tests/nlu/classifiers/test_logistic_regression_classifier.py | mukulbalodi/rasa | 3126ef1148c165f2402f3c7203138d429e46c68c | [
"Apache-2.0"
] | 2,782 | 2017-05-21T20:36:15.000Z | 2019-04-16T14:35:20.000Z | tests/nlu/classifiers/test_logistic_regression_classifier.py | mukulbalodi/rasa | 3126ef1148c165f2402f3c7203138d429e46c68c | [
"Apache-2.0"
] | 1,337 | 2017-05-21T18:10:33.000Z | 2019-04-16T09:14:42.000Z | import copy
import pytest
import pathlib
import numpy as np
from rasa.shared.nlu.training_data.message import Message
from rasa.engine.storage.resource import Resource
from rasa.engine.storage.local_model_storage import LocalModelStorage
from rasa.engine.graph import ExecutionContext
from rasa.shared.nlu.training_data.training_data import TrainingData
from rasa.nlu.tokenizers.whitespace_tokenizer import WhitespaceTokenizer
from rasa.nlu.featurizers.sparse_featurizer.count_vectors_featurizer import (
CountVectorsFeaturizer,
)
from rasa.nlu.classifiers.logistic_regression_classifier import (
LogisticRegressionClassifier,
)
@pytest.fixture
def featurizer_sparse(tmpdir):
    """Generate a featurizer for tests."""
    # Back the component with an on-disk model storage rooted at the
    # pytest-provided temp dir so nothing leaks between test runs.
    node_storage = LocalModelStorage(pathlib.Path(tmpdir))
    node_resource = Resource("sparse_feat")
    context = ExecutionContext(node_storage, node_resource)
    # Default-configured sparse count-vectors featurizer.
    return CountVectorsFeaturizer(
        config=CountVectorsFeaturizer.get_default_config(),
        resource=node_resource,
        model_storage=node_storage,
        execution_context=context,
    )
# Module-level whitespace tokenizer shared by the tests below; intent
# tokenization is disabled since intents are plain labels here.
tokeniser = WhitespaceTokenizer(
    {
        "only_alphanum": False,
        "intent_tokenization_flag": False,
        "intent_split_symbol": "_",
    }
)
@pytest.fixture()
def training_data():
    """Four labelled utterances covering two intents (greet / goodbye)."""
    labelled_texts = [
        ("hello", "greet"),
        ("hi there", "greet"),
        ("ciao", "goodbye"),
        ("bye", "goodbye"),
    ]
    return TrainingData(
        [Message({"text": text, "intent": intent})
         for text, intent in labelled_texts]
    )
def is_sorted(ranking):
    """Return True when *ranking* is in non-increasing confidence order.

    Fix: the previous version asserted internally and unconditionally
    returned True, so callers' ``assert is_sorted(...)`` could never fail
    through the return value; it now returns a real boolean.
    """
    return all(
        ranking[i]["confidence"] >= ranking[i + 1]["confidence"]
        for i in range(len(ranking) - 1)
    )
def test_predictions_added(training_data, tmpdir, featurizer_sparse):
    """End-to-end check of the logistic-regression intent classifier:
    train on four utterances, verify prediction shape/ordering, persist,
    reload, and confirm the reloaded model agrees with the in-memory one.
    """
    # Set up classifier backed by an on-disk storage in the temp dir.
    node_storage = LocalModelStorage(pathlib.Path(tmpdir))
    node_resource = Resource("classifier")
    context = ExecutionContext(node_storage, node_resource)
    classifier = LogisticRegressionClassifier(
        config=LogisticRegressionClassifier.get_default_config(),
        name=context.node_name,
        resource=node_resource,
        model_storage=node_storage,
    )
    # First we add tokens.
    tokeniser.process(training_data.training_examples)
    # Next we add features.
    featurizer_sparse.train(training_data)
    featurizer_sparse.process(training_data.training_examples)
    # Train the classifier.
    classifier.train(training_data)
    # Make predictions (annotates the messages in place).
    classifier.process(training_data.training_examples)
    # Check that the messages have been processed correctly
    for msg in training_data.training_examples:
        _, conf = msg.get("intent")["name"], msg.get("intent")["confidence"]
        # Confidence should be between 0 and 1.
        assert 0 < conf < 1
        ranking = msg.get("intent_ranking")
        # Ranking must be sorted by confidence and cover both intents.
        assert is_sorted(ranking)
        assert {i["name"] for i in ranking} == {"greet", "goodbye"}
        # Confirm the sum of confidences is 1.0
        assert np.isclose(np.sum([i["confidence"] for i in ranking]), 1.0)
    # Round-trip through persistence: the reloaded model must predict the
    # same intents as the freshly trained one.
    classifier.persist()
    loaded_classifier = LogisticRegressionClassifier.load(
        {}, node_storage, node_resource, context
    )
    predicted = copy.copy(training_data)
    actual = copy.copy(training_data)
    loaded_messages = loaded_classifier.process(predicted.training_examples)
    trained_messages = classifier.process(actual.training_examples)
    for m1, m2 in zip(loaded_messages, trained_messages):
        assert m1.get("intent") == m2.get("intent")
ba64f17a6118888b9aa4f89dffda1a902a60e83c | 58 | py | Python | reikna/__init__.py | fjarri/reikna | 171bf233664f5f7ec4f457e826e0b91849e91384 | [
"MIT"
] | 122 | 2015-05-01T12:42:34.000Z | 2021-09-30T22:47:59.000Z | lib/python/reikna-0.7.5/reikna/__init__.py | voxie-viewer/voxie | d2b5e6760519782e9ef2e51f5322a3baa0cb1198 | [
"MIT"
] | 42 | 2015-05-04T16:55:47.000Z | 2021-09-18T04:53:34.000Z | lib/python/reikna-0.7.5/reikna/__init__.py | voxie-viewer/voxie | d2b5e6760519782e9ef2e51f5322a3baa0cb1198 | [
"MIT"
] | 14 | 2015-05-01T19:22:52.000Z | 2021-09-30T22:48:03.000Z | from reikna.cluda.array_helpers import concatenate, roll
| 19.333333 | 56 | 0.844828 |
c153578be4103dd443c4c79892d7b7d191bb8550 | 1,674 | py | Python | tangos/properties/pynbody/SF.py | j-davies-astro/tangos | ae15578820087765e5180e69310e274c237d3912 | [
"BSD-3-Clause"
] | null | null | null | tangos/properties/pynbody/SF.py | j-davies-astro/tangos | ae15578820087765e5180e69310e274c237d3912 | [
"BSD-3-Clause"
] | null | null | null | tangos/properties/pynbody/SF.py | j-davies-astro/tangos | ae15578820087765e5180e69310e274c237d3912 | [
"BSD-3-Clause"
] | null | null | null | from .. import TimeChunkedProperty
from . import pynbody_handler_module, PynbodyPropertyCalculation
import numpy as np
class StarFormHistogram(TimeChunkedProperty):
works_with_handler = pynbody_handler_module.PynbodyInputHandler
requires_particle_data = True
names = "SFR_histogram",
def plot_xlabel(self):
return "t/Gyr"
def plot_ylabel(self):
return r"$SFR/M_{\odot} yr^{-1}$"
def calculate(self, halo, existing_properties):
try:
weights = halo.st['massform']
except KeyError:
weights = halo.st['mass']
tmax_Gyr = 20.0 # calculate up to 20 Gyr
nbins = int(20.0/self.pixel_delta_t_Gyr)
M,_ = np.histogram(halo.st['tform'].in_units("Gyr"),weights=weights.in_units("Msol"),bins=nbins,range=(0,tmax_Gyr))
t_now = halo.properties['time'].in_units("Gyr")
M/=self.pixel_delta_t_Gyr
M = M[self.store_slice(t_now)]
return M,
def reassemble(self, *options):
reassembled = super(StarFormHistogram, self).reassemble(*options)
return reassembled/1e9 # Msol per Gyr -> Msol per yr
class StarForm(PynbodyPropertyCalculation):
    """Recent star-formation rates averaged over 10 Myr and 100 Myr."""

    names = "SFR_10Myr", "SFR_100Myr"

    def calculate(self, halo, existing_properties):
        stars = halo.star
        t_now = stars.properties['time'].in_units("Gyr")
        # Stellar ages in Gyr; 0.01 Gyr = 10 Myr, 0.1 Gyr = 100 Myr.
        age = t_now - stars['tform'].in_units("Gyr")
        recent_10Myr = age < 0.01
        recent_100Myr = age < 0.1
        # Because physical_units has been called previously, mass is in
        # Msol; dividing by the window length in years gives Msol/yr.
        return (stars['mass'][recent_10Myr].sum() / 1e7,
                stars['mass'][recent_100Myr].sum() / 1e8)
| 34.163265 | 123 | 0.658901 |
f004f01d16056597da911207077df1e2e298cfa7 | 3,473 | py | Python | tools/phenomics/gol_blood/data_utils.py | skitchen19/galaxy_tools | b935f36cfe430263564503ebb71f78dc79315acb | [
"MIT"
] | 3 | 2017-04-05T18:01:59.000Z | 2019-05-03T14:15:31.000Z | tools/phenomics/gol_blood/data_utils.py | skitchen19/galaxy_tools | b935f36cfe430263564503ebb71f78dc79315acb | [
"MIT"
] | 6 | 2019-02-27T15:45:58.000Z | 2021-01-12T15:18:50.000Z | tools/phenomics/gol_blood/data_utils.py | skitchen19/galaxy_tools | b935f36cfe430263564503ebb71f78dc79315acb | [
"MIT"
] | 2 | 2018-10-26T18:36:39.000Z | 2019-01-28T15:12:39.000Z | #!/usr/bin/env python
import numpy as np
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
def load_raw3d_data(filename):
with open(filename, 'r') as file:
data = np.loadtxt(file, delimiter=',', usecols=(0, 1, 2), skiprows=1)
return data
def normalize_data(data, log_fh):
dataNorm = np.copy(data)
dataNorm[:, 0] = data[:, 0]-np.mean(data[:, 0])
dataNorm[:, 1] = data[:, 1]-np.mean(data[:, 1])
dataNorm[:, 2] = data[:, 2]-np.mean(data[:, 2])
log_fh.write("data-mean\n{}\n".format(dataNorm[:5]))
pca = PCA()
dataNorm1 = pca.fit_transform(dataNorm)
log_fh.write("data-pca\n{}\n".format(dataNorm1[:5]))
# maxAll = np.amax(dataNorm1)*1.1
# minAll = np.amin(dataNorm1)*1.1
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# X, Y, Z = dataNorm1[:, 0], dataNorm1[:, 1], dataNorm1[:, 2]
ax.scatter(dataNorm1[:, 0], dataNorm1[:, 1], dataNorm1[:, 2])
ax.set_xlabel('X Axis')
ax.set_ylabel('Y Axis')
ax.set_zlabel('Z Axis')
plt.show(block=False)
r = np.sqrt((dataNorm[:, 0]**2)+(dataNorm[:, 1]**2)+(dataNorm[:, 2]**2))
meanR = np.mean(r)
# stdR = np.std(r)
dataNorm = dataNorm/meanR
r = np.sqrt((dataNorm1[:, 0]**2)+(dataNorm1[:, 1]**2)+(dataNorm1[:, 2]**2))
meanR = np.mean(r)
# stdR = np.std(r)
dataNorm1 = dataNorm1/meanR
# maxAll = np.amax(dataNorm1)*1.1
# minAll = np.amin(dataNorm1)*1.1
plot_2D_graphs(dataNorm1)
return dataNorm1, meanR
def map_to_non_euclid_3d(data, xc=0, yc=0, zc=0, xE=1, yE=1, zE=1):
    """
    Converts data from the Cartesian coordinate system to spherical
    coordinates.

    data (numpy array): Nx3 array of points in 3D Cartesian coordinates.
    xc, yc, zc: centre subtracted from each axis before conversion.
    xE, yE, zE: per-axis scale factors (ellipsoidal normalisation).

    Output (rTheta) - Nx3 array with columns
        [:, 0] theta - azimuth in [0, 2*pi)
        [:, 1] phi   - polar angle arccos(z/r) in [0, pi]
        [:, 2] r     - radius
    """
    # Centre and scale once so every formula uses the same transformed
    # coordinates.  (Fix: the old code decided the quadrant from the RAW
    # coordinates while computing theta from the transformed ones, giving
    # wrong azimuths whenever xc/yc were non-zero.)
    x = (data[:, 0] - xc) / xE
    y = (data[:, 1] - yc) / yE
    z = (data[:, 2] - zc) / zE
    rTheta = np.zeros(data.shape)
    rTheta[:, 2] = np.sqrt(x ** 2 + y ** 2 + z ** 2)
    # Phi: acos(z/r)
    rTheta[:, 1] = np.arccos(np.divide(z, rTheta[:, 2]))
    # Theta via arctan2, folded into [0, 2*pi).  (Fix: the previous
    # per-point quadrant correction mapped points on the negative x-axis
    # to 0 instead of pi, and points with x == 0, y < 0 to pi/2 instead of
    # 3*pi/2; arctan2 handles every quadrant and axis correctly.)
    rTheta[:, 0] = np.mod(np.arctan2(y, x), 2 * np.pi)
    return rTheta
def map_to_non_euclid_to_plot_3d(rTheta):
    """Flatten spherical points (theta, phi, r) for plotting.

    The arc length phi*r becomes the planar radius at azimuth theta
    (columns 0 and 1), while the true radius r is kept in column 2.
    """
    theta = rTheta[:, 0]
    arc = rTheta[:, 1] * rTheta[:, 2]
    return np.column_stack((arc * np.cos(theta),
                            arc * np.sin(theta),
                            rTheta[:, 2]))
def plot_2D_graphs(data):
    """Show three scatter projections (XY, XZ, YZ) of a 3D point set."""
    # (x column, y column, scatter color or None for default, labels)
    panels = (
        (0, 1, None, 'X Axis', 'Y Axis'),
        (0, 2, 'y', 'X Axis', 'Z Axis'),
        (1, 2, 'green', 'Y Axis', 'Z Axis'),
    )
    fig, axs = plt.subplots(3)
    for ax, (i, j, color, xlabel, ylabel) in zip(axs, panels):
        if color is None:
            ax.scatter(data[:, i], data[:, j], linestyle='--')
        else:
            ax.scatter(data[:, i], data[:, j], color=color, linestyle='--')
        ax.set_xlabel(xlabel)
        ax.set_ylabel(ylabel)
        ax.grid()
    plt.show(block=False)
| 32.157407 | 121 | 0.558307 |
ae805ea1fde8dd52070d7591523be128aedcda0e | 10,967 | py | Python | tests/sources/tools/perception/object_detection_3d/voxel_object_detection_3d/test_object_detection_3d.py | daoran/opendr | bca25f6a43244fe9c219a24576181f94a0726923 | [
"Apache-2.0"
] | null | null | null | tests/sources/tools/perception/object_detection_3d/voxel_object_detection_3d/test_object_detection_3d.py | daoran/opendr | bca25f6a43244fe9c219a24576181f94a0726923 | [
"Apache-2.0"
] | null | null | null | tests/sources/tools/perception/object_detection_3d/voxel_object_detection_3d/test_object_detection_3d.py | daoran/opendr | bca25f6a43244fe9c219a24576181f94a0726923 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020-2022 OpenDR European Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
import shutil
import os
import torch
from opendr.engine.datasets import PointCloudsDatasetIterator
from opendr.perception.object_detection_3d import VoxelObjectDetection3DLearner
from opendr.perception.object_detection_3d import KittiDataset, LabeledPointCloudsDatasetIterator
# Target device for all learner tests; override with the TEST_DEVICE
# environment variable (falls back to 'cpu' when unset or empty).
# `or` replaces the original `x if x else 'cpu'` form, which called
# os.getenv twice for the same value.
DEVICE = os.getenv('TEST_DEVICE') or 'cpu'

print("Using device:", DEVICE)
print("Using device:", DEVICE, file=sys.stderr)
def rmfile(path):
    """Remove a single file, printing (not raising) OS-level failures."""
    try:
        os.remove(path)
    except OSError as err:
        print(f"Error: {err.filename} - {err.strerror}.")
def rmdir(_dir):
    """Recursively delete a directory tree, printing (not raising) failures."""
    try:
        shutil.rmtree(_dir)
    except OSError as err:
        print(f"Error: {err.filename} - {err.strerror}.")
class TestVoxelObjectDetection3DLearner(unittest.TestCase):
    """End-to-end tests for VoxelObjectDetection3DLearner.

    ``setUpClass`` downloads a nano KITTI subset and four pretrained models
    into ``temp_dir``; ``tearDownClass`` removes everything. The individual
    tests iterate only over ``car_configs`` (TANet car and PointPillars car).
    """

    @classmethod
    def setUpClass(cls):
        """Prepare config paths and download the dataset and all models once."""
        print("\n\n****************************************\nTEST Voxel Object Detection 3D Learner\n"
              "****************************************")
        cls.temp_dir = os.path.join("tests", "sources", "tools",
                                    "perception", "object_detection_3d",
                                    "voxel_object_detection_3d",
                                    "voxel_object_detection_3d_temp")
        cls.config_tanet_car = os.path.join(".", "src", "opendr", "perception",
                                            "object_detection_3d",
                                            "voxel_object_detection_3d",
                                            "second_detector", "configs", "tanet",
                                            "car", "test_short.proto")
        cls.config_tanet_ped_cycle = os.path.join(".", "src", "opendr", "perception",
                                                  "object_detection_3d",
                                                  "voxel_object_detection_3d",
                                                  "second_detector", "configs", "tanet",
                                                  "ped_cycle",
                                                  "test_short.proto")
        cls.config_pointpillars_car = os.path.join(
            ".", "src", "opendr", "perception", "object_detection_3d",
            "voxel_object_detection_3d", "second_detector", "configs", "pointpillars",
            "car", "test_short.proto")
        cls.config_pointpillars_ped_cycle = os.path.join(
            ".", "src", "opendr", "perception", "object_detection_3d",
            "voxel_object_detection_3d", "second_detector", "configs", "pointpillars",
            "ped_cycle", "test_short.proto")
        cls.subsets_path = os.path.join(
            ".", "src", "opendr", "perception", "object_detection_3d",
            "datasets", "nano_kitti_subsets")
        # Short model keys -> downloadable pretrained model names
        cls.download_model_names = {
            "tanet_car": "tanet_car_xyres_16",
            "tanet_ped_cycle": "tanet_ped_cycle_xyres_16",
            "pointpillars_car": "pointpillars_car_xyres_16",
            "pointpillars_ped_cycle": "pointpillars_ped_cycle_xyres_16",
        }
        cls.all_configs = {
            "tanet_car": cls.config_tanet_car,
            "tanet_ped_cycle": cls.config_tanet_ped_cycle,
            "pointpillars_car": cls.config_pointpillars_car,
            "pointpillars_ped_cycle": cls.config_pointpillars_ped_cycle,
        }
        # Only these two configs are exercised by the tests below
        cls.car_configs = {
            "tanet_car": cls.config_tanet_car,
            "pointpillars_car": cls.config_pointpillars_car,
        }
        cls.dataset_path = KittiDataset.download_nano_kitti(
            cls.temp_dir, True, cls.subsets_path
        ).path
        print("Dataset downloaded", file=sys.stderr)
        for model_name in cls.download_model_names.values():
            VoxelObjectDetection3DLearner.download(
                model_name, cls.temp_dir
            )
        print("Models downloaded", file=sys.stderr)

    @classmethod
    def tearDownClass(cls):
        """Delete the downloaded dataset, models, and training artifacts."""
        # Clean up downloaded files
        rmdir(os.path.join(cls.temp_dir))
        pass

    def test_fit(self):
        """Fitting on a KittiDataset must update the model parameters."""
        def test_model(name, config):
            print("Fit", name, "start", file=sys.stderr)
            model_path = os.path.join(self.temp_dir, "test_fit_" + name)
            dataset = KittiDataset(self.dataset_path, self.subsets_path)
            learner = VoxelObjectDetection3DLearner(
                model_config_path=config, device=DEVICE,
                checkpoint_after_iter=2,
            )
            # Snapshot the first parameter tensor before/after training
            starting_param = list(learner.model.parameters())[0].clone()
            learner.fit(
                dataset,
                model_dir=model_path,
                verbose=True,
                evaluate=False,
            )
            new_param = list(learner.model.parameters())[0].clone()
            self.assertFalse(torch.equal(starting_param, new_param))
            del learner
            print("Fit", name, "ok", file=sys.stderr)
        for name, config in self.car_configs.items():
            test_model(name, config)

    def test_fit_iterator(self):
        """Fitting from dataset iterators must update the model parameters."""
        def test_model(name, config):
            print("Fit iterator", name, "start", file=sys.stderr)
            model_path = os.path.join(self.temp_dir, "test_fit_iterator_" + name)
            dataset = LabeledPointCloudsDatasetIterator(
                self.dataset_path + "/training/velodyne_reduced",
                self.dataset_path + "/training/label_2",
                self.dataset_path + "/training/calib",
            )
            val_dataset = LabeledPointCloudsDatasetIterator(
                self.dataset_path + "/training/velodyne_reduced",
                self.dataset_path + "/training/label_2",
                self.dataset_path + "/training/calib",
            )
            learner = VoxelObjectDetection3DLearner(
                model_config_path=config, device=DEVICE,
                checkpoint_after_iter=90,
            )
            starting_param = list(learner.model.parameters())[0].clone()
            learner.fit(
                dataset,
                val_dataset=val_dataset,
                model_dir=model_path,
                evaluate=False,
            )
            new_param = list(learner.model.parameters())[0].clone()
            self.assertFalse(torch.equal(starting_param, new_param))
            del learner
            print("Fit iterator", name, "ok", file=sys.stderr)
        for name, config in self.car_configs.items():
            test_model(name, config)

    def test_eval(self):
        """Evaluating a pretrained model must give a plausible bbox mAP."""
        def test_model(name, config):
            print("Eval", name, "start", file=sys.stderr)
            model_path = os.path.join(self.temp_dir, self.download_model_names[name])
            dataset = KittiDataset(self.dataset_path, self.subsets_path)
            learner = VoxelObjectDetection3DLearner(model_config_path=config, device=DEVICE)
            learner.load(model_path)
            mAPbbox, mAPbev, mAP3d, mAPaos = learner.eval(dataset, count=2)
            # Loose sanity bounds on the first 2D bbox mAP entry
            self.assertTrue(mAPbbox[0][0][0] > 1 and mAPbbox[0][0][0] < 95, msg=mAPbbox[0][0][0])
            del learner
            print("Eval", name, "ok", file=sys.stderr)
        for name, config in self.car_configs.items():
            test_model(name, config)

    def test_infer(self):
        """Inference must handle both a single point cloud and a batch."""
        def test_model(name, config):
            print("Infer", name, "start", file=sys.stderr)
            dataset = PointCloudsDatasetIterator(self.dataset_path + "/testing/velodyne_reduced")
            learner = VoxelObjectDetection3DLearner(
                model_config_path=config, device=DEVICE
            )
            result = learner.infer(
                dataset[0]
            )
            self.assertTrue(len(result) > 0)
            # A list input must yield one result per point cloud
            result = learner.infer(
                [dataset[0], dataset[1], dataset[2]]
            )
            self.assertTrue(len(result) == 3)
            self.assertTrue(len(result[0]) > 0)
            del learner
            print("Infer", name, "ok", file=sys.stderr)
        for name, config in self.car_configs.items():
            test_model(name, config)

    def test_save(self):
        """A saved model must load back with identical parameters."""
        def test_model(name, config):
            print("Save", name, "start", file=sys.stderr)
            model_path = os.path.join(self.temp_dir, "test_save_" + name)
            save_path = os.path.join(model_path, "save")
            learner = VoxelObjectDetection3DLearner(
                model_config_path=config, device=DEVICE
            )
            learner.save(save_path, True)
            starting_param_1 = list(learner.model.parameters())[0].clone()
            # A fresh learner starts with different (random) parameters...
            learner2 = VoxelObjectDetection3DLearner(
                model_config_path=config, device=DEVICE
            )
            starting_param_2 = list(learner2.model.parameters())[0].clone()
            learner2.load(save_path)
            # ...and must match the saved ones after loading
            new_param = list(learner2.model.parameters())[0].clone()
            self.assertFalse(torch.equal(starting_param_1, starting_param_2))
            self.assertTrue(torch.equal(starting_param_1, new_param))
            del learner
            del learner2
            print("Save", name, "ok", file=sys.stderr)
        for name, config in self.car_configs.items():
            test_model(name, config)

    def test_optimize(self):
        """An optimized (ONNX) model must infer and reload correctly."""
        def test_model(name, config):
            print("Optimize", name, "start", file=sys.stderr)
            model_path = os.path.join(self.temp_dir, "test_optimize_" + name)
            dataset = PointCloudsDatasetIterator(self.dataset_path + "/testing/velodyne_reduced")
            learner = VoxelObjectDetection3DLearner(
                model_config_path=config, device=DEVICE,
                temp_path=self.temp_dir
            )
            learner.optimize()
            result = learner.infer(
                dataset[0]
            )
            self.assertTrue(len(result) > 0)
            learner.save(model_path)
            learner2 = VoxelObjectDetection3DLearner(
                model_config_path=config, device=DEVICE
            )
            learner2.load(model_path, True)
            # Loading with optimized=True must restore the ONNX runtime session
            self.assertTrue(learner2.model.rpn_ort_session is not None)
            del learner
            del learner2
            print("Optimize", name, "ok", file=sys.stderr)
        for name, config in self.car_configs.items():
            test_model(name, config)
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
| 37.050676 | 102 | 0.577733 |
34c1772b54b9a0bb871ede4157051530b34321b5 | 4,863 | py | Python | huaweicloud-sdk-meeting/huaweicloudsdkmeeting/v1/model/search_corp_admins_response.py | wuchen-huawei/huaweicloud-sdk-python-v3 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | [
"Apache-2.0"
] | 1 | 2021-11-03T07:54:50.000Z | 2021-11-03T07:54:50.000Z | huaweicloud-sdk-meeting/huaweicloudsdkmeeting/v1/model/search_corp_admins_response.py | wuchen-huawei/huaweicloud-sdk-python-v3 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | [
"Apache-2.0"
] | null | null | null | huaweicloud-sdk-meeting/huaweicloudsdkmeeting/v1/model/search_corp_admins_response.py | wuchen-huawei/huaweicloud-sdk-python-v3 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
import pprint
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
class SearchCorpAdminsResponse(SdkResponse):
    """Response model for a paginated corporation-administrator search.

    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute names whose values are masked as "****" in to_dict()
    sensitive_list = []

    openapi_types = {
        'offset': 'int',
        'limit': 'int',
        'count': 'int',
        'data': 'list[QueryCorpAdminResultDTO]'
    }

    attribute_map = {
        'offset': 'offset',
        'limit': 'limit',
        'count': 'count',
        'data': 'data'
    }

    def __init__(self, offset=None, limit=None, count=None, data=None):
        """SearchCorpAdminsResponse - a model defined in huaweicloud sdk"""
        super(SearchCorpAdminsResponse, self).__init__()

        self._offset = None
        self._limit = None
        self._count = None
        self._data = None
        self.discriminator = None

        # Route each provided value through its property setter
        if offset is not None:
            self.offset = offset
        if limit is not None:
            self.limit = limit
        if count is not None:
            self.count = count
        if data is not None:
            self.data = data

    @property
    def offset(self):
        """Gets the offset of this SearchCorpAdminsResponse.

        Page start index, starting from 0.

        :return: The offset of this SearchCorpAdminsResponse.
        :rtype: int
        """
        return self._offset

    @offset.setter
    def offset(self, offset):
        """Sets the offset of this SearchCorpAdminsResponse.

        Page start index, starting from 0.

        :param offset: The offset of this SearchCorpAdminsResponse.
        :type: int
        """
        self._offset = offset

    @property
    def limit(self):
        """Gets the limit of this SearchCorpAdminsResponse.

        Number of items shown per page. Default: 10.

        :return: The limit of this SearchCorpAdminsResponse.
        :rtype: int
        """
        return self._limit

    @limit.setter
    def limit(self, limit):
        """Sets the limit of this SearchCorpAdminsResponse.

        Number of items shown per page. Default: 10.

        :param limit: The limit of this SearchCorpAdminsResponse.
        :type: int
        """
        self._limit = limit

    @property
    def count(self):
        """Gets the count of this SearchCorpAdminsResponse.

        Total number of results.

        :return: The count of this SearchCorpAdminsResponse.
        :rtype: int
        """
        return self._count

    @count.setter
    def count(self, count):
        """Sets the count of this SearchCorpAdminsResponse.

        Total number of results.

        :param count: The count of this SearchCorpAdminsResponse.
        :type: int
        """
        self._count = count

    @property
    def data(self):
        """Gets the data of this SearchCorpAdminsResponse.

        Results returned by the corporation-administrator query.

        :return: The data of this SearchCorpAdminsResponse.
        :rtype: list[QueryCorpAdminResultDTO]
        """
        return self._data

    @data.setter
    def data(self, data):
        """Sets the data of this SearchCorpAdminsResponse.

        Results returned by the corporation-administrator query.

        :param data: The data of this SearchCorpAdminsResponse.
        :type: list[QueryCorpAdminResultDTO]
        """
        self._data = data

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask sensitive attributes instead of exposing their values
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, SearchCorpAdminsResponse):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
6d0173b30e99a2b2f8938a6f7da5b502a83b046f | 9,182 | py | Python | corehq/messaging/scheduling/scheduling_partitioned/tests/test_dbaccessors_non_partitioned.py | dborowiecki/commcare-hq | f2f4fa67faec09040a98502f5657444075b63f2e | [
"BSD-3-Clause"
] | null | null | null | corehq/messaging/scheduling/scheduling_partitioned/tests/test_dbaccessors_non_partitioned.py | dborowiecki/commcare-hq | f2f4fa67faec09040a98502f5657444075b63f2e | [
"BSD-3-Clause"
] | null | null | null | corehq/messaging/scheduling/scheduling_partitioned/tests/test_dbaccessors_non_partitioned.py | dborowiecki/commcare-hq | f2f4fa67faec09040a98502f5657444075b63f2e | [
"BSD-3-Clause"
] | null | null | null | import uuid
from corehq.form_processor.tests.utils import only_run_with_non_partitioned_database
from corehq.messaging.scheduling.scheduling_partitioned.dbaccessors import (
get_alert_schedule_instance,
get_timed_schedule_instance,
save_alert_schedule_instance,
save_timed_schedule_instance,
delete_alert_schedule_instance,
delete_timed_schedule_instance,
get_active_schedule_instance_ids,
get_alert_schedule_instances_for_schedule,
get_timed_schedule_instances_for_schedule,
)
from corehq.messaging.scheduling.models import (
AlertSchedule,
TimedSchedule,
)
from corehq.messaging.scheduling.scheduling_partitioned.models import (
AlertScheduleInstance,
TimedScheduleInstance,
)
from corehq.util.exceptions import AccessRestricted
from datetime import datetime, date
from django.test import TestCase
@only_run_with_non_partitioned_database
class BaseSchedulingNontPartitionedDBAccessorsTest(TestCase):
    """Shared fixtures for non-partitioned scheduling dbaccessor tests."""

    @classmethod
    def setUpClass(cls):
        super(BaseSchedulingNontPartitionedDBAccessorsTest, cls).setUpClass()
        cls.domain = 'scheduling-non-partitioned-test'
        cls.db = 'default'

    @classmethod
    def _base_instance_kwargs(cls, schedule_instance_id, active):
        """Build the keyword arguments common to alert and timed instances."""
        return dict(
            schedule_instance_id=schedule_instance_id or uuid.uuid4(),
            domain=cls.domain,
            recipient_type='CommCareUser',
            recipient_id=uuid.uuid4().hex,
            current_event_num=0,
            schedule_iteration_num=1,
            next_event_due=datetime(2017, 3, 1),
            active=active,
        )

    @classmethod
    def make_alert_schedule_instance(cls, schedule_instance_id=None, schedule_id=None, active=True):
        kwargs = cls._base_instance_kwargs(schedule_instance_id, active)
        kwargs['alert_schedule_id'] = schedule_id or uuid.uuid4()
        return AlertScheduleInstance(**kwargs)

    @classmethod
    def make_timed_schedule_instance(cls, schedule_instance_id=None, schedule_id=None, active=True):
        kwargs = cls._base_instance_kwargs(schedule_instance_id, active)
        kwargs['timed_schedule_id'] = schedule_id or uuid.uuid4()
        kwargs['start_date'] = date(2017, 3, 1)
        return TimedScheduleInstance(**kwargs)
class TestSchedulingNonPartitionedDBAccessorsGetAndSave(BaseSchedulingNontPartitionedDBAccessorsTest):
    """Round-trip save / get behavior for schedule instance dbaccessors."""

    def tearDown(self):
        # Wipe everything this domain created so each test starts clean
        for model in (AlertScheduleInstance, TimedScheduleInstance):
            model.objects.using(self.db).filter(domain=self.domain).delete()

    def _count(self, model):
        """Return the number of rows of ``model`` in the test database."""
        return model.objects.using(self.db).count()

    def test_save_alert_schedule_instance(self):
        self.assertEqual(self._count(AlertScheduleInstance), 0)
        save_alert_schedule_instance(self.make_alert_schedule_instance())
        self.assertEqual(self._count(AlertScheduleInstance), 1)

    def test_save_timed_schedule_instance(self):
        self.assertEqual(self._count(TimedScheduleInstance), 0)
        save_timed_schedule_instance(self.make_timed_schedule_instance())
        self.assertEqual(self._count(TimedScheduleInstance), 1)

    def test_get_alert_schedule_instance(self):
        saved = self.make_alert_schedule_instance()
        save_alert_schedule_instance(saved)
        fetched = get_alert_schedule_instance(saved.schedule_instance_id)
        self.assertTrue(isinstance(fetched, AlertScheduleInstance))
        self.assertEqual(saved.schedule_instance_id, fetched.schedule_instance_id)

        with self.assertRaises(AlertScheduleInstance.DoesNotExist):
            get_alert_schedule_instance(uuid.uuid4())

    def test_get_timed_schedule_instance(self):
        saved = self.make_timed_schedule_instance()
        save_timed_schedule_instance(saved)
        fetched = get_timed_schedule_instance(saved.schedule_instance_id)
        self.assertTrue(isinstance(fetched, TimedScheduleInstance))
        self.assertEqual(saved.schedule_instance_id, fetched.schedule_instance_id)

        with self.assertRaises(TimedScheduleInstance.DoesNotExist):
            get_timed_schedule_instance(uuid.uuid4())
class TestSchedulingNonPartitionedDBAccessorsDeleteAndFilter(BaseSchedulingNontPartitionedDBAccessorsTest):
    """Deletion and filtering behavior for schedule instance dbaccessors.

    Each test starts from three alert and three timed instances: one with
    its own random schedule id, one sharing a fixed schedule id, and one
    inactive instance sharing that same schedule id.

    NOTE(review): ``assertItemsEqual`` exists only in Python 2's unittest;
    on Python 3 the equivalent is ``assertCountEqual`` — confirm which
    interpreter this suite targets.
    """

    @classmethod
    def setUpClass(cls):
        super(TestSchedulingNonPartitionedDBAccessorsDeleteAndFilter, cls).setUpClass()
        # Fixed ids shared across tests; instances are recreated per test
        cls.schedule_id1 = uuid.uuid4()
        cls.schedule_id2 = uuid.uuid4()
        cls.uuid1 = uuid.uuid4()
        cls.uuid2 = uuid.uuid4()
        cls.uuid3 = uuid.uuid4()
        cls.uuid4 = uuid.uuid4()
        cls.uuid5 = uuid.uuid4()
        cls.uuid6 = uuid.uuid4()

    def setUp(self):
        """Create and save the six fixture instances described on the class."""
        self.alert_instance1 = self.make_alert_schedule_instance(self.uuid1)
        save_alert_schedule_instance(self.alert_instance1)
        self.alert_instance2 = self.make_alert_schedule_instance(self.uuid2, schedule_id=self.schedule_id1)
        save_alert_schedule_instance(self.alert_instance2)
        self.alert_instance3 = self.make_alert_schedule_instance(self.uuid3, schedule_id=self.schedule_id1,
                                                                 active=False)
        save_alert_schedule_instance(self.alert_instance3)
        self.timed_instance1 = self.make_timed_schedule_instance(self.uuid4)
        save_timed_schedule_instance(self.timed_instance1)
        self.timed_instance2 = self.make_timed_schedule_instance(self.uuid5, schedule_id=self.schedule_id2)
        save_timed_schedule_instance(self.timed_instance2)
        self.timed_instance3 = self.make_timed_schedule_instance(self.uuid6, schedule_id=self.schedule_id2,
                                                                 active=False)
        save_timed_schedule_instance(self.timed_instance3)

    def tearDown(self):
        """Delete all schedule instances created for this domain."""
        AlertScheduleInstance.objects.using(self.db).filter(domain=self.domain).delete()
        TimedScheduleInstance.objects.using(self.db).filter(domain=self.domain).delete()

    def test_delete_alert_schedule_instance(self):
        """Deleting an alert instance removes only that row."""
        self.assertEqual(AlertScheduleInstance.objects.using(self.db).count(), 3)
        self.assertEqual(TimedScheduleInstance.objects.using(self.db).count(), 3)
        delete_alert_schedule_instance(self.alert_instance1)
        self.assertEqual(AlertScheduleInstance.objects.using(self.db).count(), 2)
        self.assertEqual(TimedScheduleInstance.objects.using(self.db).count(), 3)
        with self.assertRaises(AlertScheduleInstance.DoesNotExist):
            get_alert_schedule_instance(self.uuid1)

    def test_delete_timed_schedule_instance(self):
        """Deleting a timed instance removes only that row."""
        self.assertEqual(AlertScheduleInstance.objects.using(self.db).count(), 3)
        self.assertEqual(TimedScheduleInstance.objects.using(self.db).count(), 3)
        delete_timed_schedule_instance(self.timed_instance1)
        self.assertEqual(AlertScheduleInstance.objects.using(self.db).count(), 3)
        self.assertEqual(TimedScheduleInstance.objects.using(self.db).count(), 2)
        with self.assertRaises(TimedScheduleInstance.DoesNotExist):
            get_timed_schedule_instance(self.uuid4)

    def test_get_active_alert_schedule_instance_ids(self):
        """Only active alert instances due within the window are returned."""
        self.assertItemsEqual(
            get_active_schedule_instance_ids(
                AlertScheduleInstance,
                datetime(2017, 4, 1),
                due_after=datetime(2017, 2, 1),
            ),
            [(self.domain, self.alert_instance1.schedule_instance_id, self.alert_instance1.next_event_due),
             (self.domain, self.alert_instance2.schedule_instance_id, self.alert_instance2.next_event_due)]
        )
        # A window entirely before next_event_due matches nothing
        self.assertItemsEqual(
            get_active_schedule_instance_ids(
                AlertScheduleInstance,
                datetime(2016, 4, 1),
                due_after=datetime(2016, 2, 1),
            ),
            []
        )

    def test_get_active_timed_schedule_instance_ids(self):
        """Only active timed instances due within the window are returned."""
        self.assertItemsEqual(
            get_active_schedule_instance_ids(
                TimedScheduleInstance,
                datetime(2017, 4, 1),
                due_after=datetime(2017, 2, 1),
            ),
            [(self.domain, self.timed_instance1.schedule_instance_id, self.timed_instance1.next_event_due),
             (self.domain, self.timed_instance2.schedule_instance_id, self.timed_instance2.next_event_due)],
        )
        # A window entirely before next_event_due matches nothing
        self.assertItemsEqual(
            get_active_schedule_instance_ids(
                TimedScheduleInstance,
                datetime(2016, 4, 1),
                due_after=datetime(2016, 2, 1),
            ),
            []
        )

    def test_get_alert_schedule_instances_for_schedule(self):
        """Both active and inactive instances of a schedule are returned."""
        self.assertItemsEqual(
            get_alert_schedule_instances_for_schedule(AlertSchedule(schedule_id=self.schedule_id1)),
            [self.alert_instance2, self.alert_instance3]
        )

    def test_get_timed_schedule_instances_for_schedule(self):
        """Both active and inactive instances of a schedule are returned."""
        self.assertItemsEqual(
            get_timed_schedule_instances_for_schedule(TimedSchedule(schedule_id=self.schedule_id2)),
            [self.timed_instance2, self.timed_instance3]
        )
| 41.174888 | 108 | 0.716075 |
f9fd8c24782c7f9646f30326366160be9f853da1 | 22,304 | py | Python | src/bioregistry/compare.py | Adafede/bioregistry | 9b6a281354554005d985499f9d61aecb58a0a84c | [
"MIT"
] | null | null | null | src/bioregistry/compare.py | Adafede/bioregistry | 9b6a281354554005d985499f9d61aecb58a0a84c | [
"MIT"
] | null | null | null | src/bioregistry/compare.py | Adafede/bioregistry | 9b6a281354554005d985499f9d61aecb58a0a84c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""This script compares what's in each resource."""
import datetime
import itertools as itt
import logging
import math
import random
import sys
import typing
from collections import Counter, defaultdict
from typing import Collection, List, Mapping, Set, Tuple
import click
import bioregistry
from bioregistry import (
get_contact_email,
get_description,
get_example,
get_external,
get_homepage,
get_json_download,
get_license,
get_name,
get_obo_download,
get_owl_download,
get_pattern,
get_uri_format,
get_version,
is_deprecated,
manager,
read_registry,
)
from bioregistry.constants import DOCS_IMG
from bioregistry.license_standardizer import standardize_license
from bioregistry.schema import Resource
logger = logging.getLogger(__name__)

# Color used for the Bioregistry side of every Venn diagram;
# see named colors https://matplotlib.org/stable/gallery/color/named_colors.html
BIOREGISTRY_COLOR = "silver"
def _get_has(func, yes: str = "Yes", no: str = "No") -> Counter:
    """Count non-deprecated prefixes by whether ``func`` gives a value."""
    labels = (
        yes if func(prefix) is not None else no
        for prefix in read_registry()
        if not is_deprecated(prefix)
    )
    return Counter(labels)
# Count non-deprecated prefixes whose Wikidata mapping includes a
# "database" entry.
# NOTE(review): registry keys are presumably never None, so the "No"
# branch looks unreachable — confirm intent.
HAS_WIKIDATA_DATABASE = Counter(
    "No" if key is None else "Yes"
    for key in read_registry()
    if not is_deprecated(key) and "database" in get_external(key, "wikidata")
)
def _get_has_present(func) -> Counter:
    """Count the distinct truthy values of ``func`` across the registry."""
    # filter(None, ...) drops falsy values, matching the original `if x`
    return Counter(filter(None, (func(prefix) for prefix in read_registry())))
# Default (width, height) in inches for single-panel figures
SINGLE_FIG = (8, 3.5)
# Date stamp baked into the chart watermark at import time
TODAY = datetime.datetime.today().strftime("%Y-%m-%d")
WATERMARK_TEXT = f"https://github.com/biopragmatics/bioregistry ({TODAY})"
def _save(fig, name: str, *, svg: bool = True, png: bool = False, eps: bool = False) -> None:
    """Write ``fig`` under DOCS_IMG in the requested formats, then close it."""
    import matplotlib.pyplot as plt

    stub = DOCS_IMG.joinpath(name)
    path = stub.with_suffix(".svg")
    click.echo(f"output to {path}")
    fig.tight_layout()
    if svg:
        fig.savefig(path)
    if eps:
        fig.savefig(stub.with_suffix(".eps"))
    if png:
        # Higher DPI so raster output stays crisp in the docs
        fig.savefig(stub.with_suffix(".png"), dpi=300)
    plt.close(fig)
def _plot_attribute_pies(*, measurements, watermark, ncols: int = 4, keep_ontology: bool = True):
    """Draw one pie chart per (label, Counter) measurement on a grid.

    Yes/no measurements are plotted relative to the full registry size;
    the "License Type" measurement plots its full value distribution.
    Set ``keep_ontology=False`` to drop the OWL/JSON/OBO panels.
    Returns the matplotlib ``(fig, axes)`` pair.
    """
    import matplotlib.pyplot as plt

    if not keep_ontology:
        measurements = [
            (label, counter)
            for label, counter in measurements
            if label not in {"OWL", "JSON", "OBO"}
        ]
    nrows = int(math.ceil(len(measurements) / ncols))
    figsize = (2.75 * ncols, 2.0 * nrows)
    fig, axes = plt.subplots(ncols=ncols, nrows=nrows, figsize=figsize)
    # zip_longest pads with (None, None) so surplus grid cells get hidden
    for (label, counter), ax in itt.zip_longest(measurements, axes.ravel(), fillvalue=(None, None)):
        if label is None:
            ax.axis("off")
            continue
        if label == "License Type":
            labels, sizes = zip(*counter.most_common())
            explode = None
        else:
            labels = ("Yes", "No")
            # NOTE(review): counter.get("Yes") is None when no entry has the
            # attribute — confirm callers guarantee at least one "Yes".
            n_yes = counter.get("Yes")
            sizes = (n_yes, len(read_registry()) - n_yes)
            explode = [0.1, 0]
        ax.pie(
            sizes,
            labels=labels,
            autopct="%1.f%%",
            startangle=30,
            explode=explode,
        )
        ax.set_title(label)
    if watermark:
        fig.text(
            0.5,
            0,
            WATERMARK_TEXT,
            fontsize=8,
            color="gray",
            alpha=0.5,
            ha="center",
            va="bottom",
        )
    return fig, axes
# Keys of the inner dictionaries built by make_overlaps():
# "x" holds the (remapped) Bioregistry prefixes, "y" the external prefixes
REMAPPED_KEY = "x"
REMAPPED_VALUE = "y"
def make_overlaps(keys) -> Mapping[str, Mapping[str, Set[str]]]:
    """Make the overlaps dictionary.

    For each (metaprefix, label, color, prefixes) tuple, pair the
    Bioregistry's prefixes (remapped to the external registry's own
    prefixes where a mapping exists) with the external prefix set.
    """
    overlaps = {}
    for metaprefix, _, _, external_prefixes in keys:
        # Remap bioregistry prefixes to match the external
        # vocabulary, when possible
        remapped = {
            bioregistry.get_external(prefix, metaprefix).get("prefix", prefix)
            for prefix in bioregistry.read_registry()
        }
        overlaps[metaprefix] = {
            REMAPPED_KEY: remapped,
            REMAPPED_VALUE: external_prefixes,
        }
    return overlaps
def _plot_coverage(*, keys, overlaps, watermark, ncols: int = 3):
    """Draw one Bioregistry-vs-external Venn diagram per registry.

    ``keys`` holds (metaprefix, label, color, prefixes) tuples and
    ``overlaps`` is the mapping produced by :func:`make_overlaps`.
    Returns the matplotlib ``(fig, axes)`` pair.
    """
    import matplotlib.pyplot as plt
    from matplotlib_venn import venn2

    nrows = int(math.ceil(len(keys) / ncols))
    figsize = (3.25 * ncols, 2.0 * nrows)
    fig, axes = plt.subplots(ncols=ncols, nrows=nrows, figsize=figsize)
    # zip_longest pads with None so surplus grid cells get hidden
    for key, ax in itt.zip_longest(keys, axes.ravel()):
        if key is None:
            ax.axis("off")
            continue
        key, label, color, prefixes = key
        bioregistry_remapped = overlaps[key][REMAPPED_KEY]
        venn2(
            subsets=(bioregistry_remapped, prefixes),
            set_labels=("Bioregistry", label),
            set_colors=(BIOREGISTRY_COLOR, color),
            ax=ax,
        )
    if watermark:
        fig.text(
            0.5,
            0,
            WATERMARK_TEXT,
            fontsize=8,
            color="gray",
            alpha=0.5,
            ha="center",
            va="bottom",
        )
    return fig, axes
def _plot_external_overlap(*, keys, watermark, ncols: int = 4):
    """Draw pairwise Venn diagrams between the external registries.

    ``keys`` holds (metaprefix, label, color, prefixes) tuples; each
    pair of registries gets one diagram. Returns ``(fig, axes)``.
    """
    import matplotlib.pyplot as plt
    from matplotlib_venn import venn2

    pairs = list(itt.combinations(keys, r=2))
    nrows = int(math.ceil(len(pairs) / ncols))
    figsize = (3 * ncols, 2.5 * nrows)
    fig, axes = plt.subplots(ncols=ncols, nrows=nrows, figsize=figsize)
    # zip_longest pads with None so surplus grid cells get hidden
    for pair, ax in itt.zip_longest(pairs, axes.ravel()):
        if pair is None:
            ax.axis("off")
            continue
        (l_key, l_label, l_color, l_prefixes), (r_key, r_label, r_color, r_prefixes) = pair
        # Remap external vocabularies to bioregistry
        # prefixes, when possible
        # NOTE(review): _remap is defined elsewhere in this module
        l_prefixes = _remap(key=l_key, prefixes=l_prefixes)
        r_prefixes = _remap(key=r_key, prefixes=r_prefixes)
        venn2(
            subsets=(l_prefixes, r_prefixes),
            set_labels=(l_label, r_label),
            set_colors=(l_color, r_color),
            ax=ax,
        )
    if watermark:
        fig.text(
            0.5,
            0,
            WATERMARK_TEXT,  # transform=plt.gca().transAxes,
            fontsize=14,
            color="gray",
            alpha=0.5,
            ha="center",
            va="bottom",
        )
    return fig, axes
def get_getters():
    """Get getter functions, which requires alignment dependencies.

    Exits the process with status 1 when the ``align`` extra is missing.
    """
    try:
        from bioregistry.external import GETTERS
    except ImportError:
        click.secho(
            "Could not import alignment dependencies."
            " Install bioregistry again with `pip install bioregistry[align]`.",
            fg="red",
        )
        return sys.exit(1)
    return GETTERS
def get_keys() -> List[Tuple[str, str, str, Set[str]]]:
    """Get (metaprefix, label, color, prefixes) tuples for plots.

    One tuple per external registry, with a color drawn from the
    seaborn "Paired" palette and a set of that registry's prefixes.
    """
    getters = get_getters()
    # The original wrapped this import in `try: ... except ImportError:
    # raise`, a no-op handler: a bare import raises the same ImportError
    # with the same traceback, so the dead try/except is removed.
    import seaborn as sns

    palette = sns.color_palette("Paired", len(getters))
    return [
        (metaprefix, label, color, set(func(force_download=False)))
        for (metaprefix, label, func), color in zip(getters, palette)
    ]
@click.command()
@click.option("--paper", is_flag=True)
def compare(paper: bool):  # noqa:C901
    """Compare the registries.

    Generates the full suite of comparison charts (licenses, attribute
    coverage, registry overlap, xref and provider histograms, and the
    regex complexity report).

    :param paper: if given, additionally export EPS/PNG variants used
        in the manuscript.
    """
    # FIX: removed a leftover ``paper = True`` debug override that made the
    # ``--paper`` CLI flag a no-op.
    random.seed(0)
    try:
        import matplotlib.pyplot as plt
        import pandas as pd
        import seaborn as sns
    except ImportError:
        click.secho(
            "Could not import matplotlib dependencies."
            " Install bioregistry again with `pip install bioregistry[charts]`.",
            fg="red",
        )
        return sys.exit(1)

    keys = get_keys()
    overlaps = make_overlaps(keys)

    # This should make SVG output deterministic
    # See https://matplotlib.org/3.1.0/users/prev_whats_new/whats_new_2.0.0.html#added-svg-hashsalt-key-to-rcparams
    plt.rcParams["svg.hashsalt"] = "saltyregistry"

    watermark = True
    sns.set_style("white")

    ###############################################
    # What kinds of licenses are resources using? #
    ###############################################
    licenses, conflicts, obo_has_license, ols_has_license = _get_license_and_conflicts()

    # How many times does the license appear in OLS / OBO Foundry
    # fig, ax = plt.subplots(figsize=SINGLE_FIG)
    # venn2(
    #     subsets=(obo_has_license, ols_has_license),
    #     set_labels=("OBO", "OLS"),
    #     set_colors=("red", "green"),
    #     ax=ax,
    # )
    # if watermark:
    #     ax.text(
    #         0.5,
    #         -0.1,
    #         WATERMARK_TEXT,
    #         transform=plt.gca().transAxes,
    #         fontsize=10,
    #         color="gray",
    #         alpha=0.5,
    #         ha="center",
    #         va="bottom",
    #     )
    # _save(fig, name="license_coverage", eps=paper)

    fig, ax = plt.subplots(figsize=SINGLE_FIG)
    licenses_counter: typing.Counter[str] = Counter(licenses)
    # Collapse rare licenses (<=30 occurrences) into an "Other" bucket
    licenses_mapped = [
        "None" if license_ is None else license_ if licenses_counter[license_] > 30 else "Other"
        for license_ in licenses
    ]
    licenses_mapped_counter = Counter(licenses_mapped)
    licenses_mapped_order = [license_ for license_, _ in licenses_mapped_counter.most_common()]
    sns.countplot(x=licenses_mapped, ax=ax, order=licenses_mapped_order)
    ax.set_xlabel("License")
    ax.set_ylabel("Count")
    ax.set_yscale("log")
    if watermark:
        fig.text(
            1.0,
            0.5,
            WATERMARK_TEXT,
            fontsize=8,
            color="gray",
            alpha=0.5,
            ha="right",
            va="center",
            rotation=90,
        )
    _save(fig, name="licenses", eps=paper)

    ##############################################
    # How many entries have version information? #
    ##############################################
    measurements = [
        ("Name", _get_has(get_name)),
        ("Homepage", _get_has(get_homepage)),
        ("Description", _get_has(get_description)),
        ("Example", _get_has(get_example)),
        ("Pattern", _get_has(get_pattern)),
        ("Provider", _get_has(get_uri_format)),
        ("License", _get_has(get_license)),
        ("License Type", _get_has_present(get_license)),
        ("Version", _get_has(get_version)),
        ("Contact Email", _get_has(get_contact_email)),
        ("Wikidata Database", HAS_WIKIDATA_DATABASE),
        ("OBO", _get_has(get_obo_download)),
        ("OWL", _get_has(get_owl_download)),
        ("JSON", _get_has(get_json_download)),
    ]
    fig, axes = _plot_attribute_pies(measurements=measurements, watermark=watermark)
    _save(fig, "has_attribute", eps=paper)

    # Slightly reorganized for the paper
    if paper:
        fig, axes = _plot_attribute_pies(
            measurements=measurements, watermark=watermark, keep_ontology=False
        )
        _save(fig, "paper_figure_3", png=True, eps=True)

    # -------------------------------------------------------------------- #

    ############################################################
    # How well does the Bioregistry cover the other resources? #
    ############################################################
    fig, axes = _plot_coverage(keys=keys, overlaps=overlaps, watermark=watermark)
    _save(fig, name="bioregistry_coverage", eps=paper)
    plot_coverage_bar_abridged(overlaps=overlaps, paper=paper)
    # NOTE: the full coverage bar chart is always rendered in paper mode
    plot_coverage_bar(overlaps=overlaps, paper=True)

    ######################################################
    # What's the overlap between each pair of resources? #
    ######################################################
    fig, axes = _plot_external_overlap(keys=keys, watermark=watermark)
    _save(fig, name="external_overlap", eps=paper)

    ##############################################
    # Histogram of how many xrefs each entry has #
    ##############################################
    xref_counts = [
        sum(0 < len(entry.get_external(key)) for key, *_ in keys)
        for entry in read_registry().values()
    ]
    fig, ax = plt.subplots(figsize=SINGLE_FIG)
    xrefs_counter: typing.Counter[int] = Counter(xref_counts)
    n_mappable_metaprefixes = len(
        {
            metaprefix
            for entry in read_registry().values()
            for metaprefix in (entry.get_mappings() or {})
        }
    )
    zero_pad_count = 0  # how many columns left from the end should it go
    for i in range(n_mappable_metaprefixes):
        if i not in xrefs_counter:
            zero_pad_count += 1
            xrefs_counter[i] = 0
    xrefs_df = pd.DataFrame(sorted(xrefs_counter.items()), columns=["frequency", "count"])

    # First bar (zero mappings = novel prefixes) gets a distinct color
    palette = sns.color_palette("tab10")
    xrefs_colors = [palette[2]] + ([palette[1]] * (len(xrefs_df.index) - 1))

    sns.barplot(
        data=xrefs_df,
        x="frequency",
        y="count",
        ci=None,
        palette=xrefs_colors,
        alpha=1.0,
        ax=ax,
    )
    # There should only be one container here
    _labels = xrefs_df["count"].to_list()
    _labels[0] = f"{_labels[0]}\nNovel"
    for i in ax.containers:
        ax.bar_label(i, _labels)
    ax.set_xlabel("Number Cross-Registry Mappings")
    ax.set_ylabel("Number Prefixes")
    ax.set_yscale("log")
    ax.spines["top"].set_visible(False)
    ax.spines["right"].set_visible(False)

    h = 15  # how high should the text go
    x1, _y1 = ax.patches[-zero_pad_count].get_xy()
    x2, _y2 = ax.patches[-1].get_xy()
    ax.text(
        x1,
        h + 1,
        "No prefixes are available\nin $\\it{all}$ mappable external\nregistries",
        horizontalalignment="center",
        verticalalignment="bottom",
        fontdict=dict(fontsize=12),
    )
    ax.arrow(x1, h, x2 - x1, 2 - h, head_width=0.3, head_length=0.2, fc="k", ec="k")
    if watermark:
        fig.text(
            1.0,
            0.5,
            WATERMARK_TEXT,
            fontsize=8,
            color="gray",
            alpha=0.5,
            ha="right",
            va="center",
            rotation=90,
        )

    offset = 0.6
    ax.set_xlim([-offset, len(ax.patches) - (1 + offset)])
    _save(fig, name="xrefs", eps=paper, png=paper)

    ##################################################
    # Histogram of how many providers each entry has #
    ##################################################
    provider_counts = [_count_providers(resource) for resource in read_registry().values()]
    fig, ax = plt.subplots(figsize=SINGLE_FIG)
    sns.barplot(
        data=sorted(Counter(provider_counts).items()), ci=None, color="blue", alpha=0.4, ax=ax
    )
    ax.set_xlabel("Number Providers")
    ax.set_ylabel("Count")
    ax.set_yscale("log")
    if watermark:
        fig.text(
            1.0,
            0.5,
            WATERMARK_TEXT,
            fontsize=8,
            color="gray",
            alpha=0.5,
            ha="right",
            va="center",
            rotation=90,
        )
    _save(fig, name="providers", eps=paper)

    ########################################
    # Regular expression complexity report #
    ########################################
    g = sns.displot(x=get_regex_complexities(), log_scale=2, height=3, aspect=4 / 3)
    g.set(xlabel="Regular Expression Complexity")
    _save(g.figure, name="regex_report", eps=paper)
def _count_providers(resource: Resource) -> int:
    """Count how many URI providers are available for the resource."""
    # The primary URI prefix counts as one provider, if present
    count = 1 if resource.get_uri_prefix() else 0
    return count + len(resource.get_extra_providers())
def _get_license_and_conflicts():
    """Collect license information from the OBO Foundry and OLS registries.

    :returns: a 4-tuple of:
        1. a list of license labels (conflicting prefixes contribute both)
        2. the set of prefixes whose OBO and OLS licenses conflict
        3. the set of prefixes with an OBO Foundry license
        4. the set of prefixes with an OLS license
    """
    licenses = []
    conflicts = set()
    obo_has_license, ols_has_license = set(), set()
    for key in read_registry():
        obo_license = standardize_license(get_external(key, "obofoundry").get("license"))
        if obo_license:
            obo_has_license.add(key)
        ols_license = standardize_license(get_external(key, "ols").get("license"))
        if ols_license:
            ols_has_license.add(key)
        if not obo_license and not ols_license:
            # FIX: this branch is now part of the chain below. Previously it
            # fell through to the ``obo_license == ols_license`` comparison
            # (None == None is true), double-counting unlicensed entries.
            licenses.append("None")
        elif obo_license and not ols_license:
            licenses.append(obo_license)
        elif not obo_license and ols_license:
            licenses.append(ols_license)
        elif obo_license == ols_license:
            licenses.append(obo_license)
        else:  # different licenses!
            licenses.append(ols_license)
            licenses.append(obo_license)
            conflicts.add(key)
            # logger.warning(f"[{key}] Conflicting licenses- {obo_license} and {ols_license}")
    return licenses, conflicts, obo_has_license, ols_has_license
def _remap(*, key: str, prefixes: Collection[str]) -> Set[str]:
    """Map external prefixes onto Bioregistry prefixes where one exists.

    Prefixes with no known mapping are passed through unchanged.
    """
    external_to_bioregistry = {}
    for bioregistry_id, resource in read_registry().items():
        external_prefix = (resource.dict().get(key) or {}).get("prefix")
        if external_prefix:
            external_to_bioregistry[external_prefix] = bioregistry_id
    return {
        external_to_bioregistry.get(prefix, prefix)
        for prefix in prefixes
    }
def get_regex_complexities() -> Collection[float]:
    """Get a sorted list of regular expression complexities."""
    complexities = []
    for prefix in manager.registry:
        pattern = manager.get_pattern(prefix)
        if pattern is not None:
            # Consider alternate complexity estimates
            complexities.append(float(len(pattern)))
    return sorted(complexities)
def plot_coverage_bar_abridged(*, overlaps, paper: bool = False):
    """Plot and save the abridged coverage bar chart.

    :param overlaps: mapping of metaprefix to overlap data, with the
        remapped bioregistry/external prefix sets under REMAPPED_KEY and
        REMAPPED_VALUE respectively
    :param paper: if true, also export an EPS variant via ``_save``
    """
    import matplotlib.pyplot as plt
    import pandas as pd
    import seaborn as sns
    sns.set_style("white")
    # One row per external registry: (label, external-only, intersection)
    rows = []
    for metaprefix, data in overlaps.items():
        br, external = data[REMAPPED_KEY], data[REMAPPED_VALUE]
        rows.append(
            (
                bioregistry.get_registry_short_name(metaprefix),
                len(external - br),
                len(br.intersection(external)),
            )
        )
    # Sort registries by total size, largest first
    rows = sorted(rows, key=lambda row: sum(row[1:]), reverse=True)
    df2 = pd.DataFrame(rows, columns=["metaprefix", "external_only", "intersection"])
    df2.set_index("metaprefix", inplace=True)
    fig, ax = plt.subplots(1, 1, figsize=(10, 7))
    # Stacked horizontal bars: external-only segment then intersection
    df2.plot(
        kind="barh",
        stacked=True,
        ax=ax,
        width=0.85,
        fontsize=14,
        grid=False,
    )
    # Strip all chart furniture except the y-axis registry labels
    ax.grid(False)
    ax.set_ylabel("")
    ax.set_xticks([])
    ax.spines["top"].set_visible(False)
    ax.spines["right"].set_visible(False)
    ax.spines["bottom"].set_visible(False)
    ax.spines["left"].set_visible(False)
    ax.tick_params(
        top=False, bottom=False, left=False, right=False, labelleft=True, labelbottom=False
    )
    # Group bar segments by row (keyed on y-position and height) so the
    # per-row coverage annotation can be computed after labeling segments
    dd = defaultdict(list)
    for p in ax.patches:
        width, height = p.get_width(), p.get_height()
        x, y = p.get_xy()
        dd[y, height].append((width, x))
        # Skip in-bar count labels for segments too narrow to hold text
        if width < 20:
            continue
        ax.text(
            x + width / 2,
            y + height / 2,
            f"{int(width):,}",
            horizontalalignment="center",
            verticalalignment="center",
            fontdict=dict(weight="bold", color="white", fontsize=12),
        )
    for (y, height), values in dd.items():
        # Fraction covered = last segment (intersection) over the row total
        width_total = sum(int(w) for w, _ in values)
        percentage = values[-1][0] / width_total
        # Rightmost segment determines where the annotation is placed
        width, x = max(values, key=lambda item: item[1])
        ax.text(
            width + x + 20,
            y + height / 2,
            f"{percentage:.1%} coverage",
            fontdict=dict(weight="normal", color="black", fontsize=12),
            verticalalignment="center",
        )
    ax.get_legend().remove()
    plt.tight_layout()
    _save(fig, name="bioregistry_coverage_bar_short", eps=paper)
def plot_coverage_bar(*, overlaps, paper: bool = False):
    """Plot and save the coverage bar chart.

    Unlike the abridged variant, each row also includes a third segment
    for prefixes that are only in the Bioregistry.

    :param overlaps: mapping of metaprefix to overlap data, with the
        remapped bioregistry/external prefix sets under REMAPPED_KEY and
        REMAPPED_VALUE respectively
    :param paper: if true, also export an EPS variant via ``_save``
    """
    import matplotlib.pyplot as plt
    import pandas as pd
    import seaborn as sns
    sns.set_style("white")
    # One row per registry: (label, external-only, intersection, br-only)
    rows_1 = []
    for metaprefix, data in overlaps.items():
        br, external = data[REMAPPED_KEY], data[REMAPPED_VALUE]
        rows_1.append(
            (
                bioregistry.get_registry_short_name(metaprefix),
                len(external - br),
                len(br.intersection(external)),
                len(br - external),
            )
        )
    # Sort registries by total size, largest first
    rows_1 = sorted(rows_1, key=lambda row: sum(row[1:]), reverse=True)
    df1 = pd.DataFrame(
        rows_1, columns=["metaprefix", "external_only", "intersection", "bioregistry_only"]
    )
    df1.set_index("metaprefix", inplace=True)
    fig, ax = plt.subplots(1, 1, figsize=(10, 7))
    # Stacked horizontal bars, one segment per column above
    df1.plot(
        kind="barh",
        stacked=True,
        ax=ax,
        width=0.85,
        fontsize=14,
        grid=False,
    )
    # Strip all chart furniture except the y-axis registry labels
    ax.set_ylabel("")
    ax.set_xticks([])
    ax.grid(False)
    ax.spines["top"].set_visible(False)
    ax.spines["right"].set_visible(False)
    ax.spines["bottom"].set_visible(False)
    ax.spines["left"].set_visible(False)
    ax.tick_params(
        top=False, bottom=False, left=False, right=False, labelleft=True, labelbottom=False
    )
    # Group bar segments by row (keyed on y-position and height) so totals
    # and the relative increase can be annotated after labeling segments
    dd = defaultdict(list)
    for p in ax.patches:
        width, height = p.get_width(), p.get_height()
        x, y = p.get_xy()
        dd[y, height].append((width, x))
        # Skip in-bar count labels for segments too narrow to hold text
        if width < 40:
            continue
        ax.text(
            x + width / 2,
            y + height / 2,
            f"{int(width):,}",
            horizontalalignment="center",
            verticalalignment="center",
            fontdict=dict(weight="bold", color="white", fontsize=12),
        )
    for (y, height), values in dd.items():
        # Increase = contribution of the last (bioregistry-only) segment
        # relative to the external + intersection total
        width_total = sum(int(w) for w, _ in values)
        without_br = sum(int(w) for w, _ in values[:-1])
        increase = (width_total - without_br) / without_br
        # Rightmost segment determines where the annotation is placed
        width, x = max(values, key=lambda item: item[1])
        ax.text(
            width + x + 20,
            y + height / 2,
            f"{int(width_total):,} (+{increase:,.0%})",
            fontdict=dict(weight="normal", color="black", fontsize=12),
            verticalalignment="center",
        )
    for label in ax.get_yticklabels():
        label.set_fontweight("bold")
    ax.get_legend().remove()
    plt.tight_layout()
    _save(fig, name="bioregistry_coverage_bar", eps=paper, png=True)
if __name__ == "__main__":
    # Allow running this module directly as the CLI entrypoint.
    compare()
| 31.414085 | 115 | 0.573485 |
e9d5d2b404d4045ed25e6fe30e6496f58c949b30 | 7,475 | py | Python | monitor/core/taps/bgpstreamkafka.py | kruisdraad/artemis | 426e4d2592f8175aafffc8b727ed06e4e16d50be | [
"TCL",
"BSD-3-Clause",
"PostgreSQL"
] | null | null | null | monitor/core/taps/bgpstreamkafka.py | kruisdraad/artemis | 426e4d2592f8175aafffc8b727ed06e4e16d50be | [
"TCL",
"BSD-3-Clause",
"PostgreSQL"
] | null | null | null | monitor/core/taps/bgpstreamkafka.py | kruisdraad/artemis | 426e4d2592f8175aafffc8b727ed06e4e16d50be | [
"TCL",
"BSD-3-Clause",
"PostgreSQL"
] | null | null | null | import argparse
import os
import time
import _pybgpstream
import redis
from kombu import Connection
from kombu import Exchange
from kombu import Producer
from netaddr import IPAddress
from netaddr import IPNetwork
from utils import get_logger
from utils import key_generator
from utils import load_json
from utils import mformat_validator
from utils import normalize_msg_path
from utils import ping_redis
from utils import RABBITMQ_URI
from utils import REDIS_HOST
from utils import REDIS_PORT
# install as described in https://bgpstream.caida.org/docs/install/pybgpstream
START_TIME_OFFSET = 3600  # seconds; how far back in time the live stream starts
log = get_logger()
# NOTE(review): this rebinding shadows the imported ``redis`` module with a
# client instance; the module object is not used again afterwards, but
# renaming the client would be clearer -- confirm before refactoring.
redis = redis.Redis(host=REDIS_HOST, port=REDIS_PORT)
# Fallback TTL (seconds) for the liveness key set on each observed update
DEFAULT_MON_TIMEOUT_LAST_BGP_UPDATE = 60 * 60
def run_bgpstream(
    prefixes_file=None,
    kafka_host=None,
    kafka_port=None,
    kafka_topic="openbmp.bmp_raw",
    start=0,
    end=0,
):
    """
    Retrieve all records related to a list of prefixes
    https://bgpstream.caida.org/docs/api/pybgpstream/_pybgpstream.html
    :param prefixes_file: <str> input prefix json
    :param kafka_host: <str> kafka host
    :param kafka_port: <int> kafka_port
    :param kafka_topic: <str> kafka topic
    :param start: <int> start timestamp in UNIX epochs
    :param end: <int> end timestamp in UNIX epochs (if 0 --> "live mode")
    :return: -
    """
    prefixes = load_json(prefixes_file)
    assert prefixes is not None

    # create a new bgpstream instance and a reusable bgprecord instance
    stream = _pybgpstream.BGPStream()

    # set kafka data interface
    stream.set_data_interface("kafka")

    # set host connection details
    stream.set_data_interface_option(
        "kafka", "brokers", "{}:{}".format(kafka_host, kafka_port)
    )

    # set topic
    stream.set_data_interface_option("kafka", "topic", kafka_topic)

    # filter prefixes
    for prefix in prefixes:
        stream.add_filter("prefix", prefix)

    # filter record type
    stream.add_filter("record-type", "updates")

    # filter based on timing (if end=0 --> live mode)
    stream.add_interval_filter(start, end)

    # set live mode
    stream.set_live_mode()

    # start the stream
    stream.start()

    with Connection(RABBITMQ_URI) as connection:
        exchange = Exchange(
            "bgp-update", channel=connection, type="direct", durable=False
        )
        exchange.declare()
        producer = Producer(connection)
        validator = mformat_validator()
        while True:
            # get next record (best-effort: transient stream errors are
            # deliberately swallowed and the loop retries)
            try:
                rec = stream.get_next_record()
            except BaseException:
                continue
            if (rec.status != "valid") or (rec.type != "update"):
                continue

            # get next element
            try:
                elem = rec.get_next_elem()
            except BaseException:
                continue
            while elem:
                if elem.type in {"A", "W"}:
                    # refresh the monitor liveness key on every announcement
                    # or withdrawal we observe
                    redis.set(
                        "bgpstreamkafka_seen_bgp_update",
                        "1",
                        ex=int(
                            os.getenv(
                                "MON_TIMEOUT_LAST_BGP_UPDATE",
                                DEFAULT_MON_TIMEOUT_LAST_BGP_UPDATE,
                            )
                        ),
                    )
                    this_prefix = str(elem.fields["prefix"])
                    service = "bgpstreamkafka|{}".format(str(rec.collector))
                    type_ = elem.type
                    if type_ == "A":
                        as_path = elem.fields["as-path"].split(" ")
                        communities = [
                            {
                                "asn": int(comm.split(":")[0]),
                                "value": int(comm.split(":")[1]),
                            }
                            for comm in elem.fields["communities"]
                        ]
                    else:
                        # withdrawals carry no path or community info
                        as_path = []
                        communities = []
                    timestamp = float(rec.time)
                    if timestamp == 0:
                        # some records come with no timestamp; substitute now
                        timestamp = time.time()
                        log.debug("fixed timestamp: {}".format(timestamp))
                    peer_asn = elem.peer_asn
                    # only publish updates for sub-prefixes of a monitored
                    # prefix
                    for prefix in prefixes:
                        base_ip, mask_length = this_prefix.split("/")
                        our_prefix = IPNetwork(prefix)
                        if (
                            IPAddress(base_ip) in our_prefix
                            and int(mask_length) >= our_prefix.prefixlen
                        ):
                            msg = {
                                "type": type_,
                                "timestamp": timestamp,
                                "path": as_path,
                                "service": service,
                                "communities": communities,
                                "prefix": this_prefix,
                                "peer_asn": peer_asn,
                            }
                            try:
                                if validator.validate(msg):
                                    msgs = normalize_msg_path(msg)
                                    for msg in msgs:
                                        key_generator(msg)
                                        log.debug(msg)
                                        producer.publish(
                                            msg,
                                            exchange=exchange,
                                            routing_key="update",
                                            serializer="ujson",
                                        )
                                else:
                                    log.warning(
                                        "Invalid format message: {}".format(msg)
                                    )
                            except BaseException:
                                log.exception(
                                    "Error when normalizing BGP message: {}".format(msg)
                                )
                            break
                # FIX: on failure fetching the next element, abandon this
                # record instead of ``continue``-ing -- ``continue`` re-entered
                # ``while elem`` with the *same* element still bound, which
                # reprocessed (and republished) it indefinitely.
                try:
                    elem = rec.get_next_elem()
                except BaseException:
                    break
if __name__ == "__main__":
    # CLI entrypoint: parse arguments, verify redis connectivity, then run
    # the (blocking) BGPStream kafka monitor until interrupted.
    parser = argparse.ArgumentParser(description="BGPStream Kafka Monitor")
    parser.add_argument(
        "-p",
        "--prefixes",
        type=str,
        dest="prefixes_file",
        default=None,
        help="Prefix(es) to be monitored (json file with prefix list)",
    )
    parser.add_argument(
        "--kafka_host", type=str, dest="kafka_host", default=None, help="kafka host"
    )
    parser.add_argument(
        "--kafka_port", type=str, dest="kafka_port", default=None, help="kafka port"
    )
    parser.add_argument(
        "--kafka_topic",
        type=str,
        dest="kafka_topic",
        default="openbmp.bmp_raw",
        help="kafka topic",
    )
    args = parser.parse_args()
    # Block until redis is reachable before consuming any updates
    ping_redis(redis)
    try:
        # Start consuming from one hour in the past (START_TIME_OFFSET) in
        # live mode (end=0)
        run_bgpstream(
            args.prefixes_file,
            args.kafka_host,
            int(args.kafka_port),
            args.kafka_topic,
            start=int(time.time()) - START_TIME_OFFSET,
            end=0,
        )
    except Exception:
        log.exception("exception")
    except KeyboardInterrupt:
        # Graceful shutdown on Ctrl-C
        pass
| 33.222222 | 88 | 0.482943 |
f30ffbd41de3e8eadd578dbdc628e5a828bd3e12 | 1,831 | py | Python | server/src/ps.py | xraymemory/ar-cutpaste | eb29714818d72c096a7a1121d9e1c939fe9f6431 | [
"MIT"
] | 1 | 2022-01-21T14:12:26.000Z | 2022-01-21T14:12:26.000Z | server/src/ps.py | KonstantinKlepikov/ar-cutpaste | e87916b9f42a42fed4d341a01310480a8cc9fe7a | [
"MIT"
] | 2 | 2022-01-13T02:40:44.000Z | 2022-03-12T00:27:46.000Z | server/src/ps.py | KonstantinKlepikov/ar-cutpaste | e87916b9f42a42fed4d341a01310480a8cc9fe7a | [
"MIT"
] | 1 | 2020-05-21T02:18:03.000Z | 2020-05-21T02:18:03.000Z | from photoshop import PhotoshopConnection
# TODO: This offset should be detected by getTopLeft() but the new version
# of Photoshop doesn't seem to support executeActionGet so we put it
# manually here in the meantime.
SCREEN_PIXELS_DENSITY = 2
DOC_OFFSET_X = 74 * SCREEN_PIXELS_DENSITY
DOC_OFFSET_Y = 130 * SCREEN_PIXELS_DENSITY
DOC_WIDTH = 2121
DOC_HEIGHT = 1280
def paste(filename, name, x, y, password='123456'):
    """Paste the image at ``filename`` into the active Photoshop document.

    :param filename: path of the image file to paste (interpolated into an
        ExtendScript string literal, so forward slashes are safest)
    :param name: name assigned to the newly created layer
    :param x: horizontal paste position in screen pixels
    :param y: vertical paste position in screen pixels
    :param password: Photoshop remote-connection password
    """
    with PhotoshopConnection(password=password) as conn:
        script = """
        function pasteImage(filename, layerName, x, y) {
            var fileRef = new File(filename);
            var doc = app.activeDocument;
            var currentLayer = doc.artLayers.add();
            var curr_file = app.open(fileRef);
            curr_file.selection.selectAll();
            curr_file.selection.copy();
            curr_file.close();
            doc.paste();
            doc.activeLayer.name = layerName;
            doc.activeLayer.translate(x, y);
            try {
                doc.activeLayer.move(doc.layers[doc.layers.length - 1], ElementPlacement.PLACEBEFORE);
            } catch(e) {
                alert(e);
            }
        }
        function getTopLeft() {
            try {
                var r = new ActionReference();
                var bounds = executeActionGet(r)
                    .getObjectValue(stringIDToTypeID("viewInfo"))
                    .getObjectValue(stringIDToTypeID("activeView"))
                    .getObjectValue(stringIDToTypeID("globalBounds"));
                alert(bounds)
            } catch (e) {
                alert(e);
            }
        }
        """
        # Convert screen coordinates into offsets relative to the document
        # center (constants measured manually; see module-level TODO).
        x -= DOC_WIDTH * 0.5 + DOC_OFFSET_X
        y -= DOC_HEIGHT * 0.5 + DOC_OFFSET_Y
        # FIX: interpolate the actual ``filename`` argument; previously a
        # hard-coded placeholder was used, so the parameter was ignored.
        # Also fixed ``alert(t)`` in the (unused) getTopLeft helper, which
        # referenced an undefined variable instead of ``bounds``.
        script += f'pasteImage("{filename}", "{name}", {x}, {y})'
        conn.execute(script)
9661c70e6644af46cb4f4ccff1db177768be0e71 | 20,417 | py | Python | oslo_versionedobjects/fixture.py | openstack/oslo.versionedobjects | e7b6d52aa4b1b40e68a21a122c09b968d5959b0e | [
"Apache-2.0"
] | 37 | 2015-02-19T15:53:04.000Z | 2021-04-24T19:16:44.000Z | oslo_versionedobjects/fixture.py | openstack/oslo.versionedobjects | e7b6d52aa4b1b40e68a21a122c09b968d5959b0e | [
"Apache-2.0"
] | null | null | null | oslo_versionedobjects/fixture.py | openstack/oslo.versionedobjects | e7b6d52aa4b1b40e68a21a122c09b968d5959b0e | [
"Apache-2.0"
] | 18 | 2015-03-06T21:20:15.000Z | 2020-05-13T06:21:09.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Fixtures for writing tests for code using oslo.versionedobjects
.. note::
This module has several extra dependencies not needed at runtime
for production code, and therefore not installed by default. To
ensure those dependencies are present for your tests, add
``oslo.versionedobjects[fixtures]`` to your list of test dependencies.
"""
from collections import namedtuple
from collections import OrderedDict
import copy
import datetime
import inspect
import logging
from unittest import mock
import fixtures
from oslo_utils.secretutils import md5
from oslo_utils import versionutils as vutils
from oslo_versionedobjects import base
from oslo_versionedobjects import fields
LOG = logging.getLogger(__name__)
def compare_obj(test, obj, db_obj, subs=None, allow_missing=None,
                comparators=None):
    """Compare a VersionedObject and a dict-like database object.

    This automatically converts TZ-aware datetimes and iterates over
    the fields of the object.

    :param test: The TestCase doing the comparison
    :param obj: The VersionedObject to examine
    :param db_obj: The dict-like database object to use as reference
    :param subs: A dict of objkey=dbkey field substitutions
    :param allow_missing: A list of fields that may not be in db_obj
    :param comparators: Map of comparator functions to use for certain fields
    """
    subs = subs or {}
    allow_missing = allow_missing or []
    comparators = comparators or {}

    for key in obj.fields:
        db_key = subs.get(key, key)

        # If this is an allow_missing key and it's missing in either obj or
        # db_obj, just skip it
        if key in allow_missing:
            if key not in obj or db_key not in db_obj:
                continue

        # If the value isn't set on the object, and also isn't set on the
        # db_obj, we'll skip the value check, unset in both is equal
        if not obj.obj_attr_is_set(key) and db_key not in db_obj:
            continue
        # If it's set on the object and not on the db_obj, they aren't equal
        elif obj.obj_attr_is_set(key) and db_key not in db_obj:
            raise AssertionError(("%s (db_key: %s) is set on the object, but "
                                  "not on the db_obj, so the objects are not "
                                  "equal")
                                 % (key, db_key))
        # If it's set on the db_obj and not the object, they aren't equal
        elif not obj.obj_attr_is_set(key) and db_key in db_obj:
            raise AssertionError(("%s (db_key: %s) is set on the db_obj, but "
                                  "not on the object, so the objects are not "
                                  "equal")
                                 % (key, db_key))

        # All of the checks above have safeguarded us, so we know we will
        # get an obj_val and db_val without issue
        obj_val = getattr(obj, key)
        db_val = db_obj[db_key]
        if isinstance(obj_val, datetime.datetime):
            obj_val = obj_val.replace(tzinfo=None)
        if isinstance(db_val, datetime.datetime):
            # FIX: previously this assigned ``obj_val.replace(...)`` to
            # ``db_val`` (copy-paste error), which made any two datetime
            # values compare equal and masked real mismatches.
            db_val = db_val.replace(tzinfo=None)

        if key in comparators:
            comparator = comparators[key]
            comparator(db_val, obj_val)
        else:
            test.assertEqual(db_val, obj_val)
class FakeIndirectionAPI(base.VersionedObjectIndirectionAPI):
    """In-process indirection API that round-trips everything.

    Instead of doing actual RPC, each call serializes and deserializes its
    arguments (and the result) through the configured serializer, which
    exercises the same object (de)serialization paths a real remote call
    would, while executing the target method locally.
    """

    def __init__(self, serializer=None):
        super(FakeIndirectionAPI, self).__init__()
        # Fall back to the stock serializer when none is supplied
        self._ser = serializer or base.VersionedObjectSerializer()

    def _get_changes(self, orig_obj, new_obj):
        # Compute the field-level delta between two objects, expressed as
        # primitives, to mimic what a remote service would send back.
        updates = dict()
        for name, field in new_obj.fields.items():
            if not new_obj.obj_attr_is_set(name):
                continue
            if (not orig_obj.obj_attr_is_set(name) or
                    getattr(orig_obj, name) != getattr(new_obj, name)):
                updates[name] = field.to_primitive(new_obj, name,
                                                   getattr(new_obj, name))
        return updates

    def _canonicalize_args(self, context, args, kwargs):
        # Round-trip all positional and keyword arguments through the
        # serializer, as an actual RPC transport would.
        args = tuple(
            [self._ser.deserialize_entity(
                context, self._ser.serialize_entity(context, arg))
             for arg in args])
        kwargs = dict(
            [(argname, self._ser.deserialize_entity(
                context, self._ser.serialize_entity(context, arg)))
             for argname, arg in kwargs.items()])
        return args, kwargs

    def object_action(self, context, objinst, objmethod, args, kwargs):
        """Run an instance method locally, emulating the remote contract."""
        # Round-trip the target object and arguments through serialization
        objinst = self._ser.deserialize_entity(
            context, self._ser.serialize_entity(
                context, objinst))
        objmethod = str(objmethod)
        args, kwargs = self._canonicalize_args(context, args, kwargs)
        original = objinst.obj_clone()
        # Disable indirection while executing so the call doesn't recurse
        # back into this API
        with mock.patch('oslo_versionedobjects.base.VersionedObject.'
                        'indirection_api', new=None):
            result = getattr(objinst, objmethod)(*args, **kwargs)
        updates = self._get_changes(original, objinst)
        updates['obj_what_changed'] = objinst.obj_what_changed()
        return updates, result

    def object_class_action(self, context, objname, objmethod, objver,
                            args, kwargs):
        """Run a classmethod locally against a specific object version."""
        objname = str(objname)
        objmethod = str(objmethod)
        objver = str(objver)
        args, kwargs = self._canonicalize_args(context, args, kwargs)
        cls = base.VersionedObject.obj_class_from_name(objname, objver)
        # Disable indirection while executing to avoid recursion
        with mock.patch('oslo_versionedobjects.base.VersionedObject.'
                        'indirection_api', new=None):
            result = getattr(cls, objmethod)(context, *args, **kwargs)
        # Backport object results to the requested version on the way out
        return (base.VersionedObject.obj_from_primitive(
            result.obj_to_primitive(target_version=objver),
            context=context)
            if isinstance(result, base.VersionedObject) else result)

    def object_class_action_versions(self, context, objname, objmethod,
                                     object_versions, args, kwargs):
        """Run a classmethod locally using a version manifest."""
        objname = str(objname)
        objmethod = str(objmethod)
        object_versions = {str(o): str(v) for o, v in object_versions.items()}
        args, kwargs = self._canonicalize_args(context, args, kwargs)
        objver = object_versions[objname]
        cls = base.VersionedObject.obj_class_from_name(objname, objver)
        # Disable indirection while executing to avoid recursion
        with mock.patch('oslo_versionedobjects.base.VersionedObject.'
                        'indirection_api', new=None):
            result = getattr(cls, objmethod)(context, *args, **kwargs)
        # Backport object results to the manifest version on the way out
        return (base.VersionedObject.obj_from_primitive(
            result.obj_to_primitive(target_version=objver),
            context=context)
            if isinstance(result, base.VersionedObject) else result)

    def object_backport(self, context, objinst, target_version):
        # Backporting is intentionally unsupported by this fake
        raise Exception('not supported')
class IndirectionFixture(fixtures.Fixture):
    """Fixture that routes remotable calls through a fake indirection API.

    While active, ``VersionedObject.indirection_api`` is monkey-patched so
    remotable methods exercise the serialization round-trips performed by
    :class:`FakeIndirectionAPI` (or a caller-supplied API).
    """

    def __init__(self, indirection_api=None):
        self.indirection_api = indirection_api or FakeIndirectionAPI()

    def setUp(self):
        super(IndirectionFixture, self).setUp()
        monkey_patch = fixtures.MonkeyPatch(
            'oslo_versionedobjects.base.VersionedObject.indirection_api',
            self.indirection_api)
        self.useFixture(monkey_patch)
class ObjectHashMismatch(Exception):
    """Signal that computed object hashes differ from the expected ones."""

    def __init__(self, expected, actual):
        self.expected = expected
        self.actual = actual

    def __str__(self):
        # FIX: dict views cannot be concatenated with ``+`` on Python 3
        # (TypeError); take the union of the key sets instead.
        return 'Hashes have changed for %s' % (
            ','.join(set(self.expected) | set(self.actual)))
# Mirrors the shape of the removed ``inspect.ArgSpec`` so legacy hashes match
CompatArgSpec = namedtuple(
    'ArgSpec', ('args', 'varargs', 'keywords', 'defaults'))


def get_method_spec(method):
    """Return a stable, getargspec()-compatible signature description.

    Many object hashes were recorded using the legacy
    ``inspect.getargspec()`` shape, which cannot represent keyword-only
    arguments or annotations. When the method uses neither, a compatible
    :data:`CompatArgSpec` is returned; otherwise the full
    ``inspect.getfullargspec()`` result is returned as-is.
    """
    full_spec = inspect.getfullargspec(method)
    uses_newer_features = (
        full_spec.kwonlyargs
        or full_spec.kwonlydefaults
        or full_spec.annotations
    )
    if uses_newer_features:
        # Not representable in the legacy getargspec() shape
        return full_spec
    return CompatArgSpec(full_spec.args, full_spec.varargs,
                         full_spec.varkw, full_spec.defaults)
class ObjectVersionChecker(object):
    def __init__(self, obj_classes=base.VersionedObjectRegistry.obj_classes()):
        """Initialize the checker.

        :param obj_classes: mapping of object name to the list of registered
            classes for that name. NOTE(review): the default is evaluated
            once at import time, snapshotting the registry at that moment --
            confirm that later registrations should not be picked up.
        """
        self.obj_classes = obj_classes
    def _find_remotable_method(self, cls, thing, parent_was_remotable=False):
        """Follow a chain of remotable things down to the original function.

        :param cls: the class owning ``thing``, used to unwrap classmethods
        :param thing: the attribute being examined (may be a classmethod,
            a remotable-wrapped function, or a plain function)
        :param parent_was_remotable: true when the caller was a remotable
            wrapper, meaning ``thing`` may be the real implementation
        :returns: the underlying implementation function, or None when the
            attribute was never wrapped by a remotable decorator
        """
        if isinstance(thing, classmethod):
            # Unwrap the classmethod to inspect the function beneath it
            return self._find_remotable_method(cls, thing.__get__(None, cls))
        elif (inspect.ismethod(thing) or
              inspect.isfunction(thing)) and hasattr(thing, 'remotable'):
            # Peel off one remotable layer and keep descending
            return self._find_remotable_method(cls, thing.original_fn,
                                               parent_was_remotable=True)
        elif parent_was_remotable:
            # We must be the first non-remotable thing underneath a stack of
            # remotable things (i.e. the actual implementation method)
            return thing
        else:
            # This means the top-level thing never hit a remotable layer
            return None
    def _get_fingerprint(self, obj_name, extra_data_func=None):
        """Compute the ``<version>-<hash>`` fingerprint for one object class.

        The hash covers the sorted field definitions and the signatures of
        all remotable methods, so mechanical interface changes alter it.
        """
        obj_class = self.obj_classes[obj_name][0]
        obj_fields = list(obj_class.fields.items())
        obj_fields.sort()
        methods = []
        # Collect (name, signature) for every remotable method on the class
        for name in dir(obj_class):
            thing = getattr(obj_class, name)
            if inspect.ismethod(thing) or inspect.isfunction(thing) \
               or isinstance(thing, classmethod):
                method = self._find_remotable_method(obj_class, thing)
                if method:
                    methods.append((name, get_method_spec(method)))
        methods.sort()
        # NOTE(danms): Things that need a version bump are any fields
        # and their types, or the signatures of any remotable methods.
        # Of course, these are just the mechanical changes we can detect,
        # but many other things may require a version bump (method behavior
        # and return value changes, for example).
        if hasattr(obj_class, 'child_versions'):
            # Legacy child-version maps also participate in the hash
            relevant_data = (obj_fields, methods,
                            OrderedDict(
                                sorted(obj_class.child_versions.items())))
        else:
            relevant_data = (obj_fields, methods)
        if extra_data_func:
            # Callers may fold in additional class-derived data
            relevant_data += extra_data_func(obj_class)
        # The hash is of repr() of the relevant data; md5 is used only as a
        # fingerprint, not for security (hence usedforsecurity=False)
        fingerprint = '%s-%s' % (obj_class.VERSION, md5(
            bytes(repr(relevant_data).encode()),
            usedforsecurity=False).hexdigest())
        return fingerprint
def get_hashes(self, extra_data_func=None):
"""Return a dict of computed object hashes.
:param extra_data_func: a function that is given the object class
which gathers more relevant data about the
class that is needed in versioning. Returns
a tuple containing the extra data bits.
"""
fingerprints = {}
for obj_name in sorted(self.obj_classes):
fingerprints[obj_name] = self._get_fingerprint(
obj_name, extra_data_func=extra_data_func)
return fingerprints
def test_hashes(self, expected_hashes, extra_data_func=None):
fingerprints = self.get_hashes(extra_data_func=extra_data_func)
stored = set(expected_hashes.items())
computed = set(fingerprints.items())
changed = stored.symmetric_difference(computed)
expected = {}
actual = {}
for name, hash in changed:
expected[name] = expected_hashes.get(name)
actual[name] = fingerprints.get(name)
return expected, actual
def _get_dependencies(self, tree, obj_class):
obj_name = obj_class.obj_name()
if obj_name in tree:
return
for name, field in obj_class.fields.items():
if isinstance(field._type, fields.Object):
sub_obj_name = field._type._obj_name
sub_obj_class = self.obj_classes[sub_obj_name][0]
self._get_dependencies(tree, sub_obj_class)
tree.setdefault(obj_name, {})
tree[obj_name][sub_obj_name] = sub_obj_class.VERSION
def get_dependency_tree(self):
tree = {}
for obj_name in self.obj_classes.keys():
self._get_dependencies(tree, self.obj_classes[obj_name][0])
return tree
def test_relationships(self, expected_tree):
actual_tree = self.get_dependency_tree()
stored = set([(x, str(y)) for x, y in expected_tree.items()])
computed = set([(x, str(y)) for x, y in actual_tree.items()])
changed = stored.symmetric_difference(computed)
expected = {}
actual = {}
for name, deps in changed:
expected[name] = expected_tree.get(name)
actual[name] = actual_tree.get(name)
return expected, actual
    def _test_object_compatibility(self, obj_class, manifest=None,
                                   init_args=None, init_kwargs=None):
        """Exercise obj_to_primitive() against every older minor version.

        Instantiates ``obj_class`` and converts it to each target version
        from ``X.0`` up to its current ``X.Y``; a broken
        ``obj_make_compatible`` implementation typically raises here.
        """
        init_args = init_args or []
        init_kwargs = init_kwargs or {}
        version = vutils.convert_version_to_tuple(obj_class.VERSION)
        # Only pass a manifest when one was supplied
        kwargs = {'version_manifest': manifest} if manifest else {}
        for n in range(version[1] + 1):
            test_version = '%d.%d' % (version[0], n)
            # Run the test with OS_DEBUG=True to see this.
            LOG.debug('testing obj: %s version: %s' %
                      (obj_class.obj_name(), test_version))
            kwargs['target_version'] = test_version
            obj_class(*init_args, **init_kwargs).obj_to_primitive(**kwargs)
def test_compatibility_routines(self, use_manifest=False, init_args=None,
init_kwargs=None):
"""Test obj_make_compatible() on all object classes.
:param use_manifest: a boolean that determines if the version
manifest should be passed to obj_make_compatible
:param init_args: a dictionary of the format {obj_class: [arg1, arg2]}
that will be used to pass arguments to init on the
given obj_class. If no args are needed, the
obj_class does not need to be added to the dict
:param init_kwargs: a dictionary of the format
{obj_class: {'kwarg1': val1}} that will be used to
pass kwargs to init on the given obj_class. If no
kwargs are needed, the obj_class does not need to
be added to the dict
"""
# Iterate all object classes and verify that we can run
# obj_make_compatible with every older version than current.
# This doesn't actually test the data conversions, but it at least
# makes sure the method doesn't blow up on something basic like
# expecting the wrong version format.
init_args = init_args or {}
init_kwargs = init_kwargs or {}
for obj_name in self.obj_classes:
obj_classes = self.obj_classes[obj_name]
if use_manifest:
manifest = base.obj_tree_get_versions(obj_name)
else:
manifest = None
for obj_class in obj_classes:
args_for_init = init_args.get(obj_class, [])
kwargs_for_init = init_kwargs.get(obj_class, {})
self._test_object_compatibility(obj_class, manifest=manifest,
init_args=args_for_init,
init_kwargs=kwargs_for_init)
def _test_relationships_in_order(self, obj_class):
for field, versions in obj_class.obj_relationships.items():
last_my_version = (0, 0)
last_child_version = (0, 0)
for my_version, child_version in versions:
_my_version = vutils.convert_version_to_tuple(my_version)
_ch_version = vutils.convert_version_to_tuple(child_version)
if not (last_my_version < _my_version and
last_child_version <= _ch_version):
raise AssertionError(('Object %s relationship %s->%s for '
'field %s is out of order') % (
obj_class.obj_name(),
my_version, child_version,
field))
last_my_version = _my_version
last_child_version = _ch_version
def test_relationships_in_order(self):
# Iterate all object classes and verify that we can run
# obj_make_compatible with every older version than current.
# This doesn't actually test the data conversions, but it at least
# makes sure the method doesn't blow up on something basic like
# expecting the wrong version format.
for obj_name in self.obj_classes:
obj_classes = self.obj_classes[obj_name]
for obj_class in obj_classes:
self._test_relationships_in_order(obj_class)
class VersionedObjectRegistryFixture(fixtures.Fixture):
"""Use a VersionedObjectRegistry as a temp registry pattern fixture.
The pattern solution is to backup the object registry, register
a class locally, and then restore the original registry. This could be
used for test objects that do not need to be registered permanently but
will have calls which lookup registration.
"""
def setUp(self):
super(VersionedObjectRegistryFixture, self).setUp()
self._base_test_obj_backup = copy.deepcopy(
base.VersionedObjectRegistry._registry._obj_classes)
self.addCleanup(self._restore_obj_registry)
@staticmethod
def register(cls_name):
base.VersionedObjectRegistry.register(cls_name)
def _restore_obj_registry(self):
base.VersionedObjectRegistry._registry._obj_classes = \
self._base_test_obj_backup
class StableObjectJsonFixture(fixtures.Fixture):
"""Fixture that makes sure we get stable JSON object representations.
Since objects contain things like set(), which can't be converted to
JSON, we have some situations where the representation isn't fully
deterministic. This doesn't matter at all at runtime, but does to
unit tests that try to assert things at a low level.
This fixture mocks the obj_to_primitive() call and makes sure to
sort the list of changed fields (which came from a set) before
returning it to the caller.
"""
def __init__(self):
self._original_otp = base.VersionedObject.obj_to_primitive
def setUp(self):
super(StableObjectJsonFixture, self).setUp()
def _doit(obj, *args, **kwargs):
result = self._original_otp(obj, *args, **kwargs)
changes_key = obj._obj_primitive_key('changes')
if changes_key in result:
result[changes_key].sort()
return result
self.useFixture(fixtures.MonkeyPatch(
'oslo_versionedobjects.base.VersionedObject.obj_to_primitive',
_doit))
| 43.164905 | 79 | 0.630602 |
94b0d307d583675abc1e0ffe206ae5c377b3724a | 3,619 | py | Python | pysnmp/CISCOSB-SENSORENTMIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 11 | 2021-02-02T16:27:16.000Z | 2021-08-31T06:22:49.000Z | pysnmp/CISCOSB-SENSORENTMIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 75 | 2021-02-24T17:30:31.000Z | 2021-12-08T00:01:18.000Z | pysnmp/CISCOSB-SENSORENTMIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module CISCOSB-SENSORENTMIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCOSB-SENSORENTMIB
# Produced by pysmi-0.3.4 at Mon Apr 29 18:07:26 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ConstraintsIntersection, ValueRangeConstraint, SingleValueConstraint, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ConstraintsIntersection", "ValueRangeConstraint", "SingleValueConstraint", "ValueSizeConstraint")
rlEnv, = mibBuilder.importSymbols("CISCOSB-HWENVIROMENT", "rlEnv")
entityPhysicalGroup, entPhysicalIndex = mibBuilder.importSymbols("ENTITY-MIB", "entityPhysicalGroup", "entPhysicalIndex")
entPhySensorEntry, EntitySensorValue = mibBuilder.importSymbols("ENTITY-SENSOR-MIB", "entPhySensorEntry", "EntitySensorValue")
SnmpAdminString, = mibBuilder.importSymbols("SNMP-FRAMEWORK-MIB", "SnmpAdminString")
ObjectGroup, NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "ObjectGroup", "NotificationGroup", "ModuleCompliance")
TimeTicks, MibIdentifier, Unsigned32, Bits, IpAddress, Integer32, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter32, Counter64, ModuleIdentity, NotificationType, ObjectIdentity, Gauge32, iso, mib_2 = mibBuilder.importSymbols("SNMPv2-SMI", "TimeTicks", "MibIdentifier", "Unsigned32", "Bits", "IpAddress", "Integer32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter32", "Counter64", "ModuleIdentity", "NotificationType", "ObjectIdentity", "Gauge32", "iso", "mib-2")
TimeStamp, TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TimeStamp", "TextualConvention", "DisplayString")
rlSensor = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 83, 4))
rlSensor.setRevisions(('2003-09-21 00:00',))
if mibBuilder.loadTexts: rlSensor.setLastUpdated('200309210000Z')
if mibBuilder.loadTexts: rlSensor.setOrganization('Cisco Small Business')
rlEntPhySensorTable = MibTable((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 83, 3), )
if mibBuilder.loadTexts: rlEntPhySensorTable.setStatus('current')
rlEntPhySensorEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 83, 3, 1), )
entPhySensorEntry.registerAugmentions(("CISCOSB-SENSORENTMIB", "rlEntPhySensorEntry"))
rlEntPhySensorEntry.setIndexNames(*entPhySensorEntry.getIndexNames())
if mibBuilder.loadTexts: rlEntPhySensorEntry.setStatus('current')
rlEnvPhySensorMinValue = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 83, 3, 1, 1), EntitySensorValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlEnvPhySensorMinValue.setStatus('current')
rlEnvPhySensorMaxValue = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 83, 3, 1, 2), EntitySensorValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlEnvPhySensorMaxValue.setStatus('current')
rlEnvPhySensorTestValue = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 83, 3, 1, 3), EntitySensorValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlEnvPhySensorTestValue.setStatus('current')
mibBuilder.exportSymbols("CISCOSB-SENSORENTMIB", rlEntPhySensorTable=rlEntPhySensorTable, rlEnvPhySensorMinValue=rlEnvPhySensorMinValue, rlEnvPhySensorTestValue=rlEnvPhySensorTestValue, rlEntPhySensorEntry=rlEntPhySensorEntry, rlEnvPhySensorMaxValue=rlEnvPhySensorMaxValue, rlSensor=rlSensor, PYSNMP_MODULE_ID=rlSensor)
| 103.4 | 493 | 0.788063 |
00a0f729a610c72a3fc6f4a934268e4238c638f8 | 3,917 | py | Python | yolo_video.py | mbalty/keras-yolo3 | 8680b0ceec28181a47f5efacd13419ff8fac2ef4 | [
"MIT"
] | null | null | null | yolo_video.py | mbalty/keras-yolo3 | 8680b0ceec28181a47f5efacd13419ff8fac2ef4 | [
"MIT"
] | null | null | null | yolo_video.py | mbalty/keras-yolo3 | 8680b0ceec28181a47f5efacd13419ff8fac2ef4 | [
"MIT"
] | 1 | 2020-03-04T08:26:36.000Z | 2020-03-04T08:26:36.000Z | import sys
import argparse
from yolo import YOLO, detect_video
from PIL import Image
from glob import glob
from os import path
def detect_img(yolo):
while True:
img = input('Input image filename:')
try:
image = Image.open(img)
except:
print('Open Error! Try again!')
continue
else:
r_image = yolo.detect_image(image)
r_image.show()
yolo.close_session()
def detect_img_folder(yolo, input_dir, output_dir, show = False):
for img in glob(input_dir + "/*.jpg"):
try:
image = Image.open(img)
except:
print('Open Error! Try again!')
continue
else:
r_image = yolo.detect_image(image)
r_image.save(path.join(output_dir, path.basename(img)))
if show:
r_image.show()
yolo.close_session()
# def detect_img_folder(yolo, dir_dir, output_dir, show = False):
# for input_dir in os.listdir(dir_dir):
# input_dir = path.join(dir_dir, input_dir)
# print(input_dir)
# if path.isfile(input_dir):
# for img in glob(input_dir.split(".")[0] + "/*.jpg"):
# try:
# image = Image.open(img)
# except:
# print(img)
# print('Open Error! Try again!')
# continue
# else:
# r_image = yolo.detect_image(image)
# r_image.save(path.join(output_dir, path.basename(img)))
# if show:
# r_image.show()
# yolo.close_session()
#
FLAGS = None
if __name__ == '__main__':
# class YOLO defines the default value, so suppress any default here
parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS)
'''
Command line options
'''
parser.add_argument(
'--model', type=str,
help='path to model weight file, default ' + YOLO.get_defaults("model_path")
)
parser.add_argument(
'--anchors', type=str,
help='path to anchor definitions, default ' + YOLO.get_defaults("anchors_path")
)
parser.add_argument(
'--classes', type=str,
help='path to class definitions, default ' + YOLO.get_defaults("classes_path")
)
parser.add_argument(
'--gpu_num', type=int,
help='Number of GPU to use, default ' + str(YOLO.get_defaults("gpu_num"))
)
parser.add_argument(
'--image', default=False, action="store_true",
help='Image detection mode, will ignore all positional arguments'
)
parser.add_argument(
'--image_folder', default=False,
help='Image folder detection mode, will ignore all positional arguments'
)
'''
Command line positional arguments -- for video detection mode
'''
parser.add_argument(
"--input", nargs='?', type=str,required=False,default='./path2your_video',
help = "Video input path"
)
parser.add_argument(
"--output", nargs='?', type=str, default="",
help = "[Optional] Video output path"
)
FLAGS = parser.parse_args()
if FLAGS.image:
"""
Image detection mode, disregard any remaining command line arguments
"""
print("Image detection mode")
if "input" in FLAGS:
print(" Ignoring remaining command line arguments: " + FLAGS.input + "," + FLAGS.output)
detect_img(YOLO(**vars(FLAGS)))
elif FLAGS.image_folder:
# dir = input('Input image folder:')
# out_dir = input('Out image folder:')
detect_img_folder(YOLO(**vars(FLAGS)), FLAGS.input, FLAGS.output)
elif "input" in FLAGS:
detect_video(YOLO(**vars(FLAGS)), FLAGS.input, FLAGS.output)
else:
print("Must specify at least video_input_path. See usage with --help.")
| 30.601563 | 100 | 0.577227 |
c7d98da5229ce36e1915d69dc346a541d27a74a2 | 132 | py | Python | tensorflow/time_series_forecasting/time_series_forecasting/__init__.py | Elkinmt19/data-science-dojo | 9e3d7ca8774474e1ad74138c7215ca3acdabf07c | [
"MIT"
] | 1 | 2022-01-14T03:16:23.000Z | 2022-01-14T03:16:23.000Z | tensorflow/time_series_forecasting/time_series_forecasting/__init__.py | Elkinmt19/data-science-dojo | 9e3d7ca8774474e1ad74138c7215ca3acdabf07c | [
"MIT"
] | null | null | null | tensorflow/time_series_forecasting/time_series_forecasting/__init__.py | Elkinmt19/data-science-dojo | 9e3d7ca8774474e1ad74138c7215ca3acdabf07c | [
"MIT"
] | null | null | null | from .windows_generator import *
__author__ = 'Elkin Javier Guerra Galeano'
__email__ = 'elkinmt19@gmail.com'
__version__ = '0.0.1' | 26.4 | 42 | 0.765152 |
553747297794c310787bc7b6e9a9d8ae4b4b85ca | 408 | py | Python | setup.py | blackvvine/file-path | 091538feb0685185e68f594562051a4775c5d266 | [
"MIT"
] | null | null | null | setup.py | blackvvine/file-path | 091538feb0685185e68f594562051a4775c5d266 | [
"MIT"
] | null | null | null | setup.py | blackvvine/file-path | 091538feb0685185e68f594562051a4775c5d266 | [
"MIT"
] | 1 | 2019-10-13T15:22:23.000Z | 2019-10-13T15:22:23.000Z | from distutils.core import setup
setup(
name='file-path',
packages=['filepath'],
version='0.2',
description='Object-oriented wrapper for paths',
author='Iman Akbari',
author_email='imakbari@gmail.com',
url='https://github.com/blackvvine/file-path',
download_url='https://github.com/blackvvine/file-path/tarball/1.0',
keywords=['file', 'directory'],
classifiers=[],
)
| 27.2 | 71 | 0.671569 |
c97396826758166a2e8b7aac59c81f04017b466d | 680 | py | Python | notebooks/functions.py | Shrey-WadhwaniAI/LineList-Dashboard | 5937d3fb0f0c2e525952e509e52d7e30bf51dabb | [
"MIT"
] | null | null | null | notebooks/functions.py | Shrey-WadhwaniAI/LineList-Dashboard | 5937d3fb0f0c2e525952e509e52d7e30bf51dabb | [
"MIT"
] | null | null | null | notebooks/functions.py | Shrey-WadhwaniAI/LineList-Dashboard | 5937d3fb0f0c2e525952e509e52d7e30bf51dabb | [
"MIT"
] | null | null | null | from utils import *
import streamlit as st
def pop_filter(x,pops):
if 'gender' in pops :
x = get_valid_gender(x)
return x
@st.cache
def two_pop_groups(data,pop1,pop2):
df = remove_untested(data)
#print("untested filter")
#print(len(df))
df = pop_filter(df,[pop1,pop2])
#print(len(df))
grouped = df.groupby([pop1, pop2,'final result sample']).agg('size')
#print("grouped data")
#print(grouped)
grouped.to_csv('../data/grouped.csv')
df2 = grouped.unstack(fill_value=0)
df2 = add_test_summary(df2)
df2 = df2.round(4)
df2 = df2.reset_index()
# df2.to_csv('../Outputs/agegender_tests.csv')
return df2 | 25.185185 | 72 | 0.638235 |
0fbc735ca0c53011ee46344dfd7a4c3b740d3a44 | 2,894 | py | Python | 2021/Python/13.py | hckr/adventofcode-haskell | fa6a7624c68392d45b937c49cc35c17f314ea6e4 | [
"MIT"
] | null | null | null | 2021/Python/13.py | hckr/adventofcode-haskell | fa6a7624c68392d45b937c49cc35c17f314ea6e4 | [
"MIT"
] | null | null | null | 2021/Python/13.py | hckr/adventofcode-haskell | fa6a7624c68392d45b937c49cc35c17f314ea6e4 | [
"MIT"
] | null | null | null | import fileinput
import functools
from typing import List, Optional, Tuple
import numpy as np
def main(input_path: Optional[str] = None):
r"""
>>> from _pytest.monkeypatch import MonkeyPatch
>>> with MonkeyPatch.context() as monkeypatch:
... monkeypatch.setattr(fileinput, "input", lambda x: iter([
... "6,10\n",
... "0,14\n",
... "9,10\n",
... "0,3\n",
... "10,4\n",
... "4,11\n",
... "6,0\n",
... "6,12\n",
... "4,1\n",
... "0,13\n",
... "10,12\n",
... "3,4\n",
... "3,0\n",
... "8,4\n",
... "1,10\n",
... "2,14\n",
... "8,10\n",
... "9,0\n",
... "\n",
... "fold along y=7\n",
... "fold along x=5\n"]))
... main()
17
#####
# #
# #
# #
#####
<BLANKLINE>
<BLANKLINE>
>>> main('../13.in')
755
### # # # ## ### ### ## ##
# # # # # # # # # # # # # #
### # ## # # # ### # # #
# # # # # # ### # # #### # ##
# # # # # # # # # # # # # # #
### #### # # ## # # ### # # ###
"""
input_iter = (line.strip() for line in fileinput.input(input_path))
points: List[Tuple[int, int]] = []
max_x = 0
max_y = 0
for line in input_iter:
if len(line) == 0:
break
x, y = [int(x) for x in line.strip().split(",")]
if x > max_x:
max_x = x
if y > max_y:
max_y = y
points.append((x, y))
points_array = np.zeros((next_odd(max_y + 1), next_odd(max_x + 1)), dtype=int)
for x, y in points:
points_array[y, x] = 1
folds: List[Tuple[str, int]] = []
for line in input_iter:
axis, center = line.replace("fold along ", "").split("=")
folds.append((axis, int(center)))
after_first_fold = do_the_fold(points_array, folds[0])
print(np.sum(after_first_fold > 0))
after_all_folds = functools.reduce(do_the_fold, folds, points_array)
for row in after_all_folds:
print("".join((" " if val == 0 else "#") for val in row).rstrip())
def next_odd(number: int):
if number % 2 == 0:
return number + 1
return number
def do_the_fold(points_array: np.ndarray, fold: Tuple[str, int]):
axis, center = fold
# it looks like the fold is always in half
if axis == "y":
assert center == int(points_array.shape[0] / 2)
return points_array[:center, :] + np.flip(
points_array[(center + 1) :, :], axis=0
)
# axis == 'x'
assert center == int(points_array.shape[1] / 2)
return points_array[:, :center] + np.flip(points_array[:, (center + 1) :], axis=1)
if __name__ == "__main__":
main()
| 27.561905 | 86 | 0.43953 |
d17cc1b21a0296aa06d8c002fc4d8d7a40fa01fe | 40,827 | py | Python | tools/stats/print_test_stats.py | LiamZhuuu/pytorch | 0111065c8fd3112706a9ca287b56fb98549f581e | [
"Intel"
] | null | null | null | tools/stats/print_test_stats.py | LiamZhuuu/pytorch | 0111065c8fd3112706a9ca287b56fb98549f581e | [
"Intel"
] | null | null | null | tools/stats/print_test_stats.py | LiamZhuuu/pytorch | 0111065c8fd3112706a9ca287b56fb98549f581e | [
"Intel"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import bz2
import datetime
import json
import math
import os
import re
import statistics
import subprocess
import time
from collections import defaultdict
from pathlib import Path
from typing import (Any, DefaultDict, Dict, Iterable, Iterator, List, Optional,
Set, Tuple, cast)
from xml.dom import minidom
from typing_extensions import TypedDict
from tools.stats.s3_stat_parser import (newify_case, get_S3_object_from_bucket, get_test_stats_summaries_for_job,
Report, Status, Commit, HAVE_BOTO3, Version2Case, VersionedReport,
Version1Report, Version2Report, ReportMetaMeta)
from tools.stats.scribe import send_to_scribe, rds_write, register_rds_schema, schema_from_sample
# Simplified, uniform index of one report's test times:
#   filename -> suite name -> case name -> case data.
# simplify() below converts both report format versions into this shape.
SimplerSuite = Dict[str, Version2Case]
SimplerFile = Dict[str, SimplerSuite]
SimplerReport = Dict[str, SimplerFile]
class Stat(TypedDict):
    """Center/spread summary (in seconds) of a sample of test times."""

    # mean of the sampled times (see list_stat)
    center: float
    # sample stdev, or None when there were fewer than two samples
    spread: Optional[float]
class CaseDiff(TypedDict):
    """Head-vs-base comparison for a single test case."""

    # one-character diff marker used when pretty-printing:
    # '-' removed, '+' added, '!' status changed, ' ' listed for context
    margin: str
    name: str
    # (stats across the base commits, status on base);
    # None when the case is not present on base
    was: Optional[Tuple[Stat, Status]]
    # the case's data on the head commit; None when the case was removed
    now: Optional[Version2Case]
class SuiteDiff(TypedDict):
    """Head-vs-base comparison for a whole test suite."""

    # one-character diff marker: '-' suite removed, '+' suite added,
    # ' ' suite present on both sides (individual cases changed)
    margin: str
    name: str
    # aggregate stats for the suite on the base commits; None when newly added
    was: Optional[Stat]
    # total seconds for the suite on the head commit; None when removed
    now: Optional[float]
    # per-case diffs within this suite
    cases: List[CaseDiff]
# TODO: consolidate this with the get_cases function from
# tools/stats/test_history.py
def simplify(report: Report) -> SimplerReport:
    """Normalize a version 1 or version 2 report into the three-level
    (filename -> suite -> case) shape used by the diffing logic.

    Version 1 reports carry no test filename information, so all of
    their suites are grouped under a single fake filename: the empty
    string.  Because version 1 merged together all suites sharing a
    name (even if they came from different files), two cases in such a
    merged suite can share a name; the dict comprehension below keeps
    only one of them.  That silent loss only affects old (version 1)
    uploads, since everything going forward is uploaded in the
    filename-keyed version 2 format.
    """
    if 'format_version' not in report:  # version 1 implicitly
        v1report = cast(Version1Report, report)
        simpler_file: SimplerFile = {}
        for suite_name, suite in v1report['suites'].items():
            # clobbers duplicate case names (see docstring)
            simpler_file[suite_name] = {
                case['name']: newify_case(case)
                for case in suite['cases']
            }
        # fake filename, since version 1 didn't record real ones
        return {'': simpler_file}
    v_report = cast(VersionedReport, report)
    version = v_report['format_version']
    if version != 2:
        raise RuntimeError(f'Unknown format version: {version}')
    v2report = cast(Version2Report, v_report)
    return {
        filename: {
            suite_name: suite['cases']
            for suite_name, suite in file_data['suites'].items()
        }
        for filename, file_data in v2report['files'].items()
    }
def plural(n: int) -> str:
    """Return the plural suffix 's' for any count other than exactly 1."""
    if n == 1:
        return ''
    return 's'
def get_base_commit(sha1: str) -> str:
    """Return the sha of the merge-base between *sha1* and origin/master.

    Shells out to git, so it must run inside a checkout that has an
    origin/master ref.
    """
    cmd = ["git", "merge-base", sha1, "origin/master"]
    output = subprocess.check_output(cmd, encoding="ascii")
    return output.strip()
def display_stat(
    x: Stat,
    format: Tuple[Tuple[int, int], Tuple[int, int]],
) -> str:
    """Render a Stat as 'MM.MMs ± SS.SSs', padding the spread slot with
    blanks when no spread is available (so columns stay aligned).

    *format* gives ((mean_digits, mean_decimals), (spread_digits,
    spread_decimals)): the width of each number on either side of its
    decimal point.
    """
    (mean_digits, mean_decimals), (spread_digits, spread_decimals) = format
    mean_width = mean_digits + 1 + mean_decimals
    spread_width = spread_digits + 1 + spread_decimals
    spread = x['spread']
    if spread is None:
        # same width as ' ± NN.NNs' would occupy
        suffix = ' ' * (3 + spread_width + 1)
    else:
        suffix = f' ± {spread:{spread_width}.{spread_decimals}f}s'
    return f'{x["center"]:{mean_width}.{mean_decimals}f}s{suffix}'
def list_stat(l: List[float]) -> Stat:
    """Summarize a nonempty sample as its mean and (when len > 1) stdev."""
    spread = statistics.stdev(l) if len(l) > 1 else None
    return {'center': statistics.mean(l), 'spread': spread}
def zero_stat() -> Stat:
    """Return a stat representing zero seconds with no measured spread."""
    stat: Stat = {'center': 0, 'spread': None}
    return stat
def recenter(was: Stat, now: float) -> Stat:
    """Return *was* re-centered so its center is the delta (now - center)."""
    delta = now - was['center']
    return {'center': delta, 'spread': was['spread']}
def sum_normals(stats: Iterable[Stat]) -> Stat:
    """
    Return a stat corresponding to the sum of the given stats.

    Assumes that the center and spread for each of the given stats are
    mean and stdev, respectively, so (treating them as independent
    normals) variances add and the summed stdev is the root of the
    summed squared spreads.  Missing spreads count as zero; the result
    has a spread only if at least one input did.
    """
    collected = list(stats)
    spreads = [stat['spread'] for stat in collected]
    combined_spread: Optional[float] = None
    if any(s is not None for s in spreads):
        combined_spread = math.sqrt(sum((s or 0) ** 2 for s in spreads))
    return {
        'center': sum(stat['center'] for stat in collected),
        'spread': combined_spread,
    }
def format_seconds(seconds: List[float]) -> str:
    """Render a list of run times as 'total time M.MMs ± S.SSs' ('' if empty)."""
    if not seconds:
        return ''
    stat = list_stat(seconds)
    return f'total time {display_stat(stat, ((5, 2), (4, 2)))}'.strip()
def show_ancestors(num_commits: int) -> str:
    """Render the '(N commits)' annotation line used in the commit graph."""
    suffix = plural(num_commits)
    return f' | : ({num_commits} commit{suffix})'
def unlines(lines: List[str]) -> str:
    """Join *lines* into one string, terminating every line with a newline."""
    # joining with a trailing sentinel gives 'a\nb\n' and '' for no lines
    return '\n'.join(lines + [''])
def matching_test_times(
    *,
    base_reports: Dict[Commit, List[SimplerReport]],
    filename: str,
    suite_name: str,
    case_name: str,
    status: Status,
) -> List[float]:
    """Collect the run times of the given case across all base reports.

    Only occurrences whose status matches *status* are counted; reports
    missing the file, suite, or case are simply skipped.
    """
    times: List[float] = []
    for reports in base_reports.values():
        for report in reports:
            case = report.get(filename, {}).get(suite_name, {}).get(case_name)
            if case is None:
                continue
            if case['status'] == status:
                times.append(case['seconds'])
    return times
def analyze(
    *,
    head_report: SimplerReport,
    base_reports: Dict[Commit, List[SimplerReport]],
) -> List[SuiteDiff]:
    """Diff the head report against the base commits, suite by suite.

    Returns one SuiteDiff per suite that was removed, changed, or added
    relative to the most recent master ancestor that has at least one
    S3 report, in that order (removed, then modified, then added).
    """
    nonempty_shas = [sha for sha, reports in base_reports.items() if reports]
    # most recent master ancestor with at least one S3 report,
    # or empty list if there are none (will show all tests as added)
    base_report = base_reports[nonempty_shas[0]] if nonempty_shas else []
    # find all relevant suites (those in either base or head or both)
    all_reports = [head_report] + base_report
    all_suites: Set[Tuple[str, str]] = {
        (filename, suite_name)
        for r in all_reports
        for filename, file_data in r.items()
        for suite_name in file_data.keys()
    }
    removed_suites: List[SuiteDiff] = []
    modified_suites: List[SuiteDiff] = []
    added_suites: List[SuiteDiff] = []
    for filename, suite_name in sorted(all_suites):
        case_diffs: List[CaseDiff] = []
        head_suite = head_report.get(filename, {}).get(suite_name)
        # cases that appear with an identical status in every report for
        # the base commit; the `or [set()]` guards against an empty
        # base_report (set.intersection() with no arguments would raise)
        base_cases: Dict[str, Status] = dict(sorted(set.intersection(*[
            {
                (n, case['status'])
                for n, case
                in report.get(filename, {}).get(suite_name, {}).items()
            }
            for report in base_report
        ] or [set()])))
        case_stats: Dict[str, Stat] = {}
        if head_suite:
            now = sum(case['seconds'] for case in head_suite.values())
            if any(
                filename in report and suite_name in report[filename]
                for report in base_report
            ):
                # suite exists on both head and base: diff case by case
                removed_cases: List[CaseDiff] = []
                for case_name, case_status in base_cases.items():
                    case_stats[case_name] = list_stat(matching_test_times(
                        base_reports=base_reports,
                        filename=filename,
                        suite_name=suite_name,
                        case_name=case_name,
                        status=case_status,
                    ))
                    if case_name not in head_suite:
                        removed_cases.append({
                            'margin': '-',
                            'name': case_name,
                            'was': (case_stats[case_name], case_status),
                            'now': None,
                        })
                modified_cases: List[CaseDiff] = []
                added_cases: List[CaseDiff] = []
                for head_case_name in sorted(head_suite):
                    head_case = head_suite[head_case_name]
                    if head_case_name in base_cases:
                        stat = case_stats[head_case_name]
                        base_status = base_cases[head_case_name]
                        # same case on both sides: only a status change counts
                        if head_case['status'] != base_status:
                            modified_cases.append({
                                'margin': '!',
                                'name': head_case_name,
                                'was': (stat, base_status),
                                'now': head_case,
                            })
                    else:
                        added_cases.append({
                            'margin': '+',
                            'name': head_case_name,
                            'was': None,
                            'now': head_case,
                        })
                # there might be a bug calculating this stdev, not sure
                was = sum_normals(case_stats.values())
                case_diffs = removed_cases + modified_cases + added_cases
                if case_diffs:
                    modified_suites.append({
                        'margin': ' ',
                        'name': suite_name,
                        'was': was,
                        'now': now,
                        'cases': case_diffs,
                    })
            else:
                # suite is new on head: every case shows as added
                for head_case_name in sorted(head_suite):
                    head_case = head_suite[head_case_name]
                    case_diffs.append({
                        'margin': ' ',
                        'name': head_case_name,
                        'was': None,
                        'now': head_case,
                    })
                added_suites.append({
                    'margin': '+',
                    'name': suite_name,
                    'was': None,
                    'now': now,
                    'cases': case_diffs,
                })
        else:
            # suite vanished from head: every base case shows as removed
            for case_name, case_status in base_cases.items():
                case_stats[case_name] = list_stat(matching_test_times(
                    base_reports=base_reports,
                    filename=filename,
                    suite_name=suite_name,
                    case_name=case_name,
                    status=case_status,
                ))
                case_diffs.append({
                    'margin': ' ',
                    'name': case_name,
                    'was': (case_stats[case_name], case_status),
                    'now': None,
                })
            removed_suites.append({
                'margin': '-',
                'name': suite_name,
                # there might be a bug calculating this stdev, not sure
                'was': sum_normals(case_stats.values()),
                'now': None,
                'cases': case_diffs,
            })
    return removed_suites + modified_suites + added_suites
def case_diff_lines(diff: CaseDiff) -> List[str]:
    """Pretty-print one case diff as margin-prefixed, source-like lines."""
    fmt = ((3, 3), (2, 3))
    body = [f'def {diff["name"]}: ...']
    was = diff['was']
    if was:
        stat, status = was
        line = f' # was {display_stat(stat, fmt)}'
        if status:
            line += f' ({status})'
        body.append(line)
    now = diff['now']
    if now:
        now_stat: Stat = {'center': now['seconds'], 'spread': None}
        line = f' # now {display_stat(now_stat, fmt)}'
        if now['status']:
            line += f" ({now['status']})"
        body.append(line)
    margin = diff['margin']
    # leading blank line separates this case from the previous one
    return [''] + [f'{margin} {l}' for l in body]
def display_suite_diff(diff: SuiteDiff) -> str:
    """Pretty-print one suite diff as an indented, margin-prefixed block."""
    fmt = ((4, 2), (3, 2))
    body = [f'class {diff["name"]}:']
    was = diff['was']
    if was:
        body.append(f' # was {display_stat(was, fmt)}')
    now = diff['now']
    if now is not None:
        now_stat: Stat = {'center': now, 'spread': None}
        body.append(f' # now {display_stat(now_stat, fmt)}')
    for case_diff in diff['cases']:
        body.extend(f' {l}' for l in case_diff_lines(case_diff))
    margin = diff['margin']
    return unlines([''] + [f'{margin} {l}'.rstrip() for l in body] + [''])
def anomalies(diffs: List[SuiteDiff]) -> str:
    """Concatenate the pretty-printed form of every suite diff."""
    return ''.join(display_suite_diff(d) for d in diffs)
def graph(
    *,
    head_sha: Commit,
    head_seconds: float,
    base_seconds: Dict[Commit, List[float]],
    on_master: bool,
    ancestry_path: int = 0,
    other_ancestors: int = 0,
) -> str:
    """Render an ASCII commit graph from HEAD down through base commits.

    base_seconds maps each master ancestor (newest first; see the
    regression_info docstring) to the total runtime of each of its S3
    reports.  The first ancestor with at least one report is labeled
    '(base)'.
    """
    lines = [
        'Commit graph (base is most recent master ancestor with at least one S3 report):',
        '',
        ' : (master)',
        ' |',
    ]
    head_time_str = f' {format_seconds([head_seconds])}'
    if on_master:
        # HEAD sits on master itself: draw it on the main line
        lines.append(f' * {head_sha[:10]} (HEAD) {head_time_str}')
    else:
        # HEAD is on a side branch
        lines.append(f' | * {head_sha[:10]} (HEAD) {head_time_str}')
        if ancestry_path > 0:
            # intermediate commits are summarized as '(N commits)'
            lines += [
                ' | |',
                show_ancestors(ancestry_path),
            ]
        if other_ancestors > 0:
            lines += [
                ' |/|',
                show_ancestors(other_ancestors),
                ' |',
            ]
        else:
            lines.append(' |/')
    is_first = True
    for sha, seconds in base_seconds.items():
        num_runs = len(seconds)
        prefix = str(num_runs).rjust(3)
        # label only the newest ancestor that actually has reports
        base = '(base)' if is_first and num_runs > 0 else ' '
        if num_runs > 0:
            is_first = False
        t = format_seconds(seconds)
        p = plural(num_runs)
        if t:
            p = f'{p}, '.ljust(3)
        lines.append(f' * {sha[:10]} {base} {prefix} report{p}{t}')
    lines.extend([' |', ' :'])
    return unlines(lines)
def case_delta(case: CaseDiff) -> Stat:
    """Return the head-minus-base time delta for one case diff.

    A missing side (case added or removed) contributes zero seconds.
    """
    before = case['was']
    after = case['now']
    baseline = before[0] if before else zero_stat()
    head_seconds = after['seconds'] if after else 0
    return recenter(baseline, head_seconds)
def display_final_stat(stat: Stat) -> str:
    """Format a delta stat with an explicit sign (' ' when exactly zero)."""
    center = stat['center']
    magnitude = display_stat(
        {'center': abs(center), 'spread': stat['spread']},
        ((4, 2), (3, 2)),
    )
    if center > 0:
        sign = '+'
    elif center < 0:
        sign = '-'
    else:
        sign = ' '
    return f'{sign}{magnitude}'.rstrip()
def summary_line(message: str, d: DefaultDict[str, List[CaseDiff]]) -> str:
    """Render one aligned summary line: suite count, test count, time delta."""
    cases = [c for case_list in d.values() for c in case_list]
    num_suites = len(d)
    num_tests = len(cases)
    suite_suffix = f'{plural(num_suites)})'.ljust(2)
    test_suffix = f'{plural(num_tests)},'.ljust(2)
    # there might be a bug calculating this stdev, not sure
    total = sum_normals(case_delta(c) for c in cases)
    return (
        f'{message} (across {num_suites:>4} suite{suite_suffix}'
        f'{num_tests:>6} test{test_suffix}'
        f' totaling {display_final_stat(total)}'
    )
def summary(analysis: List[SuiteDiff]) -> str:
    """Summarize a list of suite diffs as three one-line totals.

    Buckets every changed case by suite name into removed/modified/added
    groups, then renders one summary_line per group.
    """
    removed_tests: DefaultDict[str, List[CaseDiff]] = defaultdict(list)
    modified_tests: DefaultDict[str, List[CaseDiff]] = defaultdict(list)
    added_tests: DefaultDict[str, List[CaseDiff]] = defaultdict(list)
    for diff in analysis:
        # the use of 'margin' here is not the most elegant
        name = diff['name']
        margin = diff['margin']
        cases = diff['cases']
        if margin == '-':
            # whole suite removed: all of its cases count as removed
            removed_tests[name] += cases
        elif margin == '+':
            # whole suite added: all of its cases count as added
            added_tests[name] += cases
        else:
            # suite exists on both sides: bucket its cases individually
            removed = [c for c in cases if c['margin'] == '-']
            added = [c for c in cases if c['margin'] == '+']
            modified = [c for c in cases if c['margin'] == '!']
            if removed:
                removed_tests[name] += removed
            if added:
                added_tests[name] += added
            if modified:
                modified_tests[name] += modified
    return unlines([
        summary_line('Removed ', removed_tests),
        summary_line('Modified', modified_tests),
        summary_line('Added ', added_tests),
    ])
def regression_info(
    *,
    head_sha: Commit,
    head_report: Report,
    base_reports: Dict[Commit, List[Report]],
    job_name: str,
    on_master: bool,
    ancestry_path: int,
    other_ancestors: int,
) -> str:
    """
    Return a human-readable report describing any test time regressions.

    The head_sha and head_report args give info about the current commit
    and its test times. Since Python dicts maintain insertion order
    (guaranteed as part of the language spec since 3.7), the
    base_reports argument must list the head's several most recent
    master commits, from newest to oldest (so the merge-base is
    list(base_reports)[0]).
    """
    # normalize every report into the filename -> suite -> case shape
    # that analyze() expects
    simpler_head = simplify(head_report)
    simpler_base: Dict[Commit, List[SimplerReport]] = {}
    for commit, reports in base_reports.items():
        simpler_base[commit] = [simplify(r) for r in reports]
    analysis = analyze(
        head_report=simpler_head,
        base_reports=simpler_base,
    )
    # header, then the commit graph, then the three-line summary
    return '\n'.join([
        unlines([
            '----- Historic stats comparison result ------',
            '',
            f' job: {job_name}',
            f' commit: {head_sha}',
        ]),
        # don't print anomalies, because sometimes due to sharding, the
        # output from this would be very long and obscure better signal
        # anomalies(analysis),
        graph(
            head_sha=head_sha,
            head_seconds=head_report['total_seconds'],
            base_seconds={
                c: [r['total_seconds'] for r in rs]
                for c, rs in base_reports.items()
            },
            on_master=on_master,
            ancestry_path=ancestry_path,
            other_ancestors=other_ancestors,
        ),
        summary(analysis),
    ])
class TestCase:
    """A single <testcase> element parsed from a test-report XML file."""

    def __init__(self, dom: Any) -> None:
        attrs = dom.attributes
        self.class_name = str(attrs['classname'].value)
        self.name = str(attrs['name'].value)
        self.time = float(attrs['time'].value)

        # DISCLAIMER: unexpected successes and expected failures are
        # currently not reported in assemble_s3_object
        self.expected_failure = False
        self.skipped = False
        self.errored = False
        self.unexpected_success = False

        errors = dom.getElementsByTagName('error')
        if errors:
            # Only a single <error> element is ever expected.
            err = errors[0]
            self.unexpected_success = (
                err.hasAttribute('type')
                and err.attributes['type'].value == 'UnexpectedSuccess')
            # Any other <error> counts as a genuine error.
            self.errored = not self.unexpected_success

        skips = dom.getElementsByTagName('skipped')
        if skips:
            # Only a single <skipped> element is ever expected.
            skip = skips[0]
            self.expected_failure = (
                skip.hasAttribute('type')
                and skip.attributes['type'].value == 'XFAIL')
            # Any other <skipped> counts as a genuine skip.
            self.skipped = not self.expected_failure

        self.failed = len(dom.getElementsByTagName('failure')) > 0

    def __repr__(self) -> str:
        return self.__str__()

    def __str__(self) -> str:
        return (
            f'[TestCase name: {self.name} | class_name: {self.class_name} | time: {self.time} | '
            f'expected_failure: {self.expected_failure} | skipped: {self.skipped} | errored: {self.errored} | '
            f'unexpected_success: {self.unexpected_success} | failed: {self.failed}]')
class TestSuite:
    """Aggregates the TestCase objects that share one suite (class) name."""

    def __init__(self, name: str) -> None:
        self.name = name
        self.test_cases: Dict[str, TestCase] = dict()
        self.failed_count = 0
        self.skipped_count = 0
        self.errored_count = 0
        self.total_time = 0.0
        # The below are currently not included in test reports
        self.unexpected_success_count = 0
        self.expected_failure_count = 0

    def __repr__(self) -> str:
        rc = f'{self.name} run_time: {self.total_time:.2f} tests: {len(self.test_cases)}'
        if self.skipped_count > 0:
            rc += f' skipped: {self.skipped_count}'
        return f'TestSuite({rc})'

    def append(self, test_case: TestCase) -> None:
        """Add a brand-new case and fold its stats into the running totals."""
        self.test_cases[test_case.name] = test_case
        self.total_time += test_case.time
        if test_case.failed:
            self.failed_count += 1
        if test_case.skipped:
            self.skipped_count += 1
        if test_case.errored:
            self.errored_count += 1
        if test_case.unexpected_success:
            self.unexpected_success_count += 1
        if test_case.expected_failure:
            self.expected_failure_count += 1

    def update(self, test_case: TestCase) -> None:
        """Merge a rerun of an already-recorded case into the stored one."""
        name = test_case.name
        assert name in self.test_cases, f'Error: attempting to replace nonexistent test case {name}'
        existing = self.test_cases[name]
        # Note that time for unexpected successes and expected failures are
        # reported as 0s
        existing.time += test_case.time
        existing.failed |= test_case.failed
        existing.errored |= test_case.errored
        existing.skipped |= test_case.skipped
        existing.unexpected_success |= test_case.unexpected_success
        existing.expected_failure |= test_case.expected_failure
# Maps suite name -> test case name -> every duplicated run recorded for it.
DuplicatedDict = Dict[str, Dict[str, List[TestCase]]]
class TestFile:
    """All test suites produced by the XML reports of a single test file."""

    def __init__(self, name: str) -> None:
        self.name = name
        self.total_time = 0.0
        self.test_suites: Dict[str, TestSuite] = dict()

    def append(self, test_case: TestCase, test_type: str, duplicated_tests_dict: DuplicatedDict) -> None:
        """Record one parsed case, merging legitimate multi-run tests and
        collecting unintentional duplicates into duplicated_tests_dict."""
        # These files legitimately run the same case several times, so their
        # repeats are merged rather than flagged as duplicates.
        is_multi_test = self.name in (
            'test_cpp_extensions_aot',
            'distributed/test_distributed_spawn',
            'distributed/test_c10d_gloo',
            'cpp',  # The caffe2 cpp tests spawn duplicate test cases as well.
        )
        suite_name = (test_case.class_name + '__' + test_type
                      if is_multi_test else test_case.class_name)
        if suite_name not in self.test_suites:
            self.test_suites[suite_name] = TestSuite(suite_name)
        suite = self.test_suites[suite_name]
        if test_case.name in suite.test_cases:
            if is_multi_test:
                suite.update(test_case)
                self.total_time += test_case.time
            else:
                # Gather up duplicated test cases for later flaky analysis.
                cases = duplicated_tests_dict.setdefault(suite_name, dict())
                if test_case.name not in cases:
                    cases[test_case.name] = [suite.test_cases[test_case.name]]
                cases[test_case.name].append(test_case)
        else:
            suite.append(test_case)
            self.total_time += test_case.time
def parse_report(path: str) -> Iterator[TestCase]:
    """Yield a TestCase for each <testcase> element in the XML file at path.

    Files that cannot be parsed are logged and skipped instead of raising.
    """
    try:
        dom = minidom.parse(path)
    except Exception as e:
        print(f"Error occurred when parsing {path}: {e}")
        return
    yield from (TestCase(elem) for elem in dom.getElementsByTagName('testcase'))
def get_recursive_files(folder: str, extension: str) -> Iterable[str]:
    """Recursively yield every file under folder whose suffix == extension.

    Unlike glob(os.path.join(folder, '**', f'*{extension}')), this also
    finds entries whose names start with '.', which glob treats as hidden
    on Unix platforms.
    """
    assert extension.startswith(".")
    for root, _, files in os.walk(folder):
        yield from (os.path.join(root, fname)
                    for fname in files
                    if os.path.splitext(fname)[1] == extension)
def parse_reports(folder: str) -> Tuple[Dict[str, TestFile], Dict[str, DuplicatedDict]]:
    """Parse every XML report under folder.

    Returns (tests_by_file, duplicated_tests_by_file), both keyed by the
    test filename recovered from the report directory layout.
    """
    tests_by_file: Dict[str, TestFile] = dict()
    duplicated_tests_by_file: Dict[str, DuplicatedDict] = dict()
    for report in get_recursive_files(folder, ".xml"):
        report_path = Path(report)
        # basename of the directory of test-report is the test filename,
        # with '.' separators restored to '/'
        test_filename = report_path.parent.name.replace('.', '/')
        # test type is the parent directory (only applies to dist-*)
        # See: CUSTOM_HANDLERS in test/run_test.py
        test_type = report_path.parent.parent.name
        duplicated = duplicated_tests_by_file.setdefault(test_filename, dict())
        if test_filename not in tests_by_file:
            tests_by_file[test_filename] = TestFile(test_filename)
        for test_case in parse_report(report):
            tests_by_file[test_filename].append(test_case, test_type, duplicated)
    return tests_by_file, duplicated_tests_by_file
def process_intentional_test_runs(runs: List[TestCase]) -> Tuple[int, int]:
    """Classify the repeated runs of one test case and sanity-check them.

    Returns (num_green, num_red): successful-ish runs vs failing-ish runs.
    Raises RuntimeWarning when the duplicate runs do not look like an
    intentional rerun (flaky-test retry) pattern.
    """
    num_fail = 0
    num_expected_fail = 0
    num_pass = 0
    num_unexpected_success = 0
    num_errored = 0
    num_skipped = 0
    # Each run is counted in exactly one bucket; the elif order defines the
    # precedence when a case carries several flags.
    for test_run in runs:
        if test_run.failed:
            num_fail += 1
        elif test_run.expected_failure:
            num_expected_fail += 1
        elif test_run.unexpected_success:
            num_unexpected_success += 1
        elif test_run.errored:
            num_errored += 1
        elif test_run.skipped:
            num_skipped += 1
        else:
            num_pass += 1
    REPEAT_TEST_FOR_TYPES_TESTS = [
        "test_data_parallel_module",
        "test_data_parallel_module_kwargs_only",
        "test_data_parallel_module_kwargs_only_empty_list",
        "test_data_parallel_module_kwargs_only_empty_dict",
        "test_data_parallel_module_kwargs_only_empty_tuple"
    ]
    # Do not run checks for tests that use repeat_test_for_types decorator as they do not go well with our retry
    # functionality. Once issue https://github.com/pytorch/pytorch/issues/69865 is fixed, we should remove the exception
    # NOTE(review): `test_run` below is the leaked loop variable, i.e. only
    # the *last* run's name/class are inspected here -- presumably all runs
    # share the same name; confirm against the callers.
    if not any([x in test_run.name for x in REPEAT_TEST_FOR_TYPES_TESTS]):
        err_msg = f'Warning: unintentional test case duplicates found for {test_run.name} in suite {test_run.class_name}.'
        # In report-only mode (default) failures are reported but not acted
        # upon; PYTORCH_OVERRIDE_FLAKY_SIGNAL=1 flips to rerun-to-green mode.
        report_only = os.getenv('PYTORCH_OVERRIDE_FLAKY_SIGNAL') != '1'
        # Precedence note: `and` binds tighter than `or`, so this reads as
        # (report_only and no red runs) or (override mode and no xfails).
        if report_only and num_fail + num_errored + num_unexpected_success < 1 or not report_only and num_expected_fail < 1:
            raise RuntimeWarning(f'{err_msg} Intentional reruns are only triggered when the first run fails or errors, but'
                                 ' we found no failures nor errors.')
        if num_unexpected_success + num_expected_fail < 1:
            raise RuntimeWarning(f'{err_msg} Intentional reruns should raise at least one unexpected success or expected '
                                 'failure, but none have been found.')
        if report_only and num_pass != num_unexpected_success:
            raise RuntimeWarning(f'{err_msg} Every success in an intentional rerun is shadowed by one unexpected success.'
                                 f'However, successes = {num_pass} and unexpected successes = {num_unexpected_success}')
        if not report_only and num_pass > 1:
            raise RuntimeWarning(f'{err_msg} There should be at most 1 successful run in an intentional rerun that stops'
                                 f' at first success. The number of successful runs = {num_pass}')
        if num_skipped > 0:
            raise RuntimeWarning(f'{err_msg} No skips should occur in intentional reruns, but skips = {num_skipped}')
    return max(num_unexpected_success, num_pass), num_fail + num_expected_fail + num_errored
def assemble_flaky_test_stats(duplicated_tests_by_file: Dict[str, DuplicatedDict]) -> Any:
    """Derive flaky-test records from duplicated runs and upload them.

    A case counts as flaky when at least one of its reruns went green.
    Records are written both to RDS and (per record) to S3 so they flow
    into Rockset.
    """
    flaky_tests = []
    # Prefer the GitHub Actions run id; fall back to CircleCI's workflow id.
    workflow_id = os.environ.get("GITHUB_RUN_ID", os.environ.get("CIRCLE_WORKFLOW_ID", None))
    for file_name, suite_to_dict in duplicated_tests_by_file.items():
        for suite_name, testcase_to_runs in suite_to_dict.items():
            for testcase_name, list_of_runs in testcase_to_runs.items():
                num_green, num_red = process_intentional_test_runs(list_of_runs)
                if num_green > 0:  # Otherwise, it's likely just a failing test
                    flaky_tests.append({
                        "name": testcase_name,
                        "suite": suite_name,
                        "file": file_name,
                        "num_green": num_green,
                        "num_red": num_red,
                    })
    if len(flaky_tests) > 0:
        # write to RDS
        register_rds_schema("flaky_tests", schema_from_sample(flaky_tests[0]))
        rds_write("flaky_tests", flaky_tests, only_on_master=False)
        # write to S3 to go to Rockset as well
        import uuid
        for flaky_test in flaky_tests:
            # NOTE(review): GHA_WORKFLOW_JOB_ID is read with [] -- this will
            # KeyError outside GitHub Actions; confirm that is intended.
            flaky_test["job_id"] = os.environ["GHA_WORKFLOW_JOB_ID"]
            flaky_test["workflow_id"] = workflow_id
            key = f"flaky_tests/{workflow_id}/{uuid.uuid4()}.json"
            obj = get_S3_object_from_bucket("ossci-raw-job-status", key)
            obj.put(Body=json.dumps(flaky_test), ContentType="application/json")
def build_info() -> ReportMetaMeta:
    """Collect build metadata from the environment (GHA first, CircleCI fallback)."""
    def env(gha_var: str, circle_var: str, default: str = "") -> str:
        # GitHub Actions variable names win; fall back to CircleCI spelling.
        return os.environ.get(gha_var, os.environ.get(circle_var, default))

    return {
        "build_pr": env("PR_NUMBER", "CIRCLE_PR_NUMBER"),
        "build_tag": env("TAG", "CIRCLE_TAG"),
        "build_sha1": env("SHA1", "CIRCLE_SHA1"),
        "build_base_commit": get_base_commit(env("SHA1", "CIRCLE_SHA1", "HEAD")),
        "build_branch": env("BRANCH", "CIRCLE_BRANCH"),
        "build_job": env("JOB_BASE_NAME", "CIRCLE_JOB"),
        "build_workflow_id": env("WORKFLOW_ID", "CIRCLE_WORKFLOW_ID"),
        # mtime of this script stands in for the build start time.
        "build_start_time_epoch": str(int(os.path.getmtime(os.path.realpath(__file__)))),
    }
def build_message(
        test_file: TestFile,
        test_suite: TestSuite,
        test_case: TestCase,
        meta_info: ReportMetaMeta
) -> Dict[str, Dict[str, Any]]:
    """Shape one test case into the Scribe logging payload."""
    normal_fields: Dict[str, Any] = dict(meta_info)
    normal_fields.update({
        "test_filename": test_file.name,
        "test_suite_name": test_suite.name,
        "test_case_name": test_case.name,
    })
    int_fields = {
        "time": int(time.time()),
        "test_total_count": 1,
        "test_total_time": int(test_case.time * 1000),  # milliseconds
        "test_failed_count": 1 if test_case.failed else 0,
        "test_skipped_count": 1 if test_case.skipped else 0,
        "test_errored_count": 1 if test_case.errored else 0,
    }
    return {"normal": normal_fields, "int": int_fields}
def send_report_to_scribe(reports: Dict[str, TestFile]) -> None:
    """Log every parsed test case to the perfpipe_pytorch_test_times category."""
    meta_info = build_info()
    entries = []
    for test_file in reports.values():
        for test_suite in test_file.test_suites.values():
            for test_case in test_suite.test_cases.values():
                entries.append({
                    "category": "perfpipe_pytorch_test_times",
                    "message": json.dumps(build_message(test_file, test_suite, test_case, meta_info)),
                    "line_escape": False,
                })
    # no need to print send result as exceptions will be captured and print later.
    send_to_scribe(json.dumps(entries))
def assemble_s3_object(
    reports: Dict[str, TestFile],
    *,
    total_seconds: float,
) -> Version2Report:
    """Convert parsed reports into the version-2 JSON document stored in S3."""
    def case_status(case):
        # Returns 'errored' | 'failed' | 'skipped' | None (passed).
        # DISCLAIMER: expected failures / unexpected successes are not
        # represented here.
        if case.errored:
            return 'errored'
        if case.failed:
            return 'failed'
        if case.skipped:
            return 'skipped'
        return None

    files = {}
    for file_name, test_file in reports.items():
        suites = {}
        for suite_name, suite in test_file.test_suites.items():
            suites[suite_name] = {
                'total_seconds': suite.total_time,
                'cases': {
                    case_name: {'seconds': case.time, 'status': case_status(case)}
                    for case_name, case in suite.test_cases.items()
                },
            }
        files[file_name] = {
            'total_seconds': test_file.total_time,
            'suites': suites,
        }
    return {
        **build_info(),  # type: ignore[misc]
        'total_seconds': total_seconds,
        'format_version': 2,
        'files': files,
    }
def send_report_to_s3(head_report: Version2Report) -> None:
    """Upload the head report to the ossci-metrics bucket, bz2-compressed."""
    job = os.getenv('JOB_BASE_NAME', os.environ.get('CIRCLE_JOB'))
    sha1 = os.environ.get('SHA1', os.environ.get('CIRCLE_SHA1', ''))
    now = datetime.datetime.utcnow().isoformat()
    # SHARD_NUMBER and TEST_CONFIG are specific to GHA, as these details
    # would be included in CIRCLE_JOB already
    shard = os.environ.get('SHARD_NUMBER', '')
    test_config = os.environ.get('TEST_CONFIG')
    config_part = '' if test_config is None else f'-{test_config}'
    job_report_dirname = f'{job}{config_part}{shard}'
    key = f'test_time/{sha1}/{job_report_dirname}/{now}Z.json.bz2'  # Z meaning UTC
    obj = get_S3_object_from_bucket('ossci-metrics', key)
    # bz2 output is smaller than gzip, the compression penalty is only about
    # half a second for JSON files of a few megabytes, and (unlike zlib
    # output) it round-trips cleanly through the bzip2 command-line tools.
    obj.put(Body=bz2.compress(json.dumps(head_report).encode()))
def upload_failures_to_rds(reports: Dict[str, TestFile]) -> None:
    """Save only the failed/errored cases to the DB.

    With 40k+ tests, persisting every case for every commit is not feasible
    (and most of them are uninteresting), so the report is filtered down to
    just the failures before writing.
    """
    failures = [
        {
            "name": case.name,
            "suite": suite.name,
            "file": file.name,
            "status": "failure" if case.failed else "error",
        }
        for file in reports.values()
        for suite in file.test_suites.values()
        for case in suite.test_cases.values()
        if case.errored or case.failed
    ]
    if failures:
        register_rds_schema("test_failures", schema_from_sample(failures[0]))
        rds_write("test_failures", failures, only_on_master=False)
def print_regressions(head_report: Report, *, num_prev_commits: int) -> None:
    """Print the historic test-time comparison for the current commit."""
    sha1 = os.environ.get("SHA1", os.environ.get("CIRCLE_SHA1", "HEAD"))
    base = get_base_commit(sha1)

    count_spec = f"{base}..{sha1}"
    intermediate_commits = int(subprocess.check_output(
        ["git", "rev-list", "--count", count_spec],
        encoding="ascii"
    ))
    ancestry_path = int(subprocess.check_output(
        ["git", "rev-list", "--ancestry-path", "--count", count_spec],
        encoding="ascii",
    ))

    commits = subprocess.check_output(
        ["git", "rev-list", f"--max-count={num_prev_commits+1}", base],
        encoding="ascii",
    ).splitlines()
    # if current commit is already on master, we need to exclude it from
    # this history; otherwise we include the merge-base
    on_master = base == sha1
    commits = commits[1:] if on_master else commits[:-1]

    job = os.environ.get("JOB_BASE_NAME", "")
    objects: Dict[Commit, List[Report]] = defaultdict(list)
    for commit in commits:
        # Touch the defaultdict so commits without any stats still appear.
        objects[commit]
        summaries = get_test_stats_summaries_for_job(sha=commit, job_prefix=job)
        for _, summary in summaries.items():
            objects[commit].extend(summary)

    print()
    print(regression_info(
        head_sha=sha1,
        head_report=head_report,
        base_reports=objects,
        job_name=job,
        on_master=on_master,
        ancestry_path=ancestry_path - 1,
        other_ancestors=intermediate_commits - ancestry_path,
    ), end="")
def positive_integer(value: str) -> int:
    """argparse type: parse value as an int and require it to be >= 1.

    Raises:
        argparse.ArgumentTypeError: if the parsed value is less than 1.
        ValueError: if value is not a valid integer literal.
    """
    # Fixed return annotation: this validator parses and returns an int,
    # not a float.
    parsed = int(value)
    if parsed < 1:
        raise argparse.ArgumentTypeError(f"{value} is not a natural number")
    return parsed
def positive_float(value: str) -> float:
    """argparse type: parse value as a float and require it to be strictly positive."""
    number = float(value)
    if number <= 0.0:
        raise argparse.ArgumentTypeError(
            f"{value} is not a positive rational number")
    return number
def reports_has_no_tests(reports: Dict[str, TestFile]) -> bool:
    """Return True iff no suite in any report contains a single test case."""
    return all(
        not suite.test_cases
        for test_file in reports.values()
        for suite in test_file.test_suites.values()
    )
# Entry point: parse the XML report folder, upload stats, and (optionally)
# compare against historic S3 data.
if __name__ == '__main__':
    import argparse
    import sys
    parser = argparse.ArgumentParser(
        "Print statistics from test XML output.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument(
        "--longest-of-class",
        type=positive_integer,
        default=3,
        metavar="N",
        help="how many longest tests to show for each class",
    )
    parser.add_argument(
        "--class-print-threshold",
        type=positive_float,
        default=1.0,
        metavar="N",
        help="Minimal total time to warrant class report",
    )
    parser.add_argument(
        "--longest-of-run",
        type=positive_integer,
        default=10,
        metavar="N",
        help="how many longest tests to show from the entire run",
    )
    # S3 flags are only registered when boto3 is importable.
    # NOTE(review): args.upload_to_s3 / args.compare_with_s3 are accessed
    # unconditionally below, which would raise AttributeError when
    # HAVE_BOTO3 is false -- confirm boto3 is always present in CI.
    if HAVE_BOTO3:
        parser.add_argument(
            "--upload-to-s3",
            action="store_true",
            help="upload test time to S3 bucket",
        )
        parser.add_argument(
            "--compare-with-s3",
            action="store_true",
            help="download test times for base commits and compare",
        )
    parser.add_argument(
        "--num-prev-commits",
        type=positive_integer,
        default=10,
        metavar="N",
        help="how many previous commits to compare test times with",
    )
    parser.add_argument(
        "--use-json",
        metavar="FILE.json",
        help="compare S3 with JSON file, instead of the test report folder",
    )
    parser.add_argument(
        "folder",
        help="test report folder",
    )
    args = parser.parse_args()
    # Parse all XML reports, then push flaky-test stats and failures.
    reports_by_file, duplicated_tests_by_file = parse_reports(args.folder)
    assemble_flaky_test_stats(duplicated_tests_by_file)
    upload_failures_to_rds(reports_by_file)
    if reports_has_no_tests(reports_by_file):
        print(f"No tests in reports found in {args.folder}")
        sys.exit(0)
    # Scribe upload is best-effort: failures are printed, not fatal.
    try:
        send_report_to_scribe(reports_by_file)
    except Exception as e:
        print(f"ERROR ENCOUNTERED WHEN UPLOADING TO SCRIBE: {e}")
    # Sum up suite times to get the run's grand total.
    # NOTE(review): the loop variable `test_filename` is actually a
    # TestFile object, not a filename.
    total_time = 0.0
    for filename, test_filename in reports_by_file.items():
        for suite_name, test_suite in test_filename.test_suites.items():
            total_time += test_suite.total_time
    obj = assemble_s3_object(reports_by_file, total_seconds=total_time)
    if args.upload_to_s3:
        try:
            send_report_to_s3(obj)
        except Exception as e:
            print(f"ERROR ENCOUNTERED WHEN UPLOADING TO S3: {e}")
    if args.compare_with_s3:
        head_json = obj
        if args.use_json:
            head_json = json.loads(Path(args.use_json).read_text())
        try:
            print_regressions(head_json, num_prev_commits=args.num_prev_commits)
        except Exception as e:
            print(f"ERROR ENCOUNTERED WHEN COMPARING AGAINST S3: {e}")
| 37.319013 | 129 | 0.586842 |
1ffd656cc852793abdeb198e971428d5a47de6aa | 853 | py | Python | PyCIP/DataTypesModule/Constants.py | cpchrispye/PyCIP | 8cb2ccabbc45ee9eb24b1b4772e517df2b4078bd | [
"MIT"
] | 13 | 2016-06-14T10:35:06.000Z | 2021-03-16T11:42:02.000Z | PyCIP/DataTypesModule/Constants.py | cpchrispye/PyCIP | 8cb2ccabbc45ee9eb24b1b4772e517df2b4078bd | [
"MIT"
] | 1 | 2021-12-21T21:08:27.000Z | 2021-12-21T21:08:27.000Z | PyCIP/DataTypesModule/Constants.py | cpchrispye/PyCIP | 8cb2ccabbc45ee9eb24b1b4772e517df2b4078bd | [
"MIT"
] | 2 | 2020-05-20T14:01:34.000Z | 2020-12-07T02:31:49.000Z | from enum import IntEnum
class CIPServiceCode(IntEnum):
    """CIP service codes used when building explicit-messaging requests."""
    get_att_single = 0x0e
    set_att_single = 0x10
    get_att_all = 0x01
    set_att_all = 0x02
    unconnected_Send = 0x52
    forward_open = 0x54
    forward_close = 0x4E
class SegmentType(IntEnum):
    """High-level segment type (the top 3 bits of an EPATH segment byte)."""
    PortSegment = 0
    LogicalSegment = 1
    NetworkSegment = 2
    SymbolicSegment = 3
    DataSegment = 4
    DataType_c = 5
    DataType_e = 6
    Reserved = 7
class LogicalType(IntEnum):
    """Logical-segment type field (what the logical value addresses)."""
    ClassID = 0
    InstanceID = 1
    MemberID = 2
    ConnectionPoint = 3
    AttributeID = 4
    Special = 5
    ServiceID = 6
    ExtendedLogical = 7
class LogicalFormat(IntEnum):
    """Logical-segment format field (width of the logical value)."""
    bit_8 = 0
    bit_16 = 1
    bit_32 = 2
    Reserved = 3
class DataSubType(IntEnum):
    """Sub-type field of a data segment (SegmentType.DataSegment)."""
    # Restored: the last member line was fused with dataset-dump residue
    # ("ANSI = 9 | 18.955556 | ..."), which broke the syntax.
    SimpleData = 0
    ANSI = 9
3c963860a40bb39d531ce598c6a8e5e76a92f4f2 | 503 | py | Python | zksync/metadata.py | zksync-sdk/schnorr-musig-sdk-python | e853b1fc54fcff1c896a6aaac44b0c72dd393a01 | [
"MIT"
] | 1 | 2021-05-19T10:05:37.000Z | 2021-05-19T10:05:37.000Z | zksync/metadata.py | zksync-sdk/schnorr-musig-sdk-python | e853b1fc54fcff1c896a6aaac44b0c72dd393a01 | [
"MIT"
] | 2 | 2021-03-10T12:38:40.000Z | 2021-05-21T10:28:11.000Z | zksync/metadata.py | zksync-sdk/schnorr-musig-sdk-python | e853b1fc54fcff1c896a6aaac44b0c72dd393a01 | [
"MIT"
] | 1 | 2021-05-19T10:05:38.000Z | 2021-05-19T10:05:38.000Z | # -*- coding: utf-8 -*-
"""Project metadata
Information describing the project.
"""
# The package name, which is also the "UNIX name" for the project.
package = 'zksync'
project = "schnorr-musig-sdk-python"
project_no_spaces = project.replace(' ', '')
version = '0.1'
description = 'TBC Description'
authors = ['kotsubavictor']
authors_string = ', '.join(authors)
emails = ['kotsubavictor@gmail.com']
copyright = '2020 ' + authors_string
url = 'https://github.com/zksync-sdk/schnorr-musig-sdk-python'
| 27.944444 | 66 | 0.709742 |
19e831f708b27487092efcd5475de7c7f39a9b80 | 515 | py | Python | answers/Tanmay Jaiswal/Day22/question1.py | sequel-tj/30-DaysOfCode-March-2021 | 0b831d03b7d717310e412f142f5bcfb049c757e9 | [
"MIT"
] | 22 | 2021-03-16T14:07:47.000Z | 2021-08-13T08:52:50.000Z | answers/Tanmay Jaiswal/Day22/question1.py | sequel-tj/30-DaysOfCode-March-2021 | 0b831d03b7d717310e412f142f5bcfb049c757e9 | [
"MIT"
] | 174 | 2021-03-16T21:16:40.000Z | 2021-06-12T05:19:51.000Z | answers/Tanmay Jaiswal/Day22/question1.py | sequel-tj/30-DaysOfCode-March-2021 | 0b831d03b7d717310e412f142f5bcfb049c757e9 | [
"MIT"
] | 135 | 2021-03-16T16:47:12.000Z | 2021-06-27T14:22:38.000Z | print("Input: ",end="")
# Read the string to reduce from stdin.
string = input()
def stack(s):
    """Print the elements of stack s bottom-to-top with no separator/newline.

    Recursive: pops everything off, prints on the way back up, and pushes
    each element back, so s is left unchanged on return.
    """
    if len(s) == 0:
        return
    x = s.pop()
    stack(s)
    print(x, end="")
    s.append(x)
def min(s):
    """Print s with adjacent equal pairs repeatedly removed.

    NOTE: intentionally shadows the builtin min() -- kept for compatibility
    with the existing call sites in this script.
    """
    if not s:
        # An empty input reduces to the empty string; nothing to print.
        # (Previously this crashed with an IndexError on s[0].)
        return
    Stack = [s[0]]
    for ch in s[1:]:
        # A character equal to the current top cancels it; otherwise push.
        if Stack and Stack[-1] == ch:
            Stack.pop()
        else:
            Stack.append(ch)
    stack(Stack)
print("Output: ",end="")
min(string)
| 13.918919 | 35 | 0.413592 |
ac9de92ba6b0982f2a22af3b15658810ce41aa40 | 727 | py | Python | flask_app/app.py | hkato/sample-azure-functions-wsgi-flask | c0bc8da3d8924ee143a93d9ac6109b3e86c328ae | [
"MIT"
] | 2 | 2021-09-27T03:53:53.000Z | 2022-03-13T01:53:41.000Z | flask_app/app.py | hkato/sample-azure-functions-wsgi-flask | c0bc8da3d8924ee143a93d9ac6109b3e86c328ae | [
"MIT"
] | 1 | 2022-03-13T02:04:55.000Z | 2022-03-13T02:04:55.000Z | flask_app/app.py | hkato/sample-azure-functions-wsgi-flask | c0bc8da3d8924ee143a93d9ac6109b3e86c328ae | [
"MIT"
] | null | null | null | from flask import Flask, request
# WSGI application instance used by all the route decorators below.
app = Flask(__name__)
@app.route("/hello", methods=['GET', 'POST'])
def hello():
name = request.args.get('name')
if not name:
req_body = request.get_json()
if req_body:
name = req_body['name']
if name:
return f"Hello, {name}. This HTTP triggered function executed successfully."
else:
return "This HTTP triggered function executed successfully. Pass a name in the query string or in the request body for a personalized response."
@app.route("/foo", methods=['GET'])
def foo():
return "test"
@app.route("/bar", methods=['POST'])
def bar():
return request.get_json()
if __name__ == "__main__":
app.run(debug=True)
| 22.71875 | 152 | 0.64099 |
329afb7ac79cd6e2f9963b5f85845b472f52dd14 | 2,742 | py | Python | src/awkward/_v2/numba.py | douglasdavis/awkward-1.0 | f00775803a5568efb0a8e2dae3b1a4f23228fa40 | [
"BSD-3-Clause"
] | 2 | 2019-09-12T03:07:23.000Z | 2019-09-27T05:32:07.000Z | src/awkward/_v2/numba.py | douglasdavis/awkward-1.0 | f00775803a5568efb0a8e2dae3b1a4f23228fa40 | [
"BSD-3-Clause"
] | 1 | 2019-09-26T17:57:45.000Z | 2019-09-26T17:57:45.000Z | src/awkward/_v2/numba.py | douglasdavis/awkward-1.0 | f00775803a5568efb0a8e2dae3b1a4f23228fa40 | [
"BSD-3-Clause"
] | null | null | null | # BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
import awkward as ak
# Module-level guard so the numba import/version check runs at most once.
checked_version = False
def register_and_check():
    """Verify numba is installed and recent enough, then register Awkward's
    numba extensions (idempotent)."""
    global checked_version
    if not checked_version:
        try:
            import numba
        except ImportError:
            raise ImportError(
                """install the 'numba' package with:
pip install numba --upgrade
or
conda install numba"""
            )
        # NOTE: the guard is set *before* the version check below, so a
        # failed version check is not re-raised on subsequent calls.
        checked_version = True
        if ak._v2._util.parse_version(numba.__version__) < ak._v2._util.parse_version(
            "0.50"
        ):
            raise ImportError(
                "Awkward Array can only work with numba 0.50 or later "
                "(you have version {})".format(numba.__version__)
            )
    # register() itself is a no-op once the types have been installed.
    register()
def register():
    """Install Awkward's numba type/model classes onto ak._v2.numba and
    register typeof implementations for the high-level objects (idempotent)."""
    # Already registered? ArrayViewType is used as the sentinel attribute.
    if hasattr(ak._v2.numba, "ArrayViewType"):
        return
    import numba
    # Importing these modules triggers their numba extension registrations.
    import awkward._v2._connect.numba.arrayview
    import awkward._v2._connect.numba.layout
    import awkward._v2._connect.numba.builder
    # Re-export the extension classes on the public ak._v2.numba namespace.
    n = ak._v2.numba
    n.ArrayViewType = awkward._v2._connect.numba.arrayview.ArrayViewType
    n.ArrayViewModel = awkward._v2._connect.numba.arrayview.ArrayViewModel
    n.RecordViewType = awkward._v2._connect.numba.arrayview.RecordViewType
    n.RecordViewModel = awkward._v2._connect.numba.arrayview.RecordViewModel
    n.ContentType = awkward._v2._connect.numba.layout.ContentType
    n.NumpyArrayType = awkward._v2._connect.numba.layout.NumpyArrayType
    n.RegularArrayType = awkward._v2._connect.numba.layout.RegularArrayType
    n.ListArrayType = awkward._v2._connect.numba.layout.ListArrayType
    n.IndexedArrayType = awkward._v2._connect.numba.layout.IndexedArrayType
    n.IndexedOptionArrayType = awkward._v2._connect.numba.layout.IndexedOptionArrayType
    n.ByteMaskedArrayType = awkward._v2._connect.numba.layout.ByteMaskedArrayType
    n.BitMaskedArrayType = awkward._v2._connect.numba.layout.BitMaskedArrayType
    n.UnmaskedArrayType = awkward._v2._connect.numba.layout.UnmaskedArrayType
    n.RecordArrayType = awkward._v2._connect.numba.layout.RecordArrayType
    n.UnionArrayType = awkward._v2._connect.numba.layout.UnionArrayType
    n.ArrayBuilderType = awkward._v2._connect.numba.builder.ArrayBuilderType
    n.ArrayBuilderModel = awkward._v2._connect.numba.builder.ArrayBuilderModel
    # Let numba infer the type of high-level objects from their numba_type
    # property.
    @numba.extending.typeof_impl.register(ak._v2.highlevel.Array)
    def typeof_Array(obj, c):
        return obj.numba_type
    @numba.extending.typeof_impl.register(ak._v2.highlevel.Record)
    def typeof_Record(obj, c):
        return obj.numba_type
    @numba.extending.typeof_impl.register(ak._v2.highlevel.ArrayBuilder)
    def typeof_ArrayBuilder(obj, c):
        return obj.numba_type
94399425af9c5392225995b160c8417ba3fc2126 | 44,063 | py | Python | python/ray/serve/api.py | jamesliu/ray | 11ab412db1fa3603a3006e8ed414e80dd1f11c0c | [
"Apache-2.0"
] | 2 | 2022-03-25T04:15:10.000Z | 2022-03-25T04:15:15.000Z | python/ray/serve/api.py | jamesliu/ray | 11ab412db1fa3603a3006e8ed414e80dd1f11c0c | [
"Apache-2.0"
] | 227 | 2021-10-01T08:00:01.000Z | 2021-12-28T16:47:26.000Z | python/ray/serve/api.py | gramhagen/ray | c18caa4db36d466718bdbcb2229aa0b2dc03da1f | [
"Apache-2.0"
] | 1 | 2020-12-03T20:36:00.000Z | 2020-12-03T20:36:00.000Z | import asyncio
import atexit
import collections
import inspect
import logging
import random
import re
import time
from dataclasses import dataclass
from functools import wraps
from typing import Any, Callable, Dict, Optional, Tuple, Type, Union, overload
from fastapi import APIRouter, FastAPI
from starlette.requests import Request
from uvicorn.config import Config
from uvicorn.lifespan.on import LifespanOn
from ray.actor import ActorHandle
from ray.serve.common import DeploymentInfo, GoalId, ReplicaTag
from ray.serve.config import (AutoscalingConfig, DeploymentConfig, HTTPOptions,
ReplicaConfig)
from ray.serve.constants import (DEFAULT_CHECKPOINT_PATH, HTTP_PROXY_TIMEOUT,
SERVE_CONTROLLER_NAME, MAX_CACHED_HANDLES,
CONTROLLER_MAX_CONCURRENCY)
from ray.serve.controller import ServeController
from ray.serve.exceptions import RayServeException
from ray.serve.handle import RayServeHandle, RayServeSyncHandle
from ray.serve.http_util import ASGIHTTPSender, make_fastapi_class_based_view
from ray.serve.utils import (LoggingContext, ensure_serialization_context,
format_actor_name, get_current_node_resource_key,
get_random_letters, logger, DEFAULT)
from ray.util.annotations import PublicAPI
import ray
from ray import cloudpickle
# Per-replica context, populated inside a running deployment replica.
_INTERNAL_REPLICA_CONTEXT = None
# Cached Client singleton for the connected Serve instance.
_global_client = None
# Matches a version-4 UUID (the format of anonymous Ray namespaces).
_UUID_RE = re.compile(
    "[a-f0-9]{8}-[a-f0-9]{4}-4[a-f0-9]{3}-[89aAbB][a-f0-9]{3}-[a-f0-9]{12}")
def _get_controller_namespace(detached):
    """Return the Ray namespace in which the Serve controller should live.

    A detached controller started from an anonymous (UUID-shaped) namespace
    is placed in the well-known "serve" namespace so it can be found later.
    """
    current = ray.get_runtime_context().namespace
    if not detached:
        return current
    # Start controller in "serve" namespace if detached and currently
    # in anonymous namespace.
    if _UUID_RE.fullmatch(current) is not None:
        return "serve"
    return current
def _get_global_client():
    """Return the cached Serve client, connecting on first use."""
    return _global_client if _global_client is not None else _connect()
def _set_global_client(client):
    # Replace the module-level cached client (used by start/connect/shutdown).
    global _global_client
    _global_client = client
@dataclass
class ReplicaContext:
    """Stores data for Serve API calls from within deployments."""
    # Name of the deployment this replica belongs to.
    deployment: str
    # Unique tag identifying this replica.
    replica_tag: ReplicaTag
    # Actor name of the Serve controller managing this replica.
    _internal_controller_name: str
    # The user's deployment callable (class instance or function).
    servable_object: Callable
def _set_internal_replica_context(
        deployment: str,
        replica_tag: ReplicaTag,
        controller_name: str,
        servable_object: Callable,
):
    # Install the per-replica context; called when a replica starts up so
    # Serve API calls made from user code can discover their surroundings.
    global _INTERNAL_REPLICA_CONTEXT
    _INTERNAL_REPLICA_CONTEXT = ReplicaContext(
        deployment, replica_tag, controller_name, servable_object)
def _ensure_connected(f: Callable) -> Callable:
@wraps(f)
def check(self, *args, **kwargs):
if self._shutdown:
raise RayServeException("Client has already been shut down.")
return f(self, *args, **kwargs)
return check
class Client:
    def __init__(self,
                 controller: ActorHandle,
                 controller_name: str,
                 detached: bool = False):
        """Wrap a handle to the Serve controller actor.

        detached controllers outlive this client; non-detached ones are
        shut down when the client goes away.
        """
        self._controller = controller
        self._controller_name = controller_name
        self._detached = detached
        self._shutdown = False
        # Fetch immutable config/URL from the controller up front.
        self._http_config: HTTPOptions = ray.get(
            controller.get_http_config.remote())
        self._root_url = ray.get(self._controller.get_root_url.remote())
        # Each handle has the overhead of long poll client, therefore cached.
        self.handle_cache = dict()
        self._evicted_handle_keys = set()
        # NOTE(edoakes): Need this because the shutdown order isn't guaranteed
        # when the interpreter is exiting so we can't rely on __del__ (it
        # throws a nasty stacktrace).
        if not self._detached:
            def shutdown_serve_client():
                self.shutdown()
            atexit.register(shutdown_serve_client)
    @property
    def root_url(self):
        # Root HTTP URL of the Serve instance, fetched once at construction.
        return self._root_url
    def __del__(self):
        # Non-detached instances are torn down when the client is collected;
        # detached ones intentionally survive.
        if not self._detached:
            logger.debug("Shutting down Ray Serve because client went out of "
                         "scope. To prevent this, either keep a reference to "
                         "the client or use serve.start(detached=True).")
            self.shutdown()
def __reduce__(self):
raise RayServeException(("Ray Serve client cannot be serialized."))
    def shutdown(self) -> None:
        """Completely shut down the connected Serve instance.

        Shuts down all processes and deletes all state associated with the
        instance.
        """
        if (not self._shutdown) and ray.is_initialized():
            # Wait for every deployment's shutdown goal to complete before
            # killing the controller itself.
            for goal_id in ray.get(self._controller.shutdown.remote()):
                self._wait_for_goal(goal_id)
            ray.kill(self._controller, no_restart=True)
            # Wait for the named actor entry gets removed as well.
            # NOTE(review): this polls without sleeping (busy-wait) and gives
            # up with a warning after ~5 seconds.
            started = time.time()
            while True:
                try:
                    controller_namespace = _get_controller_namespace(
                        self._detached)
                    ray.get_actor(
                        self._controller_name, namespace=controller_namespace)
                    if time.time() - started > 5:
                        logger.warning(
                            "Waited 5s for Serve to shutdown gracefully but "
                            "the controller is still not cleaned up. "
                            "You can ignore this warning if you are shutting "
                            "down the Ray cluster.")
                        break
                except ValueError:  # actor name is removed
                    break
            self._shutdown = True
    def _wait_for_goal(self,
                       goal_id: Optional[GoalId],
                       timeout: Optional[float] = None) -> bool:
        """Block until the controller goal completes (or timeout elapses).

        Returns True when the goal finished (also for a None goal_id),
        False on timeout. Re-raises any exception the goal recorded.
        """
        if goal_id is None:
            return True
        ready, _ = ray.wait(
            [self._controller.wait_for_goal.remote(goal_id)], timeout=timeout)
        # AsyncGoal could return exception if set, ray.get()
        # retrieves and throws it to user code explicitly.
        if len(ready) == 1:
            async_goal_exception = ray.get(ready)[0]
            if async_goal_exception is not None:
                raise async_goal_exception
            return True
        else:
            return False
    @_ensure_connected
    def deploy(
            self,
            name: str,
            deployment_def: Union[Callable, Type[Callable], str],
            init_args: Tuple[Any],
            init_kwargs: Dict[Any, Any],
            ray_actor_options: Optional[Dict] = None,
            config: Optional[Union[DeploymentConfig, Dict[str, Any]]] = None,
            version: Optional[str] = None,
            prev_version: Optional[str] = None,
            route_prefix: Optional[str] = None,
            url: Optional[str] = None,
            _blocking: Optional[bool] = True) -> Optional[GoalId]:
        """Deploy (or update) a deployment via the Serve controller.

        Builds a ReplicaConfig/DeploymentConfig pair from the arguments and
        submits them to the controller. When ``_blocking`` is True, waits for
        the deployment goal to complete and returns None; otherwise returns
        the GoalId so the caller can wait on it.
        """
        if config is None:
            config = {}
        if ray_actor_options is None:
            ray_actor_options = {}

        # Propagate the current job's runtime_env to the replicas; if the
        # caller supplied one, only fill in a missing working_dir.
        curr_job_env = ray.get_runtime_context().runtime_env
        if "runtime_env" in ray_actor_options:
            ray_actor_options["runtime_env"].setdefault(
                "working_dir", curr_job_env.get("working_dir"))
        else:
            ray_actor_options["runtime_env"] = curr_job_env

        replica_config = ReplicaConfig(
            deployment_def,
            init_args=init_args,
            init_kwargs=init_kwargs,
            ray_actor_options=ray_actor_options)

        # Accept either a raw dict (validated by pydantic) or an already
        # constructed DeploymentConfig.
        if isinstance(config, dict):
            deployment_config = DeploymentConfig.parse_obj(config)
        elif isinstance(config, DeploymentConfig):
            deployment_config = config
        else:
            raise TypeError(
                "config must be a DeploymentConfig or a dictionary.")

        # Autoscaling decisions are driven by queue depth; if the handle can
        # never queue past the target, scale-up would never trigger.
        if deployment_config.autoscaling_config is not None and \
            deployment_config.max_concurrent_queries < deployment_config. \
                autoscaling_config.target_num_ongoing_requests_per_replica:
            logger.warning("Autoscaling will never happen, "
                           "because 'max_concurrent_queries' is less than "
                           "'target_num_ongoing_requests_per_replica' now.")

        goal_id, updating = ray.get(
            self._controller.deploy.remote(name,
                                           deployment_config.to_proto_bytes(),
                                           replica_config, version,
                                           prev_version, route_prefix,
                                           ray.get_runtime_context().job_id))

        # Structured tag appended to log lines for log filtering.
        tag = f"component=serve deployment={name}"

        if updating:
            msg = f"Updating deployment '{name}'"
            if version is not None:
                msg += f" to version '{version}'"
            logger.info(f"{msg}. {tag}")
        else:
            logger.info(f"Deployment '{name}' is already at version "
                        f"'{version}', not updating. {tag}")

        if _blocking:
            self._wait_for_goal(goal_id)

            if url is not None:
                url_part = f" at `{url}`"
            else:
                url_part = ""
            logger.info(
                f"Deployment '{name}{':'+version if version else ''}' is ready"
                f"{url_part}. {tag}")
        else:
            return goal_id
@_ensure_connected
def delete_deployment(self, name: str) -> None:
self._wait_for_goal(
ray.get(self._controller.delete_deployment.remote(name)))
@_ensure_connected
def get_deployment_info(self, name: str) -> Tuple[DeploymentInfo, str]:
return ray.get(self._controller.get_deployment_info.remote(name))
@_ensure_connected
def list_deployments(self) -> Dict[str, Tuple[DeploymentInfo, str]]:
return ray.get(self._controller.list_deployments.remote())
    @_ensure_connected
    def get_handle(
            self,
            deployment_name: str,
            missing_ok: Optional[bool] = False,
            sync: bool = True,
            _internal_pickled_http_request: bool = False,
    ) -> Union[RayServeHandle, RayServeSyncHandle]:
        """Retrieve RayServeHandle for service deployment to invoke it from Python.

        Args:
            deployment_name (str): A registered service deployment.
            missing_ok (bool): If true, then Serve won't check the deployment
                is registered. False by default.
            sync (bool): If true, then Serve will return a ServeHandle that
                works everywhere. Otherwise, Serve will return a ServeHandle
                that's only usable in asyncio loop.

        Returns:
            RayServeHandle
        """
        # Handles are cached per (name, missing_ok, sync) triple; a cached
        # handle is only reusable if it is still polling the controller and
        # was created on the current event loop.
        cache_key = (deployment_name, missing_ok, sync)
        if cache_key in self.handle_cache:
            cached_handle = self.handle_cache[cache_key]
            if cached_handle.is_polling and cached_handle.is_same_loop:
                return cached_handle

        all_endpoints = ray.get(self._controller.get_all_endpoints.remote())
        if not missing_ok and deployment_name not in all_endpoints:
            raise KeyError(f"Deployment '{deployment_name}' does not exist.")

        # Detect whether we are inside a running asyncio loop so we can warn
        # about sync/async handle mismatches below.
        try:
            asyncio_loop_running = asyncio.get_event_loop().is_running()
        except RuntimeError as ex:
            if "There is no current event loop in thread" in str(ex):
                asyncio_loop_running = False
            else:
                raise ex

        if asyncio_loop_running and sync:
            logger.warning(
                "You are retrieving a sync handle inside an asyncio loop. "
                "Try getting client.get_handle(.., sync=False) to get better "
                "performance. Learn more at https://docs.ray.io/en/master/"
                "serve/http-servehandle.html#sync-and-async-handles")

        if not asyncio_loop_running and not sync:
            logger.warning(
                "You are retrieving an async handle outside an asyncio loop. "
                "You should make sure client.get_handle is called inside a "
                "running event loop. Or call client.get_handle(.., sync=True) "
                "to create sync handle. Learn more at https://docs.ray.io/en/"
                "master/serve/http-servehandle.html#sync-and-async-handles")

        if sync:
            handle = RayServeSyncHandle(
                self._controller,
                deployment_name,
                _internal_pickled_http_request=_internal_pickled_http_request,
            )
        else:
            handle = RayServeHandle(
                self._controller,
                deployment_name,
                _internal_pickled_http_request=_internal_pickled_http_request,
            )

        self.handle_cache[cache_key] = handle
        # Warn if this exact key was evicted before: the caller is churning
        # through more distinct handles than the cache can hold.
        if cache_key in self._evicted_handle_keys:
            logger.warning(
                "You just got a ServeHandle that was evicted from internal "
                "cache. This means you are getting too many ServeHandles in "
                "the same process, this will bring down Serve's performance. "
                "Please post a github issue at "
                "https://github.com/ray-project/ray/issues to let the Serve "
                "team to find workaround for your use case.")

        if len(self.handle_cache) > MAX_CACHED_HANDLES:
            # Perform random eviction to keep the handle cache from growing
            # infinitely. We used use WeakValueDictionary but hit
            # https://github.com/ray-project/ray/issues/18980.
            evict_key = random.choice(list(self.handle_cache.keys()))
            self._evicted_handle_keys.add(evict_key)
            self.handle_cache.pop(evict_key)

        return handle
@PublicAPI(stability="beta")
def start(
        detached: bool = False,
        http_options: Optional[Union[dict, HTTPOptions]] = None,
        dedicated_cpu: bool = False,
        _checkpoint_path: str = DEFAULT_CHECKPOINT_PATH,
        **kwargs,
) -> Client:
    """Initialize a serve instance.

    By default, the instance will be scoped to the lifetime of the returned
    Client object (or when the script exits). If detached is set to True, the
    instance will instead persist until serve.shutdown() is called. This is
    only relevant if connecting to a long-running Ray cluster (e.g., with
    ray.init(address="auto") or ray.init("ray://<remote_addr>")).

    Args:
        detached (bool): Whether not the instance should be detached from this
          script. If set, the instance will live on the Ray cluster until it is
          explicitly stopped with serve.shutdown().
        http_options (Optional[Dict, serve.HTTPOptions]): Configuration options
          for HTTP proxy. You can pass in a dictionary or HTTPOptions object
          with fields:

            - host(str, None): Host for HTTP servers to listen on. Defaults to
              "127.0.0.1". To expose Serve publicly, you probably want to set
              this to "0.0.0.0".
            - port(int): Port for HTTP server. Defaults to 8000.
            - middlewares(list): A list of Starlette middlewares that will be
              applied to the HTTP servers in the cluster. Defaults to [].
            - location(str, serve.config.DeploymentMode): The deployment
              location of HTTP servers:

                - "HeadOnly": start one HTTP server on the head node. Serve
                  assumes the head node is the node you executed serve.start
                  on. This is the default.
                - "EveryNode": start one HTTP server per node.
                - "NoServer" or None: disable HTTP server.
            - num_cpus (int): The number of CPU cores to reserve for each
              internal Serve HTTP proxy actor.  Defaults to 0.
        dedicated_cpu (bool): Whether to reserve a CPU core for the internal
          Serve controller actor.  Defaults to False.

    Returns:
        Client: a handle to the started (or already-running) Serve instance.

    Raises:
        ValueError: if a deprecated ``http_*`` keyword argument is passed.
        TimeoutError: if the HTTP proxies fail to start within
            HTTP_PROXY_TIMEOUT seconds.
    """
    # These options moved under http_options; fail loudly rather than
    # silently ignoring them.
    http_deprecated_args = ["http_host", "http_port", "http_middlewares"]
    for key in http_deprecated_args:
        if key in kwargs:
            raise ValueError(
                f"{key} is deprecated, please use serve.start(http_options="
                f'{{"{key}": {kwargs[key]}}}) instead.')
    # Initialize ray if needed.
    ray.worker.global_worker.filter_logs_by_job = False
    if not ray.is_initialized():
        ray.init(namespace="serve")

    controller_namespace = _get_controller_namespace(detached)

    # Reuse an existing Serve instance in this namespace if one is running.
    try:
        client = _get_global_client()
        logger.info("Connecting to existing Serve instance in namespace "
                    f"'{controller_namespace}'.")
        return client
    except RayServeException:
        pass

    # Detached controllers use a fixed name so they can be re-discovered;
    # non-detached ones get a random prefix to avoid collisions.
    if detached:
        controller_name = SERVE_CONTROLLER_NAME
    else:
        controller_name = format_actor_name(get_random_letters(),
                                            SERVE_CONTROLLER_NAME)

    if isinstance(http_options, dict):
        http_options = HTTPOptions.parse_obj(http_options)
    if http_options is None:
        http_options = HTTPOptions()

    controller = ServeController.options(
        num_cpus=(1 if dedicated_cpu else 0),
        name=controller_name,
        lifetime="detached" if detached else None,
        max_restarts=-1,
        max_task_retries=-1,
        # Pin Serve controller on the head node.
        resources={
            get_current_node_resource_key(): 0.01
        },
        namespace=controller_namespace,
        max_concurrency=CONTROLLER_MAX_CONCURRENCY,
    ).remote(
        controller_name,
        http_options,
        _checkpoint_path,
        detached=detached,
    )

    # Block until every HTTP proxy actor reports ready (or time out).
    proxy_handles = ray.get(controller.get_http_proxies.remote())
    if len(proxy_handles) > 0:
        try:
            ray.get(
                [handle.ready.remote() for handle in proxy_handles.values()],
                timeout=HTTP_PROXY_TIMEOUT,
            )
        except ray.exceptions.GetTimeoutError:
            # Bug fix: this message was missing the f-string prefix, so the
            # "{HTTP_PROXY_TIMEOUT}" placeholder was printed literally.
            raise TimeoutError(
                f"HTTP proxies not available after {HTTP_PROXY_TIMEOUT}s.")

    client = Client(controller, controller_name, detached=detached)
    _set_global_client(client)
    logger.info(f"Started{' detached ' if detached else ' '}Serve instance in "
                f"namespace '{controller_namespace}'.")
    return client
def _connect() -> Client:
    """Connect to an existing Serve instance on this Ray cluster.

    If calling from the driver program, the Serve instance on this Ray cluster
    must first have been initialized using `serve.start(detached=True)`.

    If called from within a replica, this will connect to the same Serve
    instance that the replica is running in.

    Returns:
        Client: a client connected to the running Serve controller.

    Raises:
        RayServeException: if no Serve instance is running on this cluster.
    """
    # Initialize ray if needed.
    ray.worker.global_worker.filter_logs_by_job = False
    if not ray.is_initialized():
        ray.init(namespace="serve")

    # When running inside of a replica, _INTERNAL_REPLICA_CONTEXT is set to
    # ensure that the correct instance is connected to.
    if _INTERNAL_REPLICA_CONTEXT is None:
        controller_name = SERVE_CONTROLLER_NAME
        controller_namespace = _get_controller_namespace(detached=True)
    else:
        controller_name = _INTERNAL_REPLICA_CONTEXT._internal_controller_name
        controller_namespace = _get_controller_namespace(detached=False)

    # Try to get serve controller if it exists
    try:
        controller = ray.get_actor(
            controller_name, namespace=controller_namespace)
    except ValueError:
        # Message fixed: previously read "There is no instance running" and
        # had an unbalanced backtick around the serve.start call.
        raise RayServeException("There is no Serve instance running on this "
                                "Ray cluster. Please call "
                                "`serve.start(detached=True)` to start one.")

    client = Client(controller, controller_name, detached=True)
    _set_global_client(client)
    return client
@PublicAPI
def shutdown() -> None:
    """Completely shut down the connected Serve instance.

    Shuts down all processes and deletes all state associated with the
    instance.
    """
    # No client was ever connected in this process: nothing to do.
    if _global_client is None:
        return

    client = _get_global_client()
    client.shutdown()
    _set_global_client(None)
@PublicAPI
def get_replica_context() -> ReplicaContext:
    """Return the ReplicaContext of the current deployment replica.

    The context exposes the deployment name and the replica tag, which
    uniquely identifies a single replica at runtime and has the form
    `<deployment_name>#<random letters>`.

    Raises:
        RayServeException: if not called from within a Ray Serve deployment.

    Example:
        >>> serve.get_replica_context().deployment # deployment_name
        >>> serve.get_replica_context().replica_tag # deployment_name#krcwoa
    """
    replica_context = _INTERNAL_REPLICA_CONTEXT
    if replica_context is None:
        raise RayServeException("`serve.get_replica_context()` "
                                "may only be called from within a "
                                "Ray Serve deployment.")
    return replica_context
@PublicAPI(stability="beta")
def ingress(app: Union["FastAPI", "APIRouter", Callable]):
    """Mark an ASGI application ingress for Serve.

    Args:
        app (FastAPI,APIRouter,Starlette,etc): the app or router object serve
            as ingress for this deployment. It can be any ASGI compatible
            object.

    Returns:
        A class decorator that wraps the decorated class so each replica
        hosts (a frozen copy of) ``app`` and forwards HTTP requests to it.

    Example:
    >>> app = FastAPI()
    >>> @serve.deployment
        @serve.ingress(app)
        class App:
            pass
    >>> App.deploy()
    """

    def decorator(cls):
        if not inspect.isclass(cls):
            raise ValueError("@serve.ingress must be used with a class.")

        # The wrapper below defines __call__ to dispatch into the ASGI app,
        # so the user class must not define its own.
        if issubclass(cls, collections.abc.Callable):
            raise ValueError(
                "Class passed to @serve.ingress may not have __call__ method.")

        # Sometimes there are decorators on the methods. We want to fix
        # the fast api routes here.
        if isinstance(app, (FastAPI, APIRouter)):
            make_fastapi_class_based_view(app, cls)

        # Free the state of the app so subsequent modification won't affect
        # this ingress deployment. We don't use copy.copy here to avoid
        # recursion issue.
        ensure_serialization_context()
        frozen_app = cloudpickle.loads(cloudpickle.dumps(app))

        class ASGIAppWrapper(cls):
            # NOTE: Serve replica actors support async constructors, hence
            # the unusual `async def __init__`.
            async def __init__(self, *args, **kwargs):
                super().__init__(*args, **kwargs)

                self._serve_app = frozen_app

                # Use uvicorn's lifespan handling code to properly deal with
                # startup and shutdown event.
                self._serve_asgi_lifespan = LifespanOn(
                    Config(self._serve_app, lifespan="on"))
                # Replace uvicorn logger with our own.
                self._serve_asgi_lifespan.logger = logger
                # LifespanOn's logger logs in INFO level thus becomes spammy
                # Within this block we temporarily uplevel for cleaner logging
                with LoggingContext(
                        self._serve_asgi_lifespan.logger,
                        level=logging.WARNING):
                    await self._serve_asgi_lifespan.startup()

            async def __call__(self, request: Request):
                # Bridge the incoming Starlette request into the wrapped
                # ASGI app and collect its response via the sender.
                sender = ASGIHTTPSender()
                await self._serve_app(
                    request.scope,
                    request._receive,
                    sender,
                )
                return sender.build_starlette_response()

            # NOTE: __del__ must be async so that we can run asgi shutdown
            # in the same event loop.
            async def __del__(self):
                # LifespanOn's logger logs in INFO level thus becomes spammy
                # Within this block we temporarily uplevel for cleaner logging
                with LoggingContext(
                        self._serve_asgi_lifespan.logger,
                        level=logging.WARNING):
                    await self._serve_asgi_lifespan.shutdown()

                # Make sure to call user's del method as well.
                super_cls = super()
                if hasattr(super_cls, "__del__"):
                    super_cls.__del__()

        # Preserve the user class's name so logs and route names read well.
        ASGIAppWrapper.__name__ = cls.__name__
        return ASGIAppWrapper

    return decorator
@PublicAPI
class Deployment:
    def __init__(self,
                 func_or_class: Callable,
                 name: str,
                 config: DeploymentConfig,
                 version: Optional[str] = None,
                 prev_version: Optional[str] = None,
                 init_args: Optional[Tuple[Any]] = None,
                 init_kwargs: Optional[Dict[Any, Any]] = None,
                 route_prefix: Union[str, None, DEFAULT] = DEFAULT.VALUE,
                 ray_actor_options: Optional[Dict] = None,
                 _internal=False) -> None:
        """Construct a Deployment. CONSTRUCTOR SHOULDN'T BE USED DIRECTLY.

        Deployments should be created, retrieved, and updated using
        `@serve.deployment`, `serve.get_deployment`, and `Deployment.options`,
        respectively.
        """
        if not _internal:
            raise RuntimeError(
                "The Deployment constructor should not be called "
                "directly. Use `@serve.deployment` instead.")
        if not callable(func_or_class):
            raise TypeError(
                "@serve.deployment must be called on a class or function.")
        if not isinstance(name, str):
            raise TypeError("name must be a string.")
        if not (version is None or isinstance(version, str)):
            raise TypeError("version must be a string.")
        if not (prev_version is None or isinstance(prev_version, str)):
            raise TypeError("prev_version must be a string.")
        if not (init_args is None or isinstance(init_args, (tuple, list))):
            raise TypeError("init_args must be a tuple.")
        if not (init_kwargs is None or isinstance(init_kwargs, dict)):
            raise TypeError("init_kwargs must be a dict.")
        if route_prefix is not DEFAULT.VALUE and route_prefix is not None:
            if not isinstance(route_prefix, str):
                raise TypeError("route_prefix must be a string.")
            if not route_prefix.startswith("/"):
                raise ValueError("route_prefix must start with '/'.")
            if route_prefix != "/" and route_prefix.endswith("/"):
                raise ValueError(
                    "route_prefix must not end with '/' unless it's the root.")
            if "{" in route_prefix or "}" in route_prefix:
                raise ValueError("route_prefix may not contain wildcards.")
        if not (ray_actor_options is None
                or isinstance(ray_actor_options, dict)):
            raise TypeError("ray_actor_options must be a dict.")

        if init_args is None:
            init_args = ()
        if init_kwargs is None:
            init_kwargs = {}

        # TODO(architkulkarni): Enforce that autoscaling_config and
        # user-provided num_replicas should be mutually exclusive.
        if version is None and config.autoscaling_config is not None:
            # TODO(architkulkarni): Remove this restriction.
            raise ValueError(
                "Currently autoscaling is only supported for "
                "versioned deployments. Try @serve.deployment(version=...).")

        self._func_or_class = func_or_class
        self._name = name
        self._version = version
        self._prev_version = prev_version
        self._config = config
        self._init_args = init_args
        self._init_kwargs = init_kwargs
        self._route_prefix = route_prefix
        self._ray_actor_options = ray_actor_options

    @property
    def name(self) -> str:
        """Unique name of this deployment."""
        return self._name

    @property
    def version(self) -> Optional[str]:
        """Version of this deployment.

        If None, will be redeployed every time `.deploy()` is called.
        """
        return self._version

    @property
    def prev_version(self) -> Optional[str]:
        """Existing version of deployment to target.

        If prev_version does not match with existing deployment
        version, the deployment will fail to be deployed.
        """
        return self._prev_version

    @property
    def func_or_class(self) -> Callable:
        """Underlying class or function that this deployment wraps."""
        return self._func_or_class

    @property
    def num_replicas(self) -> int:
        """Current target number of replicas."""
        return self._config.num_replicas

    @property
    def user_config(self) -> Any:
        """Current dynamic user-provided config options."""
        return self._config.user_config

    @property
    def max_concurrent_queries(self) -> int:
        """Current max outstanding queries from each handle."""
        return self._config.max_concurrent_queries

    @property
    def route_prefix(self) -> Optional[str]:
        """HTTP route prefix that this deployment is exposed under."""
        if self._route_prefix is DEFAULT.VALUE:
            return f"/{self._name}"
        return self._route_prefix

    @property
    def ray_actor_options(self) -> Optional[Dict]:
        """Actor options such as resources required for each replica."""
        return self._ray_actor_options

    @property
    def init_args(self) -> Tuple[Any]:
        """Positional args passed to the underlying class's constructor."""
        return self._init_args

    @property
    def init_kwargs(self) -> Dict[Any, Any]:
        """Keyword args passed to the underlying class's constructor."""
        # Bug fix: this previously returned self._init_args.
        return self._init_kwargs

    @property
    def url(self) -> Optional[str]:
        """Full HTTP url for this deployment."""
        if self._route_prefix is None:
            # this deployment is not exposed over HTTP
            return None

        return _get_global_client().root_url + self.route_prefix

    def __call__(self):
        raise RuntimeError("Deployments cannot be constructed directly. "
                           "Use `deployment.deploy() instead.`")

    @PublicAPI
    def deploy(self, *init_args, _blocking=True, **init_kwargs):
        """Deploy or update this deployment.

        Args:
            init_args (optional): args to pass to the class __init__
                method. Not valid if this deployment wraps a function.
            init_kwargs (optional): kwargs to pass to the class __init__
                method. Not valid if this deployment wraps a function.
        """
        # Fall back to the args/kwargs captured at decoration time when the
        # caller doesn't provide any.
        if len(init_args) == 0 and self._init_args is not None:
            init_args = self._init_args
        if len(init_kwargs) == 0 and self._init_kwargs is not None:
            init_kwargs = self._init_kwargs

        return _get_global_client().deploy(
            self._name,
            self._func_or_class,
            init_args,
            init_kwargs,
            ray_actor_options=self._ray_actor_options,
            config=self._config,
            version=self._version,
            prev_version=self._prev_version,
            route_prefix=self.route_prefix,
            url=self.url,
            _blocking=_blocking)

    @PublicAPI
    def delete(self):
        """Delete this deployment."""
        return _get_global_client().delete_deployment(self._name)

    @PublicAPI
    def get_handle(self, sync: Optional[bool] = True
                   ) -> Union[RayServeHandle, RayServeSyncHandle]:
        """Get a ServeHandle to this deployment to invoke it from Python.

        Args:
            sync (bool): If true, then Serve will return a ServeHandle that
                works everywhere. Otherwise, Serve will return an
                asyncio-optimized ServeHandle that's only usable in an asyncio
                loop.

        Returns:
            ServeHandle
        """
        return _get_global_client().get_handle(
            self._name, missing_ok=True, sync=sync)

    @PublicAPI
    def options(self,
                func_or_class: Optional[Callable] = None,
                name: Optional[str] = None,
                version: Optional[str] = None,
                prev_version: Optional[str] = None,
                init_args: Optional[Tuple[Any]] = None,
                init_kwargs: Optional[Dict[Any, Any]] = None,
                route_prefix: Union[str, None, DEFAULT] = DEFAULT.VALUE,
                num_replicas: Optional[int] = None,
                ray_actor_options: Optional[Dict] = None,
                user_config: Optional[Any] = None,
                max_concurrent_queries: Optional[int] = None,
                _autoscaling_config: Optional[Union[Dict,
                                                    AutoscalingConfig]] = None,
                _graceful_shutdown_wait_loop_s: Optional[float] = None,
                _graceful_shutdown_timeout_s: Optional[float] = None
                ) -> "Deployment":
        """Return a copy of this deployment with updated options.

        Only those options passed in will be updated, all others will remain
        unchanged from the existing deployment.
        """
        new_config = self._config.copy()
        if num_replicas is not None:
            new_config.num_replicas = num_replicas
        if user_config is not None:
            new_config.user_config = user_config
        if max_concurrent_queries is not None:
            new_config.max_concurrent_queries = max_concurrent_queries

        if func_or_class is None:
            func_or_class = self._func_or_class

        if name is None:
            name = self._name

        if version is None:
            version = self._version

        if init_args is None:
            init_args = self._init_args

        if init_kwargs is None:
            init_kwargs = self._init_kwargs

        if route_prefix is DEFAULT.VALUE:
            # Default is to keep the previous value
            route_prefix = self._route_prefix

        if ray_actor_options is None:
            ray_actor_options = self._ray_actor_options

        if _autoscaling_config is not None:
            new_config.autoscaling_config = _autoscaling_config

        if _graceful_shutdown_wait_loop_s is not None:
            new_config.graceful_shutdown_wait_loop_s = (
                _graceful_shutdown_wait_loop_s)

        if _graceful_shutdown_timeout_s is not None:
            new_config.graceful_shutdown_timeout_s = (
                _graceful_shutdown_timeout_s)

        # NOTE(review): unlike the other options, prev_version does NOT fall
        # back to self._prev_version when omitted -- confirm this is intended.
        return Deployment(
            func_or_class,
            name,
            new_config,
            version=version,
            prev_version=prev_version,
            init_args=init_args,
            init_kwargs=init_kwargs,
            route_prefix=route_prefix,
            ray_actor_options=ray_actor_options,
            _internal=True,
        )

    def __eq__(self, other):
        # Robustness fix: comparing against a non-Deployment previously
        # raised AttributeError; defer to the other operand instead.
        if not isinstance(other, Deployment):
            return NotImplemented
        return all([
            self._name == other._name,
            self._version == other._version,
            self._config == other._config,
            self._init_args == other._init_args,
            self._init_kwargs == other._init_kwargs,
            # compare route prefix with default value resolved
            self.route_prefix == other.route_prefix,
            # Bug fix: this previously compared self._ray_actor_options
            # against itself (always True).
            self._ray_actor_options == other._ray_actor_options,
        ])

    def __str__(self):
        return (f"Deployment(name={self._name},"
                f"version={self._version},"
                f"route_prefix={self.route_prefix})")

    def __repr__(self):
        return str(self)
@overload
def deployment(func_or_class: Callable) -> Deployment:
    # Bare `@serve.deployment` usage (no parentheses / options).
    pass
@overload
def deployment(
        name: Optional[str] = None,
        version: Optional[str] = None,
        prev_version: Optional[str] = None,
        num_replicas: Optional[int] = None,
        init_args: Optional[Tuple[Any]] = None,
        init_kwargs: Optional[Dict[Any, Any]] = None,
        route_prefix: Union[str, None, DEFAULT] = DEFAULT.VALUE,
        ray_actor_options: Optional[Dict] = None,
        user_config: Optional[Any] = None,
        max_concurrent_queries: Optional[int] = None,
        _autoscaling_config: Optional[Union[Dict, AutoscalingConfig]] = None,
        _graceful_shutdown_wait_loop_s: Optional[float] = None,
        _graceful_shutdown_timeout_s: Optional[float] = None
) -> Callable[[Callable], Deployment]:
    # Parametrized `@serve.deployment(...)` usage. Consistency fix: this
    # overload was missing `route_prefix`, which the implementation accepts.
    pass
@PublicAPI
def deployment(
        _func_or_class: Optional[Callable] = None,
        name: Optional[str] = None,
        version: Optional[str] = None,
        prev_version: Optional[str] = None,
        num_replicas: Optional[int] = None,
        init_args: Optional[Tuple[Any]] = None,
        init_kwargs: Optional[Dict[Any, Any]] = None,
        route_prefix: Union[str, None, DEFAULT] = DEFAULT.VALUE,
        ray_actor_options: Optional[Dict] = None,
        user_config: Optional[Any] = None,
        max_concurrent_queries: Optional[int] = None,
        _autoscaling_config: Optional[Union[Dict, AutoscalingConfig]] = None,
        _graceful_shutdown_wait_loop_s: Optional[float] = None,
        _graceful_shutdown_timeout_s: Optional[float] = None
) -> Callable[[Callable], Deployment]:
    """Define a Serve deployment.

    Args:
        name (Optional[str]): Globally-unique name identifying this deployment.
            If not provided, the name of the class or function will be used.
        version (Optional[str]): Version of the deployment. This is used to
            indicate a code change for the deployment; when it is re-deployed
            with a version change, a rolling update of the replicas will be
            performed. If not provided, every deployment will be treated as a
            new version.
        prev_version (Optional[str]): Version of the existing deployment which
            is used as a precondition for the next deployment. If prev_version
            does not match with the existing deployment's version, the
            deployment will fail. If not provided, deployment procedure will
            not check the existing deployment's version.
        num_replicas (Optional[int]): The number of processes to start up that
            will handle requests to this deployment. Defaults to 1.
        init_args (Optional[Tuple]): Positional args to be passed to the class
            constructor when starting up deployment replicas. These can also be
            passed when you call `.deploy()` on the returned Deployment.
        init_kwargs (Optional[Dict]): Keyword args to be passed to the class
            constructor when starting up deployment replicas. These can also be
            passed when you call `.deploy()` on the returned Deployment.
        route_prefix (Optional[str]): Requests to paths under this HTTP path
            prefix will be routed to this deployment. Defaults to '/{name}'.
            When set to 'None', no HTTP endpoint will be created.
            Routing is done based on longest-prefix match, so if you have
            deployment A with a prefix of '/a' and deployment B with a prefix
            of '/a/b', requests to '/a', '/a/', and '/a/c' go to A and requests
            to '/a/b', '/a/b/', and '/a/b/c' go to B. Routes must not end with
            a '/' unless they're the root (just '/'), which acts as a
            catch-all.
        ray_actor_options (dict): Options to be passed to the Ray actor
            constructor such as resource requirements.
        user_config (Optional[Any]): [experimental] Config to pass to the
            reconfigure method of the deployment. This can be updated
            dynamically without changing the version of the deployment and
            restarting its replicas. The user_config needs to be hashable to
            keep track of updates, so it must only contain hashable types, or
            hashable types nested in lists and dictionaries.
        max_concurrent_queries (Optional[int]): The maximum number of queries
            that will be sent to a replica of this deployment without receiving
            a response. Defaults to 100.

    Example:

    >>> @serve.deployment(name="deployment1", version="v1")
        class MyDeployment:
            pass

    >>> MyDeployment.deploy(*init_args)
    >>> MyDeployment.options(num_replicas=2, init_args=init_args).deploy()

    Returns:
        Deployment
    """
    # num_replicas is controlled by the autoscaler when autoscaling is on,
    # so a manual replica count would conflict with it.
    if num_replicas is not None \
            and _autoscaling_config is not None:
        raise ValueError("Manually setting num_replicas is not allowed when "
                         "_autoscaling_config is provided.")

    # Only overwrite DeploymentConfig defaults for options the caller
    # actually supplied (pydantic validates each assignment).
    config = DeploymentConfig()
    if num_replicas is not None:
        config.num_replicas = num_replicas

    if user_config is not None:
        config.user_config = user_config

    if max_concurrent_queries is not None:
        config.max_concurrent_queries = max_concurrent_queries

    if _autoscaling_config is not None:
        config.autoscaling_config = _autoscaling_config

    if _graceful_shutdown_wait_loop_s is not None:
        config.graceful_shutdown_wait_loop_s = _graceful_shutdown_wait_loop_s

    if _graceful_shutdown_timeout_s is not None:
        config.graceful_shutdown_timeout_s = _graceful_shutdown_timeout_s

    def decorator(_func_or_class):
        return Deployment(
            _func_or_class,
            name if name is not None else _func_or_class.__name__,
            config,
            version=version,
            prev_version=prev_version,
            init_args=init_args,
            init_kwargs=init_kwargs,
            route_prefix=route_prefix,
            ray_actor_options=ray_actor_options,
            _internal=True,
        )

    # This handles both parametrized and non-parametrized usage of the
    # decorator. See the @serve.batch code for more details.
    return decorator(_func_or_class) if callable(_func_or_class) else decorator
@PublicAPI
def get_deployment(name: str) -> Deployment:
    """Dynamically fetch a handle to a Deployment object.

    This can be used to update and redeploy a deployment without access to
    the original definition.

    Example:

    >>> MyDeployment = serve.get_deployment("name")
    >>> MyDeployment.options(num_replicas=10).deploy()

    Args:
        name(str): name of the deployment. This must have already been
        deployed.

    Returns:
        Deployment
    """
    try:
        deployment_info, route_prefix = _get_global_client(
        ).get_deployment_info(name)
    except KeyError:
        raise KeyError(f"Deployment {name} was not found. "
                       "Did you call Deployment.deploy()?")

    # The deployed function/class was serialized by the controller; restore
    # it before rebuilding the Deployment wrapper.
    deployment_def = cloudpickle.loads(
        deployment_info.replica_config.serialized_deployment_def)
    replica_config = deployment_info.replica_config

    return Deployment(
        deployment_def,
        name,
        deployment_info.deployment_config,
        version=deployment_info.version,
        init_args=replica_config.init_args,
        init_kwargs=replica_config.init_kwargs,
        route_prefix=route_prefix,
        ray_actor_options=replica_config.ray_actor_options,
        _internal=True,
    )
@PublicAPI
def list_deployments() -> Dict[str, Deployment]:
    """Returns a dictionary of all active deployments.

    Dictionary maps deployment name to Deployment objects.
    """
    all_infos = _get_global_client().list_deployments()

    deployments = {}
    for deployment_name, (info, route_prefix) in all_infos.items():
        # Restore the serialized function/class before rebuilding the
        # Deployment wrapper.
        deployment_def = cloudpickle.loads(
            info.replica_config.serialized_deployment_def)
        deployments[deployment_name] = Deployment(
            deployment_def,
            deployment_name,
            info.deployment_config,
            version=info.version,
            init_args=info.replica_config.init_args,
            init_kwargs=info.replica_config.init_kwargs,
            route_prefix=route_prefix,
            ray_actor_options=info.replica_config.ray_actor_options,
            _internal=True,
        )

    return deployments
| 38.584063 | 83 | 0.623494 |
41a980f4ccf2f9f7ca0d399278915dde01cbde22 | 3,016 | py | Python | src/trial2.py | tkosht/gremlin | 6c3cc8bc3d03c62c7d05662067ef5ccd1d9071b2 | [
"MIT"
] | null | null | null | src/trial2.py | tkosht/gremlin | 6c3cc8bc3d03c62c7d05662067ef5ccd1d9071b2 | [
"MIT"
] | null | null | null | src/trial2.py | tkosht/gremlin | 6c3cc8bc3d03c62c7d05662067ef5ccd1d9071b2 | [
"MIT"
] | null | null | null | from gremlin_python.process.anonymous_traversal import traversal
from gremlin_python.driver.driver_remote_connection import DriverRemoteConnection
class GremlinProvider(object):
def __init__(self):
self.g = traversal().withRemote(
DriverRemoteConnection("ws://gremlin-server:8182/gremlin", "g")
)
def clear_nodes(self):
self.g.V().drop().iterate()
return self
def add_person(self, name: str):
node = self.g.addV("person").property("name", name).next()
return node
def add_age(self, name: str, age: int):
assert name and 0 <= age <= 150
node = self.g.V().has("person", "name", name).property("age", age).next()
return node
def add_job(self, name: str, job: str):
assert name and job
node = self.g.V().has("person", "name", name).property("job", job).next()
return node
def get_name(self, v):
name = self.g.V(v).values("name").toList()[0]
return name
def get_age(self, v):
age = self.g.V(v).values("age").toList()[0]
return age
def add_knows(self, v1, v2, weight=0.75):
edge = self.g.V(v1).addE("knows").to(v2).property("weight", weight).iterate()
return edge
def find_person_node(self, name: str):
node = self.g.V().has("person", "name", name).next()
return node
def find_person_whom_knows(self, center: str):
whom_knows = self.g.V().has("person", "name", center).out("knows").toList()[0]
return whom_knows
if __name__ == "__main__":
    # Demo script: requires a Gremlin server reachable at
    # ws://gremlin-server:8182/gremlin. Expected output is shown in the
    # comments after each print.
    grp = GremlinProvider()
    grp.clear_nodes()

    # Register marko.
    v1 = grp.add_person("marko")
    assert grp.get_name(v1) == "marko"
    print("v1:", grp.get_name(v1))
    # v1: marko

    # Register stephen.
    v2 = grp.add_person("stephen")
    assert grp.get_name(v2) == "stephen"
    print("v2:", grp.get_name(v2))
    # v2: stephen

    # Add / update ages.
    grp.add_age("marko", 35)  # insert
    grp.add_age("marko", 31)  # update
    grp.add_age("stephen", 32)

    # Add / update jobs.
    grp.add_job("marko", "SWE")
    grp.add_job("stephen", "SWE")

    # Add a 'knows' relation (edge).
    e1 = grp.add_knows(v1, v2, 0.1)
    print(e1)
    # [['V', v[34]], ['addE', 'knows'], ['to', v[35]], ['property', 'weight', 0.1], ['none']]

    # Look up marko.
    marko = grp.find_person_node("marko")
    print("marko:", grp.get_name(marko))
    # marko: marko

    # Find the person whom marko knows.
    v = grp.find_person_whom_knows("marko")
    print("marko knows:", grp.get_name(v))
    print("marko knows2:", grp.g.V(v1).outE().inV().values("name").toList()[0])
    # marko knows: stephen
    # marko knows2: stephen

    # Read ages back from the vertex objects.
    print("marko.age:", grp.get_age(v1))
    assert grp.get_age(v1) == 31
    print("stephen.age:", grp.get_age(v2))
    assert grp.get_age(v2) == 32
    # marko.age: 31
    # stephen.age: 32

    # Get the names of everyone whose job is "SWE" (as a list).
    print("SWE:", grp.g.V().has("person", "job", "SWE").values("name").toList())
    # SWE: ['marko', 'stephen']
901c000bf0d25fb62609b1f0be80c50019605e34 | 651 | py | Python | src/command_modules/azure-cli-profile/azure/cli/command_modules/profile/__init__.py | 0cool321/azure-cli | fd8e6d46d5cee682aff51e262c06bc40c01636ba | [
"MIT"
] | 2 | 2020-07-22T18:53:05.000Z | 2021-09-11T05:52:33.000Z | src/command_modules/azure-cli-profile/azure/cli/command_modules/profile/__init__.py | 0cool321/azure-cli | fd8e6d46d5cee682aff51e262c06bc40c01636ba | [
"MIT"
] | null | null | null | src/command_modules/azure-cli-profile/azure/cli/command_modules/profile/__init__.py | 0cool321/azure-cli | fd8e6d46d5cee682aff51e262c06bc40c01636ba | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import azure.cli.command_modules.profile._help # pylint: disable=unused-import
def load_params(_):
    """Lazily register this module's argument definitions.

    The import itself has the side effect of registering parameters;
    the argument is ignored (CLI loader passes the command name).
    """
    import azure.cli.command_modules.profile._params #pylint: disable=redefined-outer-name
def load_commands():
    """Lazily register this module's command table (import has side effects)."""
    import azure.cli.command_modules.profile.commands #pylint: disable=redefined-outer-name
| 50.076923 | 94 | 0.56682 |
6a44bfa11953b348acb0a1a7a2026aad01c9d216 | 2,489 | py | Python | services/web/server/src/simcore_service_webserver/cli.py | elisabettai/osparc-simcore | ad7b6e05111b50fe95e49306a992170490a7247f | [
"MIT"
] | null | null | null | services/web/server/src/simcore_service_webserver/cli.py | elisabettai/osparc-simcore | ad7b6e05111b50fe95e49306a992170490a7247f | [
"MIT"
] | 1 | 2021-11-29T13:38:09.000Z | 2021-11-29T13:38:09.000Z | services/web/server/src/simcore_service_webserver/cli.py | mrnicegyu11/osparc-simcore | b6fa6c245dbfbc18cc74a387111a52de9b05d1f4 | [
"MIT"
] | null | null | null | """ Application's command line .
Why does this file exist, and why not put this in __main__?
You might be tempted to import things from __main__ later, but that will cause
problems: the code will get executed twice:
- When you run `python -msimcore_service_webserver` python will execute
``__main__.py`` as a script. That means there won't be any
``simcore_service_webserver.__main__`` in ``sys.modules``.
- When you import __main__ it will get executed again (as a module) because
there's no ``simcore_service_webserver.__main__`` in ``sys.modules``.
"""
import logging
import os
from typing import Dict, Tuple
import typer
from aiohttp import web
from settings_library.utils_cli import create_settings_command
from .application import create_application, run_service
from .application_settings import ApplicationSettings
from .application_settings_utils import convert_to_app_config
from .log import setup_logging
# ptsvd cause issues with ProcessPoolExecutor
# SEE: https://github.com/microsoft/ptvsd/issues/1443
if os.environ.get("SC_BOOT_MODE") == "debug-ptvsd":
import multiprocessing
multiprocessing.set_start_method("spawn", True)
log = logging.getLogger(__name__)
def _setup_app_from_settings(
    settings: ApplicationSettings,
) -> Tuple[web.Application, Dict]:
    """Build the aiohttp application plus an equivalent legacy config dict.

    Much of the older code (and many test fixtures) was written against a
    validated config mapping rather than the settings object, so we keep an
    equivalent config alongside the app; regenerating it here avoids a large
    refactor for little return on investment.

    :param settings: validated application settings
    :return: ``(app, config)`` pair
    """
    legacy_config = convert_to_app_config(settings)
    setup_logging(
        level=settings.log_level,
        slow_duration=settings.AIODEBUG_SLOW_DURATION_SECS,
    )
    return (create_application(), legacy_config)
async def app_factory() -> web.Application:
    """Entry point used by gunicorn to create the app (see docker/boot.sh)."""
    settings = ApplicationSettings()
    assert settings.SC_BUILD_TARGET  # nosec
    log.info("Application settings: %s", settings.json(indent=2, sort_keys=True))
    application, _unused_config = _setup_app_from_settings(settings)
    return application
# CLI -------------
main = typer.Typer(name="simcore-service-webserver")
main.command()(create_settings_command(settings_cls=ApplicationSettings, logger=log))
@main.command()
def run():
    # Load settings from the environment, build the app (plus its legacy
    # config twin), then block serving requests until shutdown.
    # No docstring on purpose: typer would surface it as CLI help text.
    app_settings = ApplicationSettings()
    app, cfg = _setup_app_from_settings(app_settings)
    run_service(app, cfg)
| 29.282353 | 85 | 0.746886 |
6cf4d962335c586d41247dca59535113fd7cf363 | 5,723 | py | Python | tfx/examples/chicago_taxi_pipeline/taxi_pipeline_simple.py | HassanDayoub/tfx | dc9221abbb8dad991d1ae22fb91876da1290efae | [
"Apache-2.0"
] | 2 | 2019-07-08T20:56:13.000Z | 2020-08-04T17:07:26.000Z | tfx/examples/chicago_taxi_pipeline/taxi_pipeline_simple.py | HassanDayoub/tfx | dc9221abbb8dad991d1ae22fb91876da1290efae | [
"Apache-2.0"
] | null | null | null | tfx/examples/chicago_taxi_pipeline/taxi_pipeline_simple.py | HassanDayoub/tfx | dc9221abbb8dad991d1ae22fb91876da1290efae | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Chicago taxi example using TFX."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import logging
import os
from tfx.components.evaluator.component import Evaluator
from tfx.components.example_gen.csv_example_gen.component import CsvExampleGen
from tfx.components.example_validator.component import ExampleValidator
from tfx.components.model_validator.component import ModelValidator
from tfx.components.pusher.component import Pusher
from tfx.components.schema_gen.component import SchemaGen
from tfx.components.statistics_gen.component import StatisticsGen
from tfx.components.trainer.component import Trainer
from tfx.components.transform.component import Transform
from tfx.orchestration import pipeline
from tfx.orchestration.airflow.airflow_runner import AirflowDAGRunner
from tfx.proto import evaluator_pb2
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
from tfx.utils.dsl_utils import csv_input
# This example assumes that the taxi data is stored in ~/taxi/data and the
# taxi utility function is in ~/taxi. Feel free to customize this as needed.
# NOTE: requires the HOME environment variable to be set.
_taxi_root = os.path.join(os.environ['HOME'], 'taxi')
_data_root = os.path.join(_taxi_root, 'data/simple')
# Python module file to inject customized logic into the TFX components. The
# Transform and Trainer both require user-defined functions to run successfully.
_taxi_module_file = os.path.join(_taxi_root, 'taxi_utils.py')
# Path which can be listened to by the model server. Pusher will output the
# trained model here.
_serving_model_dir = os.path.join(_taxi_root, 'serving_model/taxi_simple')
# Directory and data locations. This example assumes all of the chicago taxi
# example code and metadata library is relative to $HOME, but you can store
# these files anywhere on your local filesystem.
_tfx_root = os.path.join(os.environ['HOME'], 'tfx')
_pipeline_root = os.path.join(_tfx_root, 'pipelines')
_metadata_db_root = os.path.join(_tfx_root, 'metadata')
_log_root = os.path.join(_tfx_root, 'logs')
# Airflow-specific configs; these will be passed directly to airflow
# (schedule_interval=None means the DAG only runs when triggered manually).
_airflow_config = {
    'schedule_interval': None,
    'start_date': datetime.datetime(2019, 1, 1),
}
# Logging overrides forwarded to the pipeline via additional_pipeline_args.
logger_overrides = {'log_root': _log_root, 'log_level': logging.INFO}
def _create_pipeline():
  """Implements the chicago taxi pipeline with TFX.

  Wires the standard TFX components into a linear DAG
  (ExampleGen -> StatisticsGen -> SchemaGen -> ExampleValidator ->
  Transform -> Trainer -> Evaluator -> ModelValidator -> Pusher)
  and returns the assembled pipeline object for an orchestrator to run.
  """
  examples = csv_input(_data_root)
  # Brings data into the pipeline or otherwise joins/converts training data.
  example_gen = CsvExampleGen(input_base=examples)
  # Computes statistics over data for visualization and example validation.
  statistics_gen = StatisticsGen(input_data=example_gen.outputs.examples)
  # Generates schema based on statistics files.
  infer_schema = SchemaGen(stats=statistics_gen.outputs.output)
  # Performs anomaly detection based on statistics and data schema.
  validate_stats = ExampleValidator(
      stats=statistics_gen.outputs.output, schema=infer_schema.outputs.output)
  # Performs transformations and feature engineering in training and serving.
  transform = Transform(
      input_data=example_gen.outputs.examples,
      schema=infer_schema.outputs.output,
      module_file=_taxi_module_file)
  # Uses user-provided Python function that implements a model using TF-Learn.
  trainer = Trainer(
      module_file=_taxi_module_file,
      transformed_examples=transform.outputs.transformed_examples,
      schema=infer_schema.outputs.output,
      transform_output=transform.outputs.transform_output,
      train_args=trainer_pb2.TrainArgs(num_steps=10000),
      eval_args=trainer_pb2.EvalArgs(num_steps=5000))
  # Uses TFMA to compute a evaluation statistics over features of a model,
  # sliced by trip_start_hour.
  model_analyzer = Evaluator(
      examples=example_gen.outputs.examples,
      model_exports=trainer.outputs.output,
      feature_slicing_spec=evaluator_pb2.FeatureSlicingSpec(specs=[
          evaluator_pb2.SingleSlicingSpec(
              column_for_slicing=['trip_start_hour'])
      ]))
  # Performs quality validation of a candidate model (compared to a baseline).
  model_validator = ModelValidator(
      examples=example_gen.outputs.examples, model=trainer.outputs.output)
  # Checks whether the model passed the validation steps and pushes the model
  # to a file destination if check passed.
  pusher = Pusher(
      model_export=trainer.outputs.output,
      model_blessing=model_validator.outputs.blessing,
      push_destination=pusher_pb2.PushDestination(
          filesystem=pusher_pb2.PushDestination.Filesystem(
              base_directory=_serving_model_dir)))
  return pipeline.Pipeline(
      pipeline_name='chicago_taxi_simple',
      pipeline_root=_pipeline_root,
      components=[
          example_gen, statistics_gen, infer_schema, validate_stats, transform,
          trainer, model_analyzer, model_validator, pusher
      ],
      enable_cache=True,
      metadata_db_root=_metadata_db_root,
      additional_pipeline_args={'logger_args': logger_overrides},
  )
# Module-level entry point: Airflow discovers the DAG built here at import time.
airflow_pipeline = AirflowDAGRunner(_airflow_config).run(_create_pipeline())
| 42.080882 | 80 | 0.780884 |
509f8aa5ecd05f97ee4e83b78e517b3690e57c0c | 7,730 | py | Python | pypureclient/flasharray/FA_2_9/models/alert.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 14 | 2018-12-07T18:30:27.000Z | 2022-02-22T09:12:33.000Z | pypureclient/flasharray/FA_2_9/models/alert.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 28 | 2019-09-17T21:03:52.000Z | 2022-03-29T22:07:35.000Z | pypureclient/flasharray/FA_2_9/models/alert.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 15 | 2020-06-11T15:50:08.000Z | 2022-03-21T09:27:25.000Z | # coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.9
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_9 import models
class Alert(object):
    """Swagger-generated model for a FlashArray alert.

    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'id': 'str',
        'name': 'str',
        'actual': 'str',
        'category': 'str',
        'closed': 'int',
        'code': 'int',
        'component_name': 'str',
        'component_type': 'str',
        'created': 'int',
        'description': 'str',
        'expected': 'str',
        'flagged': 'bool',
        'issue': 'str',
        'knowledge_base_url': 'str',
        'notified': 'int',
        'severity': 'str',
        'state': 'str',
        'summary': 'str',
        'updated': 'int'
    }
    attribute_map = {
        'id': 'id',
        'name': 'name',
        'actual': 'actual',
        'category': 'category',
        'closed': 'closed',
        'code': 'code',
        'component_name': 'component_name',
        'component_type': 'component_type',
        'created': 'created',
        'description': 'description',
        'expected': 'expected',
        'flagged': 'flagged',
        'issue': 'issue',
        'knowledge_base_url': 'knowledge_base_url',
        'notified': 'notified',
        'severity': 'severity',
        'state': 'state',
        'summary': 'summary',
        'updated': 'updated'
    }
    required_args = {
    }
    def __init__(
        self,
        id=None,  # type: str
        name=None,  # type: str
        actual=None,  # type: str
        category=None,  # type: str
        closed=None,  # type: int
        code=None,  # type: int
        component_name=None,  # type: str
        component_type=None,  # type: str
        created=None,  # type: int
        description=None,  # type: str
        expected=None,  # type: str
        flagged=None,  # type: bool
        issue=None,  # type: str
        knowledge_base_url=None,  # type: str
        notified=None,  # type: int
        severity=None,  # type: str
        state=None,  # type: str
        summary=None,  # type: str
        updated=None,  # type: int
    ):
        """
        Keyword args:
            id (str): A globally unique, system-generated ID. The ID cannot be modified and cannot refer to another resource.
            name (str): A locally unique, system-generated name. The name cannot be modified.
            actual (str): Actual condition at the time the alert is created.
            category (str): The category of the alert. Valid values include `array`, `hardware` and `software`.
            closed (int): The time the alert was closed in milliseconds since the UNIX epoch.
            code (int): The code number of the alert.
            component_name (str): The name of the component that generated the alert.
            component_type (str): The type of component that generated the alert.
            created (int): The time the alert was created in milliseconds since the UNIX epoch.
            description (str): A short description of the alert.
            expected (str): Expected state or threshold under normal conditions.
            flagged (bool): If set to `true`, the message is flagged. Important messages can can be flagged and listed separately.
            issue (str): Information about the alert cause.
            knowledge_base_url (str): The URL of the relevant knowledge base page.
            notified (int): The time the most recent alert notification was sent in milliseconds since the UNIX epoch.
            severity (str): The severity level of the alert. Valid values include `info`, `warning`, `critical`, and `hidden`.
            state (str): The current state of the alert. Valid values include `open`, `closing`, and `closed`.
            summary (str): A summary of the alert.
            updated (int): The time the alert was last updated in milliseconds since the UNIX epoch.
        """
        # Assign only the keyword arguments that were actually provided.
        # Assignment goes through __setattr__, which validates each name
        # against `attribute_map`, so unset attributes simply stay absent.
        provided = {
            'id': id,
            'name': name,
            'actual': actual,
            'category': category,
            'closed': closed,
            'code': code,
            'component_name': component_name,
            'component_type': component_type,
            'created': created,
            'description': description,
            'expected': expected,
            'flagged': flagged,
            'issue': issue,
            'knowledge_base_url': knowledge_base_url,
            'notified': notified,
            'severity': severity,
            'state': state,
            'summary': summary,
            'updated': updated,
        }
        for attr, value in provided.items():
            if value is not None:
                setattr(self, attr, value)
    def __setattr__(self, key, value):
        # Reject attribute names that are not part of the declared model.
        if key in self.attribute_map:
            self.__dict__[key] = value
        else:
            raise KeyError("Invalid key `{}` for `Alert`".format(key))
    def __getattribute__(self, item):
        # Unset `Property` placeholders must behave like missing attributes.
        value = object.__getattribute__(self, item)
        if isinstance(value, Property):
            raise AttributeError
        return value
    def to_dict(self):
        """Returns the model properties as a dict"""
        def _convert(value):
            # Serialize a single attribute value, descending one level into
            # lists/dicts and delegating to nested models' to_dict().
            if isinstance(value, list):
                return [x.to_dict() if hasattr(x, "to_dict") else x
                        for x in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: v.to_dict() if hasattr(v, "to_dict") else v
                        for k, v in value.items()}
            return value
        result = {attr: _convert(getattr(self, attr))
                  for attr in self.swagger_types
                  if hasattr(self, attr)}
        if issubclass(Alert, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return isinstance(other, Alert) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 35.136364 | 130 | 0.557568 |
7196be873262107c9bac44501fc50e6734fd0d98 | 16,074 | py | Python | manila_tempest_tests/tests/api/test_share_group_actions.py | raissaa/manila-tempest-plugin | 3dd627bdec282071450694bf5399d53e9f9abd5b | [
"Apache-2.0"
] | null | null | null | manila_tempest_tests/tests/api/test_share_group_actions.py | raissaa/manila-tempest-plugin | 3dd627bdec282071450694bf5399d53e9f9abd5b | [
"Apache-2.0"
] | null | null | null | manila_tempest_tests/tests/api/test_share_group_actions.py | raissaa/manila-tempest-plugin | 3dd627bdec282071450694bf5399d53e9f9abd5b | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2016 Andrew Kerr
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
from tempest import config
from tempest.lib.common.utils import data_utils
import testtools
from testtools import testcase as tc
from manila_tempest_tests.common import constants
from manila_tempest_tests.tests.api import base
from manila_tempest_tests import utils
CONF = config.CONF
@testtools.skipUnless(
    CONF.share.run_share_group_tests, 'Share Group tests disabled.')
@base.skip_if_microversion_lt(constants.MIN_SHARE_GROUP_MICROVERSION)
@ddt.ddt
class ShareGroupActionsTest(base.BaseSharesTest):
    """Covers share group functionality.

    Fixtures (built once per class in resource_setup): two share groups,
    three shares spread across them, and one snapshot of each group.
    """
    @classmethod
    def resource_setup(cls):
        super(ShareGroupActionsTest, cls).resource_setup()
        # Create first share group
        cls.share_group_name = data_utils.rand_name("tempest-sg-name")
        cls.share_group_desc = data_utils.rand_name("tempest-sg-description")
        cls.share_group = cls.create_share_group(
            name=cls.share_group_name, description=cls.share_group_desc)
        # Create second share group for purposes of sorting and snapshot
        # filtering
        cls.share_group2 = cls.create_share_group(
            name=cls.share_group_name, description=cls.share_group_desc)
        # Create 2 shares - inside first and second share groups
        cls.share_name = data_utils.rand_name("tempest-share-name")
        cls.share_desc = data_utils.rand_name("tempest-share-description")
        cls.share_size = 1
        cls.share_size2 = 2
        cls.shares = cls.create_shares([
            {'kwargs': {
                'name': cls.share_name,
                'description': cls.share_desc,
                'size': size,
                'share_group_id': sg_id,
                'experimental': True,
            }} for size, sg_id in ((cls.share_size, cls.share_group['id']),
                                   (cls.share_size2, cls.share_group['id']),
                                   (cls.share_size, cls.share_group2['id']))
        ])
        # Create share group snapshots
        cls.sg_snap_name = data_utils.rand_name("tempest-sg-snap-name")
        cls.sg_snap_desc = data_utils.rand_name("tempest-sg-snap-desc")
        cls.sg_snapshot = cls.create_share_group_snapshot_wait_for_active(
            cls.share_group["id"],
            name=cls.sg_snap_name,
            description=cls.sg_snap_desc,
        )
        cls.sg_snapshot2 = cls.create_share_group_snapshot_wait_for_active(
            cls.share_group2['id'],
            name=cls.sg_snap_name,
            description=cls.sg_snap_desc,
        )
    @tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
    def test_get_share_group_min_supported_sg_microversion(self):
        # Get share group
        share_group = self.shares_v2_client.get_share_group(
            self.share_group['id'],
            version=constants.MIN_SHARE_GROUP_MICROVERSION,
        )
        # Verify keys
        actual_keys = set(share_group.keys())
        self.assertTrue(
            constants.SHARE_GROUP_DETAIL_REQUIRED_KEYS.issubset(actual_keys),
            'Not all required keys returned for share group %s. '
            'Expected at least: %s, found %s' % (
                share_group['id'],
                constants.SHARE_GROUP_DETAIL_REQUIRED_KEYS,
                actual_keys))
        # Verify values
        self.assertEqual(self.share_group_name, share_group["name"])
        self.assertEqual(self.share_group_desc, share_group["description"])
    @tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
    def test_get_share_min_supported_sg_microversion(self):
        # Get share
        share = self.shares_v2_client.get_share(
            self.shares[0]['id'],
            version=constants.MIN_SHARE_GROUP_MICROVERSION,
            experimental=True)
        # Verify keys
        expected_keys = {
            "status", "description", "links", "availability_zone",
            "created_at", "share_proto", "name", "snapshot_id",
            "id", "size", "share_group_id",
        }
        actual_keys = set(share.keys())
        self.assertTrue(
            expected_keys.issubset(actual_keys),
            'Not all required keys returned for share %s. '
            'Expected at least: %s, found %s' % (
                share['id'], expected_keys, actual_keys))
        # Verify values
        self.assertEqual(self.share_name, share["name"])
        self.assertEqual(self.share_desc, share["description"])
        self.assertEqual(self.share_size, int(share["size"]))
        self.assertEqual(self.share_group["id"], share["share_group_id"])
    @tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
    def test_list_share_groups_min(self):
        # List share groups
        share_groups = self.shares_v2_client.list_share_groups(
            version=constants.MIN_SHARE_GROUP_MICROVERSION)
        # Verify keys
        self.assertGreater(len(share_groups), 0)
        for sg in share_groups:
            keys = set(sg.keys())
            self.assertEqual(
                constants.SHARE_GROUP_SIMPLE_KEYS,
                keys,
                'Incorrect keys returned for share group %s. '
                'Expected: %s, found %s' % (
                    sg['id'],
                    constants.SHARE_GROUP_SIMPLE_KEYS,
                    ','.join(keys)))
        # Share group ids are in list exactly once
        for sg_id in (self.share_group["id"], self.share_group2["id"]):
            gen = [sg["id"] for sg in share_groups if sg["id"] == sg_id]
            msg = ("Expected id %s exactly once in share group list" % sg_id)
            self.assertEqual(1, len(gen), msg)
    @tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
    @ddt.data(constants.MIN_SHARE_GROUP_MICROVERSION, '2.36')
    def test_list_share_groups_with_detail_min(self, version):
        params = None
        if utils.is_microversion_ge(version, '2.36'):
            # 2.36 added inexact (substring) name/description filtering.
            params = {'name~': 'tempest', 'description~': 'tempest'}
        # List share groups
        share_groups = self.shares_v2_client.list_share_groups(
            detailed=True, params=params, version=version)
        # Verify keys
        for sg in share_groups:
            keys = set(sg.keys())
            self.assertTrue(
                constants.SHARE_GROUP_DETAIL_REQUIRED_KEYS.issubset(
                    keys),
                'Not all required keys returned for share group %s. '
                'Expected at least: %s, found %s' % (
                    sg['id'],
                    constants.SHARE_GROUP_DETAIL_REQUIRED_KEYS,
                    ','.join(keys),
                )
            )
        # Share group ids are in list exactly once
        for group_id in (self.share_group["id"], self.share_group2["id"]):
            gen = [share_group["id"] for share_group in share_groups
                   if share_group["id"] == group_id]
            msg = ("Expected id %s exactly once in share group list" %
                   group_id)
            self.assertEqual(1, len(gen), msg)
    @tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
    def test_filter_shares_by_share_group_id_min(self):
        shares = self.shares_v2_client.list_shares(
            detailed=True,
            params={'share_group_id': self.share_group['id']},
            version=constants.MIN_SHARE_GROUP_MICROVERSION,
            experimental=True,
        )
        share_ids = [share['id'] for share in shares]
        self.assertEqual(
            2, len(shares),
            'Incorrect number of shares returned. '
            'Expected 2, got %s' % len(shares))
        self.assertIn(
            self.shares[0]['id'], share_ids,
            'Share %s expected in returned list, but got %s' % (
                self.shares[0]['id'], share_ids))
        # Fixed: failure message previously reported shares[0] while the
        # assertion checks shares[1].
        self.assertIn(
            self.shares[1]['id'], share_ids,
            'Share %s expected in returned list, but got %s' % (
                self.shares[1]['id'], share_ids))
    @tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
    def test_get_share_group_snapshot_min(self):
        # Get share group snapshot
        sg_snapshot = self.shares_v2_client.get_share_group_snapshot(
            self.sg_snapshot['id'],
            version=constants.MIN_SHARE_GROUP_MICROVERSION,
        )
        # Verify keys
        actual_keys = set(sg_snapshot.keys())
        # Fixed: failure message previously interpolated the (wrong) share
        # group key set instead of the snapshot key set being asserted on.
        self.assertTrue(
            constants.SHARE_GROUP_SNAPSHOT_DETAIL_REQUIRED_KEYS.issubset(
                actual_keys),
            'Not all required keys returned for share group %s. '
            'Expected at least: %s, found %s' % (
                sg_snapshot['id'],
                constants.SHARE_GROUP_SNAPSHOT_DETAIL_REQUIRED_KEYS,
                actual_keys,
            )
        )
        # Verify values
        self.assertEqual(self.sg_snap_name, sg_snapshot["name"])
        self.assertEqual(self.sg_snap_desc, sg_snapshot["description"])
    @tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
    def test_get_share_group_snapshot_members_min(self):
        sg_snapshot = self.shares_v2_client.get_share_group_snapshot(
            self.sg_snapshot['id'],
            version=constants.MIN_SHARE_GROUP_MICROVERSION,
        )
        sg_snapshot_members = sg_snapshot['members']
        member_share_ids = [m['share_id'] for m in sg_snapshot_members]
        self.assertEqual(
            2, len(sg_snapshot_members),
            'Unexpected number of share group snapshot members. '
            'Expected 2, got %s.' % len(sg_snapshot_members))
        # Verify each share is represented in the share group snapshot
        # appropriately
        for share_id in (self.shares[0]['id'], self.shares[1]['id']):
            # Fixed: garbled failure message ("Share missing %s missing ...").
            self.assertIn(
                share_id, member_share_ids,
                'Share %s missing from share group '
                'snapshot. Found %s.' % (share_id, member_share_ids))
        for share in (self.shares[0], self.shares[1]):
            for member in sg_snapshot_members:
                if share['id'] == member['share_id']:
                    self.assertEqual(share['size'], member['size'])
    @tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
    def test_create_share_group_from_populated_share_group_snapshot_min(self):
        sg_snapshot = self.shares_v2_client.get_share_group_snapshot(
            self.sg_snapshot['id'],
            version=constants.MIN_SHARE_GROUP_MICROVERSION,
        )
        snapshot_members = sg_snapshot['members']
        new_share_group = self.create_share_group(
            cleanup_in_class=False,
            source_share_group_snapshot_id=self.sg_snapshot['id'],
            version=constants.MIN_SHARE_GROUP_MICROVERSION,
        )
        new_share_group = self.shares_v2_client.get_share_group(
            new_share_group['id'],
            version=constants.MIN_SHARE_GROUP_MICROVERSION,
        )
        # Verify that share_network information matches source share group
        self.assertEqual(
            self.share_group['share_network_id'],
            new_share_group['share_network_id'])
        new_shares = self.shares_v2_client.list_shares(
            params={'share_group_id': new_share_group['id']},
            detailed=True,
            version=constants.MIN_SHARE_GROUP_MICROVERSION,
            experimental=True,
        )
        # Verify each new share is available
        for share in new_shares:
            self.assertEqual(
                'available', share['status'],
                'Share %s is not in available status.' % share['id'])
        # Verify each sgsnapshot member is represented in the new sg
        # appropriately
        share_source_member_ids = [
            share['source_share_group_snapshot_member_id']
            for share in new_shares]
        for member in snapshot_members:
            self.assertIn(
                member['id'], share_source_member_ids,
                'Share group snapshot member %s not represented by '
                'share group %s.' % (member['id'], new_share_group['id']))
            for share in new_shares:
                if (share['source_share_group_snapshot_member_id'] == (
                        member['id'])):
                    self.assertEqual(member['size'], share['size'])
                    self.assertEqual(
                        self.share_group['share_network_id'],
                        share['share_network_id'])
@testtools.skipUnless(
    CONF.share.run_share_group_tests, 'Share Group tests disabled.')
@base.skip_if_microversion_lt(constants.MIN_SHARE_GROUP_MICROVERSION)
class ShareGroupRenameTest(base.BaseSharesTest):
    """Covers updating (rename/re-describe) of a share group,
    including unicode names and descriptions.
    """
    @classmethod
    def resource_setup(cls):
        super(ShareGroupRenameTest, cls).resource_setup()
        # Create share group
        cls.share_group_name = data_utils.rand_name("tempest-sg-name")
        cls.share_group_desc = data_utils.rand_name("tempest-sg-description")
        cls.share_group = cls.create_share_group(
            name=cls.share_group_name,
            description=cls.share_group_desc,
        )
    @tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
    def test_update_share_group_min(self):
        # Get share_group and verify the fixture's initial name/description
        share_group = self.shares_v2_client.get_share_group(
            self.share_group['id'],
            version=constants.MIN_SHARE_GROUP_MICROVERSION
        )
        self.assertEqual(self.share_group_name, share_group["name"])
        self.assertEqual(self.share_group_desc, share_group["description"])
        # Update share_group
        new_name = data_utils.rand_name("tempest-new-name")
        new_desc = data_utils.rand_name("tempest-new-description")
        updated = self.shares_v2_client.update_share_group(
            share_group["id"],
            name=new_name,
            description=new_desc,
            version=constants.MIN_SHARE_GROUP_MICROVERSION,
        )
        self.assertEqual(new_name, updated["name"])
        self.assertEqual(new_desc, updated["description"])
        # Get share_group again: the update must also be visible on re-read
        share_group = self.shares_v2_client.get_share_group(
            self.share_group['id'],
            version=constants.MIN_SHARE_GROUP_MICROVERSION,
        )
        self.assertEqual(new_name, share_group["name"])
        self.assertEqual(new_desc, share_group["description"])
    @tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
    def test_create_update_read_share_group_with_unicode_min(self):
        # Non-ASCII (Kannada) strings exercise unicode handling end-to-end.
        value1 = u'ಠ_ಠ'
        value2 = u'ಠ_ರೃ'
        # Create share_group
        share_group = self.create_share_group(
            cleanup_in_class=False,
            name=value1,
            description=value1,
            version=constants.MIN_SHARE_GROUP_MICROVERSION,
        )
        self.assertEqual(value1, share_group["name"])
        self.assertEqual(value1, share_group["description"])
        # Update share group
        updated = self.shares_v2_client.update_share_group(
            share_group["id"],
            name=value2,
            description=value2,
            version=constants.MIN_SHARE_GROUP_MICROVERSION,
        )
        self.assertEqual(value2, updated["name"])
        self.assertEqual(value2, updated["description"])
        # Get share group
        share_group = self.shares_v2_client.get_share_group(
            share_group['id'], version=constants.MIN_SHARE_GROUP_MICROVERSION)
        self.assertEqual(value2, share_group["name"])
        self.assertEqual(value2, share_group["description"])
| 39.985075 | 78 | 0.626291 |
1edcc8373b4e3195a5b88634bcb45964381abcc2 | 19,065 | py | Python | tools/testing/kunit/kunit_tool_test.py | jainsakshi2395/linux | 7ccb860232bb83fb60cd6bcf5aaf0c008d903acb | [
"Linux-OpenIB"
] | null | null | null | tools/testing/kunit/kunit_tool_test.py | jainsakshi2395/linux | 7ccb860232bb83fb60cd6bcf5aaf0c008d903acb | [
"Linux-OpenIB"
] | 1 | 2021-01-27T01:29:47.000Z | 2021-01-27T01:29:47.000Z | tools/testing/kunit/kunit_tool_test.py | jainsakshi2395/linux | 7ccb860232bb83fb60cd6bcf5aaf0c008d903acb | [
"Linux-OpenIB"
] | null | null | null | #!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0
#
# A collection of tests for tools/testing/kunit/kunit.py
#
# Copyright (C) 2019, Google LLC.
# Author: Brendan Higgins <brendanhiggins@google.com>
import unittest
from unittest import mock
import tempfile, shutil # Handling test_tmpdir
import itertools
import json
import signal
import os
import kunit_config
import kunit_parser
import kunit_kernel
import kunit_json
import kunit
test_tmpdir = ''
abs_test_data_dir = ''
def setUpModule():
	# Create a scratch directory shared by all tests in this module and
	# resolve the absolute path of the checked-in test_data/ fixtures.
	global test_tmpdir, abs_test_data_dir
	test_tmpdir = tempfile.mkdtemp()
	abs_test_data_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'test_data'))
def tearDownModule():
	# Remove the scratch directory created in setUpModule().
	shutil.rmtree(test_tmpdir)
def test_data_path(path):
	# Map a fixture file name to its absolute path under test_data/.
	return os.path.join(abs_test_data_dir, path)
class KconfigTest(unittest.TestCase):
	"""Tests for kunit_config.Kconfig parsing, serialization and subset logic."""
	def test_is_subset_of(self):
		# An empty config is a subset of anything, including itself;
		# a non-empty config is never a subset of the empty one.
		kconfig0 = kunit_config.Kconfig()
		self.assertTrue(kconfig0.is_subset_of(kconfig0))
		kconfig1 = kunit_config.Kconfig()
		kconfig1.add_entry(kunit_config.KconfigEntry('TEST', 'y'))
		self.assertTrue(kconfig1.is_subset_of(kconfig1))
		self.assertTrue(kconfig0.is_subset_of(kconfig1))
		self.assertFalse(kconfig1.is_subset_of(kconfig0))
	def test_read_from_file(self):
		# Parsing the fixture file should yield exactly these five entries.
		kconfig = kunit_config.Kconfig()
		kconfig_path = test_data_path('test_read_from_file.kconfig')
		kconfig.read_from_file(kconfig_path)
		expected_kconfig = kunit_config.Kconfig()
		expected_kconfig.add_entry(
			kunit_config.KconfigEntry('UML', 'y'))
		expected_kconfig.add_entry(
			kunit_config.KconfigEntry('MMU', 'y'))
		expected_kconfig.add_entry(
			kunit_config.KconfigEntry('TEST', 'y'))
		expected_kconfig.add_entry(
			kunit_config.KconfigEntry('EXAMPLE_TEST', 'y'))
		expected_kconfig.add_entry(
			kunit_config.KconfigEntry('MK8', 'n'))
		self.assertEqual(kconfig.entries(), expected_kconfig.entries())
	def test_write_to_file(self):
		# Round-trip: write a config, read it back, entries must match.
		kconfig_path = os.path.join(test_tmpdir, '.config')
		expected_kconfig = kunit_config.Kconfig()
		expected_kconfig.add_entry(
			kunit_config.KconfigEntry('UML', 'y'))
		expected_kconfig.add_entry(
			kunit_config.KconfigEntry('MMU', 'y'))
		expected_kconfig.add_entry(
			kunit_config.KconfigEntry('TEST', 'y'))
		expected_kconfig.add_entry(
			kunit_config.KconfigEntry('EXAMPLE_TEST', 'y'))
		expected_kconfig.add_entry(
			kunit_config.KconfigEntry('MK8', 'n'))
		expected_kconfig.write_to_file(kconfig_path)
		actual_kconfig = kunit_config.Kconfig()
		actual_kconfig.read_from_file(kconfig_path)
		self.assertEqual(actual_kconfig.entries(),
				 expected_kconfig.entries())
class KUnitParserTest(unittest.TestCase):
def assertContains(self, needle: str, haystack: kunit_parser.LineStream):
# Clone the iterator so we can print the contents on failure.
copy, backup = itertools.tee(haystack)
for line in copy:
if needle in line:
return
raise AssertionError(f'"{needle}" not found in {list(backup)}!')
def test_output_isolated_correctly(self):
log_path = test_data_path('test_output_isolated_correctly.log')
with open(log_path) as file:
result = kunit_parser.extract_tap_lines(file.readlines())
self.assertContains('TAP version 14', result)
self.assertContains(' # Subtest: example', result)
self.assertContains(' 1..2', result)
self.assertContains(' ok 1 - example_simple_test', result)
self.assertContains(' ok 2 - example_mock_test', result)
self.assertContains('ok 1 - example', result)
def test_output_with_prefix_isolated_correctly(self):
log_path = test_data_path('test_pound_sign.log')
with open(log_path) as file:
result = kunit_parser.extract_tap_lines(file.readlines())
self.assertContains('TAP version 14', result)
self.assertContains(' # Subtest: kunit-resource-test', result)
self.assertContains(' 1..5', result)
self.assertContains(' ok 1 - kunit_resource_test_init_resources', result)
self.assertContains(' ok 2 - kunit_resource_test_alloc_resource', result)
self.assertContains(' ok 3 - kunit_resource_test_destroy_resource', result)
self.assertContains(' foo bar #', result)
self.assertContains(' ok 4 - kunit_resource_test_cleanup_resources', result)
self.assertContains(' ok 5 - kunit_resource_test_proper_free_ordering', result)
self.assertContains('ok 1 - kunit-resource-test', result)
self.assertContains(' foo bar # non-kunit output', result)
self.assertContains(' # Subtest: kunit-try-catch-test', result)
self.assertContains(' 1..2', result)
self.assertContains(' ok 1 - kunit_test_try_catch_successful_try_no_catch',
result)
self.assertContains(' ok 2 - kunit_test_try_catch_unsuccessful_try_does_catch',
result)
self.assertContains('ok 2 - kunit-try-catch-test', result)
self.assertContains(' # Subtest: string-stream-test', result)
self.assertContains(' 1..3', result)
self.assertContains(' ok 1 - string_stream_test_empty_on_creation', result)
self.assertContains(' ok 2 - string_stream_test_not_empty_after_add', result)
self.assertContains(' ok 3 - string_stream_test_get_string', result)
self.assertContains('ok 3 - string-stream-test', result)
def test_parse_successful_test_log(self):
all_passed_log = test_data_path('test_is_test_passed-all_passed.log')
with open(all_passed_log) as file:
result = kunit_parser.parse_run_tests(file.readlines())
self.assertEqual(
kunit_parser.TestStatus.SUCCESS,
result.status)
def test_parse_failed_test_log(self):
failed_log = test_data_path('test_is_test_passed-failure.log')
with open(failed_log) as file:
result = kunit_parser.parse_run_tests(file.readlines())
self.assertEqual(
kunit_parser.TestStatus.FAILURE,
result.status)
def test_no_header(self):
empty_log = test_data_path('test_is_test_passed-no_tests_run_no_header.log')
with open(empty_log) as file:
result = kunit_parser.parse_run_tests(
kunit_parser.extract_tap_lines(file.readlines()))
self.assertEqual(0, len(result.suites))
self.assertEqual(
kunit_parser.TestStatus.FAILURE_TO_PARSE_TESTS,
result.status)
def test_no_tests(self):
empty_log = test_data_path('test_is_test_passed-no_tests_run_with_header.log')
with open(empty_log) as file:
result = kunit_parser.parse_run_tests(
kunit_parser.extract_tap_lines(file.readlines()))
self.assertEqual(0, len(result.suites))
self.assertEqual(
kunit_parser.TestStatus.NO_TESTS,
result.status)
def test_no_kunit_output(self):
crash_log = test_data_path('test_insufficient_memory.log')
print_mock = mock.patch('builtins.print').start()
with open(crash_log) as file:
result = kunit_parser.parse_run_tests(
kunit_parser.extract_tap_lines(file.readlines()))
print_mock.assert_any_call(StrContains('could not parse test results!'))
print_mock.stop()
file.close()
def test_crashed_test(self):
crashed_log = test_data_path('test_is_test_passed-crash.log')
with open(crashed_log) as file:
result = kunit_parser.parse_run_tests(file.readlines())
self.assertEqual(
kunit_parser.TestStatus.TEST_CRASHED,
result.status)
def test_skipped_test(self):
skipped_log = test_data_path('test_skip_tests.log')
file = open(skipped_log)
result = kunit_parser.parse_run_tests(file.readlines())
# A skipped test does not fail the whole suite.
self.assertEqual(
kunit_parser.TestStatus.SUCCESS,
result.status)
file.close()
def test_skipped_all_tests(self):
skipped_log = test_data_path('test_skip_all_tests.log')
file = open(skipped_log)
result = kunit_parser.parse_run_tests(file.readlines())
self.assertEqual(
kunit_parser.TestStatus.SKIPPED,
result.status)
file.close()
def test_ignores_prefix_printk_time(self):
prefix_log = test_data_path('test_config_printk_time.log')
with open(prefix_log) as file:
result = kunit_parser.parse_run_tests(file.readlines())
self.assertEqual(
kunit_parser.TestStatus.SUCCESS,
result.status)
self.assertEqual('kunit-resource-test', result.suites[0].name)
def test_ignores_multiple_prefixes(self):
prefix_log = test_data_path('test_multiple_prefixes.log')
with open(prefix_log) as file:
result = kunit_parser.parse_run_tests(file.readlines())
self.assertEqual(
kunit_parser.TestStatus.SUCCESS,
result.status)
self.assertEqual('kunit-resource-test', result.suites[0].name)
def test_prefix_mixed_kernel_output(self):
mixed_prefix_log = test_data_path('test_interrupted_tap_output.log')
with open(mixed_prefix_log) as file:
result = kunit_parser.parse_run_tests(file.readlines())
self.assertEqual(
kunit_parser.TestStatus.SUCCESS,
result.status)
self.assertEqual('kunit-resource-test', result.suites[0].name)
def test_prefix_poundsign(self):
pound_log = test_data_path('test_pound_sign.log')
with open(pound_log) as file:
result = kunit_parser.parse_run_tests(file.readlines())
self.assertEqual(
kunit_parser.TestStatus.SUCCESS,
result.status)
self.assertEqual('kunit-resource-test', result.suites[0].name)
def test_kernel_panic_end(self):
panic_log = test_data_path('test_kernel_panic_interrupt.log')
with open(panic_log) as file:
result = kunit_parser.parse_run_tests(file.readlines())
self.assertEqual(
kunit_parser.TestStatus.TEST_CRASHED,
result.status)
self.assertEqual('kunit-resource-test', result.suites[0].name)
def test_pound_no_prefix(self):
pound_log = test_data_path('test_pound_no_prefix.log')
with open(pound_log) as file:
result = kunit_parser.parse_run_tests(file.readlines())
self.assertEqual(
kunit_parser.TestStatus.SUCCESS,
result.status)
self.assertEqual('kunit-resource-test', result.suites[0].name)
class LinuxSourceTreeTest(unittest.TestCase):
def setUp(self):
mock.patch.object(signal, 'signal').start()
self.addCleanup(mock.patch.stopall)
def test_invalid_kunitconfig(self):
with self.assertRaisesRegex(kunit_kernel.ConfigError, 'nonexistent.* does not exist'):
kunit_kernel.LinuxSourceTree('', kunitconfig_path='/nonexistent_file')
def test_valid_kunitconfig(self):
with tempfile.NamedTemporaryFile('wt') as kunitconfig:
tree = kunit_kernel.LinuxSourceTree('', kunitconfig_path=kunitconfig.name)
def test_dir_kunitconfig(self):
with tempfile.TemporaryDirectory('') as dir:
with open(os.path.join(dir, '.kunitconfig'), 'w') as f:
pass
tree = kunit_kernel.LinuxSourceTree('', kunitconfig_path=dir)
# TODO: add more test cases.
class KUnitJsonTest(unittest.TestCase):
def _json_for(self, log_file):
with open(test_data_path(log_file)) as file:
test_result = kunit_parser.parse_run_tests(file)
json_obj = kunit_json.get_json_result(
test_result=test_result,
def_config='kunit_defconfig',
build_dir=None,
json_path='stdout')
return json.loads(json_obj)
def test_failed_test_json(self):
result = self._json_for('test_is_test_passed-failure.log')
self.assertEqual(
{'name': 'example_simple_test', 'status': 'FAIL'},
result["sub_groups"][1]["test_cases"][0])
def test_crashed_test_json(self):
result = self._json_for('test_is_test_passed-crash.log')
self.assertEqual(
{'name': 'example_simple_test', 'status': 'ERROR'},
result["sub_groups"][1]["test_cases"][0])
def test_no_tests_json(self):
result = self._json_for('test_is_test_passed-no_tests_run_with_header.log')
self.assertEqual(0, len(result['sub_groups']))
class StrContains(str):
def __eq__(self, other):
return self in other
class KUnitMainTest(unittest.TestCase):
def setUp(self):
path = test_data_path('test_is_test_passed-all_passed.log')
with open(path) as file:
all_passed_log = file.readlines()
self.print_mock = mock.patch('builtins.print').start()
self.addCleanup(mock.patch.stopall)
self.linux_source_mock = mock.Mock()
self.linux_source_mock.build_reconfig = mock.Mock(return_value=True)
self.linux_source_mock.build_kernel = mock.Mock(return_value=True)
self.linux_source_mock.run_kernel = mock.Mock(return_value=all_passed_log)
def test_config_passes_args_pass(self):
kunit.main(['config', '--build_dir=.kunit'], self.linux_source_mock)
self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
self.assertEqual(self.linux_source_mock.run_kernel.call_count, 0)
def test_build_passes_args_pass(self):
kunit.main(['build'], self.linux_source_mock)
self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 0)
self.linux_source_mock.build_kernel.assert_called_once_with(False, 8, '.kunit', None)
self.assertEqual(self.linux_source_mock.run_kernel.call_count, 0)
def test_exec_passes_args_pass(self):
kunit.main(['exec'], self.linux_source_mock)
self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 0)
self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1)
self.linux_source_mock.run_kernel.assert_called_once_with(
args=None, build_dir='.kunit', filter_glob='', timeout=300)
self.print_mock.assert_any_call(StrContains('Testing complete.'))
def test_run_passes_args_pass(self):
kunit.main(['run'], self.linux_source_mock)
self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1)
self.linux_source_mock.run_kernel.assert_called_once_with(
args=None, build_dir='.kunit', filter_glob='', timeout=300)
self.print_mock.assert_any_call(StrContains('Testing complete.'))
def test_exec_passes_args_fail(self):
self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
with self.assertRaises(SystemExit) as e:
kunit.main(['exec'], self.linux_source_mock)
self.assertEqual(e.exception.code, 1)
def test_run_passes_args_fail(self):
self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
with self.assertRaises(SystemExit) as e:
kunit.main(['run'], self.linux_source_mock)
self.assertEqual(e.exception.code, 1)
self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1)
self.print_mock.assert_any_call(StrContains(' 0 tests run'))
def test_exec_raw_output(self):
self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
kunit.main(['exec', '--raw_output'], self.linux_source_mock)
self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1)
for call in self.print_mock.call_args_list:
self.assertNotEqual(call, mock.call(StrContains('Testing complete.')))
self.assertNotEqual(call, mock.call(StrContains(' 0 tests run')))
def test_run_raw_output(self):
self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
kunit.main(['run', '--raw_output'], self.linux_source_mock)
self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1)
for call in self.print_mock.call_args_list:
self.assertNotEqual(call, mock.call(StrContains('Testing complete.')))
self.assertNotEqual(call, mock.call(StrContains(' 0 tests run')))
def test_run_raw_output_kunit(self):
self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
kunit.main(['run', '--raw_output=kunit'], self.linux_source_mock)
self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1)
for call in self.print_mock.call_args_list:
self.assertNotEqual(call, mock.call(StrContains('Testing complete.')))
self.assertNotEqual(call, mock.call(StrContains(' 0 tests run')))
def test_run_raw_output_does_not_take_positional_args(self):
# --raw_output is a string flag, but we don't want it to consume
# any positional arguments, only ones after an '='
self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
kunit.main(['run', '--raw_output', 'filter_glob'], self.linux_source_mock)
self.linux_source_mock.run_kernel.assert_called_once_with(
args=None, build_dir='.kunit', filter_glob='filter_glob', timeout=300)
def test_exec_timeout(self):
timeout = 3453
kunit.main(['exec', '--timeout', str(timeout)], self.linux_source_mock)
self.linux_source_mock.run_kernel.assert_called_once_with(
args=None, build_dir='.kunit', filter_glob='', timeout=timeout)
self.print_mock.assert_any_call(StrContains('Testing complete.'))
def test_run_timeout(self):
timeout = 3453
kunit.main(['run', '--timeout', str(timeout)], self.linux_source_mock)
self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
self.linux_source_mock.run_kernel.assert_called_once_with(
args=None, build_dir='.kunit', filter_glob='', timeout=timeout)
self.print_mock.assert_any_call(StrContains('Testing complete.'))
def test_run_builddir(self):
build_dir = '.kunit'
kunit.main(['run', '--build_dir=.kunit'], self.linux_source_mock)
self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
self.linux_source_mock.run_kernel.assert_called_once_with(
args=None, build_dir=build_dir, filter_glob='', timeout=300)
self.print_mock.assert_any_call(StrContains('Testing complete.'))
def test_config_builddir(self):
build_dir = '.kunit'
kunit.main(['config', '--build_dir', build_dir], self.linux_source_mock)
self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
def test_build_builddir(self):
build_dir = '.kunit'
kunit.main(['build', '--build_dir', build_dir], self.linux_source_mock)
self.linux_source_mock.build_kernel.assert_called_once_with(False, 8, build_dir, None)
def test_exec_builddir(self):
build_dir = '.kunit'
kunit.main(['exec', '--build_dir', build_dir], self.linux_source_mock)
self.linux_source_mock.run_kernel.assert_called_once_with(
args=None, build_dir=build_dir, filter_glob='', timeout=300)
self.print_mock.assert_any_call(StrContains('Testing complete.'))
@mock.patch.object(kunit_kernel, 'LinuxSourceTree')
def test_run_kunitconfig(self, mock_linux_init):
mock_linux_init.return_value = self.linux_source_mock
kunit.main(['run', '--kunitconfig=mykunitconfig'])
# Just verify that we parsed and initialized it correctly here.
mock_linux_init.assert_called_once_with('.kunit',
kunitconfig_path='mykunitconfig',
arch='um',
cross_compile=None,
qemu_config_path=None)
@mock.patch.object(kunit_kernel, 'LinuxSourceTree')
def test_config_kunitconfig(self, mock_linux_init):
mock_linux_init.return_value = self.linux_source_mock
kunit.main(['config', '--kunitconfig=mykunitconfig'])
# Just verify that we parsed and initialized it correctly here.
mock_linux_init.assert_called_once_with('.kunit',
kunitconfig_path='mykunitconfig',
arch='um',
cross_compile=None,
qemu_config_path=None)
def test_run_kernel_args(self):
kunit.main(['run', '--kernel_args=a=1', '--kernel_args=b=2'], self.linux_source_mock)
self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
self.linux_source_mock.run_kernel.assert_called_once_with(
args=['a=1','b=2'], build_dir='.kunit', filter_glob='', timeout=300)
self.print_mock.assert_any_call(StrContains('Testing complete.'))
if __name__ == '__main__':
unittest.main()
| 38.828921 | 90 | 0.769997 |
bb125a3c8236848728ad9f8f039d2c3973fcda56 | 4,590 | py | Python | src/pyinterp/interpolator/bicubic.py | readthedocs-assistant/pangeo-pyinterp | e9dc18445dce36638d5a90f64c8e2f1b53164f90 | [
"BSD-3-Clause"
] | null | null | null | src/pyinterp/interpolator/bicubic.py | readthedocs-assistant/pangeo-pyinterp | e9dc18445dce36638d5a90f64c8e2f1b53164f90 | [
"BSD-3-Clause"
] | null | null | null | src/pyinterp/interpolator/bicubic.py | readthedocs-assistant/pangeo-pyinterp | e9dc18445dce36638d5a90f64c8e2f1b53164f90 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2022 CNES
#
# All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
"""
Bicubic interpolation
=====================
"""
from typing import Optional, Union
import numpy as np
from .. import core
from .. import grid
from .. import interface
def bicubic(mesh: Union[grid.Grid2D, grid.Grid3D, grid.Grid4D],
x: np.ndarray,
y: np.ndarray,
z: Optional[np.ndarray] = None,
u: Optional[np.ndarray] = None,
nx: Optional[int] = 3,
ny: Optional[int] = 3,
fitting_model: str = "c_spline",
boundary: str = "undef",
bounds_error: bool = False,
num_threads: int = 0) -> np.ndarray:
"""Bicubic gridded interpolator.
Args:
mesh (pyinterp.grid.Grid2D, pyinterp.grid.Grid3D, pyinterp.grid.Grid4D):
Function on a uniform grid to be interpolated. If the grid is a ND
grid, the bicubic interpolation is performed spatially along the X
and Y axes of the ND grid and a linear interpolation are performed
along the other axes between the values obtained by the bicubic
interpolation.
.. warning::
The GSL functions for calculating bicubic functions require
that the axes defined in the grids are strictly increasing.
x (numpy.ndarray): X-values.
y (numpy.ndarray): Y-values.
z (numpy.ndarray, optional): None for a :py:class:`2D Grid
<pyinterp.grid.Grid2D>` otherwise Z-values.
u (numpy.ndarray, optional): None for a :py:class:`2D Grid
<pyinterp.grid.Grid2D>`, :py:class:`3D Grid
<pyinterp.grid.Grid3D>` otherwise U-values.
nx (int, optional): The number of X-coordinate values required to
perform the interpolation. Defaults to ``3``.
ny (int, optional): The number of Y-coordinate values required to
perform the interpolation. Defaults to ``3``.
fitting_model (str, optional): Type of interpolation to be performed.
Supported are ``linear``, ``bicubic``, ``polynomial``, ``c_spline``,
``c_spline_periodic``, ``akima``, ``akima_periodic`` and
``steffen``. Default to ``bicubic``.
boundary (str, optional): A flag indicating how to handle boundaries of
the frame.
* ``expand``: Expand the boundary as a constant.
* ``wrap``: circular boundary conditions.
* ``sym``: Symmetrical boundary conditions.
* ``undef``: Boundary violation is not defined.
Default ``undef``.
bounds_error (bool, optional): If True, when interpolated values are
requested outside of the domain of the input axes (x,y), a
:py:class:`ValueError` is raised. If False, then the value is set to
NaN. Default to ``False``.
num_threads (int, optional): The number of threads to use for the
computation. If 0 all CPUs are used. If 1 is given, no parallel
computing code is used at all, which is useful for debugging.
Defaults to ``0``.
Returns:
numpy.ndarray: Values interpolated.
"""
if not mesh.x.is_ascending():
raise ValueError('X-axis is not increasing')
if not mesh.y.is_ascending():
raise ValueError('Y-axis is not increasing')
if fitting_model not in [
'akima_periodic', 'akima', 'bicubic', 'c_spline_periodic',
'c_spline', 'linear', 'polynomial', 'steffen'
]:
raise ValueError(f"fitting model {fitting_model!r} is not defined")
if boundary not in ['expand', 'wrap', 'sym', 'undef']:
raise ValueError(f"boundary {boundary!r} is not defined")
instance = mesh._instance
function = interface._core_function(
"bicubic" if fitting_model == "bicubic" else "spline", instance)
args = [
instance,
np.asarray(x),
np.asarray(y), nx, ny, fitting_model, boundary, bounds_error,
num_threads
]
if isinstance(mesh, (grid.Grid3D, grid.Grid4D)):
if z is None:
raise ValueError(
f"You must specify the Z-values for a {mesh._DIMENSIONS}D "
"grid.")
args.insert(3, np.asarray(z))
if isinstance(mesh, grid.Grid4D):
if u is None:
raise ValueError("You must specify the U-values for a 4D grid.")
args.insert(4, np.asarray(u))
return getattr(core, function)(*args)
| 41.727273 | 80 | 0.605664 |
8a62d6ff3e14371b4993203eb2df90f6b5891e77 | 15,181 | py | Python | AustinBot/all_cogs/rpgFunctions/database.py | austinmh12/DiscordBots | 55550b68a7ad6423de55e62dbbff93fd88f08ff2 | [
"MIT"
] | null | null | null | AustinBot/all_cogs/rpgFunctions/database.py | austinmh12/DiscordBots | 55550b68a7ad6423de55e62dbbff93fd88f08ff2 | [
"MIT"
] | null | null | null | AustinBot/all_cogs/rpgFunctions/database.py | austinmh12/DiscordBots | 55550b68a7ad6423de55e62dbbff93fd88f08ff2 | [
"MIT"
] | null | null | null | from .. import sql
def initialise_db():
sql('rpg', 'create table players (id integer, guild_id integer, current_character text)')
sql('rpg', '''create table professions (
name text
,primary_stat text
,secondary_stat text
,base_str integer
,base_dex integer
,base_int integer
,base_con integer
,str_mod integer
,dex_mod integer
,int_mod integer
,con_mod integer
,starting_weapon integer
,starting_off_hand integer
,weight text
)'''
)
sql('rpg', '''create table characters (
player_id integer
,player_guild_id integer
,name text
,profession text
,level integer
,exp integer
,gold integer
,helmet integer
,chest integer
,legs integer
,boots integer
,gloves integer
,amulet integer
,ring integer
,weapon integer
,off_hand integer
,current_con integer
,current_area text
,death_timer text
,inventory text
,current_mp integer
,spells text
)'''
)
sql('rpg', '''create table monsters (
name text
,primary_stat text
,secondary_stat text
,min_damage integer
,max_damage integer
,crit_chance integer
,base_str integer
,base_dex integer
,base_int integer
,base_con integer
,str_mod integer
,dex_mod integer
,int_mod integer
,con_mod integer
,base_exp integer
,exp_mod integer
)'''
)
sql('rpg', '''create table equipment (
id integer
,name text
,rarity text
,type text
,level integer
,str_bonus integer
,dex_bonus integer
,int_bonus integer
,con_bonus integer
,def_bonus integer
,atk_bonus integer
,weight text
,defense integer
,min_damage integer
,max_damage integer
,stat text
,crit_chance integer
)'''
)
sql('rpg', 'create table areas (name text, recommended_level integer, monsters text, loot_table text)')
sql('rpg', 'create table spells (name text, profession text, level integer, min_damage integer, max_damage integer, stat text, cost integer)')
sql('rpg', 'create table consumables (id integer, name text, type text, restored integer, stat text, bonus integer)')
sql('rpg', '''insert into professions values
("Warrior","STR","DEX",10,8,5,7,3,2,1,3,1,7,'Heavy')
,("Wizard","INT","",4,5,10,5,1,1,3,1,3,0,'Light')
,("Archer","DEX","",5,9,6,5,1,3,1,2,2,0,'Light')
,("Rogue","DEX","",3,11,7,6,1,3,1,1,4,0,'Medium')
,("Artificer","INT","CON",4,6,8,8,1,2,3,3,3,0,'Medium')
,("Druid","CON","STR",8,7,5,9,3,1,1,3,1,0,'Medium')
,("Paladin","STR","INT",9,4,8,7,3,1,3,2,1,7,'Heavy')
'''
)
sql('rpg', '''insert into equipment values
(1,"Starter Sword","Trash","Sword",1,0,0,0,0,0,0,"",0,1,3,"STR",0.05)
,(2,"Starter Shortbow","Trash","Shortbow",1,0,0,0,0,0,0,"",0,1,3,"DEX",0.05)
,(3,"Starter Wand","Trash","Wand",1,0,0,0,0,0,0,"",0,1,3,"INT",0.05)
,(4,"Starter Dagger","Trash","Dagger",1,0,0,0,0,0,0,"",0,1,3,"DEX",0.1)
,(5,"Dented Platmail","Trash","Chest",1,0,0,0,0,0,0,"Heavy",3,0,0,"",0)
,(6,"Dented Platelegs","Trash","Legs",1,0,0,0,0,0,0,"Heavy",3,0,0,"",0)
,(7,"Cracked Kite Shield","Trash","Shield",1,0,0,0,0,0,0,"Heavy",3,0,0,"",0)
,(8,"Ripped Leather Vest","Trash","Chest",1,0,0,0,0,0,0,"Medium",2,0,0,"",0)
,(9,"Ripped Leather Pants","Trash","Legs",1,0,0,0,0,0,0,"Medium",2,0,0,"",0)
,(10,"Tattered Cloth Shirt","Trash","Chest",1,0,0,0,0,0,0,"Light",1,0,0,"",0)
,(11,"Tattered Cloth Pants","Trash","Legs",1,0,0,0,0,0,0,"Light",1,0,0,"",0)
'''
)
sql('rpg', '''insert into monsters values
('Goblin','STR','',1,3,0.02,1,1,1,1,1,1,1,1,2,2)
,('Rat','DEX','',1,2,0.05,1,2,1,1,1,1,1,1,1,2)
,('Spider','DEX','',1,3,0.01,1,1,1,1,1,2,1,1,2,1)
,('Crab','CON','',1,2,0,1,1,1,3,1,1,1,2,1,1)
,('Mole','CON','',2,3,0,1,1,1,3,1,1,1,2,2,2)
,('Wisp','INT','',2,3,0,1,1,2,1,1,1,2,2,2,2)
,('Ghost','INT','',1,3,0,1,1,1,1,1,1,3,2,2,3)
,('Fly','DEX','',1,2,0,1,1,1,1,1,1,1,1,1,1)
,('Skeleton','STR','',2,4,0,3,2,1,1,2,2,1,1,3,2)
,('Zombie','STR','',1,4,0,3,3,1,3,2,2,1,2,3,3)
,('Ghoul','STR','',2,3,0,2,1,1,4,3,2,1,3,3,2)
,('Bandit','DEX','',2,4,0.1,2,5,2,3,1,1,1,3,4,2)
,('Thief','DEX','',3,4,0.1,4,4,2,4,2,2,1,2,7,3)
,('Imp','DEX','INT',1,5,0.05,1,8,4,1,1,2,2,1,4,2)
,('Guard','STR','',3,6,0.02,7,6,3,5,2,2,1,3,6,4)
,('Duck','CON','',1,2,0,1,1,1,2,1,1,1,2,2,2)
,('Chicken','CON','',1,2,0,1,1,1,3,1,1,1,2,2,2)
,('Bat','DEX','',1,3,0.02,1,3,1,1,1,2,1,1,3,2)
,('Snail','CON','',1,2,0,1,1,1,1,1,1,1,1,1,1)
,('Slime','CON','',2,4,0,1,1,1,4,1,1,1,4,2,3)
,('Scorpion','DEX','',1,4,0.02,1,3,1,1,1,2,1,1,3,1)
,('Lizard','DEX','CON',1,3,0.02,1,3,1,2,1,2,1,2,3,3)
,('Snake','DEX','CON',1,3,0.02,1,3,1,3,1,2,1,1,2,3)
,('Scarab','DEX','CON',1,2,0.05,2,3,1,3,1,3,1,2,4,3)
,('Mummy','STR','CON',3,6,0,5,2,1,5,3,2,1,3,8,5)
,('Firebat','DEX','INT',1,3,0.02,1,5,5,3,1,2,2,2,5,4)
,('Lava Eel','DEX','INT',1,3,0.02,2,4,6,4,1,3,3,3,5,5)
,('Flame Spirit','INT','',5,6,0,1,5,9,4,1,2,4,2,10,8);'''
)
sql('rpg', '''insert into areas values
('Sewer',2,'{"Fly":{"min_level":1,"max_level":2},"Spider":{"min_level":1,"max_level":3},"Rat":{"min_level":1,"max_level":3}}','{"gold":5,"item_chance":0.1,"max_item_count":2,"items":{"Sword":{"rarities":["Trash","Common"],"min_level":1,"max_level":3},"Chest":{"rarities":["Trash","Common"],"min_level":1,"max_level":3},"Boots":{"rarities":["Trash","Common"],"min_level":1,"max_level":3},"Dagger":{"rarities":["Trash","Common"],"min_level":1,"max_level":3},"Helmet":{"rarities":["Trash","Common"],"min_level":1,"max_level":3},"Longsword":{"rarities":["Trash","Common"],"min_level":1,"max_level":3},"Shortbow":{"rarities":["Trash","Common"],"min_level":1,"max_level":3},"Gloves":{"rarities":["Trash","Common"],"min_level":1,"max_level":3},"Wand":{"rarities":["Trash","Common"],"min_level":1,"max_level":3}},"consumables":{"Health":{"min_level":1,"max_level":2}},"unique_items":[]}')
,('Forest',4,'{"Mole":{"min_level":3,"max_level":8},"Spider":{"min_level":2,"max_level":6},"Rat":{"min_level":2,"max_level":6},"Skeleton":{"min_level":3,"max_level":5},"Zombie":{"min_level":2,"max_level":6},"Ghoul":{"min_level":1,"max_level":5},"Imp":{"min_level":1,"max_level":8}}','{"gold":15,"item_chance":0.15,"max_item_count":2,"items":{"Sword":{"rarities":["Trash","Common"],"min_level":2,"max_level":6},"Chest":{"rarities":["Trash","Common"],"min_level":2,"max_level":6},"Boots":{"rarities":["Trash","Common"],"min_level":2,"max_level":6},"Dagger":{"rarities":["Trash","Common"],"min_level":2,"max_level":6},"Helmet":{"rarities":["Trash","Common"],"min_level":2,"max_level":6},"Longsword":{"rarities":["Trash","Common"],"min_level":2,"max_level":6},"Shortbow":{"rarities":["Trash","Common"],"min_level":2,"max_level":6},"Gloves":{"rarities":["Trash","Common"],"min_level":2,"max_level":6},"Wand":{"rarities":["Trash","Common"],"min_level":2,"max_level":6}},"consumables":{"Health":{"min_level":2,"max_level":4},"Mana":{"min_level":2,"max_level":4}},"unique_items":[]}')
,('SideRoads',9,'{"Bandit":{"min_level":6,"max_level":11},"Thief":{"min_level":6,"max_level":11},"Imp":{"min_level":4,"max_level":14},"Goblin":{"min_level":5,"max_level":12},"Guard":{"min_level":7,"max_level":12}}','{"gold":40,"item_chance":0.15,"max_item_count":3,"items":{"Sword":{"rarities":["Common","Uncommon"],"min_level":5,"max_level":12},"Chest":{"rarities":["Common","Uncommon"],"min_level":5,"max_level":12},"Boots":{"rarities":["Common","Uncommon"],"min_level":5,"max_level":12},"Dagger":{"rarities":["Common","Uncommon"],"min_level":5,"max_level":12},"Helmet":{"rarities":["Common","Uncommon"],"min_level":5,"max_level":12},"Longsword":{"rarities":["Common"],"min_level":5,"max_level":12},"Shortbow":{"rarities":["Common","Uncommon"],"min_level":5,"max_level":12},"Gloves":{"rarities":["Common","Uncommon"],"min_level":5,"max_level":12},"Wand":{"rarities":["Common","Uncommon"],"min_level":5,"max_level":12},"Claymore":{"rarities":["Common"],"min_level":5,"max_level":12},"Crossbow":{"rarities":["Common"],"min_level":5,"max_level":12},"Staff":{"rarities":["Common"],"min_level":5,"max_level":12},"Knife":{"rarities":["Common"],"min_level":5,"max_level":12}},"consumables":{"Health":{"min_level":3,"max_level":6},"Mana":{"min_level":3,"max_level":6}},"unique_items":[]}')
,('Village',12,'{"Golbin":{"min_level":9,"max_level":15},"Guard":{"min_level":10,"max_level":16},"Duck":{"min_level":8,"max_level":12},"Chicken":{"min_level":13,"max_level":15},"Thief":{"min_level":9,"max_level":15}}','{"gold":100,"item_chance":0.25,"max_item_count":2,"items":{"Sword":{"rarities":["Common"],"min_level":8,"max_level":14},"Longsword":{"rarities":["Common"],"min_level":8,"max_level":14},"Claymore":{"rarities":["Common"],"min_level":8,"max_level":14},"Shortbow":{"rarities":["Common"],"min_level":8,"max_level":14},"Longbow":{"rarities":["Common"],"min_level":8,"max_level":14},"Crossbow":{"rarities":["Common"],"min_level":8,"max_level":14},"Staff":{"rarities":["Common"],"min_level":8,"max_level":14},"Wand":{"rarities":["Common"],"min_level":8,"max_level":14},"Dagger":{"rarities":["Common"],"min_level":8,"max_level":14},"Knife":{"rarities":["Common"],"min_level":8,"max_level":14},"Helmet":{"rarities":["Common"],"min_level":8,"max_level":14},"Chest":{"rarities":["Common"],"min_level":8,"max_level":14},"Legs":{"rarities":["Common"],"min_level":8,"max_level":14},"Boots":{"rarities":["Common"],"min_level":8,"max_level":14},"Gloves":{"rarities":["Common"],"min_level":8,"max_level":14},"Shield":{"rarities":["Common"],"min_level":8,"max_level":14}},"consumables":{"Health":{"min_level":8,"max_level":14},"Mana":{"min_level":8,"max_level":14}},"unique_items":[]}')
,('Cave',16,'{"Bat":{"min_level":12,"max_level":20},"Snail":{"min_level":11,"max_level":18},"Spider":{"min_level":14,"max_level":18},"Slime":{"min_level":17,"max_level":20}}','{"gold":200,"item_chance":0.15,"max_item_count":2,"items":{"Sword":{"rarities":["Common","Uncommon"],"min_level":12,"max_level":18},"Longsword":{"rarities":["Common","Uncommon"],"min_level":12,"max_level":18},"Claymore":{"rarities":["Common","Uncommon"],"min_level":12,"max_level":18},"Shortbow":{"rarities":["Common","Uncommon"],"min_level":12,"max_level":18},"Longbow":{"rarities":["Common","Uncommon"],"min_level":12,"max_level":18},"Crossbow":{"rarities":["Common","Uncommon"],"min_level":12,"max_level":18},"Staff":{"rarities":["Common","Uncommon"],"min_level":12,"max_level":18},"Wand":{"rarities":["Common","Uncommon"],"min_level":12,"max_level":18},"Dagger":{"rarities":["Common","Uncommon"],"min_level":12,"max_level":18},"Knife":{"rarities":["Common","Uncommon"],"min_level":12,"max_level":18},"Helmet":{"rarities":["Common","Uncommon"],"min_level":12,"max_level":18},"Chest":{"rarities":["Common","Uncommon"],"min_level":12,"max_level":18},"Legs":{"rarities":["Common","Uncommon"],"min_level":12,"max_level":18},"Boots":{"rarities":["Common","Uncommon"],"min_level":12,"max_level":18},"Gloves":{"rarities":["Common","Uncommon"],"min_level":12,"max_level":18},"Shield":{"rarities":["Common","Uncommon"],"min_level":12,"max_level":18}},"consumables":{"Health":{"min_level":12,"max_level":18},"Mana":{"min_level":12,"max_level":18}},"unique_items":[]}')
,('Desert',21,'{"Scorpion":{"min_level":15,"max_level":22},"Lizard":{"min_level":18,"max_level":24},"Snake":{"min_level":18,"max_level":22},"Scarab":{"min_level":16,"max_level":23},"Mummy":{"min_level":20,"max_level":25},"Skeleton":{"min_level":19,"max_level":25},"Crab":{"min_level":19,"max_level":25}}','{"gold":400,"item_chance":0.2,"max_item_count":3,"items":{"Sword":{"rarities":["Common","Uncommon"],"min_level":17,"max_level":24},"Longsword":{"rarities":["Common","Uncommon"],"min_level":17,"max_level":24},"Claymore":{"rarities":["Common","Uncommon"],"min_level":17,"max_level":24},"Shortbow":{"rarities":["Common","Uncommon"],"min_level":17,"max_level":24},"Longbow":{"rarities":["Common","Uncommon"],"min_level":17,"max_level":24},"Crossbow":{"rarities":["Common","Uncommon"],"min_level":17,"max_level":24},"Staff":{"rarities":["Common","Uncommon"],"min_level":17,"max_level":24},"Wand":{"rarities":["Common","Uncommon"],"min_level":17,"max_level":24},"Dagger":{"rarities":["Common","Uncommon"],"min_level":17,"max_level":24},"Knife":{"rarities":["Common","Uncommon"],"min_level":17,"max_level":24},"Helmet":{"rarities":["Common","Uncommon"],"min_level":17,"max_level":24},"Chest":{"rarities":["Common","Uncommon"],"min_level":17,"max_level":24},"Legs":{"rarities":["Common","Uncommon"],"min_level":17,"max_level":24},"Boots":{"rarities":["Common","Uncommon"],"min_level":17,"max_level":24},"Gloves":{"rarities":["Common","Uncommon"],"min_level":17,"max_level":24},"Shield":{"rarities":["Common","Uncommon"],"min_level":17,"max_level":24}},"consumables":{"Health":{"min_level":17,"max_level":24},"Mana":{"min_level":17,"max_level":24}},"unique_items":[]}')
,('Volcano',27,'{"Slime":{"min_level":23,"max_level":29},"Firebat":{"min_level":22,"max_level":27},"Lava Eel":{"min_level":24,"max_level":30},"Flame Spirit":{"min_level":25,"max_level":31}}','{"gold":750,"item_chance":0.1,"max_item_count":1,"items":{"Sword":{"rarities":["Uncommon","Rare"],"min_level":22,"max_level":30},"Longsword":{"rarities":["Uncommon","Rare"],"min_level":22,"max_level":30},"Claymore":{"rarities":["Uncommon","Rare"],"min_level":22,"max_level":30},"Shortbow":{"rarities":["Uncommon","Rare"],"min_level":22,"max_level":30},"Longbow":{"rarities":["Uncommon","Rare"],"min_level":22,"max_level":30},"Crossbow":{"rarities":["Uncommon","Rare"],"min_level":22,"max_level":30},"Staff":{"rarities":["Uncommon","Rare"],"min_level":22,"max_level":30},"Wand":{"rarities":["Uncommon","Rare"],"min_level":22,"max_level":30},"Dagger":{"rarities":["Uncommon","Rare"],"min_level":22,"max_level":30},"Knife":{"rarities":["Uncommon","Rare"],"min_level":22,"max_level":30},"Helmet":{"rarities":["Uncommon","Rare"],"min_level":22,"max_level":30},"Chest":{"rarities":["Uncommon","Rare"],"min_level":22,"max_level":30},"Legs":{"rarities":["Uncommon","Rare"],"min_level":22,"max_level":30},"Boots":{"rarities":["Uncommon","Rare"],"min_level":22,"max_level":30},"Gloves":{"rarities":["Uncommon","Rare"],"min_level":22,"max_level":30},"Shield":{"rarities":["Uncommon","Rare"],"min_level":22,"max_level":30}},"consumables":{"Health":{"min_level":22,"max_level":30},"Mana":{"min_level":22,"max_level":30}},"unique_items":[]}');'''
)
sql('rpg', '''insert into spells values
('Roar','Warrior',3,5,7,'STR',2)
,('Frenzy','Warrior',5,10,11,'STR',5)
,('Stomp','Warrior',8,15,20,'STR',10)
,('Cleave','Warrior',10,25,26,'STR',12)
,('Pinpoint','Archer',2,8,9,'DEX',1)
,('Deadeye','Archer',4,5,18,'DEX',3)
,('Straight Shot','Archer',6,12,20,'DEX',7)
,('Double Shot','Archer',9,30,36,'DEX',15)
,('Firebolt','Wizard',1,2,3,'INT',2)
,('Thunderbolt','Wizard',1,2,3,'INT',2)
,('Icebolt','Wizard',2,3,5,'INT',3)
,('Earthbolt','Wizard',3,5,8,'INT',5)
,('Fire Strike','Wizard',5,8,12,'INT',8)
,('Lightning Strike','Wizard',7,11,17,'INT',10)
,('Ice Strike','Wizard',9,14,21,'INT',13)
,('Earth Strike','Wizard',10,15,23,'INT',15)
,('Sneak','Rogue',2,7,8,'DEX',3)
,('Backstab','Rogue',5,7,12,'DEX',5)
,('Tendon Slash','Rogue',10,25,35,'DEX',10);'''
) | 88.261628 | 1,668 | 0.629603 |
9cf83b6773ebd981ff4d78f3bd2ecd118cc9dfdc | 159 | py | Python | alento_bot/storage_module/formats/config_format.py | alentoghostflame/StupidEveAppraisalBot | a78adb206efd3a4dc41cbbfb45ee0b8b61aea248 | [
"MIT"
] | null | null | null | alento_bot/storage_module/formats/config_format.py | alentoghostflame/StupidEveAppraisalBot | a78adb206efd3a4dc41cbbfb45ee0b8b61aea248 | [
"MIT"
] | null | null | null | alento_bot/storage_module/formats/config_format.py | alentoghostflame/StupidEveAppraisalBot | a78adb206efd3a4dc41cbbfb45ee0b8b61aea248 | [
"MIT"
] | null | null | null | class ConfigData:
def __init__(self):
self.discord_command_prefix = ";"
self.discord_bot_token = ""
self.data_folder_path = "data"
| 26.5 | 41 | 0.63522 |
79b0812a7041ad301e881c180f2f641b25d1d440 | 127 | py | Python | sandbox/ipython/mydemo.py | rboman/progs | c60b4e0487d01ccd007bcba79d1548ebe1685655 | [
"Apache-2.0"
] | 2 | 2021-12-12T13:26:06.000Z | 2022-03-03T16:14:53.000Z | sandbox/ipython/mydemo.py | rboman/progs | c60b4e0487d01ccd007bcba79d1548ebe1685655 | [
"Apache-2.0"
] | 5 | 2019-03-01T07:08:46.000Z | 2019-04-28T07:32:42.000Z | sandbox/ipython/mydemo.py | rboman/progs | c60b4e0487d01ccd007bcba79d1548ebe1685655 | [
"Apache-2.0"
] | 2 | 2017-12-13T13:13:52.000Z | 2019-03-13T20:08:15.000Z | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
from IPython.lib.demo import Demo
# Wrap myscript.py in an interactive IPython demo; calling mydemo() steps
# through the script one block at a time.
mydemo = Demo('myscript.py')
#mydemo()
| 10.583333 | 33 | 0.629921 |
01b8d0b1f67ce4439fafa7bbc137ecec5e4d0405 | 8,617 | py | Python | tune_td3.py | clairecw/TD3 | 22840c0ee1620086c2495d859be2c3b7a4c70f3f | [
"MIT"
] | 1 | 2021-04-11T16:52:43.000Z | 2021-04-11T16:52:43.000Z | tune_td3.py | aimless-agents/TD3 | a94d3a2ceba6f4766e1c4ff676c445a0bc418b62 | [
"MIT"
] | null | null | null | tune_td3.py | aimless-agents/TD3 | a94d3a2ceba6f4766e1c4ff676c445a0bc418b62 | [
"MIT"
] | 1 | 2021-02-12T05:01:46.000Z | 2021-02-12T05:01:46.000Z | import numpy as np
import torch
import gym
import argparse
import os
import sys
import pybulletgym
import utils
import TD3
import OurDDPG
import DDPG
import warnings
import ray
from ray import tune
from ray.tune.schedulers import ASHAScheduler
from ray.tune.schedulers import ASHAScheduler
# Runs policy for X episodes and returns average reward
# A fixed seed is used for the eval environment
def eval_policy(policy, env_name, seed, eval_episodes=10):
    """Run ``policy`` greedily for ``eval_episodes`` episodes and return the
    mean episode reward.

    The evaluation environment is seeded with ``seed + 100`` so evaluation
    rollouts never reuse the training environment's seed.
    """
    import pybulletgym
    warnings.filterwarnings("ignore")
    eval_env = gym.make(env_name)
    eval_env.seed(seed + 100)
    # Accumulate the total reward over every evaluation episode.
    total = 0.
    episodes_left = eval_episodes
    while episodes_left > 0:
        episodes_left -= 1
        state, done = eval_env.reset(), False
        while not done:
            state, reward, done, _ = eval_env.step(
                policy.select_action(np.array(state)))
            total += reward
    avg_reward = total / eval_episodes
    print("---------------------------------------")
    print(f"Evaluation over {eval_episodes} episodes: {avg_reward:.3f}")
    print("---------------------------------------")
    return avg_reward
def train(
    config,
    start_timesteps,
    max_timesteps,
    policy_noise,
    expl_noise,
    noise_clip,
    policy_freq,
    batch_size,
    seed,
    policy,
    prioritized_replay,
    env_name,
    eval_freq,
    discount,
    tau,
    use_rank
):
    """Ray Tune trainable: train one TD3/DDPG agent with the hyperparameters
    in ``config`` and report ``episode_reward_mean`` every ``eval_freq`` steps.

    ``config`` carries the swept hyperparameters: (alpha, beta) when
    prioritized replay is on, otherwise (discount, tau) -- the latter pair
    overrides the same-named function arguments.
    """
    if prioritized_replay:
        alpha = float(config["alpha"])
        beta = float(config["beta"])
    else:
        discount = float(config["discount"])
        tau = float(config["tau"])
    # Imported inside the function so each Ray worker registers the
    # pybullet gym environments in its own process.
    import pybulletgym
    warnings.filterwarnings("ignore")
    env = gym.make(env_name)
    # Set seeds
    env.seed(seed)
    torch.manual_seed(seed)
    np.random.seed(seed)
    state_dim = env.observation_space.shape[0]
    action_dim = env.action_space.shape[0]
    max_action = float(env.action_space.high[0])
    kwargs = {
        "state_dim": state_dim,
        "action_dim": action_dim,
        "max_action": max_action,
        "discount": discount,
        "tau": tau,
    }
    # Initialize policy
    if policy == "TD3":
        # Target policy smoothing is scaled wrt the action scale
        kwargs["policy_noise"] = policy_noise * max_action
        kwargs["noise_clip"] = noise_clip * max_action
        kwargs["policy_freq"] = policy_freq
        kwargs["prioritized_replay"] = prioritized_replay
        kwargs["use_rank"] = use_rank
        policy = TD3.TD3(**kwargs)
    elif policy == "OurDDPG":
        policy = OurDDPG.DDPG(**kwargs)
    elif policy == "DDPG":
        policy = DDPG.DDPG(**kwargs)
    if prioritized_replay:
        replay_buffer = utils.PrioritizedReplayBuffer(state_dim, action_dim, max_timesteps, start_timesteps, alpha=alpha, beta=beta)
    else:
        replay_buffer = utils.ReplayBuffer(state_dim, action_dim)
    # Evaluate untrained policy
    evaluations = [eval_policy(policy, env_name, seed)]
    state, done = env.reset(), False
    episode_reward = 0
    episode_timesteps = 0
    episode_num = 0
    for t in range(int(max_timesteps)):
        episode_timesteps += 1
        # Select action randomly or according to policy
        if t < start_timesteps:
            action = env.action_space.sample()
        else:
            # Exploration: Gaussian noise on top of the deterministic action,
            # clipped back into the valid action range.
            action = (
                policy.select_action(np.array(state))
                + np.random.normal(0, max_action * expl_noise, size=action_dim)
            ).clip(-max_action, max_action)
        # Perform action
        next_state, reward, done, _ = env.step(action)
        # Treat time-limit terminations as non-terminal so bootstrapping
        # is not cut off by the episode cap.
        done_bool = float(done) if episode_timesteps < env._max_episode_steps else 0
        # Store data in replay buffer
        replay_buffer.add(state, action, next_state, reward, done_bool)
        state = next_state
        episode_reward += reward
        # Train agent after collecting sufficient data
        if t >= start_timesteps:
            policy.train(replay_buffer, batch_size)
        if done:
            # +1 to account for 0 indexing. +0 on ep_timesteps since it will increment +1 even if done=True
            print(f"Total T: {t+1} Episode Num: {episode_num+1} Episode T: {episode_timesteps} Reward: {episode_reward:.3f}")
            # Reset environment
            state, done = env.reset(), False
            episode_reward = 0
            episode_timesteps = 0
            episode_num += 1
        # Evaluate episode
        if (t + 1) % eval_freq == 0:
            avg_reward = eval_policy(policy, env_name, seed)
            tune.report(episode_reward_mean=avg_reward)
            evaluations.append(avg_reward)
if __name__ == "__main__":
    # Command-line entry point: parse hyperparameters, build the Tune search
    # space, and launch a grid-search over it with the ASHA scheduler.
    parser = argparse.ArgumentParser()
    parser.add_argument("--policy", default="TD3")                  # Policy name (TD3, DDPG or OurDDPG)
    parser.add_argument("--env", default="HalfCheetah-v2")          # OpenAI gym environment name
    parser.add_argument("--seed", default=0, type=int)              # Sets Gym, PyTorch and Numpy seeds
    parser.add_argument("--start_timesteps", default=25e3, type=int)# Time steps initial random policy is used
    parser.add_argument("--eval_freq", default=5e3, type=int)       # How often (time steps) we evaluate
    parser.add_argument("--max_timesteps", default=75e4, type=int)  # Max time steps to run environment
    parser.add_argument("--expl_noise", default=0.1)                # Std of Gaussian exploration noise
    parser.add_argument("--batch_size", default=256, type=int)      # Batch size for both actor and critic
    parser.add_argument("--discount", default=0.99, type=float)     # Discount factor
    parser.add_argument("--tau", default=0.005, type=float)         # Target network update rate
    parser.add_argument("--beta_step", default=0.008)               # Beta annealing step-size (should be 1/max_timesteps)
    parser.add_argument("--policy_noise", default=0.2)              # Noise added to target policy during critic update
    parser.add_argument("--noise_clip", default=0.5)                # Range to clip target policy noise
    parser.add_argument("--policy_freq", default=2, type=int)       # Frequency of delayed policy updates
    parser.add_argument("--save_model", action="store_true")        # Save model and optimizer parameters
    parser.add_argument("--load_model", default="")                 # Model load file name, "" doesn't load, "default" uses file_name
    parser.add_argument("--prioritized_replay", default=False, action='store_true')    # Include this flag to use prioritized replay buffer
    parser.add_argument("--smoke_test", default=False, action='store_true')            # Include this flag to run a smoke test
    parser.add_argument("--use_rank", default=False, action="store_true")  # Include this flag to use rank-based probabilities
    args = parser.parse_args()
    print("---------------------------------------")
    print(f"Policy: {args.policy}, Env: {args.env}, Seed: {args.seed}")
    print("---------------------------------------")
    # Search space: (alpha, beta) for prioritized replay, otherwise
    # (discount, tau) -- matches how train() reads `config`.
    if args.prioritized_replay:
        config = {
            "beta": tune.grid_search([0.3, 0.4, 0.5, 0.6]),
            "alpha": tune.grid_search([0.4, 0.5, 0.6, 0.7])
        }
    else:
        config = {
            "discount": tune.grid_search([0.995, 0.996, 0.997, 0.998, 0.999]),
            "tau": tune.grid_search([1e-5, 5e-4, 1e-4])
        }
    # Fixed (non-searched) arguments forwarded to train() via with_parameters.
    kwargs = {}
    if not args.smoke_test:
        kwargs["start_timesteps"] = args.start_timesteps
        kwargs["max_timesteps"] = args.max_timesteps
        kwargs["eval_freq"] = args.eval_freq
    else:
        # Tiny step counts so a smoke test finishes in seconds.
        kwargs["start_timesteps"] = 25
        kwargs["max_timesteps"] = 75
        kwargs["eval_freq"] = 5
    kwargs["policy_noise"] = args.policy_noise
    kwargs["expl_noise"] = args.expl_noise
    kwargs["noise_clip"] = args.noise_clip
    kwargs["batch_size"] = args.batch_size
    kwargs["policy_freq"] = args.policy_freq
    kwargs["seed"] = args.seed
    kwargs["policy"] = args.policy
    kwargs["prioritized_replay"] = args.prioritized_replay
    kwargs["use_rank"] = args.use_rank
    kwargs["env_name"] = args.env
    kwargs["discount"] = args.discount
    kwargs["tau"] = args.tau
    result = tune.run(
        tune.with_parameters(train, **kwargs),
        local_dir=os.path.join(os.getcwd(), "results", "tune_results"),
        num_samples=1,
        scheduler=ASHAScheduler(metric="episode_reward_mean", mode="max"),
        config=config
    )
    best_trial = result.get_best_trial("episode_reward_mean", "max", "last")
    print("best trial: ", best_trial.config)
    print("best trial last result: ", best_trial.last_result)
c3fde8ebaca894ae82bdcc1e6ebef532788af71c | 8,415 | py | Python | python/ccxt/async/nova.py | MoreChickenDelivered/ccxt | a8996798d08a1cedc30bfb3db72d14bc8919930f | [
"MIT"
] | 1 | 2018-07-06T08:14:13.000Z | 2018-07-06T08:14:13.000Z | python/ccxt/async/nova.py | MoreChickenDelivered/ccxt | a8996798d08a1cedc30bfb3db72d14bc8919930f | [
"MIT"
] | null | null | null | python/ccxt/async/nova.py | MoreChickenDelivered/ccxt | a8996798d08a1cedc30bfb3db72d14bc8919930f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async.base.exchange import Exchange
import hashlib
from ccxt.base.errors import ExchangeError
class nova (Exchange):
    """ccxt adapter for the Novaexchange REST API (auto-generated file --
    see the header comment; do not hand-edit the transpiled logic)."""

    # Static exchange description: endpoints, capabilities and URL map.
    def describe(self):
        return self.deep_extend(super(nova, self).describe(), {
            'id': 'nova',
            'name': 'Novaexchange',
            'countries': ['TZ'],  # Tanzania
            'rateLimit': 2000,
            'version': 'v2',
            'has': {
                'CORS': False,
                'createMarketOrder': False,
            },
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/1294454/30518571-78ca0bca-9b8a-11e7-8840-64b83a4a94b2.jpg',
                'api': 'https://novaexchange.com/remote',
                'www': 'https://novaexchange.com',
                'doc': 'https://novaexchange.com/remote/faq',
            },
            'api': {
                'public': {
                    'get': [
                        'markets/',
                        'markets/{basecurrency}/',
                        'market/info/{pair}/',
                        'market/orderhistory/{pair}/',
                        'market/openorders/{pair}/buy/',
                        'market/openorders/{pair}/sell/',
                        'market/openorders/{pair}/both/',
                        'market/openorders/{pair}/{ordertype}/',
                    ],
                },
                'private': {
                    'post': [
                        'getbalances/',
                        'getbalance/{currency}/',
                        'getdeposits/',
                        'getwithdrawals/',
                        'getnewdepositaddress/{currency}/',
                        'getdepositaddress/{currency}/',
                        'myopenorders/',
                        'myopenorders_market/{pair}/',
                        'cancelorder/{orderid}/',
                        'withdraw/{currency}/',
                        'trade/{pair}/',
                        'tradehistory/',
                        'getdeposithistory/',
                        'getwithdrawalhistory/',
                        'walletstatus/',
                        'walletstatus/{currency}/',
                    ],
                },
            },
        })

    # Fetch and normalize the market list. Note: nova market ids are
    # QUOTE_BASE, hence the (quote, base) unpacking order below.
    async def fetch_markets(self):
        response = await self.publicGetMarkets()
        markets = response['markets']
        result = []
        for i in range(0, len(markets)):
            market = markets[i]
            id = market['marketname']
            quote, base = id.split('_')
            symbol = base + '/' + quote
            active = True
            if market['disabled']:
                active = False
            result.append({
                'id': id,
                'symbol': symbol,
                'base': base,
                'quote': quote,
                'active': active,
                'info': market,
            })
        return result

    # Order book for one symbol; nova names its sides buyorders/sellorders.
    async def fetch_order_book(self, symbol, limit=None, params={}):
        await self.load_markets()
        orderbook = await self.publicGetMarketOpenordersPairBoth(self.extend({
            'pair': self.market_id(symbol),
        }, params))
        return self.parse_order_book(orderbook, None, 'buyorders', 'sellorders', 'price', 'amount')

    # Ticker for one symbol, mapped into the unified ccxt ticker structure.
    # The timestamp is local (self.milliseconds) -- nova does not return one.
    async def fetch_ticker(self, symbol, params={}):
        await self.load_markets()
        response = await self.publicGetMarketInfoPair(self.extend({
            'pair': self.market_id(symbol),
        }, params))
        ticker = response['markets'][0]
        timestamp = self.milliseconds()
        last = self.safe_float(ticker, 'last_price')
        return {
            'symbol': symbol,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'high': self.safe_float(ticker, 'high24h'),
            'low': self.safe_float(ticker, 'low24h'),
            'bid': self.safe_float(ticker, 'bid'),
            'bidVolume': None,
            'ask': self.safe_float(ticker, 'ask'),
            'askVolume': None,
            'vwap': None,
            'open': None,
            'close': last,
            'last': last,
            'previousClose': None,
            'change': self.safe_float(ticker, 'change24h'),
            'percentage': None,
            'average': None,
            'baseVolume': None,
            'quoteVolume': self.safe_float(ticker, 'volume24h'),
            'info': ticker,
        }

    # Convert one raw trade record into the unified trade structure.
    def parse_trade(self, trade, market):
        timestamp = trade['unix_t_datestamp'] * 1000
        return {
            'info': trade,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'symbol': market['symbol'],
            'id': None,
            'order': None,
            'type': None,
            'side': trade['tradetype'].lower(),
            'price': self.safe_float(trade, 'price'),
            'amount': self.safe_float(trade, 'amount'),
        }

    # Public trade history for one symbol.
    async def fetch_trades(self, symbol, since=None, limit=None, params={}):
        await self.load_markets()
        market = self.market(symbol)
        response = await self.publicGetMarketOrderhistoryPair(self.extend({
            'pair': market['id'],
        }, params))
        return self.parse_trades(response['items'], market, since, limit)

    # Account balances: 'used' = funds in the lockbox plus open trades.
    async def fetch_balance(self, params={}):
        await self.load_markets()
        response = await self.privatePostGetbalances()
        balances = response['balances']
        result = {'info': response}
        for b in range(0, len(balances)):
            balance = balances[b]
            currency = balance['currency']
            lockbox = float(balance['amount_lockbox'])
            trades = float(balance['amount_trades'])
            account = {
                'free': float(balance['amount']),
                'used': self.sum(lockbox, trades),
                'total': float(balance['amount_total']),
            }
            result[currency] = account
        return self.parse_balance(result)

    # Place a limit order (market orders are unsupported by the exchange).
    # Nova does not echo an order id back, so 'id' is None in the result.
    async def create_order(self, symbol, type, side, amount, price=None, params={}):
        if type == 'market':
            raise ExchangeError(self.id + ' allows limit orders only')
        await self.load_markets()
        amount = str(amount)
        price = str(price)
        market = self.market(symbol)
        order = {
            'tradetype': side.upper(),
            'tradeamount': amount,
            'tradeprice': price,
            'tradebase': 1,
            'pair': market['id'],
        }
        response = await self.privatePostTradePair(self.extend(order, params))
        return {
            'info': response,
            'id': None,
        }

    # Cancel an open order by exchange order id.
    async def cancel_order(self, id, symbol=None, params={}):
        return await self.privatePostCancelorder(self.extend({
            'orderid': id,
        }, params))

    # Build the request: public endpoints get a plain query string; private
    # endpoints are signed with HMAC-SHA512 over the full URL (incl. nonce)
    # and send apikey/signature in a form-encoded body.
    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        url = self.urls['api'] + '/' + self.version + '/'
        if api == 'private':
            url += api + '/'
        url += self.implode_params(path, params)
        query = self.omit(params, self.extract_params(path))
        if api == 'public':
            if query:
                url += '?' + self.urlencode(query)
        else:
            self.check_required_credentials()
            nonce = str(self.nonce())
            url += '?' + self.urlencode({'nonce': nonce})
            signature = self.hmac(self.encode(url), self.encode(self.secret), hashlib.sha512, 'base64')
            body = self.urlencode(self.extend({
                'apikey': self.apiKey,
                'signature': signature,
            }, query))
            headers = {
                'Content-Type': 'application/x-www-form-urlencoded',
            }
        return {'url': url, 'method': method, 'body': body, 'headers': headers}

    # Issue the request and raise on nova's in-band error envelope
    # (HTTP 200 with status != 'success').
    async def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
        response = await self.fetch2(path, api, method, params, headers, body)
        if 'status' in response:
            if response['status'] != 'success':
                raise ExchangeError(self.id + ' ' + self.json(response))
        return response
| 38.076923 | 126 | 0.496019 |
61f7085a335d8b9120325de3b16e3e5faef4471b | 8,382 | py | Python | util/development/old_stats.py | tchlux/util | eff37464c7e913377398025adf76b057f9630b35 | [
"MIT"
] | 4 | 2021-04-22T20:19:40.000Z | 2022-01-30T18:57:23.000Z | util/development/old_stats.py | tchlux/util | eff37464c7e913377398025adf76b057f9630b35 | [
"MIT"
] | 1 | 2022-01-24T14:10:27.000Z | 2022-01-30T16:42:53.000Z | util/development/old_stats.py | tchlux/util | eff37464c7e913377398025adf76b057f9630b35 | [
"MIT"
] | 2 | 2019-05-19T07:44:28.000Z | 2021-04-22T20:20:40.000Z | def modes(data, confidence=.99, tol=1/1000):
from util.optimize import zero
num_samples = len(data)
error = 2*samples(num_samples, confidence=confidence)
cdf = cdf_fit(data, fit="cubic")
print("error: ",error)
# Find all of the zeros of the derivative (mode centers / dividers)
checks = np.linspace(cdf.min, cdf.max, np.ceil(1/tol))
second_deriv = cdf.derivative.derivative
deriv_evals = second_deriv(checks)
modes = [i for i in range(1, len(deriv_evals)) if
(deriv_evals[i-1] * deriv_evals[i] <= 0) and
(deriv_evals[i-1] >= deriv_evals[i])]
antimodes = [i for i in range(1, len(deriv_evals)) if
(deriv_evals[i-1] * deriv_evals[i] <= 0) and
(deriv_evals[i-1] < deriv_evals[i])]
# Compute exact modes and antimodes using a zero-finding function.
modes = [zero(second_deriv, checks[i-1], checks[i]) for i in modes]
antimodes = [zero(second_deriv, checks[i-1], checks[i]) for i in antimodes]
original_antimodes = antimodes[:]
# Fix the bounds of the antimodes to match the distribution.
if modes[0] < antimodes[0]: antimodes = [cdf.min] + antimodes
else: antimodes[0] = cdf.min
if modes[-1] > antimodes[-1]: antimodes += [cdf.max]
else: antimodes[-1] = cdf.max
# Make sure that there is an antimode between each mode.
for i in range(len(modes)):
if antimodes[i] > modes[i]:
# Update the next antimode with this one (as long as it's not the max).
if (i < len(modes)-1):
antimodes[i+1] = (antimodes[i] + antimodes[i+1]) / 2
# Always update this antimode to properly be LESS than the mode.
antimodes[i] = (modes[i] + modes[i-1]) / 2
print("len(modes): ",len(modes))
print("len(antimodes): ",len(antimodes))
# Define a function that counts the number of modes thta are too small.
def count_too_small():
return sum( (cdf(upp) - cdf(low)) < error for (low,upp) in
zip(antimodes[:-1],antimodes[1:]) )
# Show PDF
from util.plot import Plot
p = Plot()
pdf = pdf_fit(cdf.inverse(np.random.random((1000,))))
# Loop until all modes are big enough to be accepted given error tolerance.
step = 1
while count_too_small() > 0:
print()
print("step: ",step, (len(modes), len(antimodes)))
f = len(modes)
p.add_func("PDF", pdf, cdf(), color=p.color(1), frame=f, show_in_legend=(step==1))
# Generate the mode lines.
mode_lines = [[],[]]
for z in modes:
mode_lines[0] += [z,z,None]
mode_lines[1] += [0,.2,None]
p.add("modes", *mode_lines, color=p.color(0), mode="lines",
group="modes", show_in_legend=(z==modes[0] and step==1), frame=f)
# Generate the antimode lines.
anti_lines = [[],[]]
for z in antimodes:
anti_lines[0] += [z,z,None]
anti_lines[1] += [0,.2,None]
p.add("seperator", *anti_lines, color=p.color(3,alpha=.3), mode="lines",
group="seperator", show_in_legend=(z==antimodes[0] and (step==1)), frame=f)
step += 1
# Compute the densities and the sizes of each mode.
sizes = [cdf(antimodes[i+1]) - cdf(antimodes[i])
for i in range(len(modes))]
densities = [(cdf(antimodes[i+1]) - cdf(antimodes[i])) /
(antimodes[i+1] - antimodes[i])
for i in range(len(modes))]
# Compute those modes that have neighbors that are too small.
to_grow = [i for i in range(len(modes))
if (i > 0 and sizes[i-1] < error)
or (i < len(sizes)-1 and sizes[i+1] < error)]
if len(to_grow) == 0: break
print("modes: ",modes)
print("antimodes: ",antimodes)
print("sizes: ",sizes)
print("densities: ",densities)
print("to_grow: ",to_grow)
# Sort the modes to be grown by their size, largest first.
to_grow = sorted(to_grow, key=lambda i: -densities[i])
# Keep track of the modes that have already been absorbed.
preference = {}
taken = set()
conflicts = set()
modes_to_remove = []
anti_to_remove = []
while len(to_grow) > 0:
i = to_grow.pop(0)
# Pick which of the adjacent nodes to absorb.
to_absorb = None
if (i < len(modes)-1) and (sizes[i+1] < error):
direction = 1
to_absorb = i + 1
if (i > 0) and (sizes[i-1] < error):
# If there wasn't a right mode, take the left by default.
if (to_absorb == None):
direction = -1
to_absorb = i - 1
# Otherwise we have to pick based on the density similarity.
elif (abs(modes[i-1]-modes[i]) < abs(modes[i+1]-modes[i])):
# Take the other one if its density is more similar.
direction = -1
to_absorb = i - 1
# If there is no good option to absorb, the skip.
if (to_absorb in preference): continue
# Record the preferred pick of this mode.
preference[i] = (direction, to_absorb)
# If this mode is already absorbed, then add it to conflict list.
if to_absorb in taken: conflicts.add( to_absorb )
# Remove the ability to 'absorb' from modes getting absorbed.
if to_absorb in to_grow: to_grow.remove(to_absorb)
# Add the absorbed value to the set of "taken" modes.
taken.add(to_absorb)
# Resolve conflicts by giving absorbed modes to closer modes.
for i in sorted(conflicts, key=lambda i: -densities[i]):
if (abs(modes[i-1] - modes[i]) < abs(modes[i+1] - modes[i])):
preference.pop(i+1)
else:
preference.pop(i-1)
# Update the boundaries
for i in sorted(preference, key=lambda i: -densities[i]):
direction, to_absorb = preference[i]
# Update the boundary of this mode.
antimodes[i+(direction>0)] = antimodes[to_absorb + (direction>0)]
# Update the "to_remove" lists.
anti_to_remove.append( antimodes[to_absorb + (direction>0)] )
modes_to_remove.append( modes[to_absorb] )
# Remove the modes and antimodes that were merged.
for m in modes_to_remove: modes.remove(m)
for a in anti_to_remove: antimodes.remove(a)
# Update the remaining antimodes to be nearest to the middle
# of the remaining modes (making them representative dividers).
for i in range(len(modes)-1):
middle = (modes[i] + modes[i+1]) / 2
closest = np.argmin([abs(oam - middle) for oam in original_antimodes])
antimodes[i+1] = original_antimodes[closest]
f = len(modes)
p.add_func("PDF", pdf, cdf(), color=p.color(1), frame=f, show_in_legend=(step==1))
# Generate the mode lines.
mode_lines = [[],[]]
for z in modes:
mode_lines[0] += [z,z,None]
mode_lines[1] += [0,.2,None]
p.add("modes", *mode_lines, color=p.color(0), mode="lines",
group="modes", show_in_legend=(z==modes[0] and step==1), frame=f)
# Generate the antimode lines.
anti_lines = [[],[]]
for z in antimodes:
anti_lines[0] += [z,z,None]
anti_lines[1] += [0,.2,None]
p.add("seperator", *anti_lines, color=p.color(3,alpha=.3), mode="lines",
group="seperator", show_in_legend=(z==antimodes[0] and (step==1)), frame=f)
p.show(append=True, y_range=[0,.15])
p = Plot()
p.add_func("CDF", cdf, cdf(), color=p.color(1))
for z in modes:
p.add("modes", [z,z], [0,1], color=p.color(0), mode="lines",
group="modes", show_in_legend=(z==modes[0]))
for z in antimodes:
p.add("seperator", [z,z], [0,1], color=p.color(3), mode="lines",
group="sep", show_in_legend=(z==antimodes[0]))
p.show(append=True)
# ../development/testing/test_stats.py
# Ad-hoc smoke test: sample 100 points from a random 3-node CDF and run modes().
if __name__ == "__main__":
    from util.random import cdf
    np.random.seed(1)  # fixed seed for a reproducible sample/plots
    data = cdf(nodes=3).inverse(np.random.random(100))
    modes(data)
| 44.823529 | 90 | 0.566571 |
7a4cec8879f0ab12b68f4424111b5015e9868ee9 | 27,137 | py | Python | openstates/openstates-master/openstates/az/bills.py | Jgorsick/Advocacy_Angular | 8906af3ba729b2303880f319d52bce0d6595764c | [
"CC-BY-4.0"
] | null | null | null | openstates/openstates-master/openstates/az/bills.py | Jgorsick/Advocacy_Angular | 8906af3ba729b2303880f319d52bce0d6595764c | [
"CC-BY-4.0"
] | null | null | null | openstates/openstates-master/openstates/az/bills.py | Jgorsick/Advocacy_Angular | 8906af3ba729b2303880f319d52bce0d6595764c | [
"CC-BY-4.0"
] | null | null | null | import re
from billy.scrape import NoDataForPeriod
from billy.scrape.bills import BillScraper, Bill
from billy.scrape.votes import Vote
from . import utils
from .action_utils import get_action_type, get_verbose_action
from lxml import html
# Root for every azleg.gov request built in this scraper.
BASE_URL = 'http://www.azleg.gov/'
# map to type and official_type (maybe we can figure out PB/PZ and add names
# -- keys are the sponsor-type codes used on azleg.gov; values are
#    (billy sponsor type, official azleg code).
SPONSOR_TYPES = {'P': ('primary', 'P'),
                 'C': ('cosponsor', 'C'),
                 'PB': ('primary', 'PB'),
                 'PZ': ('primary', 'PZ'),
                 'CZ': ('cosponsor', 'CZ')}
# This string of hot garbage appears when a document hasn't been posted yet.
# (A regex of alternative "not posted"/"no documents"/server-error messages.)
hot_garbage_404_fail = ('The Requested Document Has Not Been '
                        'Posted To The Web Site Yet.'
                        '|There Are No Documents For [A-Z\d]+'
                        '|The page cannot be displayed because an internal server error has occurred.')
class AZBillScraper(BillScraper):
def accept_response(self, response):
normal = super(AZBillScraper, self).accept_response(response)
return normal or response.status_code == 500
"""
Arizona Bill Scraper.
"""
jurisdiction = 'az'
def get_session_id(self, session):
"""
returns the session id for a given session
"""
return self.metadata['session_details'][session]['session_id']
def scrape_bill(self, chamber, session, bill_id):
"""
Scrapes documents, actions, vote counts and votes for
a given bill.
"""
session_id = self.get_session_id(session)
url = BASE_URL + 'DocumentsForBill.asp?Bill_Number=%s&Session_ID=%s' % (
bill_id.replace(' ', ''), session_id)
docs_for_bill = self.get(url).text
if re.search(hot_garbage_404_fail, docs_for_bill):
# Bailing here will prevent the bill from being saved, which
# occurs in the scrape_actions method below.
return
root = html.fromstring(docs_for_bill)
bill_title = root.xpath(
'//div[@class="ContentPageTitle"]')[1].text.strip()
b_type = utils.get_bill_type(bill_id)
bill = Bill(session, chamber, bill_id, bill_title, type=b_type)
bill.add_source(url)
path = '//tr[contains(td/font/text(), "%s")]'
link_path = '//tr[contains(td/a/@href, "%s")]'
link_path2 = '//tr[contains(td/font/a/@href, "%s")]'
# versions
for href in root.xpath("//a[contains(@href, 'pdf')]"):
version_url = href.attrib['href']
if "bills" in version_url.lower():
name = list(href.getparent().getparent().getparent())
name = name[1].text_content()
bill.add_version(href.text_content(), version_url,
on_duplicate='use_old',
mimetype='application/pdf')
#fact sheets and summary
rows = root.xpath(link_path2 % '/summary/')
for row in rows:
tds = row.xpath('td')
fact_sheet = tds[1].text_content().strip()
fact_sheet_url = tds[1].xpath('string(font/a/@href)') or \
tds[2].xpath('string(font/a/@href)')
bill.add_document(fact_sheet, fact_sheet_url, type="summary")
#agendas
# skipping revised, cancelled, date, time and room from agendas
# but how to get the agenda type cleanly? meaning whether it is
# house or senate?
rows = root.xpath(link_path % '/agendas')
for row in rows:
tds = row.xpath('td')
agenda_committee = tds[0].text_content().strip()
agenda_html = tds[7].xpath('string(a/@href)').strip()
if agenda_html == '':
agenda_html = tds[6].xpath('string(a/@href)').strip()
bill.add_document(agenda_committee, agenda_html)
# House Calendars
# skipping calendar number, modified, date
rows = root.xpath(link_path % '/calendar/h')
for row in rows:
tds = row.xpath('td')
calendar_name = tds[0].text_content().strip()
calendar_html = tds[5].xpath('string(a/@href)')
bill.add_document(calendar_name, calendar_html,
type='house calendar')
# Senate Calendars
# skipping calendar number, modified, date
rows = root.xpath(link_path % '/calendar/s')
for row in rows:
tds = row.xpath('td')
calendar_name = tds[0].text_content().strip()
calendar_html = tds[5].xpath('string(a/@href)')
bill.add_document(calendar_name, calendar_html,
type='senate calendar')
# amendments
rows = root.xpath(path % 'AMENDMENT:')
for row in rows:
tds = row.xpath('td')
amendment_title = tds[1].text_content().strip()
amendment_link = tds[2].xpath('string(font/a/@href)')
if amendment_link == "": #if there's no html link, take the pdf one which is next
amendment_link = tds[3].xpath('string(font/a/@href)')
if amendment_link:
bill.add_document(amendment_title, amendment_link,
type='amendment')
# videos
# http://azleg.granicus.com/MediaPlayer.php?view_id=13&clip_id=7684
rows = root.xpath(link_path % '&clip_id')
for row in rows:
tds = row.xpath('td')
video_title = tds[1].text_content().strip()
video_link = tds[2].xpath('string(a/@href)')
video_date = tds[0].text_content().strip()
bill.add_document(video_title, video_link, date=video_date,
type='video')
self.scrape_actions(chamber, session, bill)
def scrape_actions(self, chamber, session, bill):
"""
Scrape the actions for a given bill
"""
ses_num = utils.legislature_to_number(session)
bill_id = bill['bill_id'].replace(' ', '')
action_url = BASE_URL + 'FormatDocument.asp?inDoc=/legtext/%s/bills/%so.asp' % (ses_num, bill_id.lower())
action_page = self.get(action_url).text
if re.search(hot_garbage_404_fail, action_page):
# This bill has no actions yet, but that
# happened frequently with pre-filed bills
# before the 2013 session, so skipping probably
# isn't the thing to do.
self.save_bill(bill)
return
bill.add_source(action_url)
root = html.fromstring(action_page)
base_table = root.xpath('//table[@class="ContentAreaBackground"]')[0]
# common xpaths
table_path = '//table[contains(tr/td/b/text(), "%s")]'
#sponsors
sponsors = base_table.xpath('//sponsor')
for sponsor in sponsors:
name = sponsor.text.strip()
# sponsor.xpath('string(ancestor::td[1]/following-sibling::td[1]/text())').strip()
s_type = sponsor.getparent().getparent().getnext().text_content().strip()
s_type, o_type = SPONSOR_TYPES[s_type]
bill.add_sponsor(s_type, name, official_type=o_type)
#titles
table = base_table.xpath(table_path % 'TITLE')
if table:
for row in table[0].iterchildren('tr'):
title = row[1].text_content().strip()
if title != bill['title']:
bill.add_title(title)
for table in base_table.xpath('tr/td/table'):
action = table.xpath('string(tr[1]/td[1])').strip()
if action == '':
action = table.xpath('string(tr[1])').strip()
if (action.endswith('FIRST READ:') or
action.endswith('SECOND READ:') or 'WAIVED' in action):
rows = table.xpath('tr')
for row in rows:
action = row[0].text_content().strip()[:-1]
actor = 'lower' if action.startswith('H') else 'upper'
date = utils.get_date(row[1])
# bill:introduced
if (action.endswith('FIRST READ') or
action.endswith('FIRST WAIVED')):
if actor == chamber:
a_type = ['bill:introduced', 'bill:reading:1']
else:
a_type = 'bill:reading:1'
bill.add_action(actor, action, date, type=a_type)
else:
a_type = 'bill:reading:2'
bill.add_action(actor, action, date, type=a_type)
continue
elif action == 'COMMITTEES:':
# committee assignments
rows = table.xpath('tr')[1:]
for row in rows:
# First add the committee assigned action
meta_tag = row.xpath('.//meta')[0]
h_or_s = meta_tag.get('name')[0] # @name is HCOMMITTEE OR SCOMMITTEE
committee = meta_tag.get('content') # @content is committee abbrv
#actor is house or senate referring the bill to committee
actor = 'lower' if h_or_s.lower() == 'h' else 'upper'
act = 'assigned to committee: ' + \
utils.get_committee_name(committee, actor)
date = utils.get_date(row[1])
bill.add_action(actor, act, date, type='committee:referred')
# now lets see if there is a vote
vote_url = row[0].xpath('string(a/@href)')
if vote_url:
date = utils.get_date(row[3])
try:
act = row[5].text_content().strip()
except IndexError:
#not sure what to do if action is not specified
#skipping and throwing a warning for now
self.logger.warning("Vote has no action, skipping.")
continue
a_type = get_action_type(act, 'COMMITTEES:')
act = get_verbose_action(act)
bill.add_action(actor,
utils.get_committee_name(committee, actor) + ":" + act,
date, type=a_type, abbrv=committee)
self.scrape_votes(actor, vote_url, bill, date,
motion='committee: ' + act,
committees=committee,
type='other')
elif len(row) == 5:
# probably senate rules committee
date = utils.get_date(row[3])
if date == '':
date = utils.get_date(row[1])
act = row[4].text_content().strip()
a_type = get_action_type(act, 'COMMITTEES:')
act = get_verbose_action(act)
bill.add_action(actor,
utils.get_committee_name(
committee, actor) +
":" + act, date,
type=a_type, abbrv=committee)
continue
elif 'CAUCUS' in action:
rows = table.xpath('tr')[0:2]
for row in rows:
actor = utils.get_actor(row, chamber)
action = row[0].text_content().strip()
if action.endswith(':'):
action = action[:-1]
if len(row) != 3:
self.warning('skipping row: %s' %
row.text_content())
continue
result = row[2].text_content().strip()
# majority caucus Y|N
action = action + " recommends to concur: " + result
date = utils.get_date(row[1])
bill.add_action(actor, action, date, concur=result,
type='other')
continue
# transmit to house or senate
elif 'TRANSMIT TO' in action:
rows = table.xpath('tr')
for row in rows:
action = row[0].text_content().strip()[:-1]
actor = 'upper' if action.endswith('HOUSE') else 'lower'
date = utils.get_date(row[1])
bill.add_action(actor, action, date, type='other')
continue
# Committee of the whole actions
elif 'COW ACTION' in action:
rows = table.xpath('tr')
actor = utils.get_actor(rows[0], chamber)
if 'SIT COW ACTION' in action:
act = rows[0][-1].text_content().strip()
date = utils.get_date(rows[0][1])
else:
act = rows[1][2].text_content().strip()
date = utils.get_date(rows[1][1])
action = action + " " + get_verbose_action(act) # COW ACTION 1 DPA
bill.add_action(actor, action, date, type='other')
if rows[1][0].text_content().strip() == 'Vote Detail':
vote_url = rows[1][0].xpath('string(a/@href)')
self.scrape_votes(actor, vote_url, bill, date,
motion=action, type='other',
extra=act)
continue
# AMENDMENTS
elif 'AMENDMENTS' in action:
rows = table.xpath('tr')[1:]
for row in rows:
act = row.text_content().strip()
if act == '':
continue
if 'passed' in act or 'adopted' in act:
a_type = 'amendment:passed'
elif 'failed' in act:
a_type = 'amendment:failed'
elif 'withdrawn' in act:
a_type = 'amendment:withdrawn'
else:
a_type = 'other'
# actor and date will same as previous action
bill.add_action(actor, act, date, type=a_type)
continue
# CONFERENCE COMMITTEE
# http://www.azleg.gov/FormatDocument.asp?inDoc=/legtext/49Leg/2r/bills/hb2083o.asp
# MISCELLANEOUS MOTION
# MOTION TO RECONSIDER
elif action == 'MOTION TO RECONSIDER:':
date = utils.get_date(table[1][1])
if date:
if table[1][0].text_content().strip() == 'Vote Detail':
vote_url = table[1][0].xpath('string(a/@href)')
bill.add_action(actor, action, date, type=a_type)
self.scrape_votes(actor, vote_url, bill, date,
motion='motion to reconsider',
type='other')
else:
action = table[-1][1].text_content().strip()
bill.add_action(actor, action, date, type='other')
continue
elif (action.endswith('FINAL READ:') or
action.endswith('THIRD READ:')):
# house|senate final and third read
rows = table.xpath('tr')
# need to find out if third read took place in house or senate
# if an ancestor table contains 'TRANSMIT TO' then the action
# is taking place in that chamber, else it is in chamber
actor = utils.get_actor(rows[0], chamber)
# get a dict of keys from the header and values from the row
k_rows = utils.get_rows(rows[1:], rows[0])
action = rows[0][0].text_content().strip()
for row in k_rows:
a_type = [get_action_type(action, 'Generic')]
if row[action].text_content().strip() == 'Vote Detail':
vote_url = row.pop(action).xpath('string(a/@href)')
vote_date = utils.get_date(row.pop('DATE'))
try:
passed = row.pop('RESULT').text_content().strip()
except KeyError:
passed = row.pop('2/3 VOTE').text_content().strip()
# leaves vote counts, ammended, emergency, two-thirds
# and possibly rfe left in k_rows. get the vote counts
# from scrape votes and pass ammended and emergency
# as kwargs to sort them in scrap_votes
pass_fail = {'PASSED': 'bill:passed',
'FAILED': 'bill:failed'}[passed]
a_type.append(pass_fail)
bill.add_action(actor, action, vote_date,
type=a_type)
row['type'] = 'passage'
self.scrape_votes(actor, vote_url, bill, vote_date,
passed=passed, motion=action,
**row)
else:
date = utils.get_date(row.pop('DATE'))
if date:
bill.add_action(actor, action, date, type=a_type)
continue
elif 'TRANSMITTED TO' in action:
# transmitted to Governor or secretary of the state
# SoS if it goes to voters as a proposition and memorials, etc
rows = table.xpath('tr')
actor = utils.get_actor(rows[0], chamber)
# actor is the actor from the previous statement because it is
# never transmitted to G or S without third or final read
sent_to = rows[0][1].text_content().strip()
date = utils.get_date(rows[0][2])
a_type = 'governor:received' if sent_to[0] == 'G' else 'other'
bill.add_action(actor, "TRANSMITTED TO " + sent_to, date,
type=a_type)
# See if the actor is the governor and whether he signed
# the bill or vetoed it
act, date, chapter, version = '', '', '', ''
for row in rows[1:]:
if row[0].text_content().strip() == 'ACTION:':
act = row[1].text_content().strip()
date = utils.get_date(row[2])
elif row[0].text_content().strip() == 'CHAPTER:':
chapter = row[1].text_content().strip()
elif row[0].text_content().strip() == 'CHAPTERED VERSION:':
version = row[1].text_content().strip()
elif row[0].text_content().strip() == 'TRANSMITTED VERSION:':
version = row[1].text_content().strip()
if act and sent_to == 'GOVERNOR':
a_type = 'governor:signed' if act == 'SIGNED' else 'governor:vetoed'
if chapter:
bill.add_action(sent_to.lower(), act, date,
type=a_type, chapter=chapter,
chaptered_version=version)
else:
bill.add_action(sent_to.lower(), act, date,
type=a_type)
continue
# this is probably only important for historical legislation
elif 'FINAL DISPOSITION' in action:
rows = table.xpath('tr')
if rows:
disposition = rows[0][1].text_content().strip()
bill['final_disposition'] = disposition
bill = self.sort_bill_actions(bill)
self.save_bill(bill)
def scrape(self, chamber, session):
    """Walk the bill index for one chamber of `session` and scrape each bill.

    Raises NoDataForPeriod when the session name is unknown.
    """
    try:
        session_id = self.get_session_id(session)
    except KeyError:
        raise NoDataForPeriod(session)

    # KeyError here (unknown chamber) is intentional — same as the index lookup.
    view = {'lower': 'allhouse', 'upper': 'allsenate'}[chamber]
    index_url = BASE_URL + 'Bills.asp?view=%s&Session_ID=%s' % (view, session_id)
    index_page = self.get(index_url).text
    doc = html.fromstring(index_page)

    anchor_path = ('//div/table/tr[3]/td[4]/table/tr/td/'
                   'table[2]/tr[2]/td/table/tr/td[2]/table/tr/td//a')
    for anchor in doc.xpath(anchor_path):
        raw_id = anchor.text.strip()
        # Normalise e.g. "HB2001" into "HB 2001"
        bill_id = " ".join(re.split('([A-Z]*)([0-9]*)', raw_id)).strip()
        self.scrape_bill(chamber, session, bill_id)
def scrape_votes(self, chamber, url, bill, date, **kwargs):
    """
    Scrapes the votes from a vote detail page with the legislator's names
    this handles all of the votes and expects the following keyword
    arguments: motion
    an Arizona Vote object will have the following additional fields:
    additional vote counts:
        +not_voting, +excused, +absent, +present
    additional vote lists
        +NV, +EX, +AB, +P
    this depends on the chamber and the committee
    """
    o_args = {}
    passed = ''  # to test if we need to compare vote counts later
    v_type = kwargs.pop('type')
    # Pull recognised metadata out of kwargs; anything popped into o_args is
    # forwarded to the Vote constructor as extra keyword fields.
    if 'passed' in kwargs:
        passed = {'PASSED': True, 'FAILED': False}[kwargs.pop('passed')]
    if 'AMEND' in kwargs:
        o_args['amended'] = kwargs.pop('AMEND').text_content().strip()
    if 'motion' in kwargs:
        # NOTE(review): if 'motion' is absent, `motion` is unbound and the
        # Vote(...) call below raises NameError — docstring says callers
        # must always pass it; confirm.
        motion = kwargs.pop('motion')
    if 'EMER' in kwargs and kwargs['EMER'].text_content().strip():
        o_args['EMER'] = kwargs.pop('EMER').text_content().strip()
    if '2/3 VOTE' in kwargs and kwargs['2/3 VOTE'].text_content().strip():
        o_args['2/3 VOTE'] = kwargs.pop('2/3 VOTE').text_content().strip()
    if 'committee' in kwargs:
        o_args['committee'] = utils.get_committee_name(kwargs.pop('committee'),
                                                       chamber)
    if 'committees' in kwargs:
        o_args['committee'] = utils.get_committee_name(kwargs.pop('committees'),
                                                       chamber)

    vote_page = self.get(url).text
    root = html.fromstring(vote_page)
    vote_table = root.xpath('/html/body/div/table/tr[3]/td[4]/table/tr/td/table/tr/td/table')[0]
    # Totals appear as free text after the table, e.g. "AYES: 31 NAYS: 27 ..."
    vote_count = vote_table.xpath('following-sibling::p/following-sibling::text()')
    vote_string = vote_count[0].replace(u'\xa0', '').strip()
    v_count = re.compile(r'\b[A-Z]*\s*[A-z]*:\s\d*')
    v_list = v_count.findall(vote_string)
    o_count = 0
    for x in v_list:
        k, v = x.split(':')
        # make NOT VOTING not_voting
        k = k.strip().replace(' ', '_').lower()
        v = int(v.strip())
        if k == 'ayes':
            yes_count = int(v)
        elif k == 'nays':
            no_count = int(v)
        else:
            # every non-aye/nay tally (excused, absent, ...) is an "other"
            o_args.update({str(k): v})
            o_count += int(v)
    if passed == '':
        passed = yes_count > no_count
    if ('committee' not in o_args) and ('committees' not in o_args):
        # Floor votes: apply the chamber's absolute-majority thresholds
        # (stricter 2/3 thresholds for emergency / two-thirds measures).
        if chamber == 'upper' and passed:
            if 'EMER' in o_args or '2/3 VOTE' in o_args:
                passed = yes_count > 20
            else:
                passed = yes_count > 16
        elif chamber == 'lower' and passed:
            if 'EMER' in o_args or '2/3 VOTE' in o_args:
                passed = yes_count > 40
            else:
                passed = yes_count > 31

    vote = Vote(chamber, date, motion, passed, yes_count, no_count,
                o_count, type=v_type, **o_args)
    vote.add_source(url)
    # grab all the tables descendant tds
    tds = vote_table.xpath('descendant::td')
    # pair 'em up (name cell, vote-mark cell)
    matched = [ tds[y:y+2] for y in range(0, len(tds), 2) ]
    for name, v in iter(matched):
        v = v.text_content().strip()
        name = name.text_content().strip()
        if name == 'Member Name':
            continue
        if v == 'Y':
            vote.yes(name)
        elif v == 'N':
            vote.no(name)
        else:
            # any other mark (NV/EX/AB/P) is tracked per-code and as "other"
            if v in vote:
                vote[v].append(name)
            else:
                vote[v] = [name]
            vote.other(name)

    # Warn if the stated other_vote count doesn't add up.
    if vote['other_count'] != len(vote['other_votes']):
        self.warning("Other votes count on webpage didn't match "
                     "len(other_votes)...using length instead.")
        vote['other_count'] = len(vote['other_votes'])
    bill.add_vote(vote)
def sort_bill_actions(self, bill):
    """Re-order bill['actions'] so dates are non-decreasing.

    Actions dated earlier than a previously seen action are set aside and
    re-inserted at the first chronologically valid slot.  If any such
    action cannot be placed, the original (unsorted) list is kept and a
    log entry is written.  Action text is lower-cased as a side effect.
    """
    actions = bill['actions']
    actions_list = []
    out_of_order = []
    new_list = []
    if not actions:
        return bill
    action_date = actions[0]['date']
    actions[0]['action'] = actions[0]['action'].lower()
    actions_list.append(actions[0])
    # seperate the actions that are out of order
    for action in actions[1:]:
        if action['date'] < action_date:
            out_of_order.append(action)
        else:
            actions_list.append(action)
            action_date = action['date']
        action['action'] = action['action'].lower()
    action_date = actions_list[0]['date']
    for action in actions_list:
        # this takes care of the actions in beween
        # NOTE(review): out_of_order is popped while being iterated — the
        # element following a removed one can be skipped within this pass;
        # confirm this is tolerated by the final out_of_order != [] check.
        for act in out_of_order:
            if act['date'] < action_date:
                o_index = out_of_order.index(act)
                new_list.append(out_of_order.pop(o_index))
            if act['date'] >= action_date and act['date'] < action['date']:
                o_index = out_of_order.index(act)
                new_list.append(out_of_order.pop(o_index))
        new_list.append(action)
        # out-of-order actions sharing this action's exact date go right after it
        for act in out_of_order:
            if act['date'] == action['date']:
                o_index = out_of_order.index(act)
                new_list.append(out_of_order.pop(o_index))
    if out_of_order != []:
        self.log("Unable to sort " + bill['bill_id'])
        return bill
    else:
        bill['actions'] = new_list
        return bill
| 46.627148 | 113 | 0.496297 |
d7331556d804e523600572a049ea3202458eb9e9 | 1,642 | py | Python | msgbox/main.py | paolo-losi/msgbox | d7a61761ba6606f8ac0fd25429ff94a88811a629 | [
"MIT"
] | null | null | null | msgbox/main.py | paolo-losi/msgbox | d7a61761ba6606f8ac0fd25429ff94a88811a629 | [
"MIT"
] | null | null | null | msgbox/main.py | paolo-losi/msgbox | d7a61761ba6606f8ac0fd25429ff94a88811a629 | [
"MIT"
] | 1 | 2021-08-12T20:10:20.000Z | 2021-08-12T20:10:20.000Z | import argparse
import logging
import signal
import sys
import tornado.ioloop
from msgbox import logger
from msgbox.http import http_server_manager, http_client_manager
from msgbox.serial import SerialPortManager
from msgbox.sim import sim_manager
# Command-line interface: two optional boolean flags.
parser = argparse.ArgumentParser()
parser.add_argument("--debug", help="log at debug level",
                    action='store_true')
parser.add_argument("--usb-only", help="manage usb modems only",
                    action='store_true')
def finalize_shutdown():
    """Final shutdown step: stop the HTTP client, then stop the IO loop.

    Passed as a callback to sim_manager.stop() in main(); the loop stop is
    scheduled via add_callback so it executes on the IO loop's own thread.
    """
    http_client_manager.stop()
    ioloop = tornado.ioloop.IOLoop.instance()
    ioloop.add_callback(ioloop.stop)
def main():
    """Parse CLI flags, start all managers, and run the Tornado IO loop.

    Blocks in IOLoop.start(); on interrupt or exit the managers are
    stopped in reverse start order, with sim_manager finishing the
    shutdown via finalize_shutdown.
    """
    args = parser.parse_args()
    level = logging.DEBUG if args.debug else logging.INFO
    logging.basicConfig(level=level,
                        format='[%(levelname)1.1s %(asctime)s] '
                               #'%(name)-15s '
                               '%(threadName)-20s '
                               '%(message)s')
    serial_manager = SerialPortManager(args.usb_only)
    http_client_manager.start()
    sim_manager.start()
    serial_manager.start()
    http_server_manager.start()
    try:
        tornado.ioloop.IOLoop.instance().start()
    # FIX: the legacy form `except KeyboardInterrupt, SystemExit:` caught
    # only KeyboardInterrupt and merely *bound* it to the name SystemExit;
    # a parenthesized tuple actually catches both exception types.
    except (KeyboardInterrupt, SystemExit):
        pass
    except Exception:
        logger.error('error trapped', exc_info=True)
    finally:
        logger.info("shutting down ...")
        http_server_manager.stop()
        serial_manager.stop()
        sim_manager.stop(finalize_shutdown)
# Translate SIGTERM into SystemExit so main()'s finally-cleanup runs on kill.
signal.signal(signal.SIGTERM, lambda sig, frame: sys.exit(0))

if __name__ == '__main__':
    main()
| 26.483871 | 64 | 0.633983 |
260b66a7be8cd9ab8997da609ca6519967540d06 | 2,458 | py | Python | tests/selectors/test_selector.py | ringwraith/zvt | ff5844ff7991132bbf38d464f29f461dba5efa14 | [
"MIT"
] | 1 | 2019-08-24T02:26:51.000Z | 2019-08-24T02:26:51.000Z | tests/selectors/test_selector.py | ringwraith/zvt | ff5844ff7991132bbf38d464f29f461dba5efa14 | [
"MIT"
] | null | null | null | tests/selectors/test_selector.py | ringwraith/zvt | ff5844ff7991132bbf38d464f29f461dba5efa14 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from zvt.factors.technical_factor import CrossMaFactor
from ..context import init_context
init_context()
from zvt.domain import SecurityType, TradingLevel, Provider
from zvt.selectors.technical_selector import TechnicalSelector
from zvt.selectors.selector import TargetSelector
def test_cross_ma_selector():
    """Run a cross-MA-filtered TargetSelector on one stock and check targets."""
    codes = ['stock_sz_000338']
    kind = 'stock'
    start = '2018-01-01'
    end = '2019-06-30'

    selector = TargetSelector(security_list=codes,
                              security_type=kind,
                              start_timestamp=start,
                              end_timestamp=end)

    # attach the cross-moving-average filter factor
    factor = CrossMaFactor(security_list=codes,
                           security_type=kind,
                           start_timestamp=start,
                           end_timestamp=end,
                           level=TradingLevel.LEVEL_1DAY)
    selector.add_filter_factor(factor)

    selector.run()

    print(selector.open_long_df)
    print(selector.open_short_df)

    assert 'stock_sz_000338' in selector.get_open_short_targets('2018-01-29')
def test_technical_selector():
    """End-to-end check of TechnicalSelector targets on two known dates.

    Cleanup over the original: every assertion was duplicated verbatim;
    each expectation is now stated once.
    """
    selector = TechnicalSelector(security_type=SecurityType.stock, start_timestamp='2019-01-01',
                                 end_timestamp='2019-06-10',
                                 level=TradingLevel.LEVEL_1DAY,
                                 provider=Provider.JOINQUANT)
    selector.run()
    print(selector.get_result_df())

    targets = selector.get_open_long_targets('2019-06-04')
    assert 'stock_sz_000338' not in targets
    assert 'stock_sz_002572' not in targets

    targets = selector.get_open_short_targets('2019-06-04')
    assert 'stock_sz_000338' in targets
    assert 'stock_sz_002572' in targets

    selector.move_on(timeout=0)

    targets = selector.get_open_long_targets('2019-06-19')
    assert 'stock_sz_000338' in targets
    assert 'stock_sz_002572' not in targets

    targets = selector.get_keep_long_targets('2019-06-19')
    assert 'stock_sz_000338' not in targets
    assert 'stock_sz_002572' not in targets
| 35.114286 | 96 | 0.658666 |
a052b016517f6aacc801103061d8c4b9c33c0bcd | 176 | py | Python | test/cases/nested_function.in.py | eirikurt/sdsort | beb7bf00aad2255daeb3cc6440c5036bc6e6ee02 | [
"MIT"
] | null | null | null | test/cases/nested_function.in.py | eirikurt/sdsort | beb7bf00aad2255daeb3cc6440c5036bc6e6ee02 | [
"MIT"
] | 4 | 2021-05-02T21:41:21.000Z | 2021-05-02T21:47:57.000Z | test/cases/nested_function.in.py | eirikurt/sdsort | beb7bf00aad2255daeb3cc6440c5036bc6e6ee02 | [
"MIT"
] | null | null | null | class FunctionNester:
def nest(self, val: str):
    """Return a closure that prints the captured `val` when called.

    NOTE(review): this file looks like an sdsort test-case input
    (*.in.py) — confirm edits here don't break the paired expected output.
    """
    def inner():
        print(val)
    return inner
def bar(self):
    """Call nest() with a fixed argument; returns the inner closure."""
    return self.nest("something")
| 17.6 | 37 | 0.545455 |
e5af4290aaf5ef04e01a54db388adcdc02dd1124 | 1,969 | py | Python | z_externals/handson_second_edition/Chapter19/lib/common.py | linklab/link_rl | e3d3196dcd49fd71b45941e07fc0d8a27d1d8c99 | [
"MIT"
] | null | null | null | z_externals/handson_second_edition/Chapter19/lib/common.py | linklab/link_rl | e3d3196dcd49fd71b45941e07fc0d8a27d1d8c99 | [
"MIT"
] | null | null | null | z_externals/handson_second_edition/Chapter19/lib/common.py | linklab/link_rl | e3d3196dcd49fd71b45941e07fc0d8a27d1d8c99 | [
"MIT"
] | 1 | 2021-11-23T12:30:37.000Z | 2021-11-23T12:30:37.000Z | import numpy as np
import torch
import torch.distributions as distribution
import ptan
def unpack_batch_a2c(batch, net, last_val_gamma, device="cpu"):
    """
    Convert batch into training tensors
    :param batch: list of experience entries (state, action, reward, last_state)
    :param net: critic network used to bootstrap values of non-terminal states
    :param last_val_gamma: discount applied to the bootstrapped last-state value
    :param device: torch device string for the produced tensors
    :return: states variable, actions tensor, reference values variable
    """
    states = []
    actions = []
    rewards = []
    not_done_idx = []
    last_states = []
    for idx, exp in enumerate(batch):
        states.append(exp.state)
        actions.append(exp.action)
        rewards.append(exp.reward)
        # last_state is None for terminal transitions; only non-terminal
        # ones get a bootstrapped value added below
        if exp.last_state is not None:
            not_done_idx.append(idx)
            last_states.append(exp.last_state)
    states_v = ptan.agent.float32_preprocessor(states).to(device)
    actions_v = torch.FloatTensor(actions).to(device)
    # handle rewards: add gamma-discounted critic value of the last state
    # to every non-terminal transition's accumulated reward
    rewards_np = np.array(rewards, dtype=np.float32)
    if not_done_idx:
        last_states_v = ptan.agent.float32_preprocessor(last_states).to(device)
        last_vals_v = net(last_states_v)
        last_vals_np = last_vals_v.data.cpu().numpy()[:, 0]
        rewards_np[not_done_idx] += last_val_gamma * last_vals_np
    ref_vals_v = torch.FloatTensor(rewards_np).to(device)
    return states_v, actions_v, ref_vals_v
@torch.no_grad()
def unpack_batch_sac(batch, val_net, twinq_net, policy_net,
                     gamma: float, ent_alpha: float,
                     device="cpu"):
    """
    Unpack Soft Actor-Critic batch

    Returns (states, actions, value-net references, Q references).  The
    value references are the entropy-regularised minimum of the twin
    Q-networks evaluated on freshly sampled policy actions.
    """
    states_v, actions_v, ref_q_v = \
        unpack_batch_a2c(batch, val_net, gamma, device)

    # references for the critic network: sample actions from the current
    # Gaussian policy and score them with both Q-heads
    mu_v = policy_net(states_v)
    act_dist = distribution.Normal(mu_v, torch.exp(policy_net.logstd))
    acts_v = act_dist.sample()
    q1_v, q2_v = twinq_net(states_v, acts_v)
    # element-wise minimum, minus the entropy bonus term
    ref_vals_v = torch.min(q1_v, q2_v).squeeze() - \
        ent_alpha * act_dist.log_prob(acts_v).sum(dim=1)
    return states_v, actions_v, ref_vals_v, ref_q_v
| 31.758065 | 79 | 0.665312 |
1537b6aaf21e2694745aaefee303a1ee23afa5e0 | 206 | py | Python | event_service/users/tests/test_models.py | theodor85/event_service | 3ecb98ab1121b676914e517125d6d8b02a2c2cd8 | [
"MIT"
] | null | null | null | event_service/users/tests/test_models.py | theodor85/event_service | 3ecb98ab1121b676914e517125d6d8b02a2c2cd8 | [
"MIT"
] | 3 | 2021-05-11T16:12:52.000Z | 2022-02-27T05:55:18.000Z | event_service/users/tests/test_models.py | theodor85/event_service | 3ecb98ab1121b676914e517125d6d8b02a2c2cd8 | [
"MIT"
] | null | null | null | import pytest
from event_service.users.models import User
pytestmark = pytest.mark.django_db
def test_user_get_absolute_url(user: User):
    """A user's canonical URL is /users/<username>/."""
    expected = "/users/" + user.username + "/"
    assert user.get_absolute_url() == expected
| 20.6 | 64 | 0.776699 |
623766aa0a16d0f35bd02bfa21d8cfdd7df17ca7 | 9,404 | py | Python | tests/python/rna_array.py | gunslingster/CSC581-assignement1 | 39012146e142bf400c7140d90ecfd27c45b589ca | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 365 | 2015-02-10T15:10:55.000Z | 2022-03-03T15:50:51.000Z | tests/python/rna_array.py | mmtt1998819/blender | c9c3bf983321990a6960c422e002a372c35a6f76 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 45 | 2015-01-09T15:34:20.000Z | 2021-10-05T14:44:23.000Z | tests/python/rna_array.py | mmtt1998819/blender | c9c3bf983321990a6960c422e002a372c35a6f76 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 172 | 2015-01-25T15:16:53.000Z | 2022-01-31T08:25:36.000Z | # Apache License, Version 2.0
import unittest
import random
test = bpy.data.test
# farr - 1-dimensional array of float
# fdarr - dynamic 1-dimensional array of float
# fmarr - 3-dimensional ([3][4][5]) array of float
# fdmarr - dynamic 3-dimensional (ditto size) array of float
# same as above for other types except that the first letter is "i" for int and "b" for bool
class TestArray(unittest.TestCase):
    """Assignment and access tests for 1-D RNA arrays.

    Fixed-size properties: farr/iarr/barr (float/int/bool, length 3);
    dynamic-size counterparts: fdarr/idarr/bdarr.
    """
    # test that assignment works by: assign -> test value
    # - rvalue = list of float
    # - rvalue = list of numbers
    # test.object
    # bpy.data.test.farr[3], iarr[3], barr[...], fmarr, imarr, bmarr
    def setUp(self):
        # known starting contents for every fixed-size array
        test.farr = (1.0, 2.0, 3.0)
        test.iarr = (7, 8, 9)
        test.barr = (False, True, False)

    # test access
    # test slice access, negative indices
    def test_access(self):
        rvals = ([1.0, 2.0, 3.0], [7, 8, 9], [False, True, False])
        for arr, rval in zip((test.farr, test.iarr, test.barr), rvals):
            self.assertEqual(prop_to_list(arr), rval)
            self.assertEqual(arr[0:3], rval)
            self.assertEqual(arr[1:2], rval[1:2])
            self.assertEqual(arr[-1], arr[2])
            self.assertEqual(arr[-2], arr[1])
            self.assertEqual(arr[-3], arr[0])

    # fail when index out of bounds
    def test_access_fail(self):
        for arr in (test.farr, test.iarr, test.barr):
            self.assertRaises(IndexError, lambda: arr[4])

    # test assignment of a whole array
    def test_assign_array(self):
        # should accept int as float
        test.farr = (1, 2, 3)

    # fail when: unexpected no. of items, invalid item type
    def test_assign_array_fail(self):
        def assign_empty_list(arr):
            setattr(test, arr, ())

        for arr in ("farr", "iarr", "barr"):
            self.assertRaises(ValueError, assign_empty_list, arr)

        def assign_invalid_float():
            test.farr = (1.0, 2.0, "3.0")

        def assign_invalid_int():
            test.iarr = ("1", 2, 3)

        def assign_invalid_bool():
            test.barr = (True, 0.123, False)

        for func in [assign_invalid_float, assign_invalid_int, assign_invalid_bool]:
            self.assertRaises(TypeError, func)

        # shouldn't accept float as int
        def assign_float_as_int():
            test.iarr = (1, 2, 3.0)

        self.assertRaises(TypeError, assign_float_as_int)

        # non-dynamic arrays cannot change size
        def assign_different_size(arr, val):
            setattr(test, arr, val)

        for arr, val in zip(("iarr", "farr", "barr"), ((1, 2), (1.0, 2.0), (True, False))):
            self.assertRaises(ValueError, assign_different_size, arr, val)

    # test assignment of specific items
    def test_assign_item(self):
        for arr, rand_func in zip((test.farr, test.iarr, test.barr), (rand_float, rand_int, rand_bool)):
            for i in range(len(arr)):
                val = rand_func()
                arr[i] = val
                self.assertEqual(arr[i], val)

        # float prop should accept also int
        for i in range(len(test.farr)):
            val = rand_int()
            test.farr[i] = val
            self.assertEqual(test.farr[i], float(val))

    # item assignment must reject bad indices and bad value types
    def test_assign_item_fail(self):
        def assign_bad_index(arr):
            arr[4] = 1.0

        def assign_bad_type(arr):
            arr[1] = "123"

        for arr in [test.farr, test.iarr, test.barr]:
            self.assertRaises(IndexError, assign_bad_index, arr)

        # not testing bool because bool allows not only (True|False)
        for arr in [test.farr, test.iarr]:
            self.assertRaises(TypeError, assign_bad_type, arr)

    def test_dynamic_assign_array(self):
        # test various lengths here
        for arr, rand_func in zip(("fdarr", "idarr", "bdarr"), (rand_float, rand_int, rand_bool)):
            for length in range(1, 64):
                rval = make_random_array(length, rand_func)
                setattr(test, arr, rval)
                self.assertEqual(prop_to_list(getattr(test, arr)), rval)

    def test_dynamic_assign_array_fail(self):
        # could also test too big length here
        def assign_empty_list(arr):
            setattr(test, arr, ())

        for arr in ("fdarr", "idarr", "bdarr"):
            self.assertRaises(ValueError, assign_empty_list, arr)
class TestMArray(unittest.TestCase):
    """Tests for 3-dimensional ([3][4][5]) RNA arrays.

    Fixed-size: fmarr/imarr/bmarr; dynamic-size: fdmarr/idmarr/bdmarr.
    """
    def setUp(self):
        # reset dynamic array sizes
        for arr, func in zip(("fdmarr", "idmarr", "bdmarr"), (rand_float, rand_int, rand_bool)):
            setattr(test, arr, make_random_3d_array((3, 4, 5), func))

    # test assignment
    def test_assign_array(self):
        for arr, func in zip(("fmarr", "imarr", "bmarr"), (rand_float, rand_int, rand_bool)):
            # assignment of [3][4][5]
            rval = make_random_3d_array((3, 4, 5), func)
            setattr(test, arr, rval)
            self.assertEqual(prop_to_list(getattr(test, arr)), rval)

    # test assignment of [2][4][5], [1][4][5] should work on dynamic arrays
    def test_assign_array_fail(self):
        def assign_empty_array():
            test.fmarr = ()

        self.assertRaises(ValueError, assign_empty_array)

        def assign_invalid_size(arr, rval):
            setattr(test, arr, rval)

        # assignment of 3,4,4 or 3,3,5 should raise ex
        for arr, func in zip(("fmarr", "imarr", "bmarr"), (rand_float, rand_int, rand_bool)):
            rval = make_random_3d_array((3, 4, 4), func)
            self.assertRaises(ValueError, assign_invalid_size, arr, rval)

            rval = make_random_3d_array((3, 3, 5), func)
            self.assertRaises(ValueError, assign_invalid_size, arr, rval)

            rval = make_random_3d_array((3, 3, 3), func)
            self.assertRaises(ValueError, assign_invalid_size, arr, rval)

    def test_assign_item(self):
        # arr[i] = x
        for arr, func in zip(("fmarr", "imarr", "bmarr", "fdmarr", "idmarr", "bdmarr"),
                             (rand_float, rand_int, rand_bool) * 2):
            rval = make_random_2d_array((4, 5), func)
            for i in range(3):
                getattr(test, arr)[i] = rval
                self.assertEqual(prop_to_list(getattr(test, arr)[i]), rval)

        # arr[i][j] = x
        for arr, func in zip(("fmarr", "imarr", "bmarr", "fdmarr", "idmarr", "bdmarr"),
                             (rand_float, rand_int, rand_bool) * 2):
            arr = getattr(test, arr)
            rval = make_random_array(5, func)
            for i in range(3):
                for j in range(4):
                    arr[i][j] = rval
                    self.assertEqual(prop_to_list(arr[i][j]), rval)

    def test_assign_item_fail(self):
        def assign_wrong_size(arr, i, rval):
            getattr(test, arr)[i] = rval

        # assign wrong size at level 2
        for arr, func in zip(("fmarr", "imarr", "bmarr"), (rand_float, rand_int, rand_bool)):
            rval1 = make_random_2d_array((3, 5), func)
            rval2 = make_random_2d_array((4, 3), func)
            for i in range(3):
                self.assertRaises(ValueError, assign_wrong_size, arr, i, rval1)
                self.assertRaises(ValueError, assign_wrong_size, arr, i, rval2)

    def test_dynamic_assign_array(self):
        for arr, func in zip(("fdmarr", "idmarr", "bdmarr"), (rand_float, rand_int, rand_bool)):
            # assignment of [3][4][5]
            rval = make_random_3d_array((3, 4, 5), func)
            setattr(test, arr, rval)
            self.assertEqual(prop_to_list(getattr(test, arr)), rval)

            # [2][4][5]
            rval = make_random_3d_array((2, 4, 5), func)
            setattr(test, arr, rval)
            self.assertEqual(prop_to_list(getattr(test, arr)), rval)

            # [1][4][5]
            rval = make_random_3d_array((1, 4, 5), func)
            setattr(test, arr, rval)
            self.assertEqual(prop_to_list(getattr(test, arr)), rval)

    # test access
    def test_access(self):
        pass

    # test slice access, negative indices
    def test_access_fail(self):
        pass
random.seed()
def rand_int():
    """Return a random int in [-1000, 1000] (inclusive)."""
    return random.randint(-1000, 1000)
def rand_float():
    """Return rand_int() as a float (always whole-valued)."""
    return float(rand_int())
def rand_bool():
    """Return a random boolean (single randint draw, as before)."""
    return random.randint(0, 1) == 1
def make_random_array(len, rand_func):
    """Return a flat list of `len` values, each drawn from rand_func()."""
    return [rand_func() for _ in range(len)]
def make_random_2d_array(dimsize, rand_func):
    """Return a dimsize[0] x dimsize[1] nested list of rand_func() values.

    Values are drawn row-by-row, left to right (same call order as before).
    """
    n_rows, n_cols = dimsize[0], dimsize[1]
    return [[rand_func() for _ in range(n_cols)] for _ in range(n_rows)]
def make_random_3d_array(dimsize, rand_func):
    """Return a dimsize[0] x dimsize[1] x dimsize[2] nested list.

    Innermost values are drawn from rand_func() in the same depth-first
    order as the original triple loop.
    """
    d0, d1, d2 = dimsize[0], dimsize[1], dimsize[2]
    return [[[rand_func() for _ in range(d2)]
             for _ in range(d1)]
            for _ in range(d0)]
def prop_to_list(prop):
    """Recursively convert an (array-like) property into nested plain lists.

    Scalars of type bool/int/float are kept as-is; anything else is
    assumed iterable and converted recursively.
    """
    return [item if type(item) in {bool, int, float} else prop_to_list(item)
            for item in prop]
def suite():
    """Collect both array test cases into a single unittest suite."""
    return unittest.TestSuite([
        unittest.TestLoader().loadTestsFromTestCase(TestArray),
        unittest.TestLoader().loadTestsFromTestCase(TestMArray),
    ])
if __name__ == "__main__":
unittest.TextTestRunner(verbosity=2).run(suite())
| 31.77027 | 104 | 0.582093 |
0fa709a1a820dc6486394960922ab5ceeb1d5ab6 | 9,411 | py | Python | src/quad_solve_attempt_2.py | yuanagain/seniorthesis | c0ef4d703c286c8562b5ebb4ed0fbe162e0c4cf2 | [
"MIT"
] | null | null | null | src/quad_solve_attempt_2.py | yuanagain/seniorthesis | c0ef4d703c286c8562b5ebb4ed0fbe162e0c4cf2 | [
"MIT"
] | null | null | null | src/quad_solve_attempt_2.py | yuanagain/seniorthesis | c0ef4d703c286c8562b5ebb4ed0fbe162e0c4cf2 | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
##
import sys
import random
## equation class
# Upper-triangular 4x4 coefficient table (entries below the diagonal are 0).
# NOTE(review): not referenced by any code visible in this file — confirm
# whether it is still needed.
coeffs = [ [4, 5, 6, 7 ],
           [0, 9, 10, 11 ],
           [0, 0, 12, 13 ],
           [0, 0, 0, 14 ]]
def van_der_pol_oscillator_deriv(x, t):
    """Right-hand side of the Van der Pol oscillator, for use with odeint.

    x: state vector [position, velocity]; t: time (unused, required by the
    odeint signature).
    NOTE(review): depends on a module-level `mu` that is not defined in
    this file's visible scope — confirm it is set before calling.
    """
    nx0 = x[1]
    nx1 = -mu * (x[0] ** 2.0 - 1.0) * x[1] - x[0]
    res = np.array([nx0, nx1])
    return res
# ts = np.linspace(0.0, 50.0, 500)
# xs = odeint(van_der_pol_oscillator_deriv, [0.2, 0.2], ts)
# plt.plot(xs[:,0], xs[:,1])
# xs = odeint(van_der_pol_oscillator_deriv, [-3.0, -3.0], ts)
# plt.plot(xs[:,0], xs[:,1])
# xs = odeint(van_der_pol_oscillator_deriv, [4.0, 4.0], ts)
# plt.plot(xs[:,0], xs[:,1])
# plt.gca().set_aspect('equal')
# plt.savefig('vanderpol_oscillator.png')
# plt.show()
def quadtheta(t, y):
    """Trivial vector field: all four derivative components are zero."""
    return 0., 0., 0., 0.
## TODO
# define coefficient object, a[i][j] = ...
# define evaluation
class FourQuad:
    """Polynomial evaluated over a four-component quantity.

    Fixes over the original:
    * ``__init__`` assigned a *local* ``coeffs`` instead of the instance
      attribute that ``evaluate`` reads — now sets ``self.coeffs``.
    * ``evaluate`` initialised its accumulator from an undefined
      ``_zero()`` helper — it now starts from 0, the additive identity
      for any numeric coefficient type.
    Method names and arities are unchanged; ``differentiate`` and the
    iteration hooks remain the original stubs.
    """

    def __init__(self):
        # evaluate() reads self.coeffs; callers assign it after construction.
        self.coeffs = None

    def evaluate(self, z):
        """Return sum(coeffs[i] * z**i) evaluated at z."""
        total = 0
        coeffs = self.coeffs
        for i in range(len(coeffs)):
            total = total + coeffs[i] * z ** i
        return total

    def differentiate(self, z):
        # TODO: unimplemented in the original as well; kept as a stub.
        return

    def __iter__(self):
        # stub, as in the original (returning None is not a valid iterator)
        return

    def __getitem__(self, key):
        return key
## Next steps:
## Need to define class, coefficient calling
## Just use scalars, can switch to intervals easily
## How general?
## Visualiation of scaling by one dimension
## Autodifferentiation
## viz to discard big parts of the space, esp. spirals, etc.,
## help decide where not to start
def quad_distance(w, x, y, z):
    """Component-wise squared 4-norm: out[i] = w[i]^2 + x[i]^2 + y[i]^2 + z[i]^2."""
    totals = []
    for idx in range(len(w)):
        totals.append(w[idx] ** 2 + x[idx] ** 2 + y[idx] ** 2 + z[idx] ** 2)
    return totals
def quad1(w, x, y, z, s=10, r=28, b=2.667):
w_dot = x*y - b*z
x_dot = s*(y - x)
y_dot = r*x - y - x*z
z_dot = x*y - b*z
return w_dot, x_dot, y_dot, z_dot
## setting up parameters
# default_lambda_1 = .2523718
# default_lambda_2 = .392931
# default_lambda_3 = 1 - default_lambda_1 - default_lambda_2
## March 27
# Parameter triple used as the defaults of quad2 (the three sum to 1.0).
default_lambda_1, default_lambda_2, default_lambda_3 = 0.086, 0.141, 0.773
def quad2(x_1, y_1, x_2, y_2,
          lambda_1 = default_lambda_1,
          lambda_2 = default_lambda_2,
          lambda_3 = default_lambda_3):
    """
    dz1/dt = lambda_2 * z1^2 - (lambda_2 + lambda_3) * z1 * z2
    dz2/dt = lambda_1 * z2^2 - (lambda_1 + lambda_3) * z1 * z2
    http://www.math.kit.edu/iag3/~herrlich/seite/wws-11/media/wws-talk-valdez.pdf
    """
    # real and imaginary parts of the cross product z1 * z2
    cross_re = x_1 * x_2 - y_1 * y_2
    cross_im = x_1 * y_2 + y_1 * x_2

    x_1_dot = lambda_2 * (x_1 ** 2 - y_1 ** 2) - (lambda_2 + lambda_3) * cross_re
    y_1_dot = 2 * lambda_2 * x_1 * y_1 - (lambda_2 + lambda_3) * cross_im
    x_2_dot = lambda_1 * (x_2 ** 2 - y_2 ** 2) - (lambda_1 + lambda_3) * cross_re
    y_2_dot = 2 * lambda_1 * x_2 * y_2 - (lambda_1 + lambda_3) * cross_im
    return x_1_dot, y_1_dot, x_2_dot, y_2_dot
def plot_quad(ws, xs, ys, zs, plot_type = 0, txt = ""):
    """Visualise a 4-component trajectory.

    plot_type 0: two stacked 2-D phase plots (x vs w, then y vs z)
    plot_type 1: both phase plots overlaid on one axes
    plot_type 2: trajectory scaled by quad_distance and drawn in 3-D
    txt: figure super-title
    """
    if plot_type == 0:
        print("Plotting Double Plot Quad Viz")
        plt.figure(1)

        plt.subplot(2, 1, 1)
        plt.subplots_adjust(top=0.85)
        plt.plot(xs, ws)
        #plt.yscale('linear')
        plt.title('xy')  # NOTE(review): axes plotted are x vs w — confirm label
        plt.grid(True)
        #plt.gca().set_aspect('equal')

        plt.subplot(2, 1, 2)
        plt.plot(ys, zs)
        #plt.yscale('linear')
        plt.title('wz')  # NOTE(review): axes plotted are y vs z — confirm label
        plt.grid(True)
        #plt.gca().set_aspect('equal')

        plt.suptitle(txt, fontsize=14)
        plt.show()
    elif plot_type == 1:
        print("Plotting Overlain Double Plot Quad Viz")
        plt.figure(1)
        plt.plot(xs, ws)
        plt.plot(ys, zs)
        #plt.yscale('linear')
        plt.title('x-w, y-z')
        plt.grid(True)
        #plt.gca().set_aspect('equal')
        plt.suptitle(txt, fontsize=14)
        plt.show()
    elif plot_type == 2:
        print("Plotting Sphere Plot Quad Viz")
        fig = plt.figure()
        ax = fig.gca(projection='3d')
        plt.subplots_adjust(top=0.85)
        plt.suptitle(txt, fontsize=14)

        qdist = quad_distance(ws, xs, ys, zs)
        # NOTE(review): qdist is the *squared* 4-norm, so this divides by
        # the squared norm rather than normalising to the unit sphere —
        # confirm intended.
        ws = np.divide(ws, qdist)
        xs = np.divide(xs, qdist)
        ys = np.divide(ys, qdist)
        zs = np.divide(zs, qdist)

        ax.plot(xs, ys, zs)
        ax.set_xlabel("X Axis")
        ax.set_ylabel("Y Axis")
        ax.set_zlabel("Z Axis")
        ax.set_title("Nonrigorous Solution")
        plt.show()
    else:
        print("Invalid Plot Type")
def main(argv):
    """Run one of three hard-coded demo simulations (selected by `sim` below)
    of the quadratic ODE system via forward Euler, then visualise it.
    argv[1] selects the plot style passed through to plot_quad (0, 1 or 2).
    """
    if len(argv) < 2:
        print("usage: python <fname> <code> where <code> = 0, 1, 2")
        print("0: dual plots")
        print("1: overlain")
        print("2: spherical projection")
        return
    # which demo to run is chosen by editing this constant, not the CLI
    sim = 'demo3'
    if sim == 'demo1':
        # forward Euler with fixed step on the quad1 field (defined earlier in this file)
        dt = 0.01
        stepCnt = 10000
        # Need one more for the initial values
        ws = np.empty((stepCnt + 1,))
        xs = np.empty((stepCnt + 1,))
        ys = np.empty((stepCnt + 1,))
        zs = np.empty((stepCnt + 1,))
        # Setting initial values
        ws[0], xs[0], ys[0], zs[0] = ( 0.372854105052,
                                       0.393518965248,
                                       -0.0359026080443,
                                       -0.216701666067 )
        # Stepping through "time".
        for i in range(stepCnt):
            # Derivatives of the W, X, Y, Z state
            w_dot, x_dot, y_dot, z_dot = quad1(ws[i], xs[i], ys[i], zs[i])
            ws[i + 1] = ws[i] + (w_dot * dt)
            xs[i + 1] = xs[i] + (x_dot * dt)
            ys[i + 1] = ys[i] + (y_dot * dt)
            zs[i + 1] = zs[i] + (z_dot * dt)
        plot_quad(ws, xs, ys, zs, float(argv[1]))
    elif sim == 'demo2':
        # same scheme as demo1 but on the quad2 field with 10x more steps
        dt = 0.01
        stepCnt = 100000
        # Need one more for the initial values
        ws = np.empty((stepCnt + 1,))
        xs = np.empty((stepCnt + 1,))
        ys = np.empty((stepCnt + 1,))
        zs = np.empty((stepCnt + 1,))
        # Setting initial values
        ws[0], xs[0], ys[0], zs[0] = ( 0.372854105052,
                                       0.393518965248,
                                       -0.0359026080443,
                                       -0.216701666067 )
        # Stepping through "time".
        for i in range(stepCnt):
            # Derivatives of the W, X, Y, Z state
            w_dot, x_dot, y_dot, z_dot = quad2(ws[i], xs[i], ys[i], zs[i])
            ws[i + 1] = ws[i] + (w_dot * dt)
            xs[i + 1] = xs[i] + (x_dot * dt)
            ys[i + 1] = ys[i] + (y_dot * dt)
            zs[i + 1] = zs[i] + (z_dot * dt)
        plot_quad(ws, xs, ys, zs, float(argv[1]))
    elif sim == 'demo3':
        """
        Loop through simulations
        """
        # NOTE(review): the loop bound is 1, so only a single simulation runs;
        # the inner loops below reuse the name `i`, shadowing this index
        for i in range(1):
            lambda_1 = default_lambda_1
            lambda_2 = default_lambda_2
            lambda_3 = default_lambda_3
            dt = 0.01
            stepCnt = 100000
            # Need one more for the initial values
            ws = np.empty((stepCnt + 1,))
            xs = np.empty((stepCnt + 1,))
            ys = np.empty((stepCnt + 1,))
            zs = np.empty((stepCnt + 1,))
            # Setting initial values
            ws[0], xs[0], ys[0], zs[0] = ( 0.032,
                                           0.308,
                                           -0.1,
                                           -0.5 )
            # Stepping through "time".
            print("LAMBDAS")
            print(lambda_1, lambda_2, lambda_3)
            for i in range(stepCnt):
                # Derivatives of the W, X, Y, Z state
                w_dot, x_dot, y_dot, z_dot = quad2(ws[i], xs[i], ys[i], zs[i],
                                                   lambda_1, lambda_2, lambda_3)
                ws[i + 1] = ws[i] + (w_dot * dt)
                xs[i + 1] = xs[i] + (x_dot * dt)
                ys[i + 1] = ys[i] + (y_dot * dt)
                zs[i + 1] = zs[i] + (z_dot * dt)
            # display initial value
            print("w_0, x_0, y_0, z_0 = "
                  + str(ws[0]) + ", "
                  + str(xs[0]) + ", "
                  + str(ys[0]) + ", "
                  + str(zs[0]))
            # display parameters
            print("lambda_1, lambda_2, lambda_3 = "
                  + str(lambda_1) + ", "
                  + str(lambda_2) + ", "
                  + str(lambda_3))
            txt = ("Parameters: lambda_1, lambda_2, lambda_3 = "
                   + str(round(lambda_1, 3)) + ", "
                   + str(round(lambda_2, 3)) + ", "
                   + str(round(lambda_3, 3)) + '\n'
                   + "Initial Point: w_0, x_0, y_0, z_0 = "
                   + str(round(ws[0], 3)) + ", "
                   + str(round(xs[0], 3)) + ", "
                   + str(round(ys[0], 3)) + ", "
                   + str(round(zs[0], 3)) )
            plot_quad(ws, xs, ys, zs, float(argv[1]), txt = txt)
if __name__=="__main__":
    main(sys.argv)
### #
# TODO
# Plot overlain
# plot many areas
| 28.779817 | 88 | 0.470088 |
24070bcd774c1c0c7c8d887b581e95d56ea3086d | 5,543 | py | Python | model.py | arbaaz-abz/EAST-TextDetector | 60376ce027fab9f909e4a75530bd89034a182a38 | [
"MIT"
] | 47 | 2019-02-21T02:31:06.000Z | 2021-06-23T10:14:13.000Z | model.py | arbaaz-abz/EAST-TextDetector | 60376ce027fab9f909e4a75530bd89034a182a38 | [
"MIT"
] | 21 | 2020-09-25T22:41:00.000Z | 2022-03-12T00:50:43.000Z | model.py | arbaaz-abz/EAST-TextDetector | 60376ce027fab9f909e4a75530bd89034a182a38 | [
"MIT"
] | 11 | 2019-02-24T01:54:21.000Z | 2022-02-16T09:45:54.000Z | import tensorflow as tf
import numpy as np
from tensorflow.contrib import slim
tf.app.flags.DEFINE_integer('text_scale', 512, '')
from nets import resnet_v1
FLAGS = tf.app.flags.FLAGS
def unpool(inputs):
    """Upsample an NHWC feature map to twice its spatial size via bilinear resize."""
    in_shape = tf.shape(inputs)
    target_size = [in_shape[1] * 2, in_shape[2] * 2]
    return tf.image.resize_bilinear(inputs, size=target_size)
def mean_image_subtraction(images, means=(123.68, 116.78, 103.94)):
    '''
    image normalization: subtract the per-channel mean (ImageNet RGB means by
    default) from a batch of NHWC images.
    :param images: 4-D tensor, channels on the last axis
    :param means: one mean value per channel (tuple rather than a list so the
        default argument is immutable — avoids the shared-mutable-default pitfall)
    :return: tensor of the same shape with means subtracted
    :raises ValueError: if len(means) does not match the channel count
    '''
    num_channels = images.get_shape().as_list()[-1]
    if len(means) != num_channels:
        raise ValueError('len(means) must match the number of channels')
    # split into per-channel tensors, shift each, and re-assemble
    channels = tf.split(axis=3, num_or_size_splits=num_channels, value=images)
    for i in range(num_channels):
        channels[i] -= means[i]
    return tf.concat(axis=3, values=channels)
def model(images, weight_decay=1e-5, is_training=True):
    '''
    Define the EAST detection network: a slim ResNet-50 backbone followed by a
    top-down feature-merging branch (pool5 -> pool2) and two heads producing
    the text score map F_score and the 5-channel geometry map F_geometry.
    :param images: NHWC image batch (channels are split on axis 3 below)
    :param weight_decay: l2 regularisation strength for the conv layers
    :param is_training: passed to the backbone and to batch norm
    :return: (F_score, F_geometry)
    '''
    images = mean_image_subtraction(images)
    with slim.arg_scope(resnet_v1.resnet_arg_scope(weight_decay=weight_decay)):
        logits, end_points = resnet_v1.resnet_v1_50(images, is_training=is_training, scope='resnet_v1_50')
    with tf.variable_scope('feature_fusion', values=[end_points.values]):
        batch_norm_params = {
        'decay': 0.997,
        'epsilon': 1e-5,
        'scale': True,
        'is_training': is_training
        }
        with slim.arg_scope([slim.conv2d],
                            activation_fn=tf.nn.relu,
                            normalizer_fn=slim.batch_norm,
                            normalizer_params=batch_norm_params,
                            weights_regularizer=slim.l2_regularizer(weight_decay)):
            # backbone features ordered deepest first (pool5) to shallowest (pool2)
            f = [end_points['pool5'], end_points['pool4'],
                 end_points['pool3'], end_points['pool2']]
            for i in range(4):
                print('Shape of f_{} {}'.format(i, f[i].shape))
            g = [None, None, None, None]
            h = [None, None, None, None]
            # channel widths for each merge stage (stage 0 keeps pool5 as-is)
            num_outputs = [None, 128, 64, 32]
            for i in range(4):
                if i == 0:
                    h[i] = f[i]
                else:
                    # merge: concat upsampled deeper features with this stage, 1x1 then 3x3 conv
                    c1_1 = slim.conv2d(tf.concat([g[i-1], f[i]], axis=-1), num_outputs[i], 1)
                    h[i] = slim.conv2d(c1_1, num_outputs[i], 3)
                if i <= 2:
                    g[i] = unpool(h[i])
                else:
                    g[i] = slim.conv2d(h[i], num_outputs[i], 3)
                print('Shape of h_{} {}, g_{} {}'.format(i, h[i].shape, i, g[i].shape))
            # here we use a slightly different way for regression part,
            # we first use a sigmoid to limit the regression range, and also
            # this is do with the angle map
            F_score = slim.conv2d(g[3], 1, 1, activation_fn=tf.nn.sigmoid, normalizer_fn=None)
            # 4 channel of axis aligned bbox and 1 channel rotation angle
            geo_map = slim.conv2d(g[3], 4, 1, activation_fn=tf.nn.sigmoid, normalizer_fn=None) * FLAGS.text_scale
            angle_map = (slim.conv2d(g[3], 1, 1, activation_fn=tf.nn.sigmoid, normalizer_fn=None) - 0.5) * np.pi/2 # angle is between [-45, 45]
            F_geometry = tf.concat([geo_map, angle_map], axis=-1)
    return F_score, F_geometry
def dice_coefficient(y_true_cls, y_pred_cls,
                     training_mask):
    '''
    Soft dice loss between the true and predicted score maps, restricted to the
    trainable region given by training_mask.  Also logs the value as a summary.
    :param y_true_cls: ground-truth score map
    :param y_pred_cls: predicted score map
    :param training_mask: 1 where pixels contribute to training, 0 elsewhere
    :return: scalar loss tensor, 1 - dice coefficient
    '''
    eps = 1e-5  # keeps the denominator away from zero on empty masks
    overlap = tf.reduce_sum(y_true_cls * y_pred_cls * training_mask)
    total = tf.reduce_sum(y_true_cls * training_mask) + tf.reduce_sum(y_pred_cls * training_mask) + eps
    loss = 1. - (2 * overlap / total)
    tf.summary.scalar('classification_dice_loss', loss)
    return loss
def loss(y_true_cls, y_pred_cls,
         y_true_geo, y_pred_geo,
         training_mask):
    '''
    define the loss used for training, containing two parts:
    the first part is a dice loss instead of weighted logloss,
    the second part is the iou loss defined in the paper
    :param y_true_cls: ground truth of text score map
    :param y_pred_cls: prediction of text score map
    :param y_true_geo: ground truth of geometry (5 channels: d1..d4, theta)
    :param y_pred_geo: prediction of geometry (same layout)
    :param training_mask: mask used in training, to ignore some text annotated by ###
    :return: scalar training loss
    '''
    classification_loss = dice_coefficient(y_true_cls, y_pred_cls, training_mask)
    # scale classification loss to match the iou loss part
    classification_loss *= 0.01
    # d1 -> top, d2->right, d3->bottom, d4->left
    d1_gt, d2_gt, d3_gt, d4_gt, theta_gt = tf.split(value=y_true_geo, num_or_size_splits=5, axis=3)
    d1_pred, d2_pred, d3_pred, d4_pred, theta_pred = tf.split(value=y_pred_geo, num_or_size_splits=5, axis=3)
    # axis-aligned box areas from the four edge distances
    area_gt = (d1_gt + d3_gt) * (d2_gt + d4_gt)
    area_pred = (d1_pred + d3_pred) * (d2_pred + d4_pred)
    w_union = tf.minimum(d2_gt, d2_pred) + tf.minimum(d4_gt, d4_pred)
    h_union = tf.minimum(d1_gt, d1_pred) + tf.minimum(d3_gt, d3_pred)
    area_intersect = w_union * h_union
    area_union = area_gt + area_pred - area_intersect
    # -log IoU, smoothed by +1 in numerator and denominator
    L_AABB = -tf.log((area_intersect + 1.0)/(area_union + 1.0))
    # cosine penalty on the angle error
    L_theta = 1 - tf.cos(theta_pred - theta_gt)
    tf.summary.scalar('geometry_AABB', tf.reduce_mean(L_AABB * y_true_cls * training_mask))
    tf.summary.scalar('geometry_theta', tf.reduce_mean(L_theta * y_true_cls * training_mask))
    # angle term weighted 20x relative to the box term
    L_g = L_AABB + 20 * L_theta
    return tf.reduce_mean(L_g * y_true_cls * training_mask) + classification_loss
| 40.459854 | 143 | 0.628721 |
aa86c03ca017a56368254b7623fe0de37544f2d0 | 3,548 | py | Python | imdbSpider.py | Shulun/imdbSpider | 9158977f569c0be22b61f151e2b5c771de7e4d54 | [
"MIT"
] | null | null | null | imdbSpider.py | Shulun/imdbSpider | 9158977f569c0be22b61f151e2b5c771de7e4d54 | [
"MIT"
] | null | null | null | imdbSpider.py | Shulun/imdbSpider | 9158977f569c0be22b61f151e2b5c771de7e4d54 | [
"MIT"
] | null | null | null | #coding:utf-8
from urllib import request
from bs4 import BeautifulSoup as bs
import re
import wordsegment as ws
import pandas as pd
import numpy
import matplotlib
matplotlib.use('Agg')
matplotlib.rcParams['figure.figsize'] = (20.0, 10.0)
import matplotlib.pyplot as plt
# %matplotlib inline
from wordcloud import WordCloud
import itertools
import time
def getNowPlayingMovieList(url):
    """Scrape the IMDb in-theaters page and return a list of
    {'name': movie title, 'id': IMDb title id} dicts, one per listed movie."""
    html_data = request.urlopen(url).read().decode('latin-1')
    soup = bs(html_data, 'html.parser')
    nowplaying_list = []
    for heading in soup.find_all('h4', itemprop='name'):
        anchor = heading.a
        # the title id is the second path segment of the anchor href (/title/<id>/...)
        nowplaying_list.append({
            'name': anchor['title'],
            'id': anchor.get('href').split('/')[2],
        })
    return nowplaying_list
def getReviewsById(movieId, reviewNum, url):
    """Fetch approximately the latest `reviewNum` user reviews for a movie.

    IMDb pages reviews 10 at a time; full pages are fetched for each multiple
    of 10 and a final partial page is truncated to the remainder.  Returns a
    flat list of BeautifulSoup <p> elements.
    :param movieId: IMDb title id (e.g. 'tt0111161')
    :param reviewNum: total number of reviews wanted
    :param url: base title URL, e.g. 'http://www.imdb.com/title/'
    """
    def _scrape_page(offset):
        # One reviews page: keep plain <p> tags that carry no class attribute
        # and contain neither links nor bold markup (those are boilerplate).
        requrl = url + movieId + '/reviews?start=' + str(offset)
        html_data = request.urlopen(requrl).read().decode('latin-1')
        soup = bs(html_data, 'html.parser')
        return [rev for rev in soup.find_all('p', class_=None)
                if not rev.a and not rev.b]

    reviewList = []
    rem = reviewNum % 10
    for offset in range(0, reviewNum - rem, 10):
        # drop the trailing <p> of each full page, which is not a review
        reviewList.append(_scrape_page(offset)[:-1])
    if rem:
        reviewList.append(_scrape_page(reviewNum - rem)[:rem])
    return list(itertools.chain.from_iterable(reviewList))
def processReviews(revList):
    """Concatenate review nodes, strip HTML tags, and keep only ASCII letters.

    Each element is stringified and whitespace-trimmed before joining, so the
    function accepts both soup elements and plain strings.
    """
    raw_text = ''.join(str(rev).strip() for rev in revList)
    markup_free = re.sub('<.*?>', '', raw_text)
    return ''.join(re.findall(r'[a-zA-Z]', markup_free))
def main():
    """Scrape reviews for one now-playing movie, segment the text into words,
    drop stopwords, and save a 500-word word cloud to wordcloud.tiff."""
    url = 'http://www.imdb.com/movies-in-theaters/'
    url1 = 'http://www.imdb.com/title/'
    now_movie_list = getNowPlayingMovieList(url)
    # Get latest 100 reviews of the movie
    # NOTE(review): index 14 hard-codes which listed movie is analysed and will
    # raise IndexError if fewer than 15 movies are listed
    movie_id = now_movie_list[14]['id']
    movie_reviews = getReviewsById(movie_id, 100, url1)
    processed_reviews = processReviews(movie_reviews)
    # wordsegment splits the de-spaced letter stream back into words
    segment = ws.segment(processed_reviews)
    words_df = pd.DataFrame({'segment':segment})
    # quoting=3 is csv.QUOTE_NONE: read the stopword list verbatim, one per line
    stopwords = pd.read_csv('stopwords.txt', index_col=False, quoting=3, sep='\t', names=['stopword'], encoding='utf-8')
    words_df = words_df[~words_df.segment.isin(stopwords.stopword)]
    # NOTE(review): dict-style .agg({'count': numpy.size}) on a SeriesGroupBy is
    # deprecated/removed in modern pandas — confirm the pinned pandas version
    words_stat = words_df.groupby(by=['segment'])['segment'].agg({'count':numpy.size})
    words_stat = words_stat.reset_index().sort_values(by=['count'], ascending=False)
    wordcloud = WordCloud(font_path='simhei.ttf', background_color='white', max_font_size=100)
    # keep the 500 most frequent words as {word: count}
    word_frequency = {x[0]:x[1] for x in words_stat.head(500).values}
    wordcloud = wordcloud.fit_words(word_frequency)
    plt.imshow(wordcloud)
    plt.axis('off')
    plt.savefig('wordcloud.tiff', bbox_inches='tight', dpi=300)
if __name__ == '__main__':
    # time.clock() was removed in Python 3.8; perf_counter() is the
    # recommended wall-clock replacement for interval timing
    start = time.perf_counter()
    main()
    print('Program finishes in', time.perf_counter()-start, 'seconds')
6c704136484309a8f16cbf73c98ab65e0794583d | 14,849 | py | Python | cryptoapis/model/add_tokens_to_existing_from_address_rb_token_data.py | Crypto-APIs/Crypto_APIs_2.0_SDK_Python | c59ebd914850622b2c6500c4c30af31fb9cecf0e | [
"MIT"
] | 5 | 2021-05-17T04:45:03.000Z | 2022-03-23T12:51:46.000Z | cryptoapis/model/add_tokens_to_existing_from_address_rb_token_data.py | Crypto-APIs/Crypto_APIs_2.0_SDK_Python | c59ebd914850622b2c6500c4c30af31fb9cecf0e | [
"MIT"
] | null | null | null | cryptoapis/model/add_tokens_to_existing_from_address_rb_token_data.py | Crypto-APIs/Crypto_APIs_2.0_SDK_Python | c59ebd914850622b2c6500c4c30af31fb9cecf0e | [
"MIT"
] | 2 | 2021-06-02T07:32:26.000Z | 2022-02-12T02:36:23.000Z | """
CryptoAPIs
Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: developers@cryptoapis.io
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from cryptoapis.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from cryptoapis.exceptions import ApiAttributeError
def lazy_import():
    # Deferred imports registered via globals(): the OpenAPI-generator pattern
    # for resolving model classes at first use (presumably to avoid circular
    # imports between generated model modules).
    from cryptoapis.model.add_tokens_to_existing_from_address_rb_token_data_bitcoin_omni_token import AddTokensToExistingFromAddressRBTokenDataBitcoinOmniToken
    from cryptoapis.model.add_tokens_to_existing_from_address_rb_token_data_ethereum_token import AddTokensToExistingFromAddressRBTokenDataEthereumToken
    globals()['AddTokensToExistingFromAddressRBTokenDataBitcoinOmniToken'] = AddTokensToExistingFromAddressRBTokenDataBitcoinOmniToken
    globals()['AddTokensToExistingFromAddressRBTokenDataEthereumToken'] = AddTokensToExistingFromAddressRBTokenDataEthereumToken
class AddTokensToExistingFromAddressRBTokenData(ModelComposed):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """
    # no enum-restricted attributes on this model
    allowed_values = {
    }
    # no length/range/regex constraints on this model
    validations = {
    }
    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        lazy_import()
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501
    _nullable = False
    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        lazy_import()
        return {
            'property_id': (int,),  # noqa: E501
            'contract_address': (str,),  # noqa: E501
        }
    @cached_property
    def discriminator():
        return None
    # python attribute name -> JSON wire name
    attribute_map = {
        'property_id': 'propertyId',  # noqa: E501
        'contract_address': 'contractAddress',  # noqa: E501
    }
    read_only_vars = {
    }
    # deserialization entry point: unlike __init__, it may set read-only attributes
    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):  # noqa: E501
        """AddTokensToExistingFromAddressRBTokenData - a model defined in OpenAPI
        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            property_id (int): Represents the specific `propertyId` of the token data that will be forwarded.. [optional]  # noqa: E501
            contract_address (str): Represents the specific `contractAddress` of the Token that will be forwarded.. [optional]  # noqa: E501
        """
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        self = super(OpenApiModel, cls).__new__(cls)
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        constant_args = {
            '_check_type': _check_type,
            '_path_to_item': _path_to_item,
            '_spec_property_naming': _spec_property_naming,
            '_configuration': _configuration,
            '_visited_composed_classes': self._visited_composed_classes,
        }
        composed_info = validate_get_composed_info(
            constant_args, kwargs, self)
        self._composed_instances = composed_info[0]
        self._var_name_to_model_instances = composed_info[1]
        self._additional_properties_model_instances = composed_info[2]
        discarded_args = composed_info[3]
        for var_name, var_value in kwargs.items():
            if var_name in discarded_args and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self._additional_properties_model_instances:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
        return self
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
        '_composed_instances',
        '_var_name_to_model_instances',
        '_additional_properties_model_instances',
    ])
    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):  # noqa: E501
        """AddTokensToExistingFromAddressRBTokenData - a model defined in OpenAPI
        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            property_id (int): Represents the specific `propertyId` of the token data that will be forwarded.. [optional]  # noqa: E501
            contract_address (str): Represents the specific `contractAddress` of the Token that will be forwarded.. [optional]  # noqa: E501
        """
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        constant_args = {
            '_check_type': _check_type,
            '_path_to_item': _path_to_item,
            '_spec_property_naming': _spec_property_naming,
            '_configuration': _configuration,
            '_visited_composed_classes': self._visited_composed_classes,
        }
        composed_info = validate_get_composed_info(
            constant_args, kwargs, self)
        self._composed_instances = composed_info[0]
        self._var_name_to_model_instances = composed_info[1]
        self._additional_properties_model_instances = composed_info[2]
        discarded_args = composed_info[3]
        for var_name, var_value in kwargs.items():
            if var_name in discarded_args and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self._additional_properties_model_instances:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
            if var_name in self.read_only_vars:
                raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                     f"class with read only attributes.")
    # composed via `oneOf` of the Bitcoin-Omni and Ethereum token-data variants
    @cached_property
    def _composed_schemas():
        # we need this here to make our import statements work
        # we must store _composed_schemas in here so the code is only run
        # when we invoke this method. If we kept this at the class
        # level we would get an error because the class level
        # code would be run when this module is imported, and these composed
        # classes don't exist yet because their module has not finished
        # loading
        lazy_import()
        return {
            'anyOf': [
            ],
            'allOf': [
            ],
            'oneOf': [
                AddTokensToExistingFromAddressRBTokenDataBitcoinOmniToken,
                AddTokensToExistingFromAddressRBTokenDataEthereumToken,
            ],
        }
| 46.403125 | 484 | 0.609671 |
34dfdb2dd87b0a786ed6c030aa431d733f0c4e83 | 4,800 | py | Python | src/pypyr/mappings.py | joelphillips/pypyramid | be1b4760235d859755771e55c003396e02b72f91 | [
"BSD-3-Clause"
] | 1 | 2015-01-01T16:26:16.000Z | 2015-01-01T16:26:16.000Z | src/pypyr/mappings.py | joelphillips/pypyramid | be1b4760235d859755771e55c003396e02b72f91 | [
"BSD-3-Clause"
] | null | null | null | src/pypyr/mappings.py | joelphillips/pypyramid | be1b4760235d859755771e55c003396e02b72f91 | [
"BSD-3-Clause"
] | null | null | null | '''
Created on Aug 20, 2010
@author: joel
'''
import numpy
def applyweights(wp, vp):
    """Contract a stack of pointwise weight operators with a stack of values.

    wp is padded with trailing singleton axes up to 3 dimensions; a 2-D vp is
    treated as a stack of scalars.  Returns sum over the last axis of
    wp[:, newaxis] * vp[:, :, newaxis, :].
    """
    trailing = (1,) * (3 - len(wp.shape))
    weights = wp.reshape(wp.shape + trailing)
    values = vp[:, :, numpy.newaxis] if len(vp.shape) == 2 else vp
    return numpy.sum(weights[:, numpy.newaxis] * values[:, :, numpy.newaxis, :], axis=3)
def psi(p):
    """Map the reference pyramid to the infinite pyramid via p / (1 - zeta)."""
    zeta = p[:, 2].reshape(-1, 1)
    mapped = p / (1 - zeta)
    # apex points (zeta == 1) with x = y = 0 are sent to [0, 0, inf]; apex
    # points with nonzero x, y are left as computed — imperfect, as they do
    # not truly map to [0, 0, inf]
    for idx in numpy.flatnonzero(zeta == 1):
        if (numpy.abs(p[idx, (0, 1)]) < 1E-10).all():
            mapped[idx] = numpy.array([0, 0, numpy.inf])
    return mapped
def psijac(p):
    """Jacobian of psi at each point; returns an (n, 3, 3) stack.

    Each matrix is [[z1, 0, xi*z2], [0, z1, eta*z2], [0, 0, z2]]
    with z1 = 1/(1-zeta) and z2 = z1**2.
    """
    xi, eta, zeta = p[:, 0], p[:, 1], p[:, 2]
    z1 = 1 / (1 - zeta)
    z2 = z1 * z1
    jac = numpy.zeros((len(p), 3, 3))
    jac[:, 0, 0] = z1
    jac[:, 0, 2] = xi * z2
    jac[:, 1, 1] = z1
    jac[:, 1, 2] = eta * z2
    jac[:, 2, 2] = z2
    return jac
def psiinvjac(p):
    """Inverse Jacobian of psi at each point; returns an (n, 3, 3) stack.

    Each matrix is [[z1, 0, -xi*z1], [0, z1, -eta*z1], [0, 0, z1**2]]
    with z1 = 1 - zeta.
    """
    xi, eta, zeta = p[:, 0], p[:, 1], p[:, 2]
    z1 = 1 - zeta
    zeros = numpy.zeros_like(xi)
    return numpy.stack([
        numpy.stack([z1, zeros, -xi * z1], axis=-1),
        numpy.stack([zeros, z1, -eta * z1], axis=-1),
        numpy.stack([zeros, zeros, z1 * z1], axis=-1),
    ], axis=1)
def psidet(p):
    """Jacobian determinant of psi: (1 - zeta)^(-4), returned with shape (n, 1)."""
    one_minus_zeta = 1 - p[:, 2].reshape(-1, 1)
    return 1 / one_minus_zeta ** 4
def derham3dweights(jac, invjac=None, dets=None):
    """Build the four pullback weight functions of the 3-D de Rham sequence.

    Given per-point Jacobians `jac(p) -> (n, 3, 3)`, returns
    [w0, w1, w2, w3] = [identity scaling, transposed Jacobian (H(curl)),
    det-weighted inverse Jacobian (H(div)), determinants (L2)].
    :param jac: callable mapping points (n, 3) to a Jacobian stack (n, 3, 3)
    :param invjac: optional callable returning the per-point inverse Jacobians;
        defaults to inverting jac(p) pointwise
    :param dets: optional callable returning the per-point determinants as an
        ndarray; defaults to computing them from jac(p)
    """
    from numpy.linalg import inv, det
    from numpy import ones, transpose
    if invjac is None:
        # numpy.linalg.inv inverts each 3x3 matrix in the stack; the previous
        # tensorsolve-based default raised LinAlgError for these shapes
        invjac = lambda p: inv(jac(p))
    if dets is None:
        # materialise as an ndarray: w2 reshapes the result, which would fail
        # on the lazy `map` iterator this used to return under Python 3
        dets = lambda p: numpy.array([det(j) for j in jac(p)])
    w0 = lambda p: ones(len(p))
    w1 = lambda p: transpose(jac(p), (0, 2, 1))
    w2 = lambda p: invjac(p) * dets(p).reshape(-1, 1, 1)
    w3 = dets
    return [w0, w1, w2, w3]
def mapweights(map):
    """De Rham pullback weight functions derived from a geometric map object
    exposing jac, invjac and dets callables (e.g. Affine)."""
    jacobian, inverse_jacobian, determinants = map.jac, map.invjac, map.dets
    return derham3dweights(jacobian, inverse_jacobian, determinants)
class Pullback(object):
    """A callable object that returns the pullback H^*(f): M -> X of its
    argument f: N -> X, based on the (inverse) homeomorphism H: N -> M and a
    weight function w: N -> L(X, X), so that H^*(f)(p) = w(p) . f(H(p))."""
    def __init__(self, map, weights=None):
        self.map = map
        if weights is None:
            # default: identity scaling at every point
            weights = [lambda p: numpy.ones(len(p))]
        self.weights = weights
    def __call__(self, f):
        def pulled_back(p):
            return self.apply(p, f(self.map(p)))
        return pulled_back
    def apply(self, p, vals):
        """Apply the leading weight function to raw values of f."""
        if self.weights is None:
            return vals
        return applyweights(self.weights[0](p), vals)
    def next(self):
        """Pullback for the next space in the sequence, or None when exhausted."""
        if len(self.weights) > 1:
            return Pullback(self.map, self.weights[1:])
def mapbasedpullback(map, s):
    """Pullback functional for the s-th de Rham space, built from a map object
    (e.g. Affine); the remaining weights start at index s."""
    remaining_weights = mapweights(map)[s:]
    return Pullback(map.apply, remaining_weights)
class Affine(object):
    """The affine map p -> linear . p + offset, acting on rows of points."""
    def __init__(self, offset, linear):
        self.offset = offset  # translation vector, shape (d,)
        self.linear = linear  # matrix part, shape (d, d)
    def apply(self, p):
        """Map points (one per row) forward; always returns a 2-D array."""
        images = numpy.dot(p, self.linear.T) + self.offset
        return images.reshape(-1, len(self.offset))
    def applyinv(self, q):
        """Invert the map on the rows of q by solving the linear system."""
        from numpy.linalg import solve
        shifted = (q - self.offset.reshape(1, -1)).transpose()
        return solve(self.linear, shifted).transpose()
    def jac(self, p):
        """Constant Jacobian, repeated once per point."""
        return numpy.tile(self.linear, (len(p), 1, 1))
    def invjac(self, p):
        """Inverse of the constant Jacobian, repeated once per point."""
        from numpy.linalg import inv
        return numpy.tile(inv(self.linear), (len(p), 1, 1))
    def dets(self, p):
        """Constant Jacobian determinant, one entry per point."""
        from numpy.linalg import det
        return det(self.linear) * numpy.ones(len(p))
def buildaffine(pfrom, pto):
    """Construct the Affine map sending the points pfrom onto pto.

    The first point in each array is treated as the origin.  If fewer
    direction vectors than dimensions are supplied, the missing directions are
    filled in with orthogonal complements obtained via QR, so lower-dimensional
    facets can still be mapped.  A single point pair yields a pure translation
    (previously this case raised a NameError because M was never assigned).
    :param pfrom: (k, d) array of source points
    :param pto: (k, d) array of image points
    :return: an Affine instance
    """
    from numpy.linalg import qr, solve
    from numpy import dot
    # treat the first point as the origin in each case
    # Taking transposes means that the first axis is the x,y,z component of the points in the second axis.
    if len(pfrom) > 1:
        F = (pfrom[1:] - pfrom[0]).transpose()
        T = (pto[1:] - pto[0]).transpose()
        # we want to find M such that M . F = T
        # if not enough points have been supplied, add in some orthogonal ones
        fmissing = F.shape[0] - F.shape[1]
        if fmissing:
            F = numpy.hstack((F, numpy.zeros((F.shape[0], fmissing))))
            T = numpy.hstack((T, numpy.zeros((T.shape[0], fmissing))))
            FQ = qr(F)[0]
            TQ = qr(T)[0]
            F[:, -fmissing:] = FQ[:, -fmissing:]
            T[:, -fmissing:] = TQ[:, -fmissing:]
        M = solve(F.transpose(), T.transpose())
    else:
        # single point pair: no directions to match, so use a pure translation
        M = numpy.eye(len(pfrom[0]))
    offset = pto[0] - dot(pfrom[0], M)
    return Affine(offset, M.transpose())
3b87ca9ce9483f67ad4df1bfbd3273d616e44112 | 12,977 | py | Python | notebooks/17.0-BDP-eda-sep-data.py | zeou1/maggot_models | 4e1b518c2981ab1ca9607099c3813e8429d94ca4 | [
"BSD-3-Clause"
] | null | null | null | notebooks/17.0-BDP-eda-sep-data.py | zeou1/maggot_models | 4e1b518c2981ab1ca9607099c3813e8429d94ca4 | [
"BSD-3-Clause"
] | null | null | null | notebooks/17.0-BDP-eda-sep-data.py | zeou1/maggot_models | 4e1b518c2981ab1ca9607099c3813e8429d94ca4 | [
"BSD-3-Clause"
] | null | null | null | #%% Load data
import networkx as nx
import numpy as np
import pandas as pd
from graspy.plot import degreeplot, edgeplot, gridplot, heatmap
from mpl_toolkits.axes_grid1 import make_axes_locatable
from src.data import load_networkx
from src.utils import meta_to_array, savefig
#%%
# Load the normalized graph and align networkx / pandas node ids.
graph_type = "Gn"
graph = load_networkx(graph_type)
df_adj = nx.to_pandas_adjacency(graph)
adj = df_adj.values
classes = meta_to_array(graph, "Class")
print(np.unique(classes))
nx_ids = np.array(list(graph.nodes()), dtype=int)
df_ids = df_adj.index.values.astype(int)
df_adj.index = df_ids
df_adj.columns = df_ids
# NOTE(review): this equality check's result is discarded — presumably meant
# to be asserted or printed
np.array_equal(nx_ids, df_ids)
cell_ids = df_ids
#%% Map MW classes to the indices of cells belonging to them
unique_classes, inverse_classes = np.unique(classes, return_inverse=True)
class_ind_map = {}  # class name -> adjacency-matrix row/col indices
class_ids_map = {}  # class name -> cell ids
for i, class_name in enumerate(unique_classes):
    inds = np.where(inverse_classes == i)[0]
    ids = cell_ids[inds]
    class_ind_map[class_name] = inds
    class_ids_map[class_name] = ids
# bare expression: notebook-style echo of the mapping
class_ind_map
#%%
# Distribution of each cell's total input proportion within this graph.
# NOTE(review): plt and sns are not imported in this chunk — presumably
# imported in an earlier cell; confirm before running standalone.
plt.figure(figsize=(10, 5))
sns.distplot(adj.sum(axis=0), norm_hist=False)
plt.xlabel("Proportion of input w/in current graph")
plt.ylabel("Frequency")
plt.tight_layout()
savefig("proportion_win_graph", fmt="png")
#%%
# node = str(df_ids[class_ids_map["ORN mPNs"][0]])
# neighbor_graph = nx.ego_graph(graph, node)
# neighbor_names = meta_to_array(neighbor_graph, "Class")
# neighbor_nodes = list(neighbor_graph.nodes())
# labels = dict(zip(neighbor_nodes, neighbor_names))
# plt.figure(figsize=(20, 20))
# nx.draw_networkx(neighbor_graph, labels=labels)
# plt.show()
#%%
def proportional_search(adj, class_ind_map, or_classes, ids, thresh):
    """finds the cell ids of neurons who receive a certain proportion of their
    input from one of the cells in or_classes

    Parameters
    ----------
    adj : np.array
        adjacency matrix, assumed to be normalized so that columns sum to 1
    class_ind_map : dict
        keys are class names, values are arrays of indices describing where
        that class can be found in the adjacency matrix
    or_classes : list
        which classes to consider for the input thresholding. Neurons are
        selected which satisfy ANY of the per-class threshold criteria
    ids : np.array
        names of each cell
    thresh : list
        per-class input-proportion thresholds, aligned with or_classes
    """
    flagged = []
    for i, class_name in enumerate(or_classes):
        class_rows = adj[class_ind_map[class_name], :]
        # total input each cell receives from this class
        total_input = class_rows.sum(axis=0)
        hits = np.flatnonzero(total_input >= thresh[i])
        flagged.extend(ids[hits])
    # unique also sorts, matching np.unique on the accumulated list
    return np.unique(flagged)
# Predict LHN membership from projection-neuron input and score it against the
# annotated LHN / "LHN; CN" labels.
pn_types = ["ORN mPNs", "ORN uPNs", "tPNs", "vPNs"]
lhn_thresh = [0.05, 0.05, 0.05, 0.05]
pred_lhn_ids = proportional_search(adj, class_ind_map, pn_types, df_ids, lhn_thresh)
true_lhn_inds = np.concatenate((class_ind_map["LHN"], class_ind_map["LHN; CN"]))
true_lhn_ids = df_ids[true_lhn_inds]
print("LHN")
print("Recall:")
print(np.isin(true_lhn_ids, pred_lhn_ids).mean())  # how many of the og lhn i got
print("Precision:")
print(np.isin(pred_lhn_ids, true_lhn_ids).mean())  # this is how many of mine are in og
print(len(pred_lhn_ids))
# predicted LHNs that are not annotated as LHN (false positives)
my_wrong_lhn_ids = np.setdiff1d(pred_lhn_ids, true_lhn_ids)
#%%
def _sort_inds(graph, inner_labels, outer_labels, sort_nodes):
    """Return node indices ordered by (outer, inner) label group size,
    largest groups first, ties broken alphabetically, and optionally by
    per-node edge sum within a group."""
    frame = pd.DataFrame(
        {"inner_labels": inner_labels, "outer_labels": outer_labels}
    )
    # invert the frequencies so bigger groups sort first while the
    # alphabetical tie-break on the label columns is preserved
    frame["inner_counts"] = len(inner_labels) - _get_freq_vec(inner_labels)
    frame["outer_counts"] = len(outer_labels) - _get_freq_vec(outer_labels)
    # edge sums stand in for degree on weighted graphs; inverted likewise
    edgesums = graph.sum(axis=1) + graph.sum(axis=0)
    frame["node_edgesums"] = edgesums.max() - edgesums
    keys = ["outer_counts", "outer_labels", "inner_counts", "inner_labels"]
    if sort_nodes:
        keys.append("node_edgesums")
    # mergesort keeps the sort stable
    ordered = frame.sort_values(by=keys, kind="mergesort")
    return ordered.index.values
def _sort_graph(graph, inner_labels, outer_labels, sort_nodes):
    """Return ``graph`` with rows and columns permuted into sorted label order."""
    order = _sort_inds(graph, inner_labels, outer_labels, sort_nodes)
    # np.ix_ applies the same permutation to both axes in one indexing step
    return graph[np.ix_(order, order)]
def _get_freqs(inner_labels, outer_labels=None):
    """Per-group label frequencies and their cumulative boundaries.

    Assumes the labels are already sorted so each outer group occupies one
    contiguous span; within each span the inner labels are counted in order
    of first appearance.
    """
    # outer group sizes, in order of first appearance (not alphabetical)
    _, outer_freq = _unique_like(outer_labels)
    outer_freq_cumsum = np.hstack((0, outer_freq.cumsum()))
    # walk each outer span and count the inner labels it contains
    inner_freq = np.array([])
    for lo, hi in zip(outer_freq_cumsum[:-1], outer_freq_cumsum[1:]):
        _, span_freq = _unique_like(inner_labels[lo:hi])
        inner_freq = np.hstack([inner_freq, span_freq])
    inner_freq_cumsum = np.hstack((0, inner_freq.cumsum()))
    return inner_freq, inner_freq_cumsum, outer_freq, outer_freq_cumsum
def _get_freq_vec(vals):
# give each set of labels a vector corresponding to its frequency
_, inv, counts = np.unique(vals, return_counts=True, return_inverse=True)
count_vec = counts[inv]
return count_vec
def _unique_like(vals):
# gives output like
uniques, inds, counts = np.unique(vals, return_index=True, return_counts=True)
inds_sort = np.argsort(inds)
uniques = uniques[inds_sort]
counts = counts[inds_sort]
return uniques, counts
# assume that the graph has already been plotted in sorted form
def _plot_groups(
    ax, divider, graph, sorted_inds, inner_labels, outer_labels=None, fontsize=30
):
    """Overlay group separator lines and bracket labels on a plotted adjacency
    matrix that has already been drawn in sorted order on ``ax``.

    ``divider`` is an axes divider for ``ax`` used to attach the bracket axis.
    When ``outer_labels`` is None a single dummy outer group is used.
    """
    inner_labels = np.array(inner_labels)
    plot_outer = True
    # NOTE(review): plot_outer is set but never read below — confirm whether
    # outer brackets were meant to be drawn too.
    if outer_labels is None:
        outer_labels = np.ones_like(inner_labels)
        plot_outer = False
    # sorted_inds = _sort_inds(graph, inner_labels, outer_labels, False)
    # put the labels into the same order as the plotted matrix
    inner_labels = inner_labels[sorted_inds]
    outer_labels = outer_labels[sorted_inds]
    inner_freq, inner_freq_cumsum, outer_freq, outer_freq_cumsum = _get_freqs(
        inner_labels, outer_labels
    )
    inner_unique, _ = _unique_like(inner_labels)
    outer_unique, _ = _unique_like(outer_labels)
    # n_verts = graph.shape[0]
    axline_kws = dict(linestyle="dashed", lw=0.9, alpha=0.3, zorder=3, color="grey")
    # dashed vertical separators at each inner-group boundary
    for x in inner_freq_cumsum[1:-1]:
        ax.vlines(x, 0, graph.shape[0] + 1, **axline_kws)
        # ax.hlines(x, 0, graph.shape[1] + 1, **axline_kws)
    # add specific lines for the borders of the plot
    pad = 0.0001
    low = pad
    high = 1 - pad
    ax.plot((low, low), (low, high), transform=ax.transAxes, **axline_kws)
    ax.plot((low, high), (low, low), transform=ax.transAxes, **axline_kws)
    ax.plot((high, high), (low, high), transform=ax.transAxes, **axline_kws)
    ax.plot((low, high), (high, high), transform=ax.transAxes, **axline_kws)
    # generic bracket curve (mirrored tangent) reused for every group
    lx = np.linspace(-np.pi / 2.0 + 0.05, np.pi / 2.0 - 0.05, 500)
    tan = np.tan(lx)
    curve = np.hstack((tan[::-1], tan))
    # divider = make_axes_locatable(ax)
    # inner curve generation: bracket centers and half-widths per group
    inner_tick_loc = inner_freq.cumsum() - inner_freq / 2
    inner_tick_width = inner_freq / 2
    # outer curve generation
    outer_tick_loc = outer_freq.cumsum() - outer_freq / 2
    outer_tick_width = outer_freq / 2
    # attach a thin axis under the plot and draw the inner-group brackets on it
    ax_x = divider.new_vertical(size="5%", pad=0.0, pack_start=True)
    ax.figure.add_axes(ax_x)
    _plot_brackets(
        ax_x,
        np.tile(inner_unique, len(outer_unique)),
        inner_tick_loc,
        inner_tick_width,
        curve,
        "inner",
        "x",
        graph.shape[1],
        fontsize,
    )
    return ax
def _plot_brackets(
    ax, group_names, tick_loc, tick_width, curve, level, axis, max_size, fontsize
):
    """Draw a bracket curve plus its group name at each tick location along
    ``axis`` ('x' or 'y') of the auxiliary axes ``ax``.

    ``level`` is currently unused — presumably kept for the inner/outer
    distinction; confirm before removing.
    """
    # one bracket per group, stretched to the group's half-width
    for x0, width in zip(tick_loc, tick_width):
        x = np.linspace(x0 - width, x0 + width, 1000)
        if axis == "x":
            ax.plot(x, curve, c="k")
            ax.patch.set_alpha(0)
        elif axis == "y":
            ax.plot(curve, x, c="k")
            ax.patch.set_alpha(0)
    # hide everything except the brackets and labels
    ax.set_yticks([])
    ax.set_xticks([])
    ax.tick_params(axis=axis, which="both", length=0, pad=7)
    for direction in ["left", "right", "bottom", "top"]:
        ax.spines[direction].set_visible(False)
    if axis == "x":
        ax.set_xticks(tick_loc)
        ax.set_xticklabels(
            group_names,
            fontsize=fontsize,
            verticalalignment="center",
            horizontalalignment="right",
            rotation=90,
            rotation_mode="anchor",
        )
        # ax.xaxis.set_label_position("bottom")
        # ax.xaxis.tick_top()
        ax.xaxis.labelpad = 200
        ax.set_xlim(0, max_size)
        ax.tick_params(axis="x", which="major", pad=5 + fontsize / 4)
    elif axis == "y":
        ax.set_yticks(tick_loc)
        ax.set_yticklabels(group_names, fontsize=fontsize, verticalalignment="center")
        ax.set_ylim(0, max_size)
        # flip so the first group appears at the top, matching the matrix plot
        ax.invert_yaxis()
# For each PN type, scatter-plot its outgoing connections against all cells
# (sorted by class), with a marginal bar of summed input per target on top.
# proj_class = "ORN mPNs"
pn_types = ["ORN mPNs", "ORN uPNs", "tPNs", "vPNs"]
import matplotlib.pyplot as plt
import seaborn as sns
from src.utils import savefig
for proj_class in pn_types:
    sort_inds = _sort_inds(adj, classes, np.ones_like(classes), True)
    sort_adj = _sort_graph(adj, classes, np.ones_like(classes), True)
    sort_classes = classes[sort_inds]
    # keep only the rows (presynaptic cells) belonging to this PN type
    proj_inds = np.where(sort_classes == proj_class)[0]
    clipped_adj = sort_adj[proj_inds, :]
    plt.figure(figsize=(30, 10))
    xs, ys = np.meshgrid(
        range(1, clipped_adj.shape[1] + 1), range(1, clipped_adj.shape[0] + 1)
    )
    # scatter only the nonzero entries, sized by edge weight
    nonzero_inds = np.nonzero(clipped_adj.ravel())
    x = xs.ravel()[nonzero_inds]
    y = ys.ravel()[nonzero_inds]
    weights = clipped_adj.ravel()[nonzero_inds]
    ax = sns.scatterplot(x=x, y=y, size=weights, legend=False)
    plt.ylabel(proj_class)
    plt.title(proj_class, pad=100)
    divider = make_axes_locatable(ax)
    # marginal axis above: summed input from this PN type per target cell
    ax_top = divider.new_vertical(size="25%", pad=0.0, pack_start=False)
    ax.figure.add_axes(ax_top)
    sums = clipped_adj.sum(axis=0)
    ax_top.bar(range(1, clipped_adj.shape[1] + 1), sums, width=10)
    ax_top.set_xlim((0, clipped_adj.shape[1]))
    ax_top.axis("off")
    # dashed red line marks the 5% input threshold used for LHN prediction
    ax_top.hlines(0.05, 0, clipped_adj.shape[1] + 1, color="r", linestyle="--")
    ax = _plot_groups(
        ax, divider, clipped_adj, sort_inds, classes, outer_labels=None, fontsize=10
    )
    ax.set_xlim((0, clipped_adj.shape[1]))
    ax.set_ylim((0, clipped_adj.shape[0]))
    ax.axis("off")
    # savefig(proj_class + "_to_all_marginals")
# #%%
# my_classes = classes.copy()
# wrong_inds = np.isin(cell_ids, my_wrong_lhn_ids)
# my_classes[wrong_inds] = "LHN"
# for proj_class in pn_types:
# sort_inds = _sort_inds(adj, my_classes, np.ones_like(my_classes), True)
# sort_adj = _sort_graph(adj, my_classes, np.ones_like(my_classes), True)
# sort_classes = my_classes[sort_inds]
# proj_inds = np.where(sort_classes == proj_class)[0]
# clipped_adj = sort_adj[proj_inds, :]
# plt.figure(figsize=(30, 10))
# # pn_graph = df_adj.loc[class_ids_map["vPNs"], :].values
# xs, ys = np.meshgrid(
# range(1, clipped_adj.shape[1] + 1), range(1, clipped_adj.shape[0] + 1)
# )
# nonzero_inds = np.nonzero(clipped_adj.ravel())
# x = xs.ravel()[nonzero_inds]
# y = ys.ravel()[nonzero_inds]
# weights = clipped_adj.ravel()[nonzero_inds]
# ax = sns.scatterplot(x=x, y=y, size=weights, legend=False)
# plt.ylabel(proj_class)
# plt.title(proj_class, pad=100)
# divider = make_axes_locatable(ax)
# ax_top = divider.new_vertical(size="25%", pad=0.0, pack_start=False)
# ax.figure.add_axes(ax_top)
# sums = clipped_adj.sum(axis=0)
# # sums /= sums.max()
# # sums = sums[sort_inds]
# ax_top.bar(range(1, clipped_adj.shape[1] + 1), sums, width=10)
# ax_top.set_xlim((0, clipped_adj.shape[1]))
# ax_top.axis("off")
# ax_top.hlines(0.05, 0, clipped_adj.shape[1] + 1, color="r", linestyle="--")
# ax = _plot_groups(
# ax, divider, clipped_adj, sort_inds, my_classes, outer_labels=None, fontsize=10
# )
# ax.set_xlim((0, clipped_adj.shape[1]))
# ax.set_ylim((0, clipped_adj.shape[0]))
# ax.axis("off")
#%%
#%%
| 33.5323 | 89 | 0.672189 |
0bedb04690325914a8e98a31e0d0150042fc0a36 | 2,291 | py | Python | qingcloud/cli/iaas_client/actions/vxnet/create_vxnets.py | shiyuwang-qc/qingcloud-cli | ba89ea2fea1b1a52ac91ce5fe67a5868c860fb5b | [
"Apache-2.0"
] | 11 | 2015-05-27T19:52:36.000Z | 2021-04-15T09:07:39.000Z | qingcloud/cli/iaas_client/actions/vxnet/create_vxnets.py | zhangchi1992/qingcloud-cli | 163549d03e7e2c339a00ea6e7cb8e443881f82e4 | [
"Apache-2.0"
] | 7 | 2017-07-19T05:05:03.000Z | 2019-04-25T07:18:04.000Z | qingcloud/cli/iaas_client/actions/vxnet/create_vxnets.py | zhangchi1992/qingcloud-cli | 163549d03e7e2c339a00ea6e7cb8e443881f82e4 | [
"Apache-2.0"
] | 19 | 2016-03-15T07:31:47.000Z | 2021-07-26T09:31:33.000Z | # =========================================================================
# Copyright 2012-present Yunify, Inc.
# -------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from qingcloud.cli.iaas_client.actions.base import BaseAction
class CreateVxnetsAction(BaseAction):
    """CLI action that creates one or more vxnets via the CreateVxnets API."""

    action = 'CreateVxnets'
    command = 'create-vxnets'
    usage = '%(prog)s --count <count> --vxnet_name <vxnet_name> [options] [-f <conf_file>]'

    @classmethod
    def add_ext_arguments(cls, parser):
        """Register this command's options on ``parser``."""
        parser.add_argument(
            '-c', '--count', dest='count', action='store', type=int, default=1,
            help='the number of vxnets to create.')
        parser.add_argument(
            '-N', '--vxnet_name', dest='vxnet_name', action='store', type=str,
            default='', help='the short name of vxnet you want to create.')
        parser.add_argument(
            '-t', '--vxnet_type', dest='vxnet_type', action='store', type=int,
            default=1,
            help='the vxnet type. 0: unmanaged vxnet, 1: managed vxnet. Default 1.')
        parser.add_argument(
            '-m', '--mode', dest='mode', action='store', type=int, default=0,
            help='The vxnet mode. 0: gre+ovs, 1: vxlan+bridge. Default 0.')

    @classmethod
    def build_directive(cls, options):
        """Translate parsed options into the API request body.

        Returns None (after printing a message) when the required
        ``vxnet_name`` option is missing.
        """
        if not options.vxnet_name:
            print('[vxnet_name] should be specified.')
            return None
        return {
            'vxnet_name': options.vxnet_name,
            'vxnet_type': options.vxnet_type,
            'mode': options.mode,
            'count': options.count,
        }
3a20c0de51a3bc8dcfe315eb873fc1a2808429a2 | 4,614 | py | Python | rlkit/data_management/simple_replay_buffer.py | michaelwan11/hfr | a92c2f46f746cc70c91e7f879c7e2b7c475ba73f | [
"MIT"
] | 2 | 2021-12-29T07:37:15.000Z | 2022-01-19T23:33:33.000Z | rlkit/data_management/simple_replay_buffer.py | michaelwan11/hfr | a92c2f46f746cc70c91e7f879c7e2b7c475ba73f | [
"MIT"
] | null | null | null | rlkit/data_management/simple_replay_buffer.py | michaelwan11/hfr | a92c2f46f746cc70c91e7f879c7e2b7c475ba73f | [
"MIT"
] | 1 | 2021-12-29T07:37:18.000Z | 2021-12-29T07:37:18.000Z | import numpy as np
from rlkit.data_management.replay_buffer import ReplayBuffer
class SimpleReplayBuffer(ReplayBuffer):
def __init__(self, max_replay_buffer_size, observation_dim, action_dim):
self._observation_dim = observation_dim
self._action_dim = action_dim
self._max_replay_buffer_size = max_replay_buffer_size
self._observations = np.zeros((max_replay_buffer_size, observation_dim))
self._start_obs = np.zeros((max_replay_buffer_size, observation_dim))
self._start_obs_top = 0
# It's a bit memory inefficient to save the observations twice,
# but it makes the code *much* easier since you no longer have to
# worry about termination conditions.
self._next_obs = np.zeros((max_replay_buffer_size, observation_dim))
self._actions = np.zeros((max_replay_buffer_size, action_dim))
# Make everything a 2D np array to make it easier for other code to
# reason about the shape of the data
self._rewards = np.zeros((max_replay_buffer_size, 1))
self._sparse_rewards = np.zeros((max_replay_buffer_size, 1))
# self._terminals[i] = a terminal was received at time i
self._terminals = np.zeros((max_replay_buffer_size, 1), dtype="uint8")
self._env_infos = np.zeros((max_replay_buffer_size,), dtype="object")
self._num_start_obs = 0
self.clear()
def add_sample(
self,
observation,
action,
reward,
terminal,
next_observation,
env_info,
**kwargs
):
self._observations[self._top] = observation
self._actions[self._top] = action
self._rewards[self._top] = reward
self._terminals[self._top] = terminal
self._next_obs[self._top] = next_observation
self._sparse_rewards[self._top] = env_info.get("sparse_reward", 0)
# self._sparse_rewards[self._top] = reward
self._env_infos[self._top] = env_info
self._advance()
def add_start_obs(self, observation):
self._start_obs[self._start_obs_top] = observation
self._advance_start_obs()
def terminate_episode(self):
# store the episode beginning once the episode is over
# n.b. allows last episode to loop but whatever
self._episode_starts.append(self._cur_episode_start)
self._cur_episode_start = self._top
def size(self):
return self._size
def clear(self):
self._top = 0
self._size = 0
self._episode_starts = []
self._cur_episode_start = 0
self._num_start_obs = 0
def _advance(self):
self._top = (self._top + 1) % self._max_replay_buffer_size
if self._size < self._max_replay_buffer_size:
self._size += 1
def _advance_start_obs(self):
self._start_obs_top = (self._start_obs_top + 1) % self._max_replay_buffer_size
if self._num_start_obs < self._max_replay_buffer_size:
self._num_start_obs += 1
def sample_data(self, indices):
return dict(
observations=self._observations[indices],
actions=self._actions[indices],
rewards=self._rewards[indices],
terminals=self._terminals[indices],
next_observations=self._next_obs[indices],
env_infos=self._env_infos[indices],
sparse_rewards=self._sparse_rewards[indices],
)
def sample_start_obs(self, indices):
return dict(start_obs=self._start_obs[indices])
def random_batch(self, batch_size):
""" batch of unordered transitions """
indices = np.random.randint(0, self._size, batch_size)
return self.sample_data(indices)
def random_start_obs(self, batch_size):
indices = np.random.randint(0, self._num_start_obs, batch_size)
return self.sample_start_obs(indices)
def random_sequence(self, batch_size):
""" batch of trajectories """
# take random trajectories until we have enough
i = 0
indices = []
while len(indices) < batch_size:
# TODO hack to not deal with wrapping episodes, just don't take the last one
start = np.random.choice(self.episode_starts[:-1])
pos_idx = self._episode_starts.index(start)
indices += list(range(start, self._episode_starts[pos_idx + 1]))
i += 1
# cut off the last traj if needed to respect batch size
indices = indices[:batch_size]
return self.sample_data(indices)
def num_steps_can_sample(self):
return self._size
| 38.773109 | 88 | 0.657781 |
45c34c4554dbe6d10f99643b48b693751947beb5 | 214 | py | Python | Bible/test.py | LeonelF/Limnoria-Plugins | b8146831a61f62f1e5f1196b8c1915a65c608124 | [
"MIT"
] | null | null | null | Bible/test.py | LeonelF/Limnoria-Plugins | b8146831a61f62f1e5f1196b8c1915a65c608124 | [
"MIT"
] | null | null | null | Bible/test.py | LeonelF/Limnoria-Plugins | b8146831a61f62f1e5f1196b8c1915a65c608124 | [
"MIT"
] | null | null | null | ###
# Copyright (c) 2017, Weasel
# All rights reserved.
#
#
###
from supybot.test import *
class CoinTestCase(PluginTestCase):
plugins = ('Bible',)
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
| 13.375 | 56 | 0.682243 |
c9cddca8d183dff3cf847b2a59d6a52931514932 | 1,102 | py | Python | portfolio/urls.py | lucasLB7/LucasPortfiolio | 6579c0622f1e071d41b497561e4261605978ffc6 | [
"Unlicense"
] | null | null | null | portfolio/urls.py | lucasLB7/LucasPortfiolio | 6579c0622f1e071d41b497561e4261605978ffc6 | [
"Unlicense"
] | 4 | 2020-02-12T01:07:27.000Z | 2021-06-08T19:10:47.000Z | portfolio/urls.py | lucasLB7/LucasPortfiolio | 6579c0622f1e071d41b497561e4261605978ffc6 | [
"Unlicense"
] | null | null | null | """portfolio URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.conf.urls.static import static
from django.views.generic import TemplateView  # NOTE(review): unused in this module
from django.contrib.auth import views  # NOTE(review): unused in this module
from portfolio import settings
# Route the admin site; delegate everything else to the main app's URLconf.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url('^', include('portfolio_main.urls', namespace = "main")),
]
# Serve media files through Django only during development.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
c0adaf6ad907c832dbeefcb4a82bce2fd2f424cf | 39,068 | py | Python | .history/src/_fighter_20190422151622.py | vidalmatheus/MK-Project | 6646020c59367ba0424d73a5861e13bbc0daac1f | [
"MIT"
] | 1 | 2019-12-25T10:25:30.000Z | 2019-12-25T10:25:30.000Z | .history/src/_fighter_20190422151622.py | vidalmatheus/MK-Project | 6646020c59367ba0424d73a5861e13bbc0daac1f | [
"MIT"
] | 1 | 2019-12-25T10:27:15.000Z | 2019-12-25T10:27:15.000Z | .history/src/_fighter_20190422151622.py | vidalmatheus/MK-Project | 6646020c59367ba0424d73a5861e13bbc0daac1f | [
"MIT"
] | 1 | 2019-12-25T10:50:05.000Z | 2019-12-25T10:50:05.000Z |
from pygame_functions import *
import fightScene
import engine
import menu
class Fighter:
    """A playable fighter: per-character key bindings, animation frame counts,
    and named indices into the sprite-sheet list built in __init__."""

    # character roster and per-player key bindings (player 0, player 1)
    fighterNames = ["Sub-Zero", "Scorpion"]
    fightMoves = [["w", "s", "a", "d"], ["up", "down", "left", "right"]]
    combatMoves = [["j","n","k","m","l","u","f"],["1","4","2","5","3","0","6"]]
    # frame counts for each animation sheet
    danceLimit = 7
    walkLimit = 9
    jumpLimit = 3
    crouchLimit = 3
    punchLimit = [3, 11, 3, 5, 3]
    kickLimit = [7, 9, 7, 6, 3]
    hitLimit = [3, 3, 6, 2, 3, 14, 11, 10]
    blockLimit = 3
    specialLimit = [4,7]  # per-character special-move frame count
    victoryLimit = 3
    fatalityLimit = 20
    dizzyLimit = 7
    # indices into self.spriteList
    # moves
    dance = 0
    walk = 1
    jump = 2
    crouch = 3
    # punches
    Apunch = 4  # weak punch
    Bpunch = 5  # strong punch
    Cpunch = 6  # crouched weak punch
    Dpunch = 7  # crouched strong punch: uppercut
    # kicks
    Akick = 8  # weak kick
    Bkick = 9  # strong kick
    Ckick = 10  # crouched weak kick
    Dkick = 11  # crouched strong kick: sweep
    # hit reactions
    Ahit = 12  # hit by weak punch
    Bhit = 13  # hit by weak kick
    Chit = 14  # hit by strong punch
    Dhit = 15  # hit by crouched weak kick
    Ehit = 16  # hit by crouched weak punch
    Fhit = 17  # hit by strong kick or crouched strong punch (uppercut)
    Ghit = 18  # hit by crouched strong kick: sweep
    #Hhit = 19 # specialMove
    #fatalityHit = 20 # fatality hit
    # block
    Ablock = 19
    Bblock = 20
    # special move
    special = 21
    # fatality
    fatality = 24
def __init__(self, id, scenario):
self.fighterId = id
self.name = self.fighterNames[id]
self.move = self.fightMoves[id]
self.combat = self.combatMoves[id]
# Position
self.x = 150+id*500
if scenario == 1:
self.y = 350
elif scenario == 2:
self.y = 370
elif scenario == 3:
self.y = 400
elif scenario == 4:
self.y = 370
elif scenario == 5:
self.y = 380
elif scenario == 6:
self.y = 380
elif scenario == 7:
self.y = 360
elif scenario == 8:
self.y = 395
# Loading sprites
self.spriteList = []
# moves
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/dance.png', self.danceLimit))
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/walk.png', self.walkLimit))
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/jump.png', self.jumpLimit))
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/crouch.png', self.crouchLimit))
# Punch sprites
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Apunch.png', self.punchLimit[0]))
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Bpunch.png', self.punchLimit[1]))
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Cpunch.png', self.punchLimit[2]))
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Dpunch.png', self.punchLimit[3]))
# Kick sprites
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Akick.png', self.kickLimit[0]))
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Bkick.png', self.kickLimit[1]))
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Ckick.png', self.kickLimit[2]))
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Dkick.png', self.kickLimit[3]))
# Hit sprites
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Ahit.png', self.hitLimit[0])) # soco fraco
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Bhit.png', self.hitLimit[1])) # chute fraco
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Chit.png', self.hitLimit[2])) # soco forte
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Dhit.png', self.hitLimit[3])) # chute agrachado fraco
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Ehit.png', self.hitLimit[4])) # soco agachado fraco
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Fhit.png', self.hitLimit[5])) # chute forte e soco forte agachado (gancho)
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Ghit.png', self.hitLimit[6])) # chute agachado forte: banda
#self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Hhit.png', self.hitLimit[7])) # specialMove
# blocking sprites
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Ablock.png', self.blockLimit)) # defesa em pé
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Bblock.png', self.blockLimit)) # defesa agachado
# special sprite ----------------------------------
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Special.png', self.specialLimit[self.fighterId])) # Especial
self.act()
    def act(self):
        """Reset every animation/combat state flag to its initial value and
        place the fighter on screen via posFighter().

        NOTE(review): the plain locals below (combat, block, alive, fatality,
        the dizzy/fatality counters and the X_* life-bar values) are never
        stored on self, so they are lost when act() returns — confirm intent.
        """
        # Combat control
        combat = False
        block = False
        alive = False
        fatality = False
        dizzyCounter = 1
        dizzyCounterAux = 1
        fatalityCounter = 8
        fatalityCounterAux = 1
        # Control reflection var
        reflection = False
        # Dance vars
        self.dancing = True
        self.frame_dance = 0
        self.dance_step = 1
        # Walk vars
        self.frame_walk = 0
        self.walking = False  # status flag
        # Jump vars
        self.jumpHeight = 10  # jump height
        self.jumpCounter = 1  # counter tracking the rise and fall of the jump
        self.jumping = False  # status flag
        self.frame_jumping = 0
        self.jump_step = 1
        self.end_jump = True
        # Crouch vars
        self.crouching = False  # status flag
        self.frame_crouching = 0
        self.crouch_step = 1
        # Punch vars
        self.Apunching = False
        self.frame_Apunching = 0
        self.Apunch_step = 1
        self.end_Apunch = True
        self.Bpunching = False
        self.frame_Bpunching = 0
        self.Bpunch_step = 1
        self.end_Bpunch = True
        self.Cpunching = False
        self.frame_Cpunching = 0
        self.Cpunch_step = 1
        self.end_Cpunch = True
        self.Dpunching = False
        self.frame_Dpunching = 0
        self.Dpunch_step = 1
        self.end_Dpunch = True
        # Kick vars
        self.Akicking = False
        self.frame_Akicking = 0
        self.Akick_step = 1
        self.end_Akick = True
        self.Bkicking = False
        self.frame_Bkicking = 0
        self.Bkick_step = 1
        self.end_Bkick = True
        self.Ckicking = False
        self.frame_Ckicking = 0
        self.Ckick_step = 1
        self.end_Ckick = True
        self.Dkicking = False
        self.frame_Dkicking = 0
        self.Dkick_step = 1
        self.end_Dkick = True
        # Blocking vars
        self.Ablocking = False
        self.frame_Ablocking = 0
        self.Ablock_step = 1
        self.Bblocking = False
        self.frame_Bblocking = 0
        self.Bblock_step = 1
        # Special vars
        self.specialMove = False
        self.end_special = True
        self.frame_special = 0
        self.special_step = 1
        # Hit vars
        self.hit = False
        self.downHit = False
        self.hitName = ""
        self.Ahitting = False
        self.Bhitting = False
        self.Chitting = False
        self.Dhitting = False
        self.Ehitting = False
        self.Fhitting = False
        self.Ghitting = False
        self.Hhitting = False
        self.frame_Ahit = 0
        self.frame_Bhit = 0
        self.frame_Chit = 0
        self.frame_Dhit = 0
        self.frame_Ehit = 0
        self.frame_Fhit = 0
        self.frame_Ghit = 0
        self.frame_Hhit = 0
        self.hit_step = 1
        # Life Vars
        X_inicio = 37
        X_atual = X_inicio
        X_fim = X_inicio + 327
        self.posFighter()
def fight(self, time, nextFrame):
frame_step = 60
if not self.jumping:
# fightMoves = [ ["w", "s", "a", "d"], ["up", "down", "left", "right"] ] -> jump
if keyPressed(self.move[0]) and not self.hit:
self.jumping = True
self.end_jump = False
self.curr_sprite = self.spriteList[self.jump]
# fightMoves = [ ["w", "s", "a", "d"], ["up", "down", "left", "right"] ] -> right
elif keyPressed(self.move[3]) and not self.hit:
self.curr_sprite = self.spriteList[self.walk]
self.walking = self.setState()
self.setEndState()
self.x += 6
moveSprite(self.spriteList[self.walk], self.x, self.y, True)
self.setSprite(self.spriteList[self.walk])
changeSpriteImage(self.spriteList[self.walk], self.frame_walk)
if time > nextFrame:
# There are 9 frames of animation in each direction
self.frame_walk = (self.frame_walk+1) % self.walkLimit
# so the modulus 9 allows it to loop
nextFrame += frame_step
# fightMoves = [ ["w", "s", "a", "d"], ["up", "down", "left", "right"] ] -> left
elif keyPressed(self.move[2]) and not self.hit:# SEGUNDA MUDANÇA and not self.jumping:
self.curr_sprite = self.spriteList[self.walk]
self.walking = self.setState()
self.setEndState()
self.x -= 6
moveSprite(self.spriteList[self.walk], self.x, self.y, True)
self.setSprite(self.spriteList[self.walk])
changeSpriteImage(self.spriteList[self.walk], self.walkLimit-1-self.frame_walk)
if time > nextFrame:
# There are 9 frames of animation in each direction
self.frame_walk = (self.frame_walk+1) % self.walkLimit
nextFrame += frame_step
# fightMoves = [ ["w", "s", "a", "d"], ["up", "down", "left", "right"] ] -> crouch
elif (keyPressed(self.move[1]) and not self.hit) or self.downHit:
if self.end_Cpunch and self.end_Dpunch and self.end_Ckick and self.end_Dkick and not self.hit and not self.downHit and not self.Bblocking:
self.curr_sprite = self.spriteList[self.crouch]
self.crouching = self.setState()
self.setEndState()
if time > nextFrame:
if self.end_Cpunch and self.end_Dpunch and self.end_Ckick and self.end_Dkick and not self.hit and not self.downHit and not self.Bblocking:
moveSprite(self.spriteList[self.crouch], self.x, self.y, True)
self.setSprite(self.spriteList[self.crouch])
changeSpriteImage(self.spriteList[self.crouch], self.frame_crouching)
self.frame_crouching = (self.frame_crouching+self.crouch_step) % self.crouchLimit
if self.frame_crouching == self.crouchLimit - 2:
self.crouch_step = 0
print("frame_Bblocking =",self.frame_Bblocking)
# combatMoves = [["j","n","k","m","l","u","f"],["1","4","2","5","3","0","6"]] -> crouch and jab
if ( (keyPressed(self.combat[0]) and self.end_Cpunch) or (not self.end_Cpunch) ) and (not self.hit) and not self.downHit:
self.curr_sprite = self.spriteList[self.Cpunch]
self.Cpunching = self.setState()
self.setEndState()
self.end_Cpunch = False
if time > nextFrame:
moveSprite(self.spriteList[self.Cpunch], self.x, self.y, True)
self.setSprite(self.spriteList[self.Cpunch])
changeSpriteImage(self.spriteList[self.Cpunch], self.frame_Cpunching)
self.frame_Cpunching = (self.frame_Cpunching+self.Cpunch_step) % (self.punchLimit[2]+1)
if (self.frame_Cpunching == self.punchLimit[2]-1):
self.Cpunch_step = -1
if (self.frame_Cpunching == self.punchLimit[2]):
self.frame_Cpunching = 0
self.Cpunch_step = 1
self.end_Cpunch = True
# combatMoves = [["j","n","k","m","l","u","f"],["1","4","2","5","3","0","6"]] -> crouch and strong punch
elif ( (keyPressed(self.combat[1]) and self.end_Dpunch) or ( not self.end_Dpunch) ) and (not self.hit) and not self.downHit:
self.curr_sprite = self.spriteList[self.Dpunch]
self.Dpunching = self.setState()
self.setEndState()
self.end_Dpunch = False
if time > nextFrame:
moveSprite(self.spriteList[self.Dpunch], self.x, self.y, True)
self.setSprite(self.spriteList[self.Dpunch])
changeSpriteImage(self.spriteList[self.Dpunch], self.frame_Dpunching)
self.frame_Dpunching = (self.frame_Dpunching+self.Dpunch_step) % (self.punchLimit[3]+1)
if (self.frame_Dpunching == self.punchLimit[3]-1):
self.Dpunch_step = -1
if (self.frame_Dpunching == self.punchLimit[3]):
self.frame_Dpunching = 0
self.Dpunch_step = 1
self.end_Dpunch = True
# combatMoves = [["j","n","k","m","l","u","f"],["1","4","2","5","3","0","6"]] -> crouch and kick
elif ( (keyPressed(self.combat[2]) and self.end_Ckick) or ( not self.end_Ckick) ) and (not self.hit) and not self.downHit:
self.curr_sprite = self.spriteList[self.Ckick]
self.Ckicking = self.setState()
self.end_Ckick = self.setEndState()
if time > nextFrame:
moveSprite(self.spriteList[self.Ckick], self.x, self.y, True)
self.setSprite(self.spriteList[self.Ckick])
changeSpriteImage(self.spriteList[self.Ckick], self.frame_Ckicking)
self.frame_Ckicking = (self.frame_Ckicking+self.Ckick_step) % (self.kickLimit[2]+1)
if (self.frame_Ckicking == self.kickLimit[2]-1):
self.Ckick_step = -1
if (self.frame_Ckicking == self.kickLimit[2]):
self.frame_Ckicking = 0
self.Ckick_step = 1
self.end_Ckick = True
# combatMoves = [["j","n","k","m","l","u","f"],["1","4","2","5","3","0","6"]] -> Crouch and strong kick
elif ( (keyPressed(self.combat[3]) and self.end_Dkick) or ( not self.end_Dkick) ) and (not self.hit) and not self.downHit:
self.curr_sprite = self.spriteList[self.Dkick]
self.Dkicking = self.setState()
self.end_Dkick = self.setEndState()
if time > nextFrame:
moveSprite(self.spriteList[self.Dkick], self.x, self.y, True)
self.setSprite(self.spriteList[self.Dkick])
changeSpriteImage(self.spriteList[self.Dkick], self.frame_Dkicking)
self.frame_Dkicking = (self.frame_Dkicking+self.Dkick_step) % self.kickLimit[3]
if (self.frame_Dkicking == 0):
self.end_Dkick = True
# combatMoves = [["j","n","k","m","l","u","f"],["1","4","2","5","3","0","6"]] -> defesa agachado
elif keyPressed(self.combat[5]) and not self.hit and not self.downHit:
self.curr_sprite = self.spriteList[self.Bblock]
self.Bblocking = self.setState()
self.setEndState()
if time > nextFrame:
moveSprite(self.spriteList[self.Bblock], self.x, self.y, True)
self.setSprite(self.spriteList[self.Bblock])
changeSpriteImage(self.spriteList[self.Bblock], self.frame_Bblocking)
self.frame_Bblocking = (self.frame_Bblocking+self.Bblock_step) % self.blockLimit
if self.frame_Bblocking == self.blockLimit - 2:
self.Bblock_step = 0
#--------------Hit em agachado--------------------
#Hhit = 19 # specialMove
#BblockHit = 21 hit agachado
#Ehit = 16 # chute ou soco agachado fraco
elif self.downHit and self.hitName == "Ehit":
self.curr_sprite = self.spriteList[self.Ehit]
self.Ehitting = self.setState()
self.crouching = True
moveSprite(self.spriteList[self.Ehit], self.x, self.y, True)
self.setSprite(self.spriteList[self.Ehit])
changeSpriteImage(self.spriteList[self.Ehit], self.frame_Ehit)
if time > nextFrame:
self.frame_Ehit = (self.frame_Ehit+self.hit_step) % self.hitLimit[4]
if (self.frame_Ehit == self.hitLimit[4] - 1):
self.hit_step = -1
if (self.frame_Ehit == 0):
self.hit_step = 1
self.downHit = False
#BblockHit = 21 hit agachado
elif (self.downHit or self.hit) and self.hitName == "Bblocking":
self.curr_sprite = self.spriteList[self.Bblock]
self.Bblocking = self.setState()
if time > nextFrame:
moveSprite(self.spriteList[self.Bblock], self.x, self.y, True)
self.setSprite(self.spriteList[self.Bblock])
changeSpriteImage(self.spriteList[self.Bblock], self.frame_Bblocking)
self.frame_Bblocking = (self.frame_Bblocking+self.hit_step) % self.blockLimit
if self.frame_Bblocking == self.blockLimit - 1:
self.hit_step = -1
if self.frame_Bblocking == 1:
self.hit_step = 1
self.hit = False
self.downHit = False
nextFrame += 1*frame_step
# combatMoves = [["j","n","k","m","l","u","f"],["1","4","2","5","3","0","6"]] -> jab
elif ((keyPressed(self.combat[0]) and self.end_Apunch) or ( not self.end_Apunch) ) and (not self.hit) :
print("flag!")
self.curr_sprite = self.spriteList[self.Apunch]
self.Apunching = self.setState()
self.setEndState()
self.end_Apunch = False
if time > nextFrame:
moveSprite(self.spriteList[self.Apunch], self.x, self.y, True)
self.setSprite(self.spriteList[self.Apunch])
changeSpriteImage(self.spriteList[self.Apunch], self.frame_Apunching)
self.frame_Apunching = (self.frame_Apunching+self.Apunch_step) % (self.punchLimit[0]+1)
if (self.frame_Apunching == self.punchLimit[0]-1):
self.Apunch_step = -1
if (self.frame_Apunching == self.punchLimit[0]):
self.frame_Apunching = 0
self.Apunch_step = 1
self.end_Apunch = True
nextFrame += 1*frame_step
# combatMoves = [["j","n","k","m","l","u","f"],["1","4","2","5","3","0","6"]] -> strong punch
elif ( (keyPressed(self.combat[1]) and self.end_Bpunch) or ( not self.end_Bpunch) ) and (not self.hit) :
self.curr_sprite = self.spriteList[self.Bpunch]
self.Bpunching = self.setState()
self.end_Bpunch = self.setEndState()
if time > nextFrame:
moveSprite(self.spriteList[self.Bpunch], self.x, self.y, True)
self.setSprite(self.spriteList[self.Bpunch])
changeSpriteImage(self.spriteList[self.Bpunch], self.frame_Bpunching)
self.frame_Bpunching = (self.frame_Bpunching+self.Bpunch_step) % self.punchLimit[1]
if (self.frame_Bpunching == 0):
self.end_Bpunch = True
nextFrame += 1*frame_step
# combatMoves = [["j","n","k","m","l","u","f"],["1","4","2","5","3","0","6"]] -> kick
elif ( (keyPressed(self.combat[2]) and self.end_Akick) or ( not self.end_Akick) ) and (not self.hit):
self.curr_sprite = self.spriteList[self.Akick]
self.Akicking = self.setState()
self.end_Akick = self.setEndState()
if time > nextFrame:
moveSprite(self.spriteList[self.Akick], self.x, self.y, True)
self.setSprite(self.spriteList[self.Akick])
changeSpriteImage(self.spriteList[self.Akick], self.frame_Akicking)
self.frame_Akicking = (self.frame_Akicking+self.Akick_step) % (self.kickLimit[0]+1)
if (self.frame_Akicking == self.kickLimit[0]-1):
self.Akick_step = -1
if (self.frame_Akicking == self.kickLimit[0]):
self.frame_Akicking = 0
self.Akick_step = 1
self.end_Akick = True
nextFrame += 1*frame_step
# combatMoves = [["j","n","k","m","l","u","f"],["1","4","2","5","3","0","6"]] -> strong kick
elif ( (keyPressed(self.combat[3]) and self.end_Bkick) or ( not self.end_Bkick) ) and (not self.hit):
self.curr_sprite = self.spriteList[self.Bkick]
self.Bkicking = self.setState()
self.end_Bkick = self.setEndState()
if time > nextFrame:
moveSprite(self.spriteList[self.Bkick], self.x, self.y, True)
self.setSprite(self.spriteList[self.Bkick])
changeSpriteImage(self.spriteList[self.Bkick], self.frame_Bkicking)
self.frame_Bkicking = (self.frame_Bkicking+self.Bkick_step) % self.kickLimit[1]
if (self.frame_Bkicking == 0):
self.end_Bkick = True
nextFrame += 1*frame_step
# combatMoves = [["j","n","k","m","l","u","f"],["1","4","2","5","3","0","6"]] -> defesa em pé
elif keyPressed(self.combat[5]) and not self.hit:
self.curr_sprite = self.spriteList[self.Ablock]
self.Ablocking = self.setState()
self.setEndState()
if time > nextFrame:
moveSprite(self.spriteList[self.Ablock], self.x, self.y, True)
self.setSprite(self.spriteList[self.Ablock])
changeSpriteImage(self.spriteList[self.Ablock], self.frame_Ablocking)
self.frame_Ablocking = (self.frame_Ablocking+self.Ablock_step) % self.blockLimit
if self.frame_Ablocking == self.blockLimit - 2:
self.Ablock_step = 0
nextFrame += 1*frame_step
# combatMoves = [["j","n","k","m","l","u","f"],["1","4","2","5","3","0","6"]] -> special move
elif ((keyPressed(self.combat[4]) and self.end_special) or ( not self.end_special) ) and (not self.hit):
print("SpecialMove")
self.curr_sprite = self.spriteList[self.special]
self.specialMove = self.setState()
self.setEndState()
self.end_special = False
if time > nextFrame:
moveSprite(self.spriteList[self.special], self.x, self.y, True)
self.setSprite(self.spriteList[self.special])
changeSpriteImage(self.spriteList[self.special], self.frame_special)
self.frame_special = (self.frame_special+self.special_step) % (self.specialLimit[self.fighterId]+1)
if (self.frame_special == self.specialLimit[self.fighterId]-1):
self.special_step = -1
if (self.frame_special == self.specialLimit[self.fighterId]):
self.frame_special = 0
self.special_step = 1
self.end_special = True
nextFrame += 1*frame_step
# just dance :)
elif not self.hit:
# reset block (hold type)
self.frame_Ablocking = 0
self.Ablock_step = 1
self.frame_Bblocking = 0
self.Bblock_step = 1
# reset down (hold type)
self.frame_crouching = 0
self.crouch_step = 1
# reset other movement
self.frame_walk = self.frame_jumping = 0
# reset combat frames
self.frame_Apunching = self.frame_Bpunching = self.frame_Cpunching = self.frame_Dpunching = self.frame_Akicking = self.frame_Bkicking = self.frame_Ckicking = self.frame_Dkicking = 0
self.setEndState()
# start to dance
self.curr_sprite = self.spriteList[self.dance]
self.dancing = self.setState()
if time > nextFrame:
moveSprite(self.spriteList[self.dance], self.x, self.y, True)
self.setSprite(self.spriteList[self.dance])
changeSpriteImage(self.spriteList[self.dance], self.frame_dance)
self.frame_dance = (self.frame_dance+self.dance_step) % self.danceLimit
if (self.frame_dance == self.danceLimit-1):
self.dance_step = -1
if (self.frame_dance == 0):
self.dance_step = 1
nextFrame += frame_step
#--------------Hit em pé--------------------
#Hhit = 19 # specialMove
#BblockHit = 21 hit agachado
# Ouch! Punch on a face (Ahit = 12 # soco fraco)
elif self.hit and self.hitName == "Apunching":
self.curr_sprite = self.spriteList[self.Ahit]
self.Ahitting = self.setState()
moveSprite(self.spriteList[self.Ahit], self.x, self.y, True)
self.setSprite(self.spriteList[self.Ahit])
changeSpriteImage(self.spriteList[self.Ahit], self.frame_Ahit)
if time > nextFrame:
self.frame_Ahit = (self.frame_Ahit+self.hit_step) % self.hitLimit[0]
if (self.frame_Ahit == self.hitLimit[0] - 1):
self.hit_step = -1
if (self.frame_Ahit == 0):
self.hit_step = 1
self.hit = False
nextFrame += 1.2*frame_step
# Ouch! kick on a face (Bhit = 13 # chute fraco)
elif self.hit and self.hitName == "Akicking":
self.curr_sprite = self.spriteList[self.Bhit]
self.Bhitting = self.setState()
if self.fighterId == 0:
self.x -=0.8
else: self.x +=0.8
moveSprite(self.spriteList[self.Bhit], self.x, self.y, True)
self.setSprite(self.spriteList[self.Bhit])
changeSpriteImage(self.spriteList[self.Bhit], self.frame_Bhit)
if time > nextFrame:
# There are 8 frames of animation in each direction
self.frame_Bhit = (self.frame_Bhit+self.hit_step) % self.hitLimit[1]
if (self.frame_Bhit == self.hitLimit[1] - 1):
self.hit_step = -1
if (self.frame_Bhit == 0):
self.hit_step = 1
self.hit = False
nextFrame += 1.2*frame_step
# Ouch! combo punch (Chit = 14 # soco forte)
elif self.hit and self.hitName == "Bpunching":
self.curr_sprite = self.spriteList[self.Chit]
self.Chitting = self.setState()
if self.fighterId == 0:
self.x -=2
else: self.x +=2
moveSprite(self.spriteList[self.Chit], self.x, self.y, True)
self.setSprite(self.spriteList[self.Chit])
changeSpriteImage(self.spriteList[self.Chit], self.frame_Chit)
if time > nextFrame:
self.frame_Chit = (self.frame_Chit+self.hit_step) % self.hitLimit[2]
if (self.frame_Chit == self.hitLimit[2] - 1):
self.hit_step = -1
if (self.frame_Chit == 0):
self.hit_step = 1
self.hit = False
nextFrame += 1.2*frame_step
#Dhit = 15 # soco agrachado fraco
elif self.hit and self.hitName == "Cpunching":
self.curr_sprite = self.spriteList[self.Dhit]
self.Dhitting = self.setState()
moveSprite(self.spriteList[self.Dhit], self.x, self.y, True)
self.setSprite(self.spriteList[self.Dhit])
changeSpriteImage(self.spriteList[self.Dhit], self.frame_Dhit)
if time > nextFrame:
self.frame_Dhit = (self.frame_Dhit+self.hit_step) % self.hitLimit[3]
if (self.frame_Dhit == self.hitLimit[3] - 1):
self.hit_step = -1
if (self.frame_Dhit == 0):
self.hit_step = 1
self.hit = False
nextFrame += 1.2*frame_step
#Fhit = 17 # chute forte e soco forte agachado (gancho)
elif self.hit and self.hitName == "Bkicking":
self.curr_sprite = self.spriteList[self.Fhit]
self.Fhitting = self.setState()
if self.frame_Fhit <= 6:
if self.fighterId == 0:
self.x -=5
else: self.x +=5
moveSprite(self.spriteList[self.Fhit], self.x, self.y, True)
self.setSprite(self.spriteList[self.Fhit])
changeSpriteImage(self.spriteList[self.Fhit], self.frame_Fhit)
if time > nextFrame:
self.frame_Fhit = (self.frame_Fhit+self.hit_step) % self.hitLimit[5]
if (self.frame_Fhit == self.hitLimit[5] - 1):
self.hit = False
nextFrame += 1.2*frame_step
#Ghit = 18 # chute agachado forte: banda
elif self.hit and self.hitName == "Dkicking":
self.curr_sprite = self.spriteList[self.Ghit]
self.Ghitting = self.setState()
moveSprite(self.spriteList[self.Ghit], self.x, self.y, True)
self.setSprite(self.spriteList[self.Ghit])
changeSpriteImage(self.spriteList[self.Ghit], self.frame_Ghit)
if time > nextFrame:
self.frame_Ghit = (self.frame_Ghit+self.hit_step) % self.hitLimit[6]
if (self.frame_Ghit == self.hitLimit[6] - 1):
self.hit = False
nextFrame += 1.2*frame_step
#blockHit! Defesa em pé.
elif self.hit and self.hitName == "Ablocking":
self.curr_sprite = self.spriteList[self.Ablock]
self.Ablocking = self.setState()
if time > nextFrame:
moveSprite(self.spriteList[self.Ablock], self.x, self.y, True)
self.setSprite(self.spriteList[self.Ablock])
changeSpriteImage(self.spriteList[self.Ablock], self.frame_Ablocking)
self.frame_Ablocking = (self.frame_Ablocking+self.hit_step) % self.blockLimit
if self.frame_Ablocking == self.blockLimit - 1:
self.hit_step = -1
if self.frame_Ablocking == 1:
self.hit_step = 1
self.hit = False
nextFrame += 1*frame_step
else:
# fightMoves = [ ["w", "s", "a", "d"], ["up", "down", "left", "right"] ] -> jump
if time > nextFrame:
if keyPressed(self.move[2]):
self.x -= 15
if keyPressed(self.move[3]):
self.x += 15
moveSprite(self.spriteList[self.jump], self.x, self.y, True)
self.setSprite(self.spriteList[self.jump])
self.y -= (self.jumpHeight-self.jumpCounter)*7
changeSpriteImage(self.spriteList[self.jump], self.frame_jumping)
if (self.jumpCounter < self.jumpHeight -1 or self.jumpCounter > self.jumpHeight +1): # subindo ou descendo
self.frame_jumping = 1
if (self.jumpHeight - 1 <= self.jumpCounter <= self.jumpHeight + 1): # quase parado
self.frame_jumping = 2
if (self.jumpCounter == 2*self.jumpHeight-1):
self.frame_jumping = 0
self.jumpCounter = -1
if clock() > nextFrame:
self.setSprite(self.spriteList[self.jump])
changeSpriteImage(self.spriteList[self.jump], self.frame_jumping)
moveSprite(self.spriteList[self.jump], self.x, self.y, True)
self.end_jump = self.setState()# MUDANÇA
self.jumping = self.setEndState() #MUDANÇA
self.jumpCounter += 2
nextFrame += 1*frame_step
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
tick(120)
return nextFrame
    def getX(self):
        """Return the fighter's current horizontal screen position."""
        return self.x
    def getY(self):
        """Return the fighter's current vertical screen position."""
        return self.y
    def setX(self,X):
        """Move the fighter to horizontal position X and redraw the active sprite there."""
        self.x = X
        moveSprite(self.curr_sprite,self.x,self.y,True)
    def setY(self,Y):
        """Move the fighter to vertical position Y and redraw the active sprite there."""
        self.y = Y
        moveSprite(self.curr_sprite,self.x,self.y,True)
    # --- read-only state predicates ------------------------------------
    # One accessor per animation/combat flag.  The flags themselves are
    # cleared in bulk by setState() and set individually by the update loop
    # (e.g. `self.Apunching = self.setState()`).
    def isWalking(self):
        return self.walking
    def isCrouching(self):
        return self.crouching
    def isDancing(self):
        return self.dancing
    def isApunching(self):
        return self.Apunching
    def isBpunching(self):
        return self.Bpunching
    def isCpunching(self):
        return self.Cpunching
    def isDpunching(self):
        return self.Dpunching
    def isAkicking(self):
        return self.Akicking
    def isBkicking(self):
        return self.Bkicking
    def isCkicking(self):
        return self.Ckicking
    def isDkicking(self):
        return self.Dkicking
    def isAblocking(self):
        return self.Ablocking
    def isBblocking(self):
        return self.Bblocking
    # True while this fighter is playing a "got hit" animation
    # (set by takeHit(), cleared when the hit animation finishes).
    def isHit(self):
        return self.hit
def killPlayer(self):
for i in range(0,len(self.spriteList)):
killSprite(self.spriteList[i])
    def currentSprite(self):
        """Return the sprite currently being displayed for this fighter."""
        return self.curr_sprite
def takeHit(self,by):
self.hit = True
self.hitName = by
def takeDownHit(self,by):
self.downHit = True
print("flag")
self.hitName = by
def stopHit(self):
self.hit = False
self.hitName = ""
def setState(self):
# moves
self.walking = False
self.dancing = False
self.jumping = False
self.crouching = False
# punches
self.Apunching = False
self.Bpunching = False
self.Cpunching = False
self.Dpunching = False
# kicks
self.Akicking = False
self.Bkicking = False
self.Ckicking = False
self.Dkicking = False
# punch hits
self.Ahitting = False
self.Bhitting = False
self.Chitting = False
self.Dhitting = False
self.Ehitting = False
self.Fhitting = False
self.Ghitting = False
self.Hhitting = False
# blocks
self.Ablocking = False
self.Bblocking = False
# special move
self.specialMove = False
# fatality
self.fatality = False
# actual states
return True
def setEndState(self):
self.end_jump = True
self.end_Apunch = True
self.end_Bpunch = True
self.end_Cpunch = True
self.end_Dpunch = True
self.end_Akick = True
self.end_Bkick = True
self.end_Ckick = True
self.end_Dkick = True
self.end_special = True
return False
def setSprite(self,sprite):
for i in range(0,len(self.spriteList)):
if (not sprite == self.spriteList[i]):
hideSprite(self.spriteList[i])
showSprite(sprite)
def posFighter(self):
for i in range(0,len(self.spriteList)):
moveSprite(self.spriteList[i], self.x, self.y, True) | 47.013237 | 197 | 0.511595 |
4f8885b8ba9f087163547475db7c935c339cb0ec | 17,721 | py | Python | tests/integration-test/test_cis_splice_effects_identify.py | davidaknowles/regtools | 08042ef017a9606d40618c641044fd1c61982203 | [
"MIT"
] | null | null | null | tests/integration-test/test_cis_splice_effects_identify.py | davidaknowles/regtools | 08042ef017a9606d40618c641044fd1c61982203 | [
"MIT"
] | null | null | null | tests/integration-test/test_cis_splice_effects_identify.py | davidaknowles/regtools | 08042ef017a9606d40618c641044fd1c61982203 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
'''
test_cis_splice_effects_identify.py -- Integration test for `regtools cis-splice-effects identify`
Copyright (c) 2015, The Griffith Lab
Author: Avinash Ramu <aramu@genome.wustl.edu>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
'''
from integrationtest import IntegrationTest, main
import unittest
class TestCisSpliceEffectsIdentify(IntegrationTest, unittest.TestCase):
    """Integration tests for `regtools cis-splice-effects identify`.

    Each test shells out to the regtools binary (via IntegrationTest.execute),
    checks the return code and, where baseline files exist, compares the
    produced outputs against them.

    Fixes over the original: the Python-2 `print "..."` statements (a
    SyntaxError under Python 3) were removed, duplicate/dead local
    assignments were dropped, empty-string placeholder arguments are no
    longer injected into the command, and every assertion now carries `err`
    as its failure message.
    """

    def _identify_inputs(self):
        """Return the (variants VCF, BAM, reference FASTA, GTF) paths shared by most tests."""
        return (self.inputFiles("vcf/test1.vcf")[0],
                self.inputFiles("bam/test_hcc1395.2.bam")[0],
                self.inputFiles("fa/test_chr22.fa")[0],
                self.inputFiles("gtf/test_ensemble_chr22.2.gtf")[0])

    def _output_files(self):
        """Return temp paths for the annotated-junctions, annotated-variants and junctions outputs."""
        return (self.tempFile("observed-cse-identify.out"),
                self.tempFile("observed-cse-identify-variants.out"),
                self.tempFile("observed-cse-identify-junctions.out"))

    def test_default_stranded(self):
        """Default options with RF strandedness (-s 1); outputs must match stranded baselines."""
        variants, bam1, fasta, gtf = self._identify_inputs()
        output_annotatedjunctions, output_annotatedvariants, output_junctions = self._output_files()
        expected_annotatedjunctions = self.inputFiles(
            "cis-splice-effects-identify/expected-cis-splice-effects-identify-default-stranded-annotatedjunctions.out")[0]
        expected_annotatedvariants = self.inputFiles(
            "cis-splice-effects-identify/expected-cis-splice-effects-identify-default-stranded-annotatedvariants.out")[0]
        expected_junctions = self.inputFiles(
            "cis-splice-effects-identify/expected-cis-splice-effects-identify-default-stranded-junctions.out")[0]
        params = ["cis-splice-effects", "identify", "-s 1",
                  "-o ", output_annotatedjunctions,
                  "-v ", output_annotatedvariants,
                  "-j ", output_junctions,
                  variants, bam1, fasta, gtf]
        rv, err = self.execute(params)
        self.assertEqual(rv, 0, err)
        self.assertFilesEqual(expected_annotatedjunctions, output_annotatedjunctions, err)
        self.assertFilesEqual(expected_annotatedvariants, output_annotatedvariants, err)
        self.assertFilesEqual(expected_junctions, output_junctions, err)

    def test_default(self):
        """Default options, unstranded (-s 0); outputs must match unstranded baselines."""
        variants, bam1, fasta, gtf = self._identify_inputs()
        output_annotatedjunctions, output_annotatedvariants, output_junctions = self._output_files()
        expected_annotatedjunctions = self.inputFiles(
            "cis-splice-effects-identify/expected-cis-splice-effects-identify-default-annotatedjunctions.out")[0]
        expected_annotatedvariants = self.inputFiles(
            "cis-splice-effects-identify/expected-cis-splice-effects-identify-default-annotatedvariants.out")[0]
        expected_junctions = self.inputFiles(
            "cis-splice-effects-identify/expected-cis-splice-effects-identify-default-junctions.out")[0]
        params = ["cis-splice-effects", "identify", "-s 0",
                  "-o ", output_annotatedjunctions,
                  "-v ", output_annotatedvariants,
                  "-j ", output_junctions,
                  variants, bam1, fasta, gtf]
        rv, err = self.execute(params)
        self.assertEqual(rv, 0, err)
        self.assertFilesEqual(expected_annotatedjunctions, output_annotatedjunctions, err)
        self.assertFilesEqual(expected_annotatedvariants, output_annotatedvariants, err)
        self.assertFilesEqual(expected_junctions, output_junctions, err)

    def test_help(self):
        """-h prints usage and exits successfully."""
        rv, err = self.execute(["cis-splice-effects", "identify", "-h "])
        self.assertEqual(rv, 0, err)

    def test_nobam(self):
        """Omitting the BAM positional argument is an error (exit code 1)."""
        variants, _, fasta, gtf = self._identify_inputs()
        output_file = self.tempFile("observed-cse-identify.out")
        params = ["cis-splice-effects", "identify",
                  "-o ", output_file, variants, fasta, gtf]
        rv, err = self.execute(params)
        self.assertEqual(rv, 1, err)

    def test_exonic_intronic_distance(self):
        """-e/-i window sizes plus -S (keep single-exon transcripts) are accepted."""
        variants, bam1, fasta, gtf = self._identify_inputs()
        output_annotatedjunctions, output_annotatedvariants, output_junctions = self._output_files()
        params = ["cis-splice-effects", "identify",
                  "-e 6", "-i 6", "-S",
                  "-s 0",
                  "-o ", output_annotatedjunctions,
                  "-v ", output_annotatedvariants,
                  "-j ", output_junctions,
                  variants, bam1, fasta, gtf]
        rv, err = self.execute(params)
        self.assertEqual(rv, 0, err)

    def test_allexonic(self):
        """-E considers all exonic variants."""
        variants, bam1, fasta, gtf = self._identify_inputs()
        output_annotatedjunctions, output_annotatedvariants, output_junctions = self._output_files()
        params = ["cis-splice-effects", "identify",
                  "-E",
                  "-s 0",
                  "-o ", output_annotatedjunctions,
                  "-v ", output_annotatedvariants,
                  "-j ", output_junctions,
                  variants, bam1, fasta, gtf]
        rv, err = self.execute(params)
        self.assertEqual(rv, 0, err)

    def test_allintronic(self):
        """-I considers all intronic variants."""
        variants, bam1, fasta, gtf = self._identify_inputs()
        output_annotatedjunctions, output_annotatedvariants, output_junctions = self._output_files()
        params = ["cis-splice-effects", "identify",
                  "-I",
                  "-s 0",
                  "-o ", output_annotatedjunctions,
                  "-v ", output_annotatedvariants,
                  "-j ", output_junctions,
                  variants, bam1, fasta, gtf]
        rv, err = self.execute(params)
        self.assertEqual(rv, 0, err)

    def test_allexonic_someintronic(self):
        """-E combined with a bounded intronic window (-i 6)."""
        variants, bam1, fasta, gtf = self._identify_inputs()
        output_annotatedjunctions, output_annotatedvariants, output_junctions = self._output_files()
        params = ["cis-splice-effects", "identify",
                  "-E", "-i 6",
                  "-s 0",
                  "-o ", output_annotatedjunctions,
                  "-v ", output_annotatedvariants,
                  "-j ", output_junctions,
                  variants, bam1, fasta, gtf]
        rv, err = self.execute(params)
        self.assertEqual(rv, 0, err)

    def test_allintronic_someexonic(self):
        """-I combined with a bounded exonic window (-e 6)."""
        variants, bam1, fasta, gtf = self._identify_inputs()
        output_annotatedjunctions, output_annotatedvariants, output_junctions = self._output_files()
        params = ["cis-splice-effects", "identify",
                  "-e 6", "-I",
                  "-s 0",
                  "-o ", output_annotatedjunctions,
                  "-v ", output_annotatedvariants,
                  "-j ", output_junctions,
                  variants, bam1, fasta, gtf]
        rv, err = self.execute(params)
        self.assertEqual(rv, 0, err)

    def test_anchor_stranded(self):
        """-a (minimum anchor length) both defaulted and explicit; command must succeed."""
        variants, bam1, fasta, gtf = self._identify_inputs()
        output_annotatedjunctions, output_annotatedvariants, output_junctions = self._output_files()
        for anchor in ([], ["-a 30"]):
            params = (["cis-splice-effects", "identify"] + anchor +
                      ["-s 0",
                       "-o ", output_annotatedjunctions,
                       "-v ", output_annotatedvariants,
                       "-j ", output_junctions,
                       variants, bam1, fasta, gtf])
            rv, err = self.execute(params)
            self.assertEqual(rv, 0, err)

    # NOTE(review): test_anchor duplicates test_anchor_stranded verbatim in the
    # original; both are kept so the test suite's public surface is unchanged.
    def test_anchor(self):
        """-a (minimum anchor length) both defaulted and explicit; command must succeed."""
        variants, bam1, fasta, gtf = self._identify_inputs()
        output_annotatedjunctions, output_annotatedvariants, output_junctions = self._output_files()
        for anchor in ([], ["-a 30"]):
            params = (["cis-splice-effects", "identify"] + anchor +
                      ["-s 0",
                       "-o ", output_annotatedjunctions,
                       "-v ", output_annotatedvariants,
                       "-j ", output_junctions,
                       variants, bam1, fasta, gtf])
            rv, err = self.execute(params)
            self.assertEqual(rv, 0, err)

    def test_intron_size(self):
        """-m/-M bound the intron size considered for junction extraction."""
        variants, bam1, fasta, gtf = self._identify_inputs()
        params = ["cis-splice-effects", "identify",
                  "-m", "8039", "-M", "8039",
                  "-s 0",
                  variants, bam1, fasta, gtf]
        rv, err = self.execute(params)
        self.assertEqual(rv, 0, err)

    def test_missing_bam(self):
        """A non-existent BAM path makes the command fail (exit code 1)."""
        variants, _, fasta, gtf = self._identify_inputs()
        bam1 = "does_not_exist.bam"
        params = ["cis-splice-effects", "identify",
                  "-s 0",
                  variants, bam1, fasta, gtf]
        rv, err = self.execute(params)
        self.assertEqual(rv, 1, err)

    def test_no_gtf(self):
        """Omitting the GTF positional argument is an error (exit code 1)."""
        variants, bam1, fasta, _ = self._identify_inputs()
        params = ["cis-splice-effects", "identify",
                  "-s 0",
                  variants, bam1, fasta]
        rv, err = self.execute(params)
        self.assertEqual(rv, 1, err)

    def test_window_size(self):
        """-w sets the variant window size; command must succeed."""
        variants, bam1, fasta, gtf = self._identify_inputs()
        params = ["cis-splice-effects", "identify",
                  "-s 0",
                  "-w 5",
                  variants, bam1, fasta, gtf]
        rv, err = self.execute(params)
        self.assertEqual(rv, 0, err)
# Allow running this test module directly; delegates to integrationtest.main.
if __name__ == "__main__":
    main()
| 50.34375 | 164 | 0.635404 |
7f88cf59726e2d42283047c4c36db84794a31356 | 2,334 | py | Python | setup.py | wesinator/synapse | 05bdf27346f259159251c2a36d88ac87f8c09ed2 | [
"Apache-2.0"
] | null | null | null | setup.py | wesinator/synapse | 05bdf27346f259159251c2a36d88ac87f8c09ed2 | [
"Apache-2.0"
] | null | null | null | setup.py | wesinator/synapse | 05bdf27346f259159251c2a36d88ac87f8c09ed2 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import os
import sys
from setuptools import setup, find_packages
from setuptools.command.install import install
VERSION = '2.7.1'
class VerifyVersionCommand(install):
    """Custom `setup.py verify` command.

    Fails the build (via sys.exit) when the CircleCI git tag, read from the
    CIRCLE_TAG environment variable, does not match VERSION.
    """
    description = 'verify that the git tag matches our version'

    def run(self):
        tag = os.getenv('CIRCLE_TAG', '')
        # Strip a single leading 'v' (tags look like 'v2.7.1').  The original
        # used tag.lstrip('v'), which strips *every* leading 'v' character,
        # not just one prefix.
        if tag.startswith('v'):
            tag = tag[1:]
        if tag != VERSION:
            info = f"Git tag: {tag} does not match the version of this app: {VERSION}"
            sys.exit(info)
setup(
    name='synapse',
    version=VERSION,
    description='Synapse Intelligence Analysis Framework',
    author='The Vertex Project LLC',
    author_email='synapse@vertex.link',
    url='https://github.com/vertexproject/synapse',
    license='Apache License 2.0',
    packages=find_packages(exclude=['scripts',
                                    ]),
    include_package_data=True,
    # Runtime dependencies, pinned to compatible version ranges.
    install_requires=[
        'pyOpenSSL>=16.2.0,<20.0.0',
        'msgpack>=0.6.1,<0.7.0',
        'xxhash>=1.0.1,<2.0.0',
        'lmdb>=0.98,<1.0.0',
        'tornado>=6.0.3,<7.0.0',
        'regex>=2020.5.14',
        'PyYAML>=5.1,<6.0',
        'aiohttp>=3.5.4,<4.0',
        'prompt-toolkit>=3.0.4,<3.1.0',
        'lark-parser>=0.9.0,<0.10.0',
        'Pygments>=2.6.0,<2.7.0',
        'packaging>=20.0,<21.0',
        'fastjsonschema>=2.14.3,<2.15',
    ],
    # Optional dependency sets: `pip install synapse[docs]` / `synapse[dev]`.
    extras_require={
        'docs': [
            'sphinx>=1.8.2,<2.0.0',
            'jupyter>=1.0.0,<2.0.0',
            'hide-code>=0.5.2,<0.5.3',
            'nbstripout>=0.3.3,<1.0.0',
            'sphinx-rtd-theme>=0.4.2,<1.0.0',
        ],
        'dev': [
            'pytest>=5.0.0,<6.0.0',
            'autopep8>=1.5.3,<2.0.0',
            'pytest-cov>=2.9.0,<3.0.0',
            'pycodestyle>=2.6.0,<3.0.0',
            'bump2version>=1.0.0,<1.1.0',
            'pytest-xdist>=1.32.0,<2.0.0',
        ],
    },
    classifiers=[
        'Development Status :: 4 - Beta',
        'License :: OSI Approved :: Apache Software License',
        'Topic :: System :: Clustering',
        'Topic :: System :: Distributed Computing',
        'Topic :: System :: Software Distribution',
        'Programming Language :: Python :: 3.7',
    ],
    # `python setup.py verify` — CI guard that the git tag matches VERSION.
    cmdclass={
        'verify': VerifyVersionCommand,
    },
)
| 27.785714 | 86 | 0.525278 |
4216795d37b46daf343784139c605f6d8f0c455f | 7,189 | py | Python | ldtab_demo.py | ontodev/wiring.py | f9986f59badff1d374f656f182f92b063b8d19be | [
"BSD-3-Clause"
] | null | null | null | ldtab_demo.py | ontodev/wiring.py | f9986f59badff1d374f656f182f92b063b8d19be | [
"BSD-3-Clause"
] | null | null | null | ldtab_demo.py | ontodev/wiring.py | f9986f59badff1d374f656f182f92b063b8d19be | [
"BSD-3-Clause"
] | null | null | null | import wiring_rs
import sqlite3
import sys
def dict_factory(cursor, row):
    """sqlite3 row factory mapping each row to {column_name: value}.

    Assign to `connection.row_factory` so queries yield dicts instead of
    tuples.
    """
    return {column[0]: row[index] for index, column in enumerate(cursor.description)}
def get_types(connection, subject):
cur = connection.cursor()
query = (
f"SELECT * FROM statement WHERE "
f"subject='{subject}' AND "
f"predicate='rdf:type'"
)
types = set()
for row in cur.execute(query):
types.add(row["object"])
return types
def get_labels(connection, subject):
cur = connection.cursor()
query = (
f"SELECT * FROM statement WHERE "
f"subject='{subject}' AND "
f"predicate='rdfs:label'"
)
label = set()
for row in cur.execute(query):
label.add(row["object"])
return label
def get_types_of_signature(connection, ofn):
type_map = {}
signature = wiring_rs.get_signature(ofn)
for s in signature:
types = get_types(connection, s)
if types:
type_map[s] = types
return type_map
def get_labels_of_signature(connection, ofn):
label_map = {}
signature = wiring_rs.get_signature(ofn)
for s in signature:
labels = get_labels(connection, s)
if labels:
# just use one label (we assume labels are unique)
label_map[s] = labels.pop()
return label_map
def get_statements(connection, table, subject):
connection.row_factory = dict_factory
cur = connection.cursor()
query = f"SELECT * FROM {table} WHERE subject='{subject}'"
return cur.execute(query)
def object2rdfa(connection, table, json):
# 1. convert json to OFN (wiring.rs)
ofn = wiring_rs.object_2_ofn(json)
# 2. get types from database (wiring.py)
types = get_types_of_signature(connection, ofn)
# 3. get labels from database (wiring.py)
labels = get_labels_of_signature(connection, ofn)
# 4. typing (wiring.rs)
typed = wiring_rs.ofn_typing(ofn, types)
# NO labelling
# 5. labelling (wiring.rs)
# labeled = wiring_rs.ofn_labeling(typed, labels)
# NB: RDFa requires information about IRIs and their labels.
# So, we need to pass typed OFN S-expressions AND labels separately.
# 6. RDFa (wiring.rs)
rdfa = wiring_rs.object_2_rdfa(typed, labels)
return rdfa
def object2omn(connection, table, json):
# 1. convert json to OFN (wiring.rs)
ofn = wiring_rs.object_2_ofn(json)
# 2. get types from database (wiring.py)
types = get_types_of_signature(connection, ofn)
# 3. get labels from database (wiring.py)
labels = get_labels_of_signature(connection, ofn)
# 4. typing (wiring.rs)
typed = wiring_rs.ofn_typing(ofn, types)
# 5. labelling (wiring.rs)
labeled = wiring_rs.ofn_labeling(typed, labels)
# 6. Manchester string (wiring.rs)
man = wiring_rs.ofn_2_man(labeled)
return man
# this results in duplicate database queries
# in the case different JSON objects contain the same named entities.
# This is a common case. So, we should avoid this.
# def objects2omn(connection, table, jsons):
# mans = []
# for json in jsons:
# mans.append(object2omn(json))
# return mans
# TODO: use type hints?
# from typing import List, Set, Dict, Tuple, Optional
# -> there are differences between type hints in Python versions 3.8 and 3.9
# -> ignore for now
def objects2omn(connection, table, jsons):
ofns = []
# 1. first convert everything to ofn
for json in jsons:
ofns.append(wiring_rs.object_2_ofn(json))
# 2. get signature for all terms
signature = set()
for ofn in ofns:
signature = signature.union(wiring_rs.get_signature(ofn))
# 3. get typing information for signature
type_map = {}
for s in signature:
types = get_types(connection, s)
if types:
type_map[s] = types
# 4. get labelling information for signature
label_map = {}
for s in signature:
labels = get_labels(connection, s)
if labels:
# just use one label (we assume labels are unique)
label_map[s] = labels.pop()
# 5. typing
typed = []
for ofn in ofns:
typed.append(wiring_rs.ofn_typing(ofn, type_map))
# 6. labelling (requires correctly typed OFN S-expressions)
labelled = []
for ofn in typed:
labelled.append(wiring_rs.ofn_labeling(ofn, label_map))
# 7. Manchester
man = []
for ofn in labelled:
man.append(wiring_rs.ofn_2_man(ofn))
return man
def run_demo_objects2omn(database, subject):
con = sqlite3.connect(database, check_same_thread=False)
# create list of JSON objects
jsons = []
for row in get_statements(con, "statement", subject):
jsons.append(row["object"])
# create Manchester stings
mans = objects2omn(con, "statement", jsons)
# print them side by side
for i in range(len(jsons)):
print("===")
print(jsons[i])
print(mans[i])
print("===")
def run_demo_object2omn(database, subject):
con = sqlite3.connect(database, check_same_thread=False)
for row in get_statements(con, "statement", subject):
print(object2omn(con, "statement", row["object"]))
def run_demo_object2rdfa(database, subject):
con = sqlite3.connect(database, check_same_thread=False)
for row in get_statements(con, "statement", subject):
print(object2rdfa(con, "statement", row["object"]))
def run_demo(database, subject):
# connection to database
con = sqlite3.connect(database, check_same_thread=False)
# query for ThickTriples of the subject
for row in get_statements(con, "statement", subject):
# fetch data of a ThickTriple
subject = row["subject"]
predicate = row["predicate"]
object = row["object"]
# convert ThickTriple to an OFN S-expression
# TODO provide support for datatypes
ofn = wiring_rs.ldtab_2_ofn(subject, predicate, object)
# fetch typing information relevant for the OFN S-expression
types = get_types_of_signature(con, ofn)
# fetch labelling information relevant for the OFN S-expression
labels = get_labels_of_signature(con, ofn)
# perform typing
typed = wiring_rs.ofn_typing(ofn, types)
# perform labelling (this requires a correctly typed OFN S-expression)
labeled = wiring_rs.ofn_labeling(typed, labels)
# convert to Manchester syntax
man = wiring_rs.ofn_2_man(typed)
lab_man = wiring_rs.ofn_2_man(labeled)
print("======")
print("ThickTriple: " + str(subject) + "," + str(predicate) + "," + str(object))
print("Typed OFN: " + typed)
print("Labelled OFN: " + labeled)
print("Manchester: " + man)
print("Lab Man: " + lab_man)
print("======")
if __name__ == "__main__":
# example call for OBI:
# python ldtab_demo.py ldtab_obi.db obo:OBI_0002946
database = sys.argv[1]
subject = sys.argv[2]
# run_demo(database, subject)
# run_demo_object2omn(database, subject)
run_demo_object2rdfa(database, subject)
# run_demo_objects2omn(database, subject)
| 28.41502 | 88 | 0.648769 |
3b7dbf9079949da6e1bd2137a9d63b8be12057f7 | 14,193 | py | Python | aioredis/pubsub.py | tclarke/aioredis | d42dd9144ea376b8230786040972419b3ffab3c6 | [
"MIT"
] | null | null | null | aioredis/pubsub.py | tclarke/aioredis | d42dd9144ea376b8230786040972419b3ffab3c6 | [
"MIT"
] | null | null | null | aioredis/pubsub.py | tclarke/aioredis | d42dd9144ea376b8230786040972419b3ffab3c6 | [
"MIT"
] | null | null | null | import asyncio
import json
import types
import collections
import warnings
import sys
from .abc import AbcChannel
from .util import _converters # , _set_result
from .errors import ChannelClosedError
from .log import logger
__all__ = [
    "Channel",
    "EndOfStream",
    "Receiver",
]

# End of pubsub messages stream marker.
# Unique sentinel: consumers compare with ``is EndOfStream`` so it can never
# collide with a payload value a publisher might send.
EndOfStream = object()
class Channel(AbcChannel):
    """Wrapper around a :class:`ClosableQueue` representing one Pub/Sub
    channel (or pattern) subscription.

    Messages are pushed by the connection via :meth:`put_nowait` and
    consumed with :meth:`get` / :meth:`iter`.
    """

    def __init__(self, name, is_pattern, loop=None):
        # The explicit ``loop`` argument is ignored; on Python 3.8+ passing
        # it triggers a deprecation warning.
        if loop is not None and sys.version_info >= (3, 8):
            warnings.warn("The loop argument is deprecated",
                          DeprecationWarning)
        self._queue = ClosableQueue()
        # normalize the name to bytes via the per-type converter table
        self._name = _converters[type(name)](name)
        self._is_pattern = is_pattern

    def __repr__(self):
        return "<{} name:{!r}, is_pattern:{}, qsize:{}>".format(
            self.__class__.__name__,
            self._name, self._is_pattern, self._queue.qsize())

    @property
    def name(self):
        """Encoded channel name/pattern."""
        return self._name

    @property
    def is_pattern(self):
        """Set to True if channel is subscribed to pattern."""
        return self._is_pattern

    @property
    def is_active(self):
        """Returns True until there are messages in channel or
        connection is subscribed to it.

        Can be used with ``while``:

        >>> ch = conn.pubsub_channels['chan:1']
        >>> while ch.is_active:
        ...     msg = await ch.get()   # may block for a long time
        """
        return not self._queue.exhausted

    async def get(self, *, encoding=None, errors=None, decoder=None):
        """Coroutine that waits for and returns a message.

        For pattern channels the result is a ``(destination, message)``
        tuple; otherwise just the message.  Returns None once the channel
        has been closed and drained.

        :raises aioredis.ChannelClosedError: If channel is unsubscribed
            and has no messages.
        """
        assert decoder is None or callable(decoder), decoder
        if self._queue.exhausted:
            raise ChannelClosedError()
        msg = await self._queue.get()
        if msg is EndOfStream:
            # TODO: maybe we need an explicit marker for "end of stream"
            # currently, returning None may overlap with
            # possible return value from `decoder`
            # so the user would have to check `ch.is_active`
            # to determine if its EoS or payload
            return
        if self._is_pattern:
            # pattern messages arrive as (destination channel, payload)
            dest_channel, msg = msg
        if encoding is not None:
            msg = msg.decode(encoding, errors)
        if decoder is not None:
            msg = decoder(msg)
        if self._is_pattern:
            return dest_channel, msg
        return msg

    async def get_json(self, encoding='utf-8', errors='strict'):
        """Shortcut to get and decode JSON messages."""
        return (await self.get(encoding=encoding, errors=errors, decoder=json.loads))

    def iter(self, *, encoding=None, errors=None, decoder=None):
        """Same as get method but its native coroutine.

        Usage example:

        >>> async for msg in ch.iter():
        ...     print(msg)
        """
        return _IterHelper(self,
                           is_active=lambda ch: ch.is_active,
                           encoding=encoding,
                           errors=errors,
                           decoder=decoder)

    async def wait_message(self):
        """Waits for message to become available in channel
        or channel is closed (unsubscribed).

        Possible usage:

        >>> while (await ch.wait_message()):
        ...     msg = await ch.get()
        """
        if not self.is_active:
            return False
        if not self._queue.empty():
            return True
        await self._queue.wait()
        # re-check: the wakeup may have been caused by close(), not a message
        return self.is_active

    # internal methods

    def put_nowait(self, data):
        # Called by the connection when a message arrives for this channel.
        self._queue.put(data)

    def close(self, exc=None):
        """Marks channel as inactive.

        Internal method, will be called from connection
        on `unsubscribe` command.
        """
        if not self._queue.closed:
            self._queue.close()
class _IterHelper:
__slots__ = ('_ch', '_is_active', '_args', '_kw')
def __init__(self, ch, is_active, *args, **kw):
self._ch = ch
self._is_active = is_active
self._args = args
self._kw = kw
def __aiter__(self):
return self
async def __anext__(self):
if not self._is_active(self._ch):
raise StopAsyncIteration # noqa
msg = await self._ch.get(*self._args, **self._kw)
if msg is None:
raise StopAsyncIteration # noqa
return msg
class Receiver:
    """Multi-producers, single-consumer Pub/Sub queue.

    Can be used in cases where a single consumer task
    must read messages from several different channels
    (where pattern subscriptions may not work well
    or channels can be added/removed dynamically).

    Example use case:

    >>> from aioredis.pubsub import Receiver
    >>> from aioredis.abc import AbcChannel
    >>> mpsc = Receiver()
    >>> async def reader(mpsc):
    ...     async for channel, msg in mpsc.iter():
    ...         assert isinstance(channel, AbcChannel)
    ...         print("Got {!r} in channel {!r}".format(msg, channel))
    >>> asyncio.ensure_future(reader(mpsc))
    >>> await redis.subscribe(mpsc.channel('channel:1'),
    ...                       mpsc.channel('channel:3'),
    ...                       mpsc.channel('channel:5'))
    >>> await redis.psubscribe(mpsc.pattern('hello'))
    >>> # publishing 'Hello world' into 'hello-channel'
    >>> # will print this message:
    Got b'Hello world' in channel b'hello-channel'
    >>> # when all is done:
    >>> await redis.unsubscribe('channel:1', 'channel:3', 'channel:5')
    >>> await redis.punsubscribe('hello')
    >>> mpsc.stop()
    >>> # any message received after stop() will be ignored.
    """

    def __init__(self, loop=None, on_close=None):
        assert on_close is None or callable(on_close), (
            "on_close must be None or callable", on_close)
        # ``loop`` is accepted only for backwards compatibility and ignored.
        if loop is not None:
            warnings.warn("The loop argument is deprecated",
                          DeprecationWarning)
        if on_close is None:
            on_close = self.check_stop
        self._queue = ClosableQueue()
        # (encoded name, is_pattern) -> _Sender
        self._refs = {}
        self._on_close = on_close

    def __repr__(self):
        return ('<Receiver is_active:{}, senders:{}, qsize:{}>'
                .format(self.is_active, len(self._refs), self._queue.qsize()))

    def channel(self, name):
        """Create a channel.

        Returns ``_Sender`` object implementing
        :class:`~aioredis.abc.AbcChannel`.  Senders are cached, so asking
        for the same name twice returns the same object.
        """
        enc_name = _converters[type(name)](name)
        if (enc_name, False) not in self._refs:
            ch = _Sender(self, enc_name,
                         is_pattern=False)
            self._refs[(enc_name, False)] = ch
            return ch
        return self._refs[(enc_name, False)]

    def pattern(self, pattern):
        """Create a pattern channel.

        Returns ``_Sender`` object implementing
        :class:`~aioredis.abc.AbcChannel`.  Senders are cached, so asking
        for the same pattern twice returns the same object.
        """
        enc_pattern = _converters[type(pattern)](pattern)
        if (enc_pattern, True) not in self._refs:
            ch = _Sender(self, enc_pattern,
                         is_pattern=True)
            self._refs[(enc_pattern, True)] = ch
        return self._refs[(enc_pattern, True)]

    @property
    def channels(self):
        """Read-only channels dict."""
        return types.MappingProxyType({
            ch.name: ch for ch in self._refs.values()
            if not ch.is_pattern})

    @property
    def patterns(self):
        """Read-only patterns dict."""
        return types.MappingProxyType({
            ch.name: ch for ch in self._refs.values()
            if ch.is_pattern})

    async def get(self, *, encoding=None, errors=None, decoder=None):
        """Wait for and return pub/sub message from one of channels.

        Return value is either:

        * tuple of two elements: channel & message;

        * tuple of three elements: pattern channel, (target channel & message);

        * or None in case Receiver is not active or has just been stopped.

        :raises aioredis.ChannelClosedError: If listener is stopped
            and all messages have been received.
        """
        # Flow before ClosableQueue:
        #   - ch.get() -> message
        #   - ch.close() -> ch.put(None)
        #   - ch.get() -> None
        #   - ch.get() -> ChannelClosedError
        # Current flow:
        #   - ch.get() -> message
        #   - ch.close() -> ch._closed = True
        #   - ch.get() -> ChannelClosedError
        assert decoder is None or callable(decoder), decoder
        if self._queue.exhausted:
            raise ChannelClosedError()
        obj = await self._queue.get()
        if obj is EndOfStream:
            return
        ch, msg = obj
        if ch.is_pattern:
            # pattern senders enqueue (destination channel, payload)
            dest_ch, msg = msg
        if encoding is not None:
            msg = msg.decode(encoding, errors)
        if decoder is not None:
            msg = decoder(msg)
        if ch.is_pattern:
            return ch, (dest_ch, msg)
        return ch, msg

    async def wait_message(self):
        """Blocks until new message appear."""
        if not self._queue.empty():
            return True
        if self._queue.closed:
            return False
        await self._queue.wait()
        return self.is_active

    @property
    def is_active(self):
        """Returns True if listener has any active subscription."""
        if self._queue.exhausted:
            return False
        return any(ch.is_active for ch in self._refs.values())

    def stop(self):
        """Stop receiving messages.

        All new messages after this call will be ignored,
        so you must call unsubscribe before stopping this listener.
        """
        self._queue.close()
        # TODO: discard all senders as they might still be active.
        # Channels storage in Connection should be refactored:
        # if we drop _Senders here they will still be subscribed
        # and will reside in memory although messages will be discarded.

    def iter(self, *, encoding=None, errors=None, decoder=None):
        """Returns async iterator.

        Usage example:

        >>> async for ch, msg in mpsc.iter():
        ...     print(ch, msg)
        """
        return _IterHelper(self,
                           is_active=lambda r: not r._queue.exhausted,
                           encoding=encoding,
                           errors=errors,
                           decoder=decoder)

    def check_stop(self, channel, exc=None):
        """Default ``on_close`` callback: stop once no senders remain."""
        # NOTE: this is a fast-path implementation,
        # if overridden, implementation should use public API:
        #
        # if self.is_active and not (self.channels or self.patterns):
        if not self._refs:
            self.stop()

    # internal methods

    def _put_nowait(self, data, *, sender):
        # Drop (and log) messages that arrive after stop().
        if self._queue.closed:
            logger.warning("Pub/Sub listener message after stop:"
                           " sender: %r, data: %r",
                           sender, data)
            return
        self._queue.put((sender, data))

    def _close(self, sender, exc=None):
        # Drop the sender from the registry, then notify the on_close hook
        # (which by default stops the receiver once the registry is empty).
        self._refs.pop((sender.name, sender.is_pattern))
        self._on_close(sender, exc=exc)
class _Sender(AbcChannel):
    """Write-Only Channel.

    Producer-side handle created by :class:`Receiver`; the connection
    pushes messages through :meth:`put_nowait`, which forwards them to
    the receiver's shared queue.  Does not allow direct ``.get()`` calls.
    """

    def __init__(self, receiver, name, is_pattern):
        self._receiver = receiver
        # normalize the name to bytes via the per-type converter table
        self._name = _converters[type(name)](name)
        self._is_pattern = is_pattern
        self._closed = False

    def __repr__(self):
        return "<{} name:{!r}, is_pattern:{}, receiver:{!r}>".format(
            self.__class__.__name__,
            self._name, self._is_pattern, self._receiver)

    @property
    def name(self):
        """Encoded channel name or pattern."""
        return self._name

    @property
    def is_pattern(self):
        """Set to True if channel is subscribed to pattern."""
        return self._is_pattern

    @property
    def is_active(self):
        return not self._closed

    async def get(self, *, encoding=None, errors=None, decoder=None):
        raise RuntimeError("MPSC channel does not allow direct get() calls")

    def put_nowait(self, data):
        # Forward to the receiver's shared queue, tagged with this sender.
        self._receiver._put_nowait(data, sender=self)

    def close(self, exc=None):
        # TODO: close() is exclusive so we can not share same _Sender
        # between different connections.
        # This needs to be fixed.
        if self._closed:
            return
        self._closed = True
        self._receiver._close(self, exc=exc)
class ClosableQueue:
def __init__(self):
self._queue = collections.deque()
self._event = asyncio.Event()
self._closed = False
async def wait(self):
while not (self._queue or self._closed):
await self._event.wait()
return True
async def get(self):
await self.wait()
assert self._queue or self._closed, (
"Unexpected queue state", self._queue, self._closed)
if not self._queue and self._closed:
return EndOfStream
item = self._queue.popleft()
if not self._queue:
self._event.clear()
return item
def put(self, item):
if self._closed:
return
self._queue.append(item)
self._event.set()
def close(self):
"""Mark queue as closed and notify all waiters."""
self._closed = True
self._event.set()
@property
def closed(self):
return self._closed
@property
def exhausted(self):
return self._closed and not self._queue
def empty(self):
return not self._queue
def qsize(self):
return len(self._queue)
def __repr__(self):
closed = 'closed' if self._closed else 'open'
return '<Queue {} size:{}>'.format(closed, len(self._queue))
| 31.125 | 85 | 0.580357 |
13c0ef338bc0cde50e75bffc1dd5493fc1daa420 | 5,044 | py | Python | homeassistant/components/anthemav/media_player.py | alindeman/home-assistant | b274b10f3874c196f0db8f9cfa5f47eb756d1f8e | [
"Apache-2.0"
] | 4 | 2019-07-03T22:36:57.000Z | 2019-08-10T15:33:25.000Z | homeassistant/components/anthemav/media_player.py | alindeman/home-assistant | b274b10f3874c196f0db8f9cfa5f47eb756d1f8e | [
"Apache-2.0"
] | 7 | 2019-08-23T05:26:02.000Z | 2022-03-11T23:57:18.000Z | homeassistant/components/anthemav/media_player.py | alindeman/home-assistant | b274b10f3874c196f0db8f9cfa5f47eb756d1f8e | [
"Apache-2.0"
] | 2 | 2018-08-15T03:59:35.000Z | 2018-10-18T12:20:05.000Z | """Support for Anthem Network Receivers and Processors."""
import logging
import voluptuous as vol
from homeassistant.components.media_player import (
MediaPlayerDevice, PLATFORM_SCHEMA)
from homeassistant.components.media_player.const import (
SUPPORT_SELECT_SOURCE, SUPPORT_TURN_OFF, SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET)
from homeassistant.const import (
CONF_HOST, CONF_NAME, CONF_PORT, EVENT_HOMEASSISTANT_STOP, STATE_OFF,
STATE_ON)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)

DOMAIN = 'anthemav'

# Default TCP port of the Anthem AVR network-control protocol.
DEFAULT_PORT = 14999

# Media-player features this integration exposes to Home Assistant.
SUPPORT_ANTHEMAV = SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE | \
    SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_SELECT_SOURCE

# Configuration schema: host is required; name and port are optional.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_HOST): cv.string,
    vol.Optional(CONF_NAME): cv.string,
    vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
})
async def async_setup_platform(hass, config, async_add_entities,
                               discovery_info=None):
    """Set up our socket to the AVR."""
    # imported lazily so the dependency is only loaded when the platform
    # is actually configured
    import anthemav

    host = config.get(CONF_HOST)
    port = config.get(CONF_PORT)
    name = config.get(CONF_NAME)
    device = None

    _LOGGER.info("Provisioning Anthem AVR device at %s:%d", host, port)

    def async_anthemav_update_callback(message):
        """Receive notification from transport that new data exists."""
        # ``device`` is bound after Connection.create() below; the callback
        # only fires once the connection is live, so it is set by then.
        _LOGGER.info("Received update callback from AVR: %s", message)
        hass.async_create_task(device.async_update_ha_state())

    avr = await anthemav.Connection.create(
        host=host, port=port,
        update_callback=async_anthemav_update_callback)

    device = AnthemAVR(avr, name)

    _LOGGER.debug("dump_devicedata: %s", device.dump_avrdata)
    _LOGGER.debug("dump_conndata: %s", avr.dump_conndata)

    # close the AVR connection cleanly when Home Assistant shuts down
    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, device.avr.close)
    async_add_entities([device])
class AnthemAVR(MediaPlayerDevice):
    """Entity reading values from Anthem AVR protocol.

    All state is read from ``self.avr.protocol`` attributes; updates are
    pushed by the transport callback, so no polling is needed.
    """

    def __init__(self, avr, name):
        """Initialize entity with transport."""
        super().__init__()
        self.avr = avr
        self._name = name

    def _lookup(self, propname, dval=None):
        # Read an attribute off the AVR protocol object, falling back to
        # ``dval`` when the protocol has not populated it yet.
        return getattr(self.avr.protocol, propname, dval)

    @property
    def supported_features(self):
        """Flag media player features that are supported."""
        return SUPPORT_ANTHEMAV

    @property
    def should_poll(self):
        """No polling needed -- state is pushed via the update callback."""
        return False

    @property
    def name(self):
        """Return name of device (configured name, else reported model)."""
        return self._name or self._lookup('model')

    @property
    def state(self):
        """Return state of power on/off."""
        pwrstate = self._lookup('power')

        if pwrstate is True:
            return STATE_ON
        if pwrstate is False:
            return STATE_OFF
        # power state unknown (e.g. not yet reported by the AVR)
        return None

    @property
    def is_volume_muted(self):
        """Return boolean reflecting mute state on device."""
        return self._lookup('mute', False)

    @property
    def volume_level(self):
        """Return volume level from 0 to 1."""
        return self._lookup('volume_as_percentage', 0.0)

    @property
    def media_title(self):
        """Return current input name (closest we have to media title)."""
        return self._lookup('input_name', 'No Source')

    @property
    def app_name(self):
        """Return details about current video and audio stream."""
        return self._lookup('video_input_resolution_text', '') + ' ' \
            + self._lookup('audio_input_name', '')

    @property
    def source(self):
        """Return currently selected input."""
        return self._lookup('input_name', "Unknown")

    @property
    def source_list(self):
        """Return all active, configured inputs."""
        return self._lookup('input_list', ["Unknown"])

    async def async_select_source(self, source):
        """Change AVR to the designated source (by name)."""
        self._update_avr('input_name', source)

    async def async_turn_off(self):
        """Turn AVR power off."""
        self._update_avr('power', False)

    async def async_turn_on(self):
        """Turn AVR power on."""
        self._update_avr('power', True)

    async def async_set_volume_level(self, volume):
        """Set AVR volume (0 to 1)."""
        self._update_avr('volume_as_percentage', volume)

    async def async_mute_volume(self, mute):
        """Engage AVR mute."""
        self._update_avr('mute', mute)

    def _update_avr(self, propname, value):
        """Update a property in the AVR.

        Setting the attribute on the protocol object sends the command to
        the device over the wire.
        """
        _LOGGER.info(
            "Sending command to AVR: set %s to %s", propname, str(value))
        setattr(self.avr.protocol, propname, value)

    @property
    def dump_avrdata(self):
        """Return state of avr object for debugging forensics."""
        attrs = vars(self)
        return(
            'dump_avrdata: '
            + ', '.join('%s: %s' % item for item in attrs.items()))
| 30.756098 | 74 | 0.65682 |
51cb7233ffafd5c9acc895e386deccef72824290 | 6,892 | py | Python | fxp2ardour.py | SpotlightKid/ardour2fxp | 9063c321f0b0f08bcdea0a8e88b47f14dd72f858 | [
"MIT"
] | 11 | 2019-01-03T21:51:40.000Z | 2021-08-07T13:42:31.000Z | fxp2ardour.py | SpotlightKid/ardour2fxp | 9063c321f0b0f08bcdea0a8e88b47f14dd72f858 | [
"MIT"
] | 2 | 2019-01-23T05:59:51.000Z | 2021-01-15T20:59:46.000Z | fxp2ardour.py | SpotlightKid/ardour2fxp | 9063c321f0b0f08bcdea0a8e88b47f14dd72f858 | [
"MIT"
] | 3 | 2020-07-17T14:12:15.000Z | 2021-11-19T11:25:58.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# fxp2ardour.py
#
"""Convert one or more VST2 FXP preset files to Ardour VST presets XML files.
"""
import argparse
import hashlib
import os
import sys
from base64 import b64encode
from collections import namedtuple
from os.path import exists, isdir, join
from struct import calcsize, unpack
from xml.etree import ElementTree as ET
# VST2 .fxp header layout (big-endian):
#   4s  chunk magic ('CcnK')
#   i   total chunk size (excluding the 8-byte preamble)
#   4s  program type magic ('FxCk' = plain params, 'FPCh' = opaque chunk)
#   i   format version, i plugin id, i plugin version, i number of params
#   28s NUL-padded program label
FXP_HEADER_FMT = '>4si4s4i28s'
FXP_PREAMBEL_SIZE = calcsize('>4si')
FXP_HEADER_SIZE = calcsize(FXP_HEADER_FMT)
FXP_FORMAT_VERSION = 1
CHUNK_MAGIC = b'CcnK'
FX_MAGIC_PARAMS = b'FxCk'
FX_MAGIC_CHUNK = b'FPCh'
FX_DEFAULT_VERSION = 1

# Fields shared by both preset flavors; the last field differs
# (raw 'chunk' bytes vs. a tuple of float 'params').
PRESET_BASE_FIELDS = (
    'type',
    'plugin_id',
    'plugin_version',
    'hash',
    'label',
    'num_params',
)

ChunkPreset = namedtuple('ChunkPreset', PRESET_BASE_FIELDS + ('chunk',))
Preset = namedtuple('Preset', PRESET_BASE_FIELDS + ('params',))

# Raw header fields in file order, matching FXP_HEADER_FMT.
FXPHeader = namedtuple(
    'FXPHeader',
    ('magic', 'size', 'type', 'version', 'plugin_id', 'plugin_version',
     'num_params', 'label')
)
class FXPParseException(Exception):
    """Raised when FXP file data cannot be parsed as a valid VST2 preset."""
def parse_fxp(fn):
    """Parse a VST2 FXP preset file.

    Returns a single ``Preset`` (parameter program) or ``ChunkPreset``
    (opaque chunk program) instance, depending on the program type magic.

    :raises FXPParseException: on bad magic bytes or truncated chunk data.
    """
    with open(fn, 'rb') as fp:
        fxp = FXPHeader(*unpack(FXP_HEADER_FMT, fp.read(FXP_HEADER_SIZE)))

        if fxp.magic != CHUNK_MAGIC:
            raise FXPParseException("Invalid magic header bytes for FXP file.")

        # label is NUL-padded to 28 bytes; decode as latin1 (1:1 byte mapping)
        label = fxp.label.rstrip(b'\0').decode('latin1')

        if fxp.type == FX_MAGIC_PARAMS:
            # plain program: num_params big-endian 32-bit floats follow
            params_fmt = '>{:d}f'.format(fxp.num_params)
            params = unpack(params_fmt, fp.read(calcsize(params_fmt)))
            preset = Preset('VST', fxp.plugin_id, fxp.plugin_version,
                            None, label, fxp.num_params, params)
        elif fxp.type == FX_MAGIC_CHUNK:
            # opaque program: a 32-bit length followed by raw chunk bytes
            chunk_size = unpack('>i', fp.read(calcsize('>i')))[0]
            chunk = fp.read(chunk_size)

            if len(chunk) != chunk_size:
                raise FXPParseException(
                    "Program chunk data truncated, expected {:d} bytes, "
                    "read {:d}.".format(chunk_size, len(chunk)))

            preset = ChunkPreset('VST', fxp.plugin_id, fxp.plugin_version,
                                 None, label, fxp.num_params, chunk)
        else:
            raise FXPParseException("Invalid program type magic bytes. Type "
                                    "'{}' not supported.".format(fxp.type))

    return preset
def main(args=None):
    """Convert FXP preset files to per-plugin Ardour VST preset XML files.

    Returns 2 when no input files are given, an error-message string on
    failure (which ``sys.exit`` prints), or None on success.

    NOTE(review): the ``--fx-version`` option is parsed but never used.
    """
    argparser = argparse.ArgumentParser()
    argparser.add_argument('-v', '--fx-version', type=int,
                           default=FX_DEFAULT_VERSION,
                           help="VST plugin version number")
    argparser.add_argument('-a', '--append', action="store_true",
                           help="Append presets to existing Ardour preset "
                                "file(s), if applicable")
    argparser.add_argument('-f', '--force', action="store_true",
                           help="Overwrite existing destination file(s)")
    argparser.add_argument('-m', '--merge', action="store_true",
                           help="Merge presets into existing Ardour preset "
                                "file(s), if applicable. Existing presets with "
                                "the same name for the same plugin are "
                                "overwritten. USE WITH CARE!")
    argparser.add_argument('-o', '--output-dir',
                           help="Ardour presets output directory")
    argparser.add_argument('infiles', nargs='*', metavar='FXP',
                           help="FXP preset (input) file(s)")
    args = argparser.parse_args(args)

    output_dir = args.output_dir or os.getcwd()

    if not args.infiles:
        argparser.print_help()
        return 2

    # Group parsed presets by plugin id; one output file per plugin.
    presets = {}
    for infile in args.infiles:
        try:
            preset = parse_fxp(infile)
        except Exception as exc:
            return "Error reading FXP preset file '{}': {}".format(
                infile, exc)
        else:
            presets.setdefault(preset.plugin_id, []).append(preset)

    for plugin in presets:
        if not isdir(output_dir):
            os.makedirs(output_dir)

        # Ardour preset file naming: 'vst-' + zero-padded plugin id.
        xml_fn = join(output_dir, 'vst-{:010d}'.format(plugin))

        if exists(xml_fn) and not any((args.append, args.force, args.merge)):
            print("Ardour VST preset file '{}' already exists. "
                  "Skipping output.".format(xml_fn))
            continue
        elif args.append or args.merge:
            # Load the existing preset file and index its preset nodes by
            # label so --merge can replace them in place.
            try:
                tree = ET.parse(xml_fn)
                root = tree.getroot()
                preset_nodes = {}
                if root.tag != 'VSTPresets':
                    raise ValueError("Root XML element must be 'VSTPresets'.")
            except Exception as exc:
                return ("Output file '{}' already exists, but does not seem to be an "
                        "Ardour VST preset file. Cannot merge.\n{}".format(xml_fn, exc))

            for node in root:
                if node.tag in ('Preset', 'ChunkPreset'):
                    preset_nodes.setdefault(node.get('label'), []).append(node)
        else:
            root = ET.Element('VSTPresets')
            preset_nodes = {}

        for i, preset in enumerate(presets[plugin]):
            # Preset URI: plugin id plus a hash of label and position, so
            # presets with identical labels still get distinct URIs.
            sha1 = hashlib.sha1()
            sha1.update(bytes(preset.label, 'latin1'))
            sha1.update(bytes(str(i), 'ascii'))
            uri = '{}:{:010d}:x{}'.format('VST', plugin, sha1.hexdigest())
            tag = 'Preset' if isinstance(preset, Preset) else 'ChunkPreset'

            if args.merge and preset.label in preset_nodes:
                # replace next existing preset with same label
                pnode = preset_nodes[preset.label].pop(0)
                # if no more presets with this label exist, remove the key
                if not preset_nodes[preset.label]:
                    del preset_nodes[preset.label]
                pnode.clear()
                pnode.tag = tag
            else:
                pnode = ET.SubElement(root, tag)

            pnode.set('uri', uri)
            pnode.set('label', preset.label)
            pnode.set('version', str(preset.plugin_version))
            pnode.set('numParams', str(preset.num_params))

            if isinstance(preset, Preset):
                # parameter program: one Parameter element per value
                for j, param in enumerate(preset.params):
                    ET.SubElement(pnode, 'Parameter', index=str(j),
                                  value=str(param))
            elif isinstance(preset, ChunkPreset):
                # chunk program: opaque data stored base64-encoded
                pnode.text = b64encode(preset.chunk).decode('ascii')

        # write this plugin's preset file
        with open(xml_fn, 'wb') as fp:
            doc = ET.ElementTree(root)
            doc.write(fp, encoding='UTF-8', xml_declaration=True)
sys.exit(main() or 0)
| 36.855615 | 88 | 0.567324 |
73a893bf28398084bbb5546df1085b8da1b498a6 | 523 | py | Python | webscraping/main.py | MichaelS42/web-scraping | d9ff5ab5fb7a7f5872700cfc51804cbd77607389 | [
"MIT"
] | null | null | null | webscraping/main.py | MichaelS42/web-scraping | d9ff5ab5fb7a7f5872700cfc51804cbd77607389 | [
"MIT"
] | null | null | null | webscraping/main.py | MichaelS42/web-scraping | d9ff5ab5fb7a7f5872700cfc51804cbd77607389 | [
"MIT"
] | null | null | null | from bs4 import BeautifulSoup
import requests
html_text = requests.get('https://www.timesjobs.com/candidate/job-search.html?searchType=personalizedSearch&from=submit&txtKeywords=python&txtLocation=')
soup = BeautifulSoup(html_text, 'lxml')
job = soup.find('li', class_ = 'clearfix job-bx wht-shd-bx')
company_name = job.find('h3', class_ = 'joblist-comp-name')
skills = job.find('span', class_ = 'srp-skills').text.replace(' ', '')
published_date = job.find('span', class_ = 'sim-posted').span.text
print(published_date) | 43.583333 | 153 | 0.745698 |
f6fbfcdd68f4995e0b0b671c866816317a9e997d | 9,219 | py | Python | sdk/python/pulumi_azure_native/marketplace/get_private_store_offer.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/marketplace/get_private_store_offer.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/marketplace/get_private_store_offer.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetPrivateStoreOfferResult',
'AwaitableGetPrivateStoreOfferResult',
'get_private_store_offer',
]
@pulumi.output_type
class GetPrivateStoreOfferResult:
"""
The privateStore offer data structure.
"""
def __init__(__self__, created_at=None, e_tag=None, icon_file_uris=None, id=None, modified_at=None, name=None, offer_display_name=None, plans=None, private_store_id=None, publisher_display_name=None, specific_plan_ids_limitation=None, type=None, unique_offer_id=None, update_suppressed_due_idempotence=None):
if created_at and not isinstance(created_at, str):
raise TypeError("Expected argument 'created_at' to be a str")
pulumi.set(__self__, "created_at", created_at)
if e_tag and not isinstance(e_tag, str):
raise TypeError("Expected argument 'e_tag' to be a str")
pulumi.set(__self__, "e_tag", e_tag)
if icon_file_uris and not isinstance(icon_file_uris, dict):
raise TypeError("Expected argument 'icon_file_uris' to be a dict")
pulumi.set(__self__, "icon_file_uris", icon_file_uris)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if modified_at and not isinstance(modified_at, str):
raise TypeError("Expected argument 'modified_at' to be a str")
pulumi.set(__self__, "modified_at", modified_at)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if offer_display_name and not isinstance(offer_display_name, str):
raise TypeError("Expected argument 'offer_display_name' to be a str")
pulumi.set(__self__, "offer_display_name", offer_display_name)
if plans and not isinstance(plans, list):
raise TypeError("Expected argument 'plans' to be a list")
pulumi.set(__self__, "plans", plans)
if private_store_id and not isinstance(private_store_id, str):
raise TypeError("Expected argument 'private_store_id' to be a str")
pulumi.set(__self__, "private_store_id", private_store_id)
if publisher_display_name and not isinstance(publisher_display_name, str):
raise TypeError("Expected argument 'publisher_display_name' to be a str")
pulumi.set(__self__, "publisher_display_name", publisher_display_name)
if specific_plan_ids_limitation and not isinstance(specific_plan_ids_limitation, list):
raise TypeError("Expected argument 'specific_plan_ids_limitation' to be a list")
pulumi.set(__self__, "specific_plan_ids_limitation", specific_plan_ids_limitation)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if unique_offer_id and not isinstance(unique_offer_id, str):
raise TypeError("Expected argument 'unique_offer_id' to be a str")
pulumi.set(__self__, "unique_offer_id", unique_offer_id)
if update_suppressed_due_idempotence and not isinstance(update_suppressed_due_idempotence, bool):
raise TypeError("Expected argument 'update_suppressed_due_idempotence' to be a bool")
pulumi.set(__self__, "update_suppressed_due_idempotence", update_suppressed_due_idempotence)
    @property
    @pulumi.getter(name="createdAt")
    def created_at(self) -> str:
        """
        Creation date of the private store offer.
        """
        return pulumi.get(self, "created_at")
    @property
    @pulumi.getter(name="eTag")
    def e_tag(self) -> Optional[str]:
        """
        Entity tag used to guard against racing (concurrent) updates.
        """
        return pulumi.get(self, "e_tag")
    @property
    @pulumi.getter(name="iconFileUris")
    def icon_file_uris(self) -> Optional[Mapping[str, str]]:
        """
        Icon file URIs of the offer.
        """
        return pulumi.get(self, "icon_file_uris")
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The resource ID.
        """
        # Populated from the invoke result via pulumi.set in __init__.
        return pulumi.get(self, "id")
    @property
    @pulumi.getter(name="modifiedAt")
    def modified_at(self) -> str:
        """
        Last modification date of the private store offer.
        """
        return pulumi.get(self, "modified_at")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource.
        """
        # Populated from the invoke result via pulumi.set in __init__.
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="offerDisplayName")
    def offer_display_name(self) -> str:
        """
        Offer name that will be displayed prominently in the marketplace.
        """
        return pulumi.get(self, "offer_display_name")
    @property
    @pulumi.getter
    def plans(self) -> Optional[Sequence['outputs.PlanResponse']]:
        """
        Plans of the offer.
        """
        return pulumi.get(self, "plans")
    @property
    @pulumi.getter(name="privateStoreId")
    def private_store_id(self) -> str:
        """
        Unique id of the private store.
        """
        return pulumi.get(self, "private_store_id")
    @property
    @pulumi.getter(name="publisherDisplayName")
    def publisher_display_name(self) -> str:
        """
        Publisher name that will be displayed prominently in the marketplace.
        """
        return pulumi.get(self, "publisher_display_name")
    @property
    @pulumi.getter(name="specificPlanIdsLimitation")
    def specific_plan_ids_limitation(self) -> Optional[Sequence[str]]:
        """
        Limitation of the offer to this specific list of plan ids.
        """
        return pulumi.get(self, "specific_plan_ids_limitation")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource.
        """
        # Populated from the invoke result via pulumi.set in __init__.
        return pulumi.get(self, "type")
    @property
    @pulumi.getter(name="uniqueOfferId")
    def unique_offer_id(self) -> str:
        """
        Unique id of the offer.
        """
        return pulumi.get(self, "unique_offer_id")
    @property
    @pulumi.getter(name="updateSuppressedDueIdempotence")
    def update_suppressed_due_idempotence(self) -> Optional[bool]:
        """
        Indicating whether the offer was not updated to db (true = not updated). If the allow list is identical to the existed one in db, the offer would not be updated.
        """
        # Named after the service's idempotent update behaviour; value comes straight from the API.
        return pulumi.get(self, "update_suppressed_due_idempotence")
class AwaitableGetPrivateStoreOfferResult(GetPrivateStoreOfferResult):
    """
    Awaitable wrapper around GetPrivateStoreOfferResult, so the same result
    object can be consumed from both synchronous and async code.
    """
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` turns this method into a generator function,
        # which is what makes the object awaitable; awaiting it resolves
        # immediately to a plain GetPrivateStoreOfferResult copy.
        if False:
            yield self
        return GetPrivateStoreOfferResult(
            created_at=self.created_at,
            e_tag=self.e_tag,
            icon_file_uris=self.icon_file_uris,
            id=self.id,
            modified_at=self.modified_at,
            name=self.name,
            offer_display_name=self.offer_display_name,
            plans=self.plans,
            private_store_id=self.private_store_id,
            publisher_display_name=self.publisher_display_name,
            specific_plan_ids_limitation=self.specific_plan_ids_limitation,
            type=self.type,
            unique_offer_id=self.unique_offer_id,
            update_suppressed_due_idempotence=self.update_suppressed_due_idempotence)
def get_private_store_offer(offer_id: Optional[str] = None,
                            private_store_id: Optional[str] = None,
                            opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPrivateStoreOfferResult:
    """
    Fetch the privateStore offer data structure.
    API Version: 2020-01-01.
    :param str offer_id: The offer ID to update or delete
    :param str private_store_id: The store ID - must use the tenant ID
    :param opts: optional invoke options; a default version is filled in when missing.
    """
    __args__ = dict()
    __args__['offerId'] = offer_id
    __args__['privateStoreId'] = private_store_id
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Fall back to this SDK's own version when the caller did not pin one.
        opts.version = _utilities.get_version()
    # Synchronous invoke of the data source; typ= maps the raw result onto
    # GetPrivateStoreOfferResult.
    __ret__ = pulumi.runtime.invoke('azure-native:marketplace:getPrivateStoreOffer', __args__, opts=opts, typ=GetPrivateStoreOfferResult).value
    return AwaitableGetPrivateStoreOfferResult(
        created_at=__ret__.created_at,
        e_tag=__ret__.e_tag,
        icon_file_uris=__ret__.icon_file_uris,
        id=__ret__.id,
        modified_at=__ret__.modified_at,
        name=__ret__.name,
        offer_display_name=__ret__.offer_display_name,
        plans=__ret__.plans,
        private_store_id=__ret__.private_store_id,
        publisher_display_name=__ret__.publisher_display_name,
        specific_plan_ids_limitation=__ret__.specific_plan_ids_limitation,
        type=__ret__.type,
        unique_offer_id=__ret__.unique_offer_id,
        update_suppressed_due_idempotence=__ret__.update_suppressed_due_idempotence)
| 38.898734 | 312 | 0.669053 |
e77a1c0fc79f37b2175b7303eb166f580bc71036 | 1,217 | py | Python | scriptable/ast/variable/assignment.py | c7nw3r/scriptable | b285d865da41774c8321ebf51f5a468ef1c92402 | [
"Apache-2.0"
] | null | null | null | scriptable/ast/variable/assignment.py | c7nw3r/scriptable | b285d865da41774c8321ebf51f5a468ef1c92402 | [
"Apache-2.0"
] | null | null | null | scriptable/ast/variable/assignment.py | c7nw3r/scriptable | b285d865da41774c8321ebf51f5a468ef1c92402 | [
"Apache-2.0"
] | null | null | null | from scriptable.api import AST
from scriptable.api.accessor import Accessor
from scriptable.api.ast_binding import ASTBinding
from scriptable.ast.property import PropertyAccess, Property
class Assignment(AST[None]):
    """AST node that evaluates `source` and stores the result into `target`."""

    def __init__(self, target: AST, source: AST):
        self.source = source
        self.target = target

    def execute(self, binding: ASTBinding) -> None:
        """Evaluate the source expression and assign it to a Property or PropertyAccess target."""
        result = self.source.execute(binding)

        if isinstance(self.target, PropertyAccess):
            def plain(node):
                # Accessor wrappers are unwrapped to their raw value before indexing.
                return node.value if isinstance(node, Accessor) else node

            path = [node.execute(binding) for node in self.target.branch]
            container = path.pop(0)
            # Walk down to the parent container of the final path segment.
            while len(path) > 1:
                container = container[plain(path.pop(0))]
            container[plain(path.pop(0))] = result
            return None

        if isinstance(self.target, Property):
            binding.add_property(self.target.value, result)
            return None

        raise ValueError("cannot handle " + str(self.target))

    @staticmethod
    def parse(target: AST, source: AST) -> 'Assignment':
        return Assignment(target, source)
| 32.026316 | 84 | 0.622021 |
ef98bea9d6e53bfdd55ad002fda190a02436d355 | 1,530 | py | Python | bin/compile.py | lihengl/dizu-api | b7c10c4e8594b150792c3634ae5dbf957b23aeb9 | [
"MIT"
] | null | null | null | bin/compile.py | lihengl/dizu-api | b7c10c4e8594b150792c3634ae5dbf957b23aeb9 | [
"MIT"
] | null | null | null | bin/compile.py | lihengl/dizu-api | b7c10c4e8594b150792c3634ae5dbf957b23aeb9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import codecs
import json
import sys
def translate(cond):
    """Map a rule condition marker to a JavaScript boolean literal: u"全" ("all") -> "true"."""
    return "true" if cond == u"全" else "false"
# JavaScript fragments used to assemble one lookup function per rule set:
#   function(n){ if(<cond>)return'<zipcode>'; ... return'00000'; }
js_head = "function(n){"
js_body = "if(%s)return'%s';"
js_tail = "return'00000';}"
# Load the weighted rules and replace each "weighted" rule list with the
# generated JavaScript function source.
ifile = codecs.open("tmp/weighted.json", "r", encoding="utf-8")
rules = json.loads(ifile.read())
ifile.close()
for c in rules:
    for d in rules[c]:
        for s in rules[c][d]:
            js = js_head
            for rule in rules[c][d][s].pop("weighted"):
                # Rule format: "<zipcode>:...:<condition marker>".
                segs = rule.split(":")
                condition = translate(segs[-1])
                zipcode = segs[0]
                js += js_body % (condition, zipcode)
            js += js_tail
            rules[c][d][s] = js
# Dump the intermediate structure as JSON ...
ofile = codecs.open("tmp/result.json", "w", encoding="utf-8")
json.dump(rules, ofile, indent=4, ensure_ascii=False)
ofile.close()
# ... then post-process it line by line into a CommonJS module: unquote the
# function source strings, strip whitespace runs and drop always-true branches.
ifile = open("tmp/result.json", "r")
ofile = open("lib.js", "w")
for index, line in enumerate(ifile):
    line = line.replace('": "function', '":function')
    line = line.replace(';}"', ';}')
    line = line.replace(' ', '')
    line = line.replace(' ', '')
    line = line.replace(' ', '')
    if index == 0:
        ofile.write("module.exports = {\n")
    elif line == "}":
        ofile.write("};\n")
    elif "if(true)" in line:
        # An always-true condition collapses to an unconditional return, so the
        # trailing default return is removed from that line.
        line = line.replace("if(true)", "")
        line = line.replace("return'00000';", "")
        ofile.write(line)
    else:
        ofile.write(line)
ofile.close()
ifile.close()
sys.exit(0)
| 23.538462 | 63 | 0.533987 |
81f9a5950c6338f7b80eb26c76d1282899861cb8 | 493 | py | Python | pythonteste/desafio42.py | dangiotto/Python | 29a9d18d7595a5c21e65dafc39f7fd4c55d8971c | [
"MIT"
] | 1 | 2020-10-17T03:23:59.000Z | 2020-10-17T03:23:59.000Z | pythonteste/desafio42.py | dangiotto/Python | 29a9d18d7595a5c21e65dafc39f7fd4c55d8971c | [
"MIT"
] | null | null | null | pythonteste/desafio42.py | dangiotto/Python | 29a9d18d7595a5c21e65dafc39f7fd4c55d8971c | [
"MIT"
] | null | null | null | print('=-='*20)
print('Analisador de Triângulo')
print('=-='*20)
a = float(input('Informe o primeiro lado :'))
b = float(input('Informe o segundo lado: '))
c = float(input('Informe o terceiro lado: '))
if a < b + c and b < a + c and c < a + b:
print('Pode ser um triângulo')
if a==b and b==c and c==a:
print('Tipo Equilatero')
elif a!=b and b!=c and c!=a:
print('Tipo Escaleno')
else:
print('Tipo Isóceles')
else:
print('Não pode ser um triângulo') | 29 | 45 | 0.586207 |
997429a4c9e675d40426b33ea034afd9a8b6079c | 62 | py | Python | owl_rdf_utils/__init__.py | Bioprotocols/paml | a1597f9e889be63a7cfb76f869224f200ca0e58f | [
"MIT"
] | 6 | 2021-10-16T04:42:59.000Z | 2022-03-07T18:54:04.000Z | owl_rdf_utils/__init__.py | Bioprotocols/paml | a1597f9e889be63a7cfb76f869224f200ca0e58f | [
"MIT"
] | 56 | 2021-10-15T21:54:30.000Z | 2022-03-31T20:22:26.000Z | owl_rdf_utils/__init__.py | Bioprotocols/paml | a1597f9e889be63a7cfb76f869224f200ca0e58f | [
"MIT"
] | 2 | 2022-01-26T07:14:45.000Z | 2022-02-04T11:27:35.000Z | from .restrictions import *
from .to_sorted_ntriples import *
| 20.666667 | 33 | 0.806452 |
26272c56650518ae3681a957b6d32f7dae17ae5d | 7,815 | py | Python | ArticleCrawler.py | Ohartv1/Programmeer-Project-2015- | 5e92148c59658ec3a6e24e87d579333e46d371c0 | [
"MIT"
] | null | null | null | ArticleCrawler.py | Ohartv1/Programmeer-Project-2015- | 5e92148c59658ec3a6e24e87d579333e46d371c0 | [
"MIT"
] | 1 | 2015-06-03T09:21:36.000Z | 2015-06-26T09:34:48.000Z | ArticleCrawler.py | Ohartv1/Programmeer-Project-2015- | 5e92148c59658ec3a6e24e87d579333e46d371c0 | [
"MIT"
] | null | null | null | # Onno Hartveldt
# 10972935
# visualisation:
# "Wie zijn de nakomelingen van een wetenschappelijke publicatie?"
import json, time, unicodedata, socket

from pattern.web import URL, DOM, plaintext, URLTimeout
from selenium import webdriver
# Entry point of the Web of Science search interface that is scraped.
TARGET = "http://apps.webofknowledge.com"
# Title of the seed publication whose citation descendants are collected.
SEARCH_FOR = "The Nature-Nurture Debates: 25 Years of Challenges in Understanding the Psychology of Gender"
# Seed of the generation counter (incremented per recursion level in crawl()).
generation = 0
# "archief" (Dutch: archive) of article titles that were already crawled.
archief = []
def give_start(browser, website, string_for_search):
    """
    Start of the scraping run: open the search page, submit the query and
    return the URL of the first result.
    :param browser: selenium webdriver instance.
    :param website: URL of the search site.
    :param string_for_search: query text typed into the search box.
    :return: href of the first article result (element class "smallV110").
    """
    browser.get(website)
    # Type the query into the search field and submit with <Enter>.
    elem = browser.find_element_by_id("value(input1)")
    elem.send_keys(string_for_search)
    elem.send_keys(Keys.RETURN)
    # Grab the link of the first search result.
    link = browser.find_element_by_class_name("smallV110")
    href = link.get_attribute("href")
    return href
def scrape_reference(dom):
    """
    Collect information about one article from its detail-page DOM.

    :param dom: pattern.web DOM of the article page.
    :return: dict with "title", "authors" and, when the page offers one,
             "link_cited" (absolute URL of the "cited by" listing).
             DOI/date scraping was disabled in the original and stays off.
    """
    info = {}

    # Article title (last matching element wins, as before).
    for node in dom.by_class("title"):
        info["title"] = plaintext(node.content.encode("ascii", "ignore"))

    # Author names.
    info["authors"] = [
        plaintext(node.content.encode("ascii", "ignore"))
        for node in dom.by_attr(title="Find more records by this author")
    ]

    # Link to the "cited by" page, made absolute against TARGET.
    for anchor in dom.by_attr(title="View all of the articles that cite this one"):
        info["link_cited"] = TARGET + anchor.attrs.get("href", "")

    return info
def cited_by(browser, url):
    """
    Walk through all result pages of the "cited by" listing and collect the
    URLs of the individual citing articles.
    :param browser: selenium webdriver instance.
    :param url: URL of the (JavaScript generated) "cited by" listing.
    :return: list of URLs, one per citing article (possibly empty on errors).
    """
    list_of_urls = []
    try:
        browser.get(url)
        page_bottom = browser.find_element_by_id("pageCount.bottom").text
    except socket.timeout as e:
        # One retry after a timeout, with a longer socket timeout.
        try:
            print "timeout error"
            print "url at the time of error:", url
            time.sleep(60)
            browser.get(url)
            socket.setdefaulttimeout(60)
            page_bottom = browser.find_element_by_id("pageCount.bottom").text
        except:
            # NOTE(review): bare except hides the real failure reason (and
            # would also swallow KeyboardInterrupt) — narrow if possible.
            print "exception unkown"
            return list_of_urls
    while True:
        links = browser.find_elements_by_class_name('smallV110')
        # Assumes every link appears twice in the DOM ("get rid of doubles"):
        # keep only the first half.  Python 2 integer division.
        iterations = len(links) / 2
        while iterations > 0:
            links.pop()
            iterations = iterations - 1
        for link in links:
            href = link.get_attribute("href")
            list_of_urls.append(href)
        # Stop once the "go to page" box shows the last page number.
        stop = browser.find_element_by_class_name("goToPageNumber-input")
        stop = stop.get_attribute("value")
        if stop == page_bottom:
            break
        browser.find_element_by_class_name("paginationNext").click()
    return list_of_urls
def crawl(browser, article, generation):
    """
    Recursively collect the citation descendants of `article`.

    Base case: an article without a "cited by" link, or one whose title is
    already in the global `archief`, is only annotated and returned.

    :param browser: selenium webdriver instance.
    :param article: article dict as produced by scrape_reference().
    :param generation: generation counter of the parent; this level is
                       tagged with generation + 1.
    :return: list of article dicts, each extended with "cited_by" (list of
             cited titles) and "generation".
    """
    generation = generation + 1
    master_article = article
    list_of_articles = []
    list_of_citations = []
    if master_article.get("title") not in archief:
        for each in cited_by(browser, article.get("link_cited")):
            if each is None:
                print("break, no citations")
                break
            try:
                # Download and parse the citing article's page.
                url = URL(each)
                html = url.download()
                dom = DOM(html)
                current = scrape_reference(dom)
                list_of_citations.append(current.get("title"))
                if current.get("link_cited") is not None:
                    list_of_articles = list_of_articles + crawl(browser, current, generation)
                # Remember the title so it is not crawled again.
                archief.append(current.get("title"))
            except URLTimeout:
                # BUG FIX: this read `except pattern.web.URLTimeout`, but only
                # `from pattern.web import ...` names are in scope, so the
                # handler raised NameError instead of catching the timeout.
                # URLTimeout is now imported at the top of the file.
                print("break at pattern.web.URLTimeout")
                break
    master_article.update({"cited_by": list_of_citations, "generation": generation})
    list_of_articles.append(master_article)
    return list_of_articles
def data_to_connections(data):
    """
    Convert the crawler output into the node/link structure needed by the
    visualisation.

    :param data: list of article dicts with "title", "cited_by",
                 "generation" and (optionally) "date" keys.
    :return: dict with "nodes" (one per crawled article plus one per cited
             title that was not crawled) and "links" (deduplicated
             {"source": i, "target": j} index pairs into the node list).
    """
    nodes = []
    titles = []

    # One node per crawled article.
    for article in data:
        titles.append(article.get("title"))
        nodes.append({
            "name": article.get("title"),
            "num_cit": len(article.get("cited_by")),
            "generation": article.get("generation"),
            "date": article.get("date"),
        })

    # Add a leaf node for every cited title that was not crawled itself.
    for article in data:
        for cited in article.get("cited_by"):
            if cited not in titles:
                nodes.append({
                    "name": cited,
                    "num_cit": 0,
                    "generation": article.get("generation") + 1,
                })
                titles.append(cited)

    # Build source/target links by index into the title list.
    connections = []
    for article in data:
        source_index = titles.index(article.get("title"))
        for cited in article.get("cited_by"):
            connections.append({
                "source": source_index,
                "target": titles.index(cited),
            })

    # Drop duplicate links (an article citing the same title twice).
    unique_connections = []
    for link in connections:
        if link not in unique_connections:
            unique_connections.append(link)

    # BUG FIX: the deduplicated list was computed but the raw `connections`
    # list was returned, so duplicate links reached the visualisation.
    return {"nodes": nodes, "links": unique_connections}
if __name__ == '__main__':
    # Create the browser and locate the seed article.
    browser = webdriver.Firefox()
    START_URL = give_start(browser, TARGET, SEARCH_FOR)
    # Download and parse the seed article's page.
    url = URL(START_URL)
    html = url.download()
    dom = DOM(html)
    first = scrape_reference(dom)
    # BUG FIX: list_of_articles was only assigned inside the `if` below, so a
    # seed article without a "cited by" link crashed with NameError later on.
    list_of_articles = []
    if first.get("link_cited") is not None:
        list_of_articles = crawl(browser, first, generation)
    browser.close()
    # Transform the article list into the node/link format for visualisation.
    output = data_to_connections(list_of_articles)
    # NOTE(review): mode "a" appends a whole JSON document per run, which
    # makes the file unparseable after the second run — confirm whether "w"
    # is intended.
    with open("Articles_data.json", "a") as outfile:
        json.dump(output, outfile, indent=2)
af28e0a81852bccf75902fbc4eb3bf9ee7b79c7f | 2,344 | py | Python | vanilla_lasso/ex3_confidence_interval.py | vonguyenleduy/parametric_si_generalized_lasso | 3ef9e2d47036b812b9737d4d5b887000dc24ce03 | [
"BSD-3-Clause"
] | null | null | null | vanilla_lasso/ex3_confidence_interval.py | vonguyenleduy/parametric_si_generalized_lasso | 3ef9e2d47036b812b9737d4d5b887000dc24ce03 | [
"BSD-3-Clause"
] | null | null | null | vanilla_lasso/ex3_confidence_interval.py | vonguyenleduy/parametric_si_generalized_lasso | 3ef9e2d47036b812b9737d4d5b887000dc24ce03 | [
"BSD-3-Clause"
] | 1 | 2021-06-04T19:22:37.000Z | 2021-06-04T19:22:37.000Z | import numpy as np
import matplotlib.pyplot as plt
import statsmodels.api as sm
import gen_data
import qp_solver
import util
import parametric_si
import ci
def construct_P_q_G_h_A_b(X, y, p, lamda):
    """
    Build the QP matrices for the lasso reformulated over (beta+, beta-).

    :param X: (n, p) design matrix.
    :param y: (n, 1) response vector.
    :param p: number of features.
    :param lamda: lasso regularisation parameter.
    :return: (P, q, G, h, A, b); the equality parts A and b are unused (None).
    """
    XTX = np.dot(X.T, X)
    XTy = np.dot(X.T, y)
    dim = 2 * p

    # Quadratic term with block structure [[XTX, -XTX], [-XTX, XTX]].
    P = np.block([[XTX, -XTX], [-XTX, XTX]]).astype(float)

    # Linear term: lamda * 1 - [XTy; -XTy].
    q = lamda * np.ones((dim, 1)) - np.vstack((XTy, -XTy))

    # Non-negativity constraints: -I x <= 0.
    G = -np.identity(dim)
    h = np.zeros((dim, 1))

    return P, q, G, h, None, None
def run():
    """
    One simulation of post-selection confidence intervals for the lasso:
    generate data, solve the lasso as a QP over (beta+, beta-), then compute
    a parametric-SI confidence interval for every selected feature.
    """
    n = 100
    p = 5
    lamda = 10
    # True coefficient vector: the first `no_active` entries carry the signal.
    beta_vec = np.zeros(p)
    signal = 2
    no_active = 2
    for i in range(no_active):
        beta_vec[i] = signal
    z_threshold = 20
    X, y, true_y = gen_data.generate_test(n, p, beta_vec)
    y = y.reshape((n, 1))
    true_y = true_y.reshape((n, 1))
    # Solve the lasso QP; the solution x stacks (beta+, beta-).
    P, q, G, h, _, _ = construct_P_q_G_h_A_b(X, y, p, lamda)
    x, prob = qp_solver.solve_lasso(P, q, G, h, 2 * p)
    x = x.value
    x = x.reshape((len(x), 1))
    B_plus = x[0:p]
    B_minus = x[p:2*p]
    beta_hat = B_plus - B_minus
    bh = util.check_zero(beta_hat.flatten())
    # Active set A and the corresponding design sub-matrices.
    A, XA, Ac, XAc, bhA = util.construct_A_XA_Ac_XAc_bhA(X, bh, p)
    for j_selected in A:
        etaj, etajTy = util.construct_test_statistic(j_selected, XA, y, A)
        a, b = util.compute_a_b(y, etaj, n)
        c, d = util.compute_c_d(X, a, b, lamda, p)
        list_zk, list_bhz, list_active_set = parametric_si.run_parametric_si(X, P, G, h, p, c, d, z_threshold)
        # True mean of the test statistic, used to report CI coverage.
        tn_mu = np.dot(etaj.T, true_y)[0][0]
        cov = np.identity(n)
        alpha = 0.05
        confidence_interval = ci.compute_ci(A, bh, list_active_set, list_zk, list_bhz, etaj, etajTy, cov,
                                            tn_mu, alpha, 'A')
        print('Feature', j_selected + 1, ' True Beta:', beta_vec[j_selected],
              ' CI: ' + '[{:.2f}'.format(confidence_interval[0]) + ', {:.2f}]'.format(confidence_interval[1]),
              ' CI Length:', '{:.2f}'.format(confidence_interval[1] - confidence_interval[0]))
        print("==========")
if __name__ == '__main__':
run() | 24.673684 | 110 | 0.5593 |
60cacbfd79e682b53f255cfa7899d8e5716ab9c3 | 2,920 | py | Python | conf/implicit/baselines.py | hunterhector/DDSemantics | 883ef1015bd21d9b8575d8000faf3b506a09f21c | [
"Apache-2.0"
] | null | null | null | conf/implicit/baselines.py | hunterhector/DDSemantics | 883ef1015bd21d9b8575d8000faf3b506a09f21c | [
"Apache-2.0"
] | null | null | null | conf/implicit/baselines.py | hunterhector/DDSemantics | 883ef1015bd21d9b8575d8000faf3b506a09f21c | [
"Apache-2.0"
] | 2 | 2018-06-24T17:40:31.000Z | 2020-07-30T19:19:55.000Z | import os
# Model parameters
c.ArgModelPara.model_type = "EventPairComposition"
c.ArgModelPara.event_arg_vocab_size = 58983 # event_frame_embeddings_min500
c.ArgModelPara.event_embedding_dim = 300
c.ArgModelPara.word_vocab_size = 228575
c.ArgModelPara.word_embedding_dim = 300
c.ArgModelPara.arg_composition_layer_sizes = 600, 300
c.ArgModelPara.event_composition_layer_sizes = 400, 200
c.ArgModelPara.nb_epochs = 20
c.ArgModelPara.num_slots = 3
c.ArgModelPara.use_frame = True
# c.ArgModelPara.num_event_components = 8
c.ArgModelPara.num_extracted_features = 11
c.ArgModelPara.multi_context = True
c.ArgModelPara.max_events = 150
c.ArgModelPara.batch_size = 100
# Model parameters that changes the architectures
c.ArgModelPara.loss = "cross_entropy"
c.ArgModelPara.vote_method = "cosine"
c.ArgModelPara.vote_pooling = "kernel"
# c.ArgModelPara.encode_distance = 'gaussian'
c.ArgModelPara.num_distance_features = 9
c.ArgModelPara.arg_representation_method = "fix_slots"
# How to detect Null Instantiation.
c.ArgModelPara.nid_method = "gold"
c.ArgModelPara.use_ghost = False
# Baseline stuff.
c.ArgModelPara.w2v_baseline_method = "max_sim" # max_sim, topk_average, average
c.ArgModelPara.w2v_event_repr = "concat" # concat, sum
c.ArgModelPara.w2v_baseline_avg_topk = 3 # only when topk_average
if "implicit_corpus" not in os.environ:
raise KeyError(
"Please supply the directory as environment " "variable: 'implicit_corpus'"
)
else:
base = os.environ["implicit_corpus"]
c.ImplicitArgResources.raw_corpus_name = "gigaword_events"
c.ImplicitArgResources.event_embedding_path = os.path.join(
base,
c.ImplicitArgResources.raw_corpus_name,
"embeddings/event_embeddings_mixed.pickle.wv.vectors.npy",
)
c.ImplicitArgResources.event_vocab_path = os.path.join(
base,
c.ImplicitArgResources.raw_corpus_name,
"embeddings/event_embeddings_mixed.voc",
)
c.ImplicitArgResources.raw_lookup_path = os.path.join(
base, c.ImplicitArgResources.raw_corpus_name, "vocab/"
)
c.ImplicitArgResources.word_embedding_path = os.path.join(
base, "gigaword_word_embeddings", "word_embeddings.pickle.wv.vectors.npy"
)
c.ImplicitArgResources.word_vocab_path = os.path.join(
base, "gigaword_word_embeddings", "word_embeddings.voc"
)
# Runner parameters
c.Basic.train_in = os.path.join(base, c.ImplicitArgResources.raw_corpus_name, "hashed")
c.Basic.validation_size = 10000
c.Basic.debug_dir = os.path.join(base, c.ImplicitArgResources.raw_corpus_name, "debug")
c.Basic.log_dir = os.path.join(base, c.ImplicitArgResources.raw_corpus_name, "logs")
c.Basic.model_dir = os.path.join(base, c.ImplicitArgResources.raw_corpus_name, "models")
c.Basic.test_in = os.path.join(
base, "nombank_with_gc", "processed", "cloze_hashed.json.gz"
)
c.Basic.model_name = os.path.basename(__file__).replace(".py", "")
c.Basic.do_training = False
c.Basic.do_test = False
c.Basic.run_baselines = True
| 36.049383 | 88 | 0.790068 |
8ab310c930ee051873f8a88555de983d1b860ef6 | 4,060 | py | Python | dispatcher/workers.py | ds-vologdin/dispather | 2496e2207db6353f8cf366df5a1b2866095c3bef | [
"MIT"
] | null | null | null | dispatcher/workers.py | ds-vologdin/dispather | 2496e2207db6353f8cf366df5a1b2866095c3bef | [
"MIT"
] | null | null | null | dispatcher/workers.py | ds-vologdin/dispather | 2496e2207db6353f8cf366df5a1b2866095c3bef | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from datetime import datetime
import threading
import time
from logger import logger
# Guards all access to POOL_WORKERS (RLock, presumably to allow re-entrant
# acquisition — confirm).
LOCK_POOL_WORKERS = threading.RLock()
# worker_id -> worker record dict (see _register_new_worker for the layout).
POOL_WORKERS = {}
def _register_new_worker(worker_id, host, port, datetime_now, ttl=600):
    """Create and register a new worker record.

    Must only be called while holding LOCK_POOL_WORKERS.
    """
    worker = {
        'id': worker_id,
        'last_registration': datetime_now,
        'last_task_done': None,
        'ttl': ttl,
        'status': 'free',
        'host': host,
        'port': port,
    }
    POOL_WORKERS[worker_id] = worker
    return worker
def _update_last_registration_in_worker(worker_id, datetime_now):
    """Refresh the last-registration timestamp of a known worker.

    Must only be called while holding LOCK_POOL_WORKERS.
    Returns the worker dict, or None when the id is unknown.
    """
    worker = POOL_WORKERS.get(worker_id)
    if not worker:
        return
    worker['last_registration'] = datetime_now
    return worker
def register_worker(command, client, ttl=600):
    """
    Register a new worker, or refresh the registration data of a known one.

    :param command: dict with at least 'id' and 'port' keys.
    :param client: (host, port) address pair of the connecting client.
    :param ttl: time-to-live (seconds) stored on newly created workers.
    :return: the created or refreshed worker dict.
    """
    port = command['port']
    datetime_now = datetime.now()
    with LOCK_POOL_WORKERS:
        if command['id'] not in POOL_WORKERS:
            result = _register_new_worker(
                command['id'], client[0], port, datetime_now, ttl)
        else:
            result = _update_last_registration_in_worker(
                command['id'], datetime_now)
    logger.info('worker "%s" registered', result)
    return result
def _get_free_worker():
    """Atomically claim the first free worker (marking it busy); None when no worker is free."""
    with LOCK_POOL_WORKERS:
        candidate = next(
            (w for w in POOL_WORKERS.values() if w.get('status') == 'free'),
            None,
        )
        if candidate is not None:
            candidate['status'] = 'busy'
    return candidate
def get_free_worker(frequency=2):
    """
    Block until a free worker can be claimed, polling every `frequency` seconds.

    :param frequency: sleep interval between polls, in seconds.
    :return: the claimed worker dict (already marked busy).
    """
    while True:
        worker = _get_free_worker()
        logger.debug('free worker: %s', worker)
        if worker:
            break
        time.sleep(frequency)
    return worker
def set_status_worker(worker_id, status):
    """
    Set the status of a worker in the pool.

    :param worker_id: id of the worker to update.
    :param status: new status string (e.g. 'free' or 'busy').
    :return: the updated worker dict, or None when the worker is unknown.
    """
    with LOCK_POOL_WORKERS:
        # BUG FIX: the membership check used to happen before acquiring the
        # lock, so another thread (clean_pool_worker / delete_worker_of_pool)
        # could remove the worker in between and the lookup raised KeyError.
        worker = POOL_WORKERS.get(worker_id)
        if worker is None:
            return
        worker['status'] = status
        logger.debug('set_status_worker: %s', worker)
    return worker
def set_status_task_done_in_worker(worker_id):
    """
    Mark a worker as free and record the completion time of its last task.

    :param worker_id: id of the worker to update.
    :return: the updated worker dict, or None when the worker is unknown.
    """
    with LOCK_POOL_WORKERS:
        # BUG FIX: membership must be checked under the lock; a concurrent
        # removal between an unlocked check and the lookup raised KeyError.
        worker = POOL_WORKERS.get(worker_id)
        if worker is None:
            return
        worker['status'] = 'free'
        worker['last_task_done'] = datetime.now()
        logger.debug('set_status_task_done_in_worker: %s', worker)
    return worker
def delete_worker_of_pool(worker_id):
    """
    Remove a worker from the pool and return its record.

    NOTE(review): raises KeyError when worker_id is unknown — confirm that
    callers rely on this instead of a silent no-op.
    """
    with LOCK_POOL_WORKERS:
        worker = POOL_WORKERS.pop(worker_id)
    logger.info('delete worker: %s', worker)
    return worker
def is_datetime_old(current_datetime, datetime_now, ttl):
    """
    Return True when current_datetime is unset or more than ttl seconds
    before datetime_now.

    :param current_datetime: datetime of the last event, or None.
    :param datetime_now: reference "now" datetime.
    :param ttl: age limit in seconds.
    """
    if not current_datetime:
        return True
    elapsed = datetime_now - current_datetime
    # BUG FIX: timedelta.seconds is only the within-day component (it wraps
    # at 24 h), so a worker silent for e.g. 1 day + 5 s looked fresh.  Use
    # total_seconds() for the real elapsed time.
    return elapsed.total_seconds() > ttl
def clean_pool_worker():
    """
    Purge the worker pool.

    A worker is considered dead when BOTH the time since its last
    registration AND the time since its last completed task exceed its TTL.
    :return: list of the removed worker ids.
    """
    datetime_now = datetime.now()
    bad_worker_ids = []
    with LOCK_POOL_WORKERS:
        for worker_id in POOL_WORKERS:
            worker = POOL_WORKERS[worker_id]
            ttl = worker.get('ttl', 600)
            last_registration = worker.get('last_registration')
            last_task_done = worker.get('last_task_done')
            registration_is_old = is_datetime_old(
                last_registration, datetime_now, ttl)
            last_task_done_is_old = is_datetime_old(
                last_task_done, datetime_now, ttl)
            if registration_is_old and last_task_done_is_old:
                bad_worker_ids.append(worker.get('id'))
                continue
        # Removal happens after the scan so the dict is not mutated while
        # being iterated.
        for worker_id in bad_worker_ids:
            POOL_WORKERS.pop(worker_id)
    logger.debug('clean pool worker: %s', bad_worker_ids)
    return bad_worker_ids
| 29 | 71 | 0.653941 |
3c0cd24055e8f817ca9d51cd9f67874241e60369 | 1,583 | py | Python | week2/utilities/xgb_utils.py | justinakiud/search_with_machine_learning_course | 074f6f830eda84150efb4cdd1d3f4f25c0207c76 | [
"Apache-2.0"
] | null | null | null | week2/utilities/xgb_utils.py | justinakiud/search_with_machine_learning_course | 074f6f830eda84150efb4cdd1d3f4f25c0207c76 | [
"Apache-2.0"
] | null | null | null | week2/utilities/xgb_utils.py | justinakiud/search_with_machine_learning_course | 074f6f830eda84150efb4cdd1d3f4f25c0207c76 | [
"Apache-2.0"
] | null | null | null | # Utilities for working with XG Boost
import xgboost as xgb
from xgboost import plot_importance, plot_tree
from matplotlib import pyplot as plt
import json
# Plots useful things like the tree and importance for display
def plots(xgb_model, xgb_model_name, xgb_feat_map, xgb_plot):
    """
    Plot the last tree and the feature importance of a saved XGBoost model.

    :param xgb_model: path to the saved model file.
    :param xgb_model_name: base name used for the output image files.
    :param xgb_feat_map: path to the feature-map file.
    :param xgb_plot: output directory for the PNG files.
    """
    print("Plotting model quality data")
    try:
        bst = xgb.Booster()
        bst.load_model(xgb_model)  # load_model mutates bst and returns None
        plt.rcParams["figure.figsize"] = [18, 18]
        plt.rcParams["figure.autolayout"] = True
        num_trees = len(bst.get_dump(fmap=xgb_feat_map))
        print("Plotting trees: %s" % (num_trees - 1))
        model_plot = plot_tree(bst, fmap=xgb_feat_map, num_trees=num_trees - 1)
        model_plot.figure.savefig("%s/%s_tree.png" % (xgb_plot, xgb_model_name), dpi=300)
        print("Plotting feature importance")
        impt_plt = plot_importance(bst, fmap=xgb_feat_map)
        impt_plt.figure.savefig("%s/%s_importance.png" % (xgb_plot, xgb_model_name), dpi=300)
    except Exception as exc:
        # BUG FIX: a bare `except:` also swallowed SystemExit/KeyboardInterrupt
        # and hid the actual failure reason.
        print("Unable to plot our models: %s" % exc)
# xgb_train_data is a string path to our training file
def train(xgb_train_data, num_rounds=5, xgb_conf=None):
    """
    Train an XGBoost model.

    :param xgb_train_data: string path to training data accepted by xgb.DMatrix.
    :param num_rounds: number of boosting rounds.
    :param xgb_conf: optional path to a JSON file with XGBoost parameters;
                     defaults to {'objective': 'reg:logistic'} when omitted.
    :return: (trained booster, the parameter dict that was used)
    """
    xgb_params = {'objective': 'reg:logistic'}
    if xgb_conf is not None:
        with open(xgb_conf) as json_file:
            xgb_params = json.load(json_file)
    dtrain = xgb.DMatrix(xgb_train_data)
    # BUG FIX: announce the run before training starts; previously this line
    # was only printed after xgb.train() had already finished.
    print("Training XG Boost on %s for %s rounds with params: %s" % (xgb_train_data, num_rounds, xgb_params))
    bst = xgb.train(xgb_params, dtrain,
                    num_rounds)
    return bst, xgb_params
2aae4132fab703cdac27f75f6897c9deb5551879 | 62 | py | Python | app/models.py | parashar-lonewolf/Redditlastic | 6cf4d5c296001c88ca592788e2145d00fa293c8e | [
"MIT"
] | 1 | 2019-03-25T12:40:49.000Z | 2019-03-25T12:40:49.000Z | app/models.py | parashar-lonewolf/Redditlastic | 6cf4d5c296001c88ca592788e2145d00fa293c8e | [
"MIT"
] | null | null | null | app/models.py | parashar-lonewolf/Redditlastic | 6cf4d5c296001c88ca592788e2145d00fa293c8e | [
"MIT"
] | null | null | null | class Post(db.Model):
__searchable__ = ['body']
# ...
| 15.5 | 29 | 0.548387 |
5d0a158f74fd428ee72bd877f642cf73b78da823 | 14,917 | py | Python | stubs/loboris-v3_2_24-frozen/microWebTemplate.py | mattytrentini/micropython-stubs | 4d596273823b69e9e5bcf5fa67f249c374ee0bbc | [
"MIT"
] | null | null | null | stubs/loboris-v3_2_24-frozen/microWebTemplate.py | mattytrentini/micropython-stubs | 4d596273823b69e9e5bcf5fa67f249c374ee0bbc | [
"MIT"
] | null | null | null | stubs/loboris-v3_2_24-frozen/microWebTemplate.py | mattytrentini/micropython-stubs | 4d596273823b69e9e5bcf5fa67f249c374ee0bbc | [
"MIT"
] | null | null | null | """
The MIT License (MIT)
Copyright © 2018 Jean-Christophe Bos & HC² (www.hc2.fr)
"""
import re
class MicroWebTemplate:
# ============================================================================
# ===( Constants )============================================================
# ============================================================================
TOKEN_OPEN = "{{"
TOKEN_CLOSE = "}}"
TOKEN_OPEN_LEN = len(TOKEN_OPEN)
TOKEN_CLOSE_LEN = len(TOKEN_CLOSE)
INSTRUCTION_PYTHON = "py"
INSTRUCTION_IF = "if"
INSTRUCTION_ELIF = "elif"
INSTRUCTION_ELSE = "else"
INSTRUCTION_FOR = "for"
INSTRUCTION_END = "end"
INSTRUCTION_INCLUDE = "include"
# ============================================================================
# ===( Constructor )==========================================================
# ============================================================================
    def __init__(self, code, escapeStrFunc=None, filepath=""):
        """
        :param code: template source text to parse/render.
        :param escapeStrFunc: optional callable applied to every evaluated
                              expression before appending it to the output
                              (e.g. an HTML-escape function).
        :param filepath: path of the template file (stored; presumably used
                         by the "include" instruction — confirm).
        """
        self._code = code
        self._escapeStrFunc = escapeStrFunc
        self._filepath = filepath
        # Current scan position and last valid index of the source text.
        self._pos = 0
        self._endPos = len(code) - 1
        # 1-based line counter used in error messages.
        self._line = 1
        self._reIdentifier = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*$")
        # Variable scopes exposed to eval() of template expressions.
        self._pyGlobalVars = {}
        self._pyLocalVars = {}
        # Rendered output accumulated by Execute().
        self._rendered = ""
        # Dispatch table: instruction keyword -> handler method.
        self._instructions = {
            MicroWebTemplate.INSTRUCTION_PYTHON: self._processInstructionPYTHON,
            MicroWebTemplate.INSTRUCTION_IF: self._processInstructionIF,
            MicroWebTemplate.INSTRUCTION_ELIF: self._processInstructionELIF,
            MicroWebTemplate.INSTRUCTION_ELSE: self._processInstructionELSE,
            MicroWebTemplate.INSTRUCTION_FOR: self._processInstructionFOR,
            MicroWebTemplate.INSTRUCTION_END: self._processInstructionEND,
            MicroWebTemplate.INSTRUCTION_INCLUDE: self._processInstructionINCLUDE,
        }
# ============================================================================
# ===( Functions )============================================================
# ============================================================================
def Validate(self):
try:
self._parseCode(execute=False)
return None
except Exception as ex:
return str(ex)
# ----------------------------------------------------------------------------
    def Execute(self):
        """
        Parse and render the template; returns the rendered text.
        Raises Exception with a message describing the first template error.
        """
        try:
            self._parseCode(execute=True)
            return self._rendered
        except Exception as ex:
            # NOTE(review): re-wrapping discards the original exception type
            # and traceback; callers only ever see a plain Exception.
            raise Exception(str(ex))
# ============================================================================
# ===( Utils )===============================================================
# ============================================================================
def _parseCode(self, execute):
self._pyGlobalVars = {}
self._pyLocalVars = {}
self._rendered = ""
newTokenToProcess = self._parseBloc(execute)
if newTokenToProcess is not None:
raise Exception('"%s" instruction is not valid here (line %s)' % (newTokenToProcess, self._line))
# ----------------------------------------------------------------------------
    def _parseBloc(self, execute):
        """Consume template text until end-of-input or until a token that
        must be handled by an enclosing construct (elif/else/end).

        Literal characters are appended to self._rendered when *execute*
        is true.  Returns the pending instruction keyword, or None when
        the whole bloc was consumed.
        """
        while self._pos <= self._endPos:
            c = self._code[self._pos]
            # Cheap first-character test before the full open-token compare.
            if (
                c == MicroWebTemplate.TOKEN_OPEN[0]
                and self._code[self._pos : self._pos + MicroWebTemplate.TOKEN_OPEN_LEN] == MicroWebTemplate.TOKEN_OPEN
            ):
                self._pos += MicroWebTemplate.TOKEN_OPEN_LEN
                tokenContent = ""
                x = self._pos
                # Scan ahead for the closing token, collecting the content.
                while True:
                    if x > self._endPos:
                        raise Exception("%s is missing (line %s)" % (MicroWebTemplate.TOKEN_CLOSE, self._line))
                    c = self._code[x]
                    if (
                        c == MicroWebTemplate.TOKEN_CLOSE[0]
                        and self._code[x : x + MicroWebTemplate.TOKEN_CLOSE_LEN] == MicroWebTemplate.TOKEN_CLOSE
                    ):
                        self._pos = x + MicroWebTemplate.TOKEN_CLOSE_LEN
                        break
                    elif c == "\n":
                        self._line += 1
                    tokenContent += c
                    x += 1
                newTokenToProcess = self._processToken(tokenContent, execute)
                if newTokenToProcess is not None:
                    # Bubble elif/else/end tokens up to the caller.
                    return newTokenToProcess
                continue
            elif c == "\n":
                self._line += 1
            if execute:
                self._rendered += c
            self._pos += 1
        return None
# ----------------------------------------------------------------------------
def _processToken(self, tokenContent, execute):
tokenContent = tokenContent.strip()
parts = tokenContent.split(" ", 1)
instructName = parts[0].strip()
instructBody = parts[1].strip() if len(parts) > 1 else None
if len(instructName) == 0:
raise Exception(
'"%s %s" : instruction is missing (line %s)' % (MicroWebTemplate.TOKEN_OPEN, MicroWebTemplate.TOKEN_CLOSE, self._line)
)
newTokenToProcess = None
if instructName in self._instructions:
newTokenToProcess = self._instructions[instructName](instructBody, execute)
elif execute:
try:
s = str(eval(tokenContent, self._pyGlobalVars, self._pyLocalVars))
if self._escapeStrFunc is not None:
self._rendered += self._escapeStrFunc(s)
else:
self._rendered += s
except Exception as ex:
raise Exception("%s (line %s)" % (str(ex), self._line))
return newTokenToProcess
# ----------------------------------------------------------------------------
    def _processInstructionPYTHON(self, instructionBody, execute):
        """Handle a python code bloc: collect raw text up to the matching
        "end" token, strip the common leading indentation, then exec it
        in the template's variable scopes when *execute* is true.
        """
        if instructionBody is not None:
            raise Exception('Instruction "%s" is invalid (line %s)' % (MicroWebTemplate.INSTRUCTION_PYTHON, self._line))
        pyCode = ""
        while True:
            if self._pos > self._endPos:
                raise Exception('"%s" instruction is missing (line %s)' % (MicroWebTemplate.INSTRUCTION_END, self._line))
            c = self._code[self._pos]
            if (
                c == MicroWebTemplate.TOKEN_OPEN[0]
                and self._code[self._pos : self._pos + MicroWebTemplate.TOKEN_OPEN_LEN] == MicroWebTemplate.TOKEN_OPEN
            ):
                self._pos += MicroWebTemplate.TOKEN_OPEN_LEN
                tokenContent = ""
                x = self._pos
                # Scan ahead for the closing token of this instruction.
                while True:
                    if x > self._endPos:
                        raise Exception("%s is missing (line %s)" % (MicroWebTemplate.TOKEN_CLOSE, self._line))
                    c = self._code[x]
                    if (
                        c == MicroWebTemplate.TOKEN_CLOSE[0]
                        and self._code[x : x + MicroWebTemplate.TOKEN_CLOSE_LEN] == MicroWebTemplate.TOKEN_CLOSE
                    ):
                        self._pos = x + MicroWebTemplate.TOKEN_CLOSE_LEN
                        break
                    elif c == "\n":
                        self._line += 1
                    tokenContent += c
                    x += 1
                tokenContent = tokenContent.strip()
                # The only instruction allowed inside a python bloc is "end".
                if tokenContent == MicroWebTemplate.INSTRUCTION_END:
                    break
                raise Exception('"%s" is a bad instruction in a python bloc (line %s)' % (tokenContent, self._line))
            elif c == "\n":
                self._line += 1
            if execute:
                pyCode += c
            self._pos += 1
        if execute:
            # Determine the indentation of the first non-empty line and
            # strip that prefix from every line, so exec() receives code
            # that starts at column zero.
            lines = pyCode.split("\n")
            indent = ""
            for line in lines:
                if len(line.strip()) > 0:
                    for c in line:
                        if c == " " or c == "\t":
                            indent += c
                        else:
                            break
                    break
            pyCode = ""
            for line in lines:
                if line.find(indent) == 0:
                    line = line[len(indent) :]
                pyCode += line + "\n"
            try:
                exec(pyCode, self._pyGlobalVars, self._pyLocalVars)
            except Exception as ex:
                raise Exception("%s (line %s)" % (str(ex), self._line))
        return None
# ----------------------------------------------------------------------------
    def _processInstructionIF(self, instructionBody, execute):
        """Handle an "if <expr>" instruction.

        The condition must evaluate to a bool.  The "then" bloc is
        parsed (executed only when the condition holds), then trailing
        "elif"/"else" blocs are handled; "elif" recurses with the
        condition stored by _processInstructionELIF, with execution
        disabled once a branch has already run.
        """
        if instructionBody is not None:
            if execute:
                try:
                    result = eval(instructionBody, self._pyGlobalVars, self._pyLocalVars)
                    if not isinstance(result, bool):
                        raise Exception('"%s" is not a boolean expression (line %s)' % (instructionBody, self._line))
                except Exception as ex:
                    raise Exception("%s (line %s)" % (str(ex), self._line))
            else:
                # Dry run: still parse the blocs to keep the cursor in sync.
                result = False
            newTokenToProcess = self._parseBloc(execute and result)
            if newTokenToProcess is not None:
                if newTokenToProcess == MicroWebTemplate.INSTRUCTION_END:
                    return None
                elif newTokenToProcess == MicroWebTemplate.INSTRUCTION_ELSE:
                    # "else" bloc runs only when the condition was false.
                    newTokenToProcess = self._parseBloc(execute and not result)
                    if newTokenToProcess is not None:
                        if newTokenToProcess == MicroWebTemplate.INSTRUCTION_END:
                            return None
                        raise Exception('"%s" instruction waited (line %s)' % (MicroWebTemplate.INSTRUCTION_END, self._line))
                    raise Exception('"%s" instruction is missing (line %s)' % (MicroWebTemplate.INSTRUCTION_END, self._line))
                elif newTokenToProcess == MicroWebTemplate.INSTRUCTION_ELIF:
                    # Re-enter with the stored elif condition.
                    self._processInstructionIF(self._elifInstructionBody, execute and not result)
                    return None
                raise Exception('"%s" instruction waited (line %s)' % (MicroWebTemplate.INSTRUCTION_END, self._line))
            raise Exception('"%s" instruction is missing (line %s)' % (MicroWebTemplate.INSTRUCTION_END, self._line))
        raise Exception('"%s" alone is an incomplete syntax (line %s)' % (MicroWebTemplate.INSTRUCTION_IF, self._line))
# ----------------------------------------------------------------------------
def _processInstructionELIF(self, instructionBody, execute):
if instructionBody is None:
raise Exception('"%s" alone is an incomplete syntax (line %s)' % (MicroWebTemplate.INSTRUCTION_ELIF, self._line))
self._elifInstructionBody = instructionBody
return MicroWebTemplate.INSTRUCTION_ELIF
# ----------------------------------------------------------------------------
def _processInstructionELSE(self, instructionBody, execute):
if instructionBody is not None:
raise Exception('Instruction "%s" is invalid (line %s)' % (MicroWebTemplate.INSTRUCTION_ELSE, self._line))
return MicroWebTemplate.INSTRUCTION_ELSE
# ----------------------------------------------------------------------------
def _processInstructionFOR(self, instructionBody, execute):
if instructionBody is not None:
parts = instructionBody.split(" ", 1)
identifier = parts[0].strip()
if self._reIdentifier.match(identifier) is not None and len(parts) > 1:
parts = parts[1].strip().split(" ", 1)
if parts[0] == "in" and len(parts) > 1:
expression = parts[1].strip()
newTokenToProcess = None
beforePos = self._pos
if execute:
try:
result = eval(expression, self._pyGlobalVars, self._pyLocalVars)
except:
raise Exception("%s (line %s)" % (str(expression), self._line))
if execute and len(result) > 0:
for x in result:
self._pyLocalVars[identifier] = x
self._pos = beforePos
newTokenToProcess = self._parseBloc(True)
if newTokenToProcess != MicroWebTemplate.INSTRUCTION_END:
break
else:
newTokenToProcess = self._parseBloc(False)
if newTokenToProcess is not None:
if newTokenToProcess == MicroWebTemplate.INSTRUCTION_END:
return None
raise Exception('"%s" instruction waited (line %s)' % (MicroWebTemplate.INSTRUCTION_END, self._line))
raise Exception('"%s" instruction is missing (line %s)' % (MicroWebTemplate.INSTRUCTION_END, self._line))
raise Exception('"%s %s" is an invalid syntax' % (MicroWebTemplate.INSTRUCTION_FOR, instructionBody))
raise Exception('"%s" alone is an incomplete syntax (line %s)' % (MicroWebTemplate.INSTRUCTION_FOR, self._line))
# ----------------------------------------------------------------------------
def _processInstructionEND(self, instructionBody, execute):
if instructionBody is not None:
raise Exception('Instruction "%s" is invalid (line %s)' % (MicroWebTemplate.INSTRUCTION_END, self._line))
return MicroWebTemplate.INSTRUCTION_END
# ----------------------------------------------------------------------------
def _processInstructionINCLUDE(self, instructionBody, execute):
if not instructionBody:
raise Exception('"%s" alone is an incomplete syntax (line %s)' % (MicroWebTemplate.INSTRUCTION_INCLUDE, self._line))
filename = instructionBody.replace('"', "").replace("'", "").strip()
idx = self._filepath.rindex("/")
if idx >= 0:
filename = self._filepath[: idx + 1] + filename
with open(filename, "r") as file:
includeCode = file.read()
self._code = self._code[: self._pos] + includeCode + self._code[self._pos :]
self._endPos += len(includeCode)
# ============================================================================
# ============================================================================
# ============================================================================
| 47.506369 | 134 | 0.475699 |
e5b6d512ed1aa09ab0a2318cd21095a4dab0f6c4 | 836 | py | Python | analysis/visualizeStats.py | schurterb/kmeansconv | 74912b9fdfc1e688be737ba0117461ef8959207b | [
"Unlicense"
] | 2 | 2016-12-08T02:37:00.000Z | 2017-07-21T01:02:39.000Z | analysis/visualizeStats.py | schurterb/kmeansconv | 74912b9fdfc1e688be737ba0117461ef8959207b | [
"Unlicense"
] | null | null | null | analysis/visualizeStats.py | schurterb/kmeansconv | 74912b9fdfc1e688be737ba0117461ef8959207b | [
"Unlicense"
] | null | null | null | import matplotlib.pyplot as plt
from .makeErrorCurves import makeErrorCurves
from scipy.io import loadmat
def showStats(dataFile):
    """Load precision/rand statistics from <dir>errors_new.mat and plot them.

    dataFile is a sequence whose first entry is the path prefix of the
    .mat file.  Returns the precision and rand data lists that were
    passed to makeErrorCurves.
    """
    figure_and_axes = plt.subplots(nrows=2, ncols=2)
    stats = loadmat(dataFile[0] + 'errors_new.mat')
    prec_keys = ('p_thresholds', 'p_err', 'p_tp', 'p_fp', 'p_pos', 'p_neg', 'p_sqerr')
    rand_keys = ('r_thresholds', 'r_err', 'r_tp', 'r_fp', 'r_pos', 'r_neg')
    prec_data = [stats.get(k) for k in prec_keys]
    prec_range = [stats.get('p_thresholds').min(), stats.get('p_thresholds').max()]
    rand_data = [stats.get(k) for k in rand_keys]
    rand_range = [stats.get('r_thresholds').min(), stats.get('r_thresholds').max()]
    makeErrorCurves(prec_data, prec_range, rand_data, rand_range, figure_and_axes)
    return prec_data, rand_data
9506693572202fc19c72f52e02e8e43bb6be8948 | 1,944 | py | Python | dev/tools/leveleditor/direct/p3d/pmerge.py | CrankySupertoon01/Toontown-2 | 60893d104528a8e7eb4aced5d0015f22e203466d | [
"MIT"
] | 1 | 2021-02-13T22:40:50.000Z | 2021-02-13T22:40:50.000Z | dev/tools/leveleditor/direct/p3d/pmerge.py | CrankySupertoonArchive/Toontown-2 | 60893d104528a8e7eb4aced5d0015f22e203466d | [
"MIT"
] | 1 | 2018-07-28T20:07:04.000Z | 2018-07-30T18:28:34.000Z | dev/tools/leveleditor/direct/p3d/pmerge.py | CrankySupertoonArchive/Toontown-2 | 60893d104528a8e7eb4aced5d0015f22e203466d | [
"MIT"
] | 2 | 2019-12-02T01:39:10.000Z | 2021-02-13T22:41:00.000Z | #! /usr/bin/env python
usageText = """
This script can be used to merge together the contents of two or more
separately-built stage directories, built independently via ppackage,
or via Packager.py.
This script is actually a wrapper around Panda's PackageMerger.py.
Usage:
%(prog)s [opts] [inputdir1 .. inputdirN]
Parameters:
inputdir1 .. inputdirN
Specify the full path to the input directories you wish to merge.
These are the directories specified by -i on the previous
invocations of ppackage. The order is mostly unimportant.
Options:
-i install_dir
The full path to the final install directory. This may also
contain some pre-existing contents; if so, it is merged with all
of the input directories as well.
-h
Display this help
"""
import sys
import getopt
import os
from direct.p3d import PackageMerger
from pandac.PandaModules import *
def usage(code, msg = ''):
    """Print the usage text (plus an optional error message) to stderr,
    then terminate with the given exit code.  (Python 2 syntax.)"""
    print >> sys.stderr, usageText % {'prog' : os.path.split(sys.argv[0])[1]}
    print >> sys.stderr, msg
    sys.exit(code)
# Parse command-line options (Python 2 getopt style).
try:
    opts, args = getopt.getopt(sys.argv[1:], 'i:h')
except getopt.error, msg:
    usage(1, msg)
installDir = None
for opt, arg in opts:
    if opt == '-i':
        installDir = Filename.fromOsSpecific(arg)
    elif opt == '-h':
        usage(0)
    else:
        print 'illegal option: ' + arg
        sys.exit(1)
# Remaining positional arguments are the stage directories to merge.
inputDirs = []
for arg in args:
    inputDirs.append(Filename.fromOsSpecific(arg))
if not inputDirs:
    print "no input directories specified."
    sys.exit(1)
# Merge every input directory into the install directory.
try:
    pm = PackageMerger.PackageMerger(installDir)
    for dir in inputDirs:
        pm.merge(dir)
    pm.close()
except PackageMerger.PackageMergerError:
    # Just print the error message and exit gracefully.
    inst = sys.exc_info()[1]
    print inst.args[0]
    sys.exit(1)
# An explicit call to exit() is required to exit the program, when
# this module is packaged in a p3d file.
sys.exit(0)
| 22.870588 | 77 | 0.684671 |
c4ee63f24354e5c03ae90ef8153db47051d8f5f1 | 25,327 | py | Python | src/unicon/plugins/tests/test_plugin_linux.py | TestingBytes/unicon.plugins | 0600956d805deb4fd790aa3ef591c5d659e85de1 | [
"Apache-2.0"
] | null | null | null | src/unicon/plugins/tests/test_plugin_linux.py | TestingBytes/unicon.plugins | 0600956d805deb4fd790aa3ef591c5d659e85de1 | [
"Apache-2.0"
] | null | null | null | src/unicon/plugins/tests/test_plugin_linux.py | TestingBytes/unicon.plugins | 0600956d805deb4fd790aa3ef591c5d659e85de1 | [
"Apache-2.0"
] | null | null | null | """
Unittests for Linux plugin
Uses the mock_device.py script to test the execute service.
"""
__author__ = "Dave Wapstra <dwapstra@cisco.com>"
from concurrent.futures import ThreadPoolExecutor
import multiprocessing
import os
import re
import yaml
import datetime
import unittest
import importlib
from pprint import pformat
from unittest.mock import Mock, call, patch
from pyats.topology import loader
import unicon
from unicon import Connection
from unicon.core.errors import SubCommandFailure, ConnectionError as UniconConnectionError
from unicon.plugins.linux.patterns import LinuxPatterns
from unicon.plugins.linux.settings import LinuxSettings
from unicon.eal.dialogs import Dialog
from unicon.mock.mock_device import mockdata_path
# Mock-device command/response fixtures shared by the tests below.
with open(os.path.join(mockdata_path, 'linux/linux_mock_data.yaml'), 'rb') as datafile:
    mock_data = yaml.safe_load(datafile.read())
@patch.object(unicon.settings.Settings, 'POST_DISCONNECT_WAIT_SEC', 0)
@patch.object(unicon.settings.Settings, 'GRACEFUL_DISCONNECT_WAIT_SEC', 0)
class TestLinuxPluginConnect(unittest.TestCase):
    """Connection tests for the linux plugin.

    Each test starts mock_device_cli in a given initial state and
    verifies that connect() succeeds, or fails with ConnectionError,
    for the various login/password/credential scenarios.
    """

    def test_connect_ssh(self):
        c = Connection(hostname='linux',
                       start=['mock_device_cli --os linux --state connect_ssh'],
                       os='linux',
                       username='admin',
                       password='cisco')
        c.connect()
        c.disconnect()

    def test_connect_sma(self):
        c = Connection(hostname='sma03',
                       start=['mock_device_cli --os linux --state connect_sma'],
                       os='linux',
                       username='admin',
                       password='cisco')
        c1 = Connection(hostname='pod-esa01',
                        start=['mock_device_cli --os linux --state connect_sma'],
                        os='linux',
                        username='admin',
                        password='cisco1')
        c.connect()
        c1.connect()
        c.disconnect()
        c1.disconnect()

    def test_connect_for_password(self):
        c = Connection(hostname='agent-lab11-pm',
                       start=['mock_device_cli --os linux --state connect_for_password'],
                       os='linux',
                       username='admin',
                       password='cisco')
        c.connect()
        c.disconnect()

    def test_bad_connect_for_password(self):
        c = Connection(hostname='agent-lab11-pm',
                       start=['mock_device_cli --os linux --state connect_for_password'],
                       os='linux',
                       username='admin',
                       password='bad_pw')
        with self.assertRaisesRegex(UniconConnectionError, 'failed to connect to agent-lab11-pm'):
            c.connect()

    def test_bad_connect_for_password_credential(self):
        c = Connection(hostname='agent-lab11-pm',
                       start=['mock_device_cli --os linux --state connect_for_password'],
                       os='linux',
                       credentials=dict(default=dict(
                           username='admin', password='bad_pw')))
        with self.assertRaisesRegex(UniconConnectionError, 'failed to connect to agent-lab11-pm'):
            c.connect()

    def test_bad_connect_for_password_credential_no_recovery(self):
        """ Ensure password retry does not happen if a credential fails. """
        c = Connection(hostname='agent-lab11-pm',
                       start=['mock_device_cli --os linux --state connect_for_password'],
                       os='linux',
                       credentials=dict(default=dict(
                           username='admin', password='cisco'),
                           bad=dict(username='baduser', password='bad_pw')),
                       login_creds=['bad', 'default'])
        with self.assertRaisesRegex(UniconConnectionError, 'failed to connect to agent-lab11-pm'):
            c.connect()

    def test_bad_connect_for_password_credential_proper_recovery(self):
        """ Test proper way to try multiple device credentials. """
        c = Connection(hostname='agent-lab11-pm',
                       start=['mock_device_cli --os linux --state connect_for_password'],
                       os='linux',
                       credentials=dict(default=dict(
                           username='admin', password='cisco'),
                           bad=dict(username='baduser', password='bad_pw')),
                       login_creds=['bad', 'default'])
        try:
            c.connect()
        except UniconConnectionError:
            # Retry with only the known-good credential.
            c.context.login_creds=['default']
            c.connect()

    def test_bad_connect_for_password_credential_proper_recovery_pyats(self):
        """ Test proper way to try multiple device credentials via pyats. """
        testbed = """
        devices:
          agent-lab11-pm:
            type: linux
            os: linux
            connections:
              defaults:
                class: unicon.Unicon
              cli:
                command: mock_device_cli --os linux --state connect_for_password
            credentials:
              default:
                username: admin
                password: cisco
              bad:
                username: admin
                password: bad_pw
              login_creds: [bad, default]
        """
        tb=loader.load(testbed)
        l = tb.devices['agent-lab11-pm']
        with self.assertRaises(UniconConnectionError):
            l.connect(connection_timeout=20)
        l.destroy()
        l.connect(login_creds=['default'])
        self.assertEqual(l.is_connected(), True)
        l.disconnect()

    def test_connect_for_login_incorrect(self):
        c = Connection(hostname='agent-lab11-pm',
                       start=['mock_device_cli --os linux --state login'],
                       os='linux',
                       username='cisco',
                       password='wrong_password')
        with self.assertRaisesRegex(UniconConnectionError, 'failed to connect to agent-lab11-pm'):
            c.connect()

    def test_connect_hit_enter(self):
        c = Connection(hostname='linux',
                       start=['mock_device_cli --os linux --state hit_enter'],
                       os='linux')
        c.connect()
        c.disconnect()

    def test_connect_timeout(self):
        testbed = """
        devices:
          lnx-server:
            type: linux
            os: linux
            connections:
              defaults:
                class: unicon.Unicon
              cli:
                command: mock_device_cli --os linux --state login_ssh_delay
        """
        tb=loader.load(testbed)
        l = tb.devices['lnx-server']
        l.connect(connection_timeout=20)
        self.assertEqual(l.is_connected(), True)
        l.disconnect()

    def test_connect_timeout_error(self):
        testbed = """
        devices:
          lnx-server:
            type: linux
            os: linux
            connections:
              defaults:
                class: unicon.Unicon
              cli:
                command: mock_device_cli --os linux --state login_ssh_delay
        """
        tb=loader.load(testbed)
        l = tb.devices['lnx-server']
        # A very short timeout must abort the (deliberately slow) login.
        with self.assertRaises(UniconConnectionError) as err:
            l.connect(connection_timeout=0.5)
        l.disconnect()

    def test_connect_passphrase(self):
        testbed = """
        devices:
          lnx-server:
            type: linux
            os: linux
            credentials:
              default:
                username: admin
                password: cisco
            connections:
              defaults:
                class: unicon.Unicon
              cli:
                command: mock_device_cli --os linux --state login_passphrase
        """
        tb=loader.load(testbed)
        l = tb.devices['lnx-server']
        l.connect()

    def test_connect_connectReply(self):
        c = Connection(hostname='linux',
                       start=['mock_device_cli --os linux --state connect_ssh'],
                       os='linux',
                       username='admin',
                       password='cisco',
                       connect_reply = Dialog([[r'^(.*?)Password:']]))
        c.connect()
        # The custom reply pattern must be part of the connection dialog.
        self.assertIn("^(.*?)Password:", str(c.connection_provider.get_connection_dialog()))
        c.disconnect()

    def test_connect_admin_prompt(self):
        c = Connection(hostname='linux',
                       start=['mock_device_cli --os linux --state linux_password4'],
                       os='linux',
                       username='admin',
                       password='cisco')
        c.connect()
        c.disconnect()
class TestLinuxPluginPrompts(unittest.TestCase):
    """Verify that every mock prompt variant is matched and stripped."""

    # Mock-device commands, each of which switches to a different prompt.
    prompt_cmds = [
        'prompt1',
        'prompt2',
        'prompt3',
        'prompt4',
        'prompt5',
        'prompt6',
        'prompt7',
        'prompt8',
        'prompt9',
        'prompt10',
        'prompt11',
        'prompt12',
        'prompt13',
        'prompt14',
        'prompt15',
        'prompt16',
        'prompt17',
        'prompt18',
        'prompt19'
    ]

    @classmethod
    def setUpClass(cls):
        cls.c = Connection(hostname='linux',
                           start=['mock_device_cli --os linux --state exec'],
                           os='linux')
        cls.c.connect()

    def test_connect(self):
        for p in self.prompt_cmds:
            # will raise a timeout error if prompt is not matched
            self.c.execute(p, timeout=15)

    def test_prompt_removal(self):
        for p in self.prompt_cmds:
            self.c.execute(p, timeout=15)
            # Output must equal the fixture with the prompt stripped off.
            ls = self.c.execute('ls')
            self.assertEqual(ls.replace('\r', ''), mock_data['exec']['commands']['ls'].strip())
@patch.object(unicon.settings.Settings, 'POST_DISCONNECT_WAIT_SEC', 0)
@patch.object(unicon.settings.Settings, 'GRACEFUL_DISCONNECT_WAIT_SEC', 0)
class TestLearnHostname(unittest.TestCase):
    """Tests for the learn_hostname connect option."""

    def test_learn_hostname(self):
        # Map of mock initial state -> hostname that should be learned.
        states = {
            'exec': 'Linux',
            'exec2': 'Linux',
            'exec3': 'Linux',
            'exec4': 'host',
            'exec5': 'agent-lab9-pm',
            'exec6': 'agent-lab11-pm',
            'exec7': 'localhost',
            'exec8': 'vm-7',
            'exec9': 'dev-server',
            'exec10': 'dev-1-name',
            'exec11': 'new-host',
            'exec12': 'host',
            'exec13': 'host',
            'exec14': 'rally',
            'exec15': LinuxSettings().DEFAULT_LEARNED_HOSTNAME,
            'sma_prompt' : 'sma03',
            'sma_prompt_1' : 'pod-esa01',
            'exec18': LinuxSettings().DEFAULT_LEARNED_HOSTNAME,
        }

        for state in states:
            print('\n\n## Testing state %s ##' % state)
            testbed = """
            devices:
              lnx:
                os: linux
                type: linux
                tacacs:
                    username: admin
                passwords:
                    linux: admin
                connections:
                  defaults:
                    class: unicon.Unicon
                  cli:
                    command: mock_device_cli --os linux --state {state}
            """.format(state=state)
            tb = loader.load(testbed)
            c = tb.devices.lnx
            c.connect(learn_hostname=True)
            self.assertEqual(c.learned_hostname, states[state])

            # only check for supported prompts
            if states[state] != LinuxSettings().DEFAULT_LEARNED_HOSTNAME:
                x = c.execute('xml')
                self.assertEqual(x.replace('\r', ''), mock_data['exec']['commands']['xml']['response'].strip())
                x = c.execute('banner1')
                self.assertEqual(x.replace('\r', ''), mock_data['exec']['commands']['banner1']['response'].strip())
                x = c.execute('banner2')
                self.assertEqual(x.replace('\r', ''), mock_data['exec']['commands']['banner2']['response'].strip())

    def test_connect_disconnect_without_learn_hostname(self):
        testbed = """
        devices:
          lnx:
            os: linux
            type: linux
            tacacs:
                username: admin
            passwords:
                linux: admin
            connections:
              defaults:
                class: unicon.Unicon
              cli:
                command: mock_device_cli --os linux --state exec
        """
        tb = loader.load(testbed)
        lnx = tb.devices.lnx
        lnx.connect()
        lnx.disconnect()
        lnx.connect()

    def test_connect_disconnect_with_learn_hostname(self):
        testbed = """
        devices:
          lnx:
            os: linux
            type: linux
            tacacs:
                username: admin
            passwords:
                linux: admin
            connections:
              defaults:
                class: unicon.Unicon
              cli:
                command: mock_device_cli --os linux --state exec
        """
        tb = loader.load(testbed)
        lnx = tb.devices.lnx
        lnx.connect(learn_hostname=True)
        lnx.disconnect()
        # If disconnect is used, learn_hostname will still be used even though not specified.
        lnx.connect()
class TestRegexPattern(unittest.TestCase):
    """Guard against catastrophic backtracking in the prompt patterns."""

    def test_prompt_pattern(self):
        patterns = LinuxPatterns().__dict__
        # These are known to be slow by design and are excluded.
        known_slow_patterns = ['learn_hostname', 'learn_os_prompt']
        slow_patterns = {}
        # 500 lines of 80 'a' characters: a worst-case non-matching input.
        lines = ("a" * 80 + '\n')*500
        for p in sorted(patterns):
            if p in known_slow_patterns: continue
            regex = patterns[p]
            print("Pattern: {} '{}', ".format(p, regex), end="", flush=True)
            timings = []
            for x in range(3):
                start_time = datetime.datetime.now()
                m = re.search(regex,lines)
                end_time = datetime.datetime.now()
                elapsed_time = end_time - start_time
                us = elapsed_time.microseconds
                print("us: {} ".format(us), end='')
                # Record runs slower than 2ms; 3/3 slow runs fail the test.
                if us > 2000:
                    timings.append(us)
            print()
            if len(timings) == 3:
                slow_patterns[regex] = timings
        if slow_patterns:
            raise Exception('Slow patterns:\n{}'.format(pformat(slow_patterns)))
class TestPS1PS2(unittest.TestCase):
    """Verify multiline (PS1/PS2) shell input is split per input line."""

    def test_ps1_ps2_prompts(self):
        testbed = """
        devices:
          lnx-server:
            type: linux
            os: linux
            tacacs:
                username: cisco
            passwords:
                linux: cisco
            connections:
              defaults:
                class: unicon.Unicon
              cli:
                command: mock_device_cli --os linux --state exec_ps1
        """
        from pyats.topology import loader
        tb = loader.load(testbed)
        n = tb.devices['lnx-server']
        n.connect(learn_hostname=False)
        # A multiline shell loop: output is keyed by each input line.
        r = n.execute('for x in 1 2 3; do\necho $x\ndone')
        self.assertEqual(r, {'for x in 1 2 3; do': '', 'echo $x': '', 'done': '1\r\n2\r\n3'})
class TestLinuxPluginPing(unittest.TestCase):
    """Tests for the linux plugin ping service (options, errors, IPv6)."""

    @classmethod
    def setUpClass(cls):
        cls.c = Connection(hostname='linux',
                           start=['mock_device_cli --os linux --state exec'],
                           os='linux')
        cls.c.connect()

    def test_ping_success(self):
        r = self.c.ping('127.0.0.1')
        self.assertEqual(r.replace('\r', ''),
                         mock_data['exec']['commands']['ping -A -c5 127.0.0.1']['response'].strip())

    def test_ping_fail(self):
        # 100% packet loss matches the default error pattern.
        with self.assertRaises(SubCommandFailure) as err:
            r = self.c.ping('2.2.2.2')
        self.assertEqual(err.exception.args[1][0], '100% packet loss')

    def test_ping_empty_error_pattern(self):
        r = self.c.ping('2.2.2.2', error_pattern=[])
        self.assertEqual(r.replace('\r', ''),
                         mock_data['exec']['commands']['ping -A -c5 2.2.2.2']['response'].strip())

    def test_ping_none_error_pattern(self):
        r = self.c.ping('2.2.2.2', error_pattern=None)
        self.assertEqual(r.replace('\r', ''),
                         mock_data['exec']['commands']['ping -A -c5 2.2.2.2']['response'].strip())

    def test_ping_fail_custom_error_pattern(self):
        with self.assertRaises(SubCommandFailure) as err:
            r = self.c.ping('127.0.0.1', error_pattern=[' 0% packet loss'])
        self.assertEqual(err.exception.args[1][0], ' 0% packet loss')

    def test_ping_options(self):
        r = self.c.ping('127.0.0.1', options='A')
        self.assertEqual(r.replace('\r', ''),
                         mock_data['exec']['commands']['ping -A -c5 127.0.0.1']['response'].strip())

    def test_ping_count(self):
        r = self.c.ping('127.0.0.1', count=10)
        self.assertEqual(r.replace('\r', ''),
                         mock_data['exec']['commands']['ping -A -c10 127.0.0.1']['response'].strip())

    def test_ping_no_addr(self):
        with self.assertRaises(SubCommandFailure) as err:
            r = self.c.ping('')
        self.assertEqual(err.exception.args[0], 'Address is not specified')

    def test_ping_invalid_error_pattern(self):
        with self.assertRaises(ValueError) as err:
            r = self.c.ping('127.0.0.1', error_pattern='abc')
        self.assertEqual(err.exception.args[0], 'error pattern must be a list')

    def test_ping_ipv6_addr(self):
        # IPv6 addresses must be pinged with ping6.
        r = self.c.ping('::1')
        self.assertEqual(r.replace('\r', ''),
                         mock_data['exec']['commands']['ping6 -A -c5 ::1']['response'].strip())

    def test_ping_unknown_boolean_option(self):
        # Note: "Uknown" (sic) is the plugin's actual log message.
        with self.assertLogs('unicon') as cm:
            r = self.c.ping('127.0.0.1', options='Az')
        self.assertEqual(cm.output, ['WARNING:unicon:'
                                     'Uknown ping option - z, ignoring'])

    def test_ping_unknown_arg_option(self):
        with self.assertLogs('unicon') as cm:
            r = self.c.ping('127.0.0.1', x='a')
        self.assertEqual(cm.output, ['WARNING:unicon:'
                                     'Uknown ping option - x, ignoring'])
class TestLinuxPluginTERM(unittest.TestCase):
    """Check which TERM value the plugin exports to a real bash session."""

    def test_linux_TERM(self):
        testbed = """
        devices:
          lnx:
            os: linux
            type: linux
            connections:
              defaults:
                class: unicon.Unicon
              vty:
                command: bash
        """
        tb = loader.load(testbed)
        l = tb.devices.lnx
        l.connect()
        l.execute('PS1=bash#')
        # forcing the prompt pattern without $
        # echo $TERM is matched as a prompt pattern depending on timing
        l.state_machine.get_state('shell').pattern = r'^(.*?([>~%]|[^#\s]#))\s?$'
        term = l.execute('echo $TERM')
        # With the default settings, the plugin's TERM value wins.
        self.assertEqual(term, l.settings.TERM)

    def test_os_TERM(self):
        testbed = """
        devices:
          lnx:
            os: linux
            type: linux
            connections:
              defaults:
                class: unicon.Unicon
              vty:
                command: bash
        """
        tb = loader.load(testbed)
        l = tb.devices.lnx
        # Remove the plugin's TERM/ENV so the OS environment shines through.
        s = LinuxSettings()
        delattr(s, 'TERM')
        delattr(s, 'ENV')
        l.connect(settings=s)
        l.execute('PS1=bash#')
        # forcing the prompt pattern without $
        # echo $TERM is matched as a prompt pattern depending on timing
        l.state_machine.get_state('shell').pattern = r'^(.*?([>~%]|[^#\s]#))\s?$'
        term = l.execute('echo $TERM')
        self.assertEqual(term, os.environ['TERM'])
class TestLinuxPluginENV(unittest.TestCase):
    """Check that the plugin's ENV settings are applied to the session."""

    def test_linux_ENV(self):
        testbed = """
        devices:
          lnx:
            os: linux
            type: linux
            connections:
              defaults:
                class: unicon.Unicon
              vty:
                command: bash
        """
        tb = loader.load(testbed)
        l = tb.devices.lnx
        l.connect()
        term = l.execute('echo $TERM')
        self.assertIn(l.settings.ENV['TERM'], term)
        lc = l.execute('echo $LC_ALL')
        self.assertIn(l.settings.ENV['LC_ALL'], lc)
        # The terminal is resized to the configured 200x200.
        size = l.execute('stty size')
        self.assertEqual(size, '200 200')
class TestLinuxPluginExecute(unittest.TestCase):
    """Tests for execute(): error patterns, concurrency, return codes, sudo."""

    @classmethod
    def setUpClass(cls):
        cls.c = Connection(hostname='linux',
                           start=['mock_device_cli --os linux --state exec'],
                           os='linux',
                           credentials={'sudo': {'password': 'sudo_password'}})
        cls.c.connect()

    def test_execute_error_pattern(self):
        with self.assertRaises(SubCommandFailure) as err:
            r = self.c.execute('cd abc')

    def test_multi_thread_execute(self):
        # execute() must serialize correctly under concurrent callers.
        commands = ['ls'] * 10
        with ThreadPoolExecutor(max_workers=10) as executor:
            all_task = [executor.submit(self.c.execute, cmd)
                        for cmd in commands]
            results = [task.result() for task in all_task]

    def test_multi_process_execute(self):
        class Child(multiprocessing.Process):
            pass

        commands = ['ls'] * 3
        processes = [Child(target=self.c.execute, args=(cmd,))
                     for cmd in commands]
        for process in processes:
            process.start()
        for process in processes:
            process.join()

    def test_execute_check_retcode(self):
        # The mock device cycles through a list of return codes.
        self.c.settings.CHECK_RETURN_CODE = True
        with self.assertRaises(SubCommandFailure):
            self.c.execute('cd abc', error_pattern=[], valid_retcodes=[0])
        # second time, the mocked return code is 0
        self.c.execute('ls', error_pattern=[], valid_retcodes=[0])
        # third time, the mocked return code is 2
        self.c.execute('ls', error_pattern=[], valid_retcodes=[2])
        with self.assertRaises(AssertionError):
            # raises assertion because the valid_retcodes is not a list
            self.c.execute('cd abc', error_pattern=[], valid_retcodes=0)
        # return code is 2 (last one in the mock list)
        with self.assertRaises(SubCommandFailure):
            self.c.execute('ls', error_pattern=[])
        self.c.settings.CHECK_RETURN_CODE = False
        # should not raise exception
        self.c.execute('cd abc', error_pattern=[], valid_retcodes=[0])
        # should not have echo $? in the output
        self.assertEqual(self.c.spawn.match.match_output,
                         'cd abc\r\nbash: cd: abc: No such file or directory\r\nLinux$ ')
        # return code is 2 (last one in the mock list)
        with self.assertRaises(SubCommandFailure):
            self.c.execute('ls', error_pattern=[], check_retcode=True)
        # return code is 2 (last one in the mock list)
        self.c.execute('ls', error_pattern=[], check_retcode=True, valid_retcodes=[0, 2])

    def test_sudo_handler(self):
        # The sudo password prompt is answered from the 'sudo' credential.
        self.c.execute('sudo')
        self.c.context.credentials['sudo']['password'] = 'unknown'
        with self.assertRaises(unicon.core.errors.SubCommandFailure):
            self.c.execute('sudo_invalid')
        self.c.context.credentials['sudo']['password'] = 'invalid'
        with self.assertRaises(unicon.core.errors.SubCommandFailure):
            self.c.execute('sudo_invalid')
@patch.object(unicon.settings.Settings, 'POST_DISCONNECT_WAIT_SEC', 0)
@patch.object(unicon.settings.Settings, 'GRACEFUL_DISCONNECT_WAIT_SEC', 0)
class TestLoginPasswordPrompts(unittest.TestCase):
    """Custom login/password prompt patterns (settings and testbed YAML)."""

    def test_custom_user_password_prompt(self):
        c = Connection(hostname='linux',
                       start=['mock_device_cli --os linux --state linux_login3'],
                       os='linux',
                       username='user3',
                       password='cisco')
        # Override the default prompt patterns before connecting.
        c.settings.LOGIN_PROMPT = r'.*Identifier:\s?$'
        c.settings.PASSWORD_PROMPT = r'.*Passe:\s?$'
        c.connect()
        c.disconnect()

    def test_topology_custom_user_password_prompt(self):
        # The same override, declared through the testbed YAML instead.
        testbed = r"""
        devices:
          linux:
            type: linux
            os: linux
            tacacs:
                username: user3
                login_prompt: '.*Identifier:\s?$'
                password_prompt: '.*Passe:\s?$'
            passwords:
                linux: cisco
            connections:
              defaults:
                class: unicon.Unicon
              linux:
                command: 'mock_device_cli --os linux --state linux_login3'
        """
        t = loader.load(testbed)
        d = t.devices['linux']
        d.connect()
        d.disconnect()
class TestLinuxPromptOverride(unittest.TestCase):
    """Overriding shell prompt patterns through LinuxSettings."""

    def test_override_prompt(self):
        settings = LinuxSettings()
        prompt = 'prompt'
        settings.PROMPT = prompt
        c = Connection(hostname='linux',
                       start=['mock_device_cli --os linux --state exec'],
                       os='linux',
                       settings=settings)
        # The custom pattern takes effect at state-machine construction.
        assert c.state_machine.states[0].pattern == prompt

    def test_override_shell_prompt(self):
        settings = LinuxSettings()
        prompt = 'shell_prompt'
        settings.SHELL_PROMPT = prompt
        c = Connection(hostname='linux',
                       start=['mock_device_cli --os linux --state exec'],
                       os='linux',
                       settings=settings,
                       learn_hostname=True)
        c.connect()
        assert c.state_machine.states[0].pattern == prompt
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| 34.318428 | 111 | 0.550796 |
587caca061f2a5857f53edeaae6258926b8ccdd6 | 17,708 | py | Python | Need2convert/zzfatranslator-core.py | PersianWikipedia/fawikibot | 30b2e2aa824753cd0927573fa2f7fdb907cb15d2 | [
"MIT"
] | 12 | 2015-07-06T09:51:40.000Z | 2020-07-05T21:27:12.000Z | Need2convert/zzfatranslator-core.py | PersianWikipedia/fawikibot | 30b2e2aa824753cd0927573fa2f7fdb907cb15d2 | [
"MIT"
] | 3 | 2017-01-08T01:12:00.000Z | 2020-09-15T04:25:19.000Z | Need2convert/zzfatranslator-core.py | PersianWikipedia/fawikibot | 30b2e2aa824753cd0927573fa2f7fdb907cb15d2 | [
"MIT"
] | 6 | 2015-07-06T09:53:39.000Z | 2021-07-07T09:12:52.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Reza(User:reza1615), 2011
#
# Distributed under the terms of MIT License (MIT)
# for more information see https://fa.wikipedia.org/wiki/کاربر:Fatranslator/ترجمه_همسنگ
#
from pywikibot.compat import query
import re
import pywikibot
import ref_link_correction_core
import codecs
from pywikibot import config
import time
import MySQLdb as mysqldb
ensite = pywikibot.Site('en', 'wikipedia')  # source wiki (English Wikipedia)
fasite = pywikibot.Site('fa', 'wikipedia')  # target wiki (Persian Wikipedia)
BotVersion=u'۱۰.۱ core'  # version tag appended to every edit summary
_cache={}  # process-wide memoisation: {(args..., function_name): result}
def Check_Page_Exists(page_link):
    """Return True when *page_link* exists on fawiki, False otherwise.

    Results are memoised in the module-level _cache, keyed by
    (title, 'Check_Page_Exists').  Spaces are normalised to underscores
    before querying.
    """
    page_link=page_link.replace(u' ',u'_')
    if _cache.get(tuple([page_link, 'Check_Page_Exists'])):
        return _cache[tuple([page_link, 'Check_Page_Exists'])]
    site = pywikibot.Site('fa')
    params = {
        'action': 'query',
        'prop':'info',
        'titles': page_link
    }
    query_page =pywikibot.data.api.Request(site=site, **params).submit()
    try:
        # A missing page carries no 'pageid', so the lookup below raises and
        # control falls through to the "page not existed" branch.  The value
        # bound to redirect_link is itself unused; only the lookup matters.
        for i in query_page[u'query'][u'pages']:
            redirect_link=query_page[u'query'][u'pages'][i]['pageid']
        _cache[tuple([page_link, 'Check_Page_Exists'])]=True
        return True# page existed
    except:
        _cache[tuple([page_link, 'Check_Page_Exists'])]=False
        return False# page not existed
def namespacefinder(enlink, firstsite):
    """Return the MediaWiki namespace number of *enlink* on wiki *firstsite*.

    The title is stripped of wikilink brackets and en:/fa: prefixes, spaces
    are normalised to underscores, and the result is memoised in _cache.
    Returns the namespace of the first page entry in the API reply, or
    None implicitly when the reply contains no pages.
    """
    # Normalise the title; the try/except copes with py2 str vs unicode input.
    try:
        enlink=unicode(str(enlink),'UTF-8').replace(u'[[',u'').replace(u']]',u'').replace(u'en:',u'').replace(u'fa:',u'')
    except:
        enlink=enlink.replace(u'[[',u'').replace(u']]',u'').replace(u'en:',u'').replace(u'fa:',u'')
    enlink=enlink.replace(u' ',u'_')
    cache_key = tuple([enlink, firstsite, 'namespacefinder'])
    if _cache.get(cache_key):
        return _cache[cache_key]
    site = pywikibot.Site(firstsite)
    params = {
        'action': 'query',
        'prop': 'langlinks',
        'titles': enlink,
        'redirects': 1,
        'lllimit':500,
    }
    # BUG FIX: the original wrapped this in a constant "a=1; if a:" scaffold
    # whose else branch (caching False) was unreachable dead code; the scaffold
    # has been removed without changing behaviour.
    categoryname =pywikibot.data.api.Request(site=site, **params).submit()
    for item in categoryname[u'query'][u'pages']:
        fanamespace=categoryname[u'query'][u'pages'][item]['ns']
        _cache[cache_key]=fanamespace
        return fanamespace
def clean_word (word):
    """Normalise a title for fuzzy comparison.

    Lower-cases the word, removes quotes, maps underscores to spaces, and
    strips punctuation (including Persian/typographic variants such as
    ، – — and curly quotes).
    """
    word=word.lower()
    word=word.replace(u"'",u"").replace(u'"',u"").replace(u'_',u" ")
    word=re.sub(ur'[\-\.\:,;@#\$\*\+\!\?%\^\/\\\<\>ʻ”“‘’‚’”\(\)\}\{–—−ـ_]',u'',word)
    # NOTE(review): runs of 2+ whitespace are deleted outright (no single
    # space substituted), fusing neighbouring words -- presumably fine for
    # the character-multiset comparison in Compairing(); confirm if reused.
    word=re.sub(ur'\s{2,}',u'',word)
    return word
def Compairing(s1, s2):
    """Return a character-multiset similarity ratio in [0.0, 1.0].

    Both strings are lower-cased and their characters sorted; a classic
    two-pointer merge counts characters common to both (with multiplicity).
    The score is (2 * matches) / (len(s1) + len(s2)).  Identical multisets
    score 1.0; disjoint ones score 0.0.
    """
    left = sorted(s1.lower())
    right = sorted(s2.lower())
    i = j = matched = 0
    # Merge the two sorted character lists, counting coincidences.
    while i < len(left) and j < len(right):
        if left[i] == right[j]:
            matched += 2
            i += 1
            j += 1
        elif left[i] < right[j]:
            i += 1
        else:
            j += 1
    # float() keeps true division under Python 2 semantics as well.
    return matched / float(len(left) + len(right))
def is_equal(s1, s2):
    """Decide whether two titles plausibly refer to the same page.

    Returns (matched, score).  Containment of one title in the other is a
    certain match (score 100); otherwise the cleaned-character similarity,
    then the raw similarity, must exceed 0.5.  On failure the cleaned score
    is reported.
    """
    a = s1.strip()
    b = s2.strip()
    cleaned_score = Compairing(clean_word(a), clean_word(b))
    raw_score = Compairing(a, b)
    # One title containing the other (case-insensitively) counts as equal.
    if a.lower() in b.lower() or b.lower() in a.lower():
        return True, 100
    if cleaned_score > 0.5:
        return True, cleaned_score
    if raw_score > 0.5:
        return True, raw_score
    return False, cleaned_score
def redirect_find( page_link):
    """Check whether *page_link* on enwiki is an acceptable link target.

    Returns (ok, score):
      ok=True  -- not a redirect, or a redirect to a similar title;
      ok=False -- missing page, or a redirect to a dissimilar title.
    Only the boolean is cached, so a cache hit reports score 0.  Because the
    cache is read with _cache.get(), a cached False is falsy and therefore
    never served -- those titles are re-queried each time.
    """
    page_link=page_link.replace(u' ',u'_')
    if _cache.get(tuple([page_link, 'redirect_find'])):
        return _cache[tuple([page_link, 'redirect_find'])], 0
    params = {
        'action': 'query',
        'redirects':"",
        'titles': page_link
    }
    query_page =pywikibot.data.api.Request(site=pywikibot.Site('en'), **params).submit()
    try:
        redirect_link=query_page[u'query'][u'redirects'][0]['to']
        is_equal_result,Comp=is_equal (page_link,redirect_link)
        if is_equal_result:
            #It is a redirect, but similar enough that replacing it is OK
            _cache[tuple([page_link, 'redirect_find'])]=True
            return True, Comp
        else:
            _cache[tuple([page_link, 'redirect_find'])]=False
            return False, Comp
    except:
        # No 'redirects' key in the reply: either the page is missing or it
        # is an ordinary (non-redirect) page.
        if 'missing=""' in str(query_page):
            _cache[tuple([page_link, 'redirect_find'])]=False
            return False, 0
        else:
            _cache[tuple([page_link, 'redirect_find'])]=True
            return True, 0
def link_translator(batch, ensite, fasite):
    """Map a batch of enwiki titles to their fawiki equivalents via Wikidata.

    Resolves redirects and title normalisation on enwiki first, then asks
    Wikidata (wbgetentities) for the fawiki sitelinks of the resolved titles.
    Returns {original_title: fa_title} containing only titles that have a
    fawiki sitelink; entries are mirrored for redirect/normalised aliases.
    Returns {} when the Wikidata request fails.
    """
    params = {
        'action': 'query',
        'redirects': '',
        'titles': '|'.join(batch)
    }
    query_res = pywikibot.data.api.Request(site=ensite, **params).submit()
    redirects = {i['from']: i['to'] for i in query_res['query'].get('redirects', [])}
    normalizeds = {i['from']: i['to'] for i in query_res['query'].get('normalized', [])}
    # Resolve normalised titles through the redirect table, merging the
    # normalizeds mapping into redirects at the same time.
    for k, v in normalizeds.items():
        redirects[k] = redirects.get(v, v)
    wikidata = pywikibot.Site('wikidata', 'wikidata')
    endbName = ensite.dbName()
    fadbName = fasite.dbName()
    params = {
        'action': 'wbgetentities',
        'sites': endbName,
        'titles': '|'.join([redirects.get(i, i) for i in batch]),
        'props': 'sitelinks'
    }
    try:
        query_res = pywikibot.data.api.Request(site=wikidata, **params).submit()
    except:
        return {}
    matches_titles = {}
    entities = query_res.get('entities', {})
    for qid, entity in entities.items():
        if fadbName in entity.get('sitelinks', {}):
            en_title = entity['sitelinks'][endbName]
            fa_title = entity['sitelinks'][fadbName]
            # Since badges were added to Wikidata items, sitelinks may be
            # dicts ({'title': ...}) instead of plain strings.
            if not isinstance(en_title, str):
                en_title = en_title['title']
                fa_title = fa_title['title']
            matches_titles[en_title] = fa_title
    res = {}
    for i in batch:
        p = redirects[i] if (i in redirects) else i
        if p in matches_titles:
            res[i] = matches_titles[p]
    # Also expose the results under the redirect targets / normalised forms.
    for k, v in redirects.items():
        if k in res:
            res[v] = res[k]
    for k, v in normalizeds.items():
        if k in res:
            res[v] = res[k]
    return res
def englishdictionry( enlink ,firstsite,secondsite):
    """Return the fawiki title equivalent to enwiki title *enlink*, or False.

    Cleans the title, rejects empty and section (#) links, then delegates
    to link_translator() for the Wikidata lookup.  Results (including
    failures) are recorded in _cache.  Note: firstsite/secondsite only key
    the cache; the lookup always uses the module-level ensite/fasite.
    """
    # Normalise the title; the try/except copes with py2 str vs unicode input.
    try:
        enlink=unicode(str(enlink),'UTF-8').replace(u'[[',u'').replace(u']]',u'').replace(u'en:',u'').replace(unicode('fa','UTF-8')+u':',u'')
    except:
        enlink=enlink.replace(u'[[',u'').replace(u']]',u'').replace(u'en:',u'').replace(unicode('fa','UTF-8')+u':',u'')
    enlink=enlink.replace(u' ',u'_')
    if _cache.get(tuple([enlink,firstsite, 'englishdictionry'])):
        return _cache[tuple([enlink,firstsite, 'englishdictionry'])]
    # Section links ([[Page#Section]]) have no standalone equivalent.
    if enlink.find('#')!=-1:
        _cache[tuple([enlink,firstsite, 'englishdictionry'])]=False
        return False
    if enlink==u'':
        _cache[tuple([enlink,firstsite, 'englishdictionry'])]=False
        return False
    englishdictionry_result=link_translator([enlink], ensite, fasite)
    if englishdictionry_result:
        _cache[tuple([enlink,firstsite, 'englishdictionry'])]=englishdictionry_result[enlink]
        return englishdictionry_result[enlink]
    else:
        _cache[tuple([enlink,firstsite, 'englishdictionry'])]=False
        return False
def switchnamespace(namespace):
    """Map a MediaWiki namespace number to its textual prefix.

    Namespace 0 (articles) maps to a single space; unsupported namespaces
    yield False so callers can skip them.
    """
    prefixes = {
        0: u' ',
        4: u'wikipedia:',
        10: u'template:',
        12: u'help:',
        14: u'category:',
    }
    return prefixes.get(namespace, False)
def revert(text2, text):
    """Restore citation templates in *text2* from the original *text*.

    Finds every {{cite ...}} / {{citation ...}} template in both strings and
    positionally replaces each template in the processed text2 with the one
    from the untouched text, so reference templates are never altered by the
    link-replacement pass.  Returns the repaired text2.
    """
    # ASCII-only pattern: a plain raw string matches py2 unicode and py3 str.
    cite_re = re.compile(r'\{\{\s*(?:[Cc]ite|[Cc]itation)(?:\{\{.*?\}\}|.)*?\}\}')
    citebase = cite_re.findall(text)
    citetarget = cite_re.findall(text2)
    # BUG FIX: the original indexed citetarget with citebase's loop index and
    # raised IndexError whenever text2 contained fewer templates than text;
    # zip() stops at the shorter list, matching the original's behaviour in
    # every non-crashing case.
    for target, base in zip(citetarget, citebase):
        text2 = text2.replace(target, base)
    return text2
def getlinks(enlink,falink,NotArticle):
    """Replace the red link [[enlink]] with [[falink]] on every fawiki page
    that references it.

    NotArticle -- when True, namespace 0 (articles) is skipped so the
    replacement runs on non-article pages first.  Talk/user/portal-style
    namespaces are always skipped.  Citation templates are restored with
    revert() when no replacement took place.  Returns True when finished,
    False when the backlink query fails.
    """
    site = pywikibot.Site('fa')
    enlink_2=re.sub(ur'[1234567890۱۲۳۴۵۶۷۸۹۰\(\)\,\،\.]',ur'', enlink).strip()
    falink_2=re.sub(ur'[1234567890۱۲۳۴۵۶۷۸۹۰\(\)\,\،\.]',ur'', falink).strip()
    if not enlink_2:
        # It is a date link like [[1999]]; [[1999]] != [[۱۹۹۹]], so skip.
        return
    if not falink_2:
        # It is a date link like [[۱۹۹۹]]; [[1999]] != [[۱۹۹۹]], so skip.
        return
    try:
        page = pywikibot.Page(site,enlink)
        linktos=page.getReferences()
    except:
        return False
    for page in linktos:
        namespacefa=page.namespace()
        # Skip talk pages and other namespaces the bot must not edit.
        if namespacefa in [1,2,3,5,7,8,9,11,13,15,101]:
            continue
        if NotArticle:
            #First pass: replace only on non-article pages
            if namespacefa==0:
                continue
        try:
            text=page.get()
        except :
            continue
        # Process each (page, enlink) pair at most once per run.
        if _cache.get(tuple([page.title(),enlink,'getlinks'])):
            continue
        else:
            _cache[tuple([page.title(),enlink,'getlinks'])]=1
        time.sleep(2)
        pywikibot.output(u'checking '+page.title()+u' .....')
        enlink=enlink.strip()
        text2=text
        # Build search variants: ASCII->Persian digits, Arabic comma and
        # underscore spellings of the same title.
        fanum=enlink.replace(u'1',u'۱').replace(u'2',u'۲').replace(u'3',u'۳').replace(u'4',u'۴').replace(u'5',u'۵')
        fanum=fanum.replace(u'6',u'۶').replace(u'7',u'۷').replace(u'8',u'۸').replace(u'9',u'۹').replace(u'0',u'۰')
        fanum_text=re.sub(ur'[1234567890۱۲۳۴۵۶۷۸۹۰\(\)\,\،\.]',ur'', fanum).strip()
        search_list=[]
        if fanum_text and fanum!=enlink:
            for i in [enlink,enlink.replace(u',',u'،'),enlink.replace(u' ',u'_'),enlink.replace(u' ',u'_').replace(u',',u'،')]:
                if not i in search_list:
                    search_list.append(i)
        else:
            for i in [enlink,enlink.replace(u',',u'،'),fanum,enlink.replace(u' ',u'_'),enlink.replace(u' ',u'_').replace(u',',u'،')]:
                if not i in search_list:
                    search_list.append(i)
        text2=text2.replace(u'\r',u'')
        for enlink in search_list:
            pywikibot.output(enlink)
            # Plain links: [[enlink]] -> [[falink]], keeping a leading colon.
            # NOTE(review): re.sub's 4th positional argument is *count*, so
            # re.DOTALL|re.IGNORECASE here acts as count=18, not as flags --
            # confirm whether case-insensitive matching was intended.
            text2=re.sub(ur'\[\[ *(:|) *'+enlink+ur' *\]\]',ur'[[\1'+falink+ur']]',text2, re.DOTALL| re.IGNORECASE)
            text2=re.sub(u'\| *link *\= *'+enlink+u'\n',u'|link='+falink+u'\n',text2)
            # Piped links: replace the target, keeping the label unless it is
            # very short or contains no Persian letters.
            Link_list = re.findall(ur'\[\[ *(:|) *([^\]\|]+) *\| *([^\]\|]+) *\]\]',text2, re.DOTALL| re.IGNORECASE)
            if Link_list:
                for mylink in Link_list:
                    if enlink.lower().replace(u'_',u' ').replace(u'،',u',').strip()==mylink[1].lower().replace(u'_',u' ').replace(u'،',u',').strip():
                        pywikibot.output(u'link >'+mylink[1]+u' '+ mylink[2])
                        #phase 1: exact-target piped links
                        Replace_list = re.findall(ur'\[\[ *(:|) *('+enlink+ur') *\| *([^\]\|]+) *\]\]',text2, re.IGNORECASE)
                        #pywikibot.output(Replace_list[0])
                        if Replace_list:
                            for replace_link in Replace_list:
                                # Short label with a long title: keep the label.
                                if len(replace_link[2]) <4 and len (enlink)>3:
                                    text2=text2.replace(u'[['+replace_link[0]+replace_link[1]+u'|'+replace_link[2]+u']]',ur'[['+replace_link[0]+falink+ur'|'+replace_link[2]+u']]')
                                else:
                                    # Label without Persian letters: unpipe it.
                                    if replace_link[2]==re.sub(u'[ابضصثقفغعهخحجچشسیلتنمکگظطزرذدپو]',u'',replace_link[2]):
                                        text2=text2.replace(u'[['+replace_link[0]+replace_link[1]+u'|'+replace_link[2]+u']]',ur'[['+replace_link[0]+falink+u']]')
                                    else:
                                        text2=text2.replace(u'[['+replace_link[0]+replace_link[1]+u'|'+replace_link[2]+u']]',ur'[['+replace_link[0]+falink+ur'|'+replace_link[2]+u']]')
                        #phase 2: the link as found by the broad scan
                        if len(mylink[2]) <4 and len (mylink[1])>3:
                            text2=text2.replace(u'[['+mylink[1]+u'|'+mylink[2]+u']]',ur'[['+falink+ur'|'+mylink[2]+u']]')
                        else:
                            if mylink[2]==re.sub(u'[ابضصثقفغعهخحجچشسیلتنمکگظطزرذدپو]',u'',mylink[2]):
                                text2=text2.replace(u'[['+mylink[1]+u'|'+mylink[2]+u']]',ur'[['+falink+u']]')
                            else:
                                text2=text2.replace(u'[['+mylink[1]+u'|'+mylink[2]+u']]',ur'[['+falink+ur'|'+mylink[2]+u']]')
        if text2.find(falink)==-1:
            pywikibot.output(u'\03{lightblue}could not find any link\03{default}')
            # Nothing was replaced: restore any citation templates touched above.
            text2=revert(text2,text)
        msg=u''
        if text!=text2:
            text2,msg=ref_link_correction_core.main(text2,u'')
        if msg:
            msg=u'+'+msg
        try:
            page.put(text2,u'ربات :[[وپ:پقا|جایگزینی پیوند قرمز]] [['+enlink+u']] > [['+falink+u']] ('+BotVersion+')'+msg)
            pywikibot.output(u'\03{lightgreen}the page '+page.title()+u' had replcae item [['+enlink+u']] > [['+falink+u']]\03{default}')
        except:
            pywikibot.output(u'\03{lightred}the page '+page.title()+u' could not replaced so it passed\03{default}')
            continue
    return True
def remove_wikify (enlink,Status,Comp):
    """Strip the wikification of [[enlink]] on all fawiki articles linking to it.

    Status 'R' -- the English target is a dissimilar redirect: both piped and
    plain links are unlinked.  Any other status (used for '#' section links)
    unlinks only the piped form.  Comp is the similarity score, reported in
    the edit summary.  Only namespace 0 (articles) is touched.
    NOTE(review): the outer bare except silently swallows *any* failure,
    including errors while iterating backlinks -- deliberate best-effort,
    but it will also hide real bugs.
    """
    try:
        page = pywikibot.Page(pywikibot.Site('fa'),enlink)
        linktos=page.getReferences()
        for page in linktos:
            namespacefa=page.namespace()
            if namespacefa != 0:
                continue
            try:
                text=page.get()
            except:
                continue
            # Piped link: keep only the visible label.
            text=re.sub(ur'\[\[( *'+enlink+ur' *)\|([^\]\|]+ *)\]\]',ur' \2 ',text)
            if Status=='R':
                # Plain link: keep the bare title.
                text=re.sub(ur'\[\[ *('+enlink+ur') *\]\]',ur' \1 ',text)
            try:
                if Status=='R':
                    page.put(text,u'[[وپ:پقا|برداشتن ویکیسازی]] [['+enlink+u']] > تغییرمسیر نامشابه است ('+BotVersion+') '+str(Comp))
                else:
                    page.put(text,u'[[وپ:پقا|برداشتن ویکیسازی]] [['+enlink+u']]> بخشی از یک مقاله است (در ویکیانگلیسی# دارد) ('+BotVersion+') '+str(Comp))
                pywikibot.output(u'\03{lightblue}the page '+page.title()+u' remove wikifay [['+enlink+u']]\03{default}')
            except:
                pywikibot.output(u'\03{lightred}the page '+page.title()+u' could not replaced so it passed\03{default}')
                continue
    except:
        return
def get_query():
    """Fetch all red links from the fawiki replica database on Labs.

    Selects distinct (pl_title, pl_namespace) rows for pages that are linked
    from the article namespace but do not exist there.  Credentials come
    from the pywikibot config; the query is marked SLOW_OK because it scans
    the pagelinks table.
    """
    querys='SELECT /* SLOW_OK */ DISTINCT pl_title,pl_namespace FROM pagelinks INNER JOIN page ON pl_from = page_id WHERE pl_title NOT IN(SELECT page_title FROM page WHERE page_namespace = 0) AND pl_namespace = 0 AND page_namespace = 0;'
    pywikibot.output(querys)
    site = pywikibot.Site('fa')
    conn = mysqldb.connect("fawiki.labsdb", db = site.dbName()+ '_p',
                           user = config.db_username,
                           passwd = config.db_password)
    cursor = conn.cursor()
    cursor.execute(querys)
    results = cursor.fetchall()
    return results
def run(results, NotArticle):
    """Process red-link candidate rows produced by get_query().

    For each (title, namespace) row: skip namespaces without a textual
    prefix, normalise the title, and -- when the title contains no Persian
    letters -- either replace the red link with its Persian equivalent
    (when a Wikidata sitelink exists) or strip the wikification of
    dissimilar redirects and section (#) links.  NotArticle is forwarded to
    getlinks() so non-article namespaces can be handled in a first pass.
    """
    for enlink in results:
        # Only act on namespaces switchnamespace() knows how to prefix.
        if switchnamespace(enlink[1]):
            enlink = switchnamespace(enlink[1]) + unicode(enlink[0], 'UTF-8').strip()
            pywikibot.output(enlink)
            enlink = enlink.replace(u'_', u' ').strip()
            # Remove Persian letters; if nothing was removed, the title is
            # purely non-Persian and worth translating.
            enlink2 = re.sub(u'[ابضصثقفغعهخحجچشسیلتنمکگظطزرذدپو]', u'', enlink)
            if enlink2 == enlink:
                # Convert Persian digits to ASCII before querying enwiki.
                for count, digit in enumerate(u'۰۱۲۳۴۵۶۷۸۹'):
                    enlink = enlink.replace(digit, str(count))
                # Unwikify links whose English target is a dissimilar redirect.
                redirect_find_result, Comp = redirect_find(enlink)
                if not redirect_find_result:
                    pywikibot.output(u'It was redirect so lets remove the wikify!')
                    remove_wikify(enlink, 'R', Comp)
                    continue
                falink = englishdictionry(enlink, 'en', 'fa')
                pywikibot.output(falink)
                if falink:
                    # The namespace must agree on both wikis before replacing.
                    if namespacefinder(enlink, 'en') != namespacefinder(falink, 'fa'):
                        continue
                    pywikibot.output(u'---------------------------------------------')
                    pywikibot.output(enlink + u' > ' + falink)
                    getlinks(enlink, falink, NotArticle)
                else:
                    # No Persian equivalent: unwikify section links.
                    if u'#' in enlink:
                        remove_wikify(enlink, '#', Comp)
            else:
                pywikibot.output(u'it has farsi char')
    # BUG FIX: the original executed "del results,enlink" *inside* the loop,
    # which raised NameError on the second iteration because the name
    # "results" had already been deleted.  The names simply fall out of
    # scope when the function returns, so no explicit del is needed.
# First pass: replace red links in non-article namespaces only.
run(get_query(),True)
# Second pass: replace red links in the article namespace.
run(get_query(),False)
'''
#for test
results=[['Let the Right One In (film)',0]]
run(results,True)
run(results,False)
'''
| 40.614679 | 238 | 0.529026 |
50a2e75b97c0833b2bcccf09e4d4f15dc816fac6 | 874 | py | Python | build/sick_tim/catkin_generated/pkg.develspace.context.pc.py | 6RiverSystems/darknet_ros | 03c72b96afa99f7cc75f7792b51deb4a7f4ed379 | [
"BSD-3-Clause"
] | null | null | null | build/sick_tim/catkin_generated/pkg.develspace.context.pc.py | 6RiverSystems/darknet_ros | 03c72b96afa99f7cc75f7792b51deb4a7f4ed379 | [
"BSD-3-Clause"
] | null | null | null | build/sick_tim/catkin_generated/pkg.develspace.context.pc.py | 6RiverSystems/darknet_ros | 03c72b96afa99f7cc75f7792b51deb4a7f4ed379 | [
"BSD-3-Clause"
] | null | null | null | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/kalyco/mfp_workspace/devel/.private/sick_tim/include;/home/kalyco/mfp_workspace/src/sick_tim/include;/usr/include;/usr/include/libusb-1.0".split(';') if "/home/kalyco/mfp_workspace/devel/.private/sick_tim/include;/home/kalyco/mfp_workspace/src/sick_tim/include;/usr/include;/usr/include/libusb-1.0" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;sensor_msgs;diagnostic_updater;dynamic_reconfigure".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lsick_tim_3xx;/usr/lib/x86_64-linux-gnu/libboost_system.so;-lusb-1.0".split(';') if "-lsick_tim_3xx;/usr/lib/x86_64-linux-gnu/libboost_system.so;-lusb-1.0" != "" else []
PROJECT_NAME = "sick_tim"
PROJECT_SPACE_DIR = "/home/kalyco/mfp_workspace/devel/.private/sick_tim"
PROJECT_VERSION = "0.0.10"
| 97.111111 | 353 | 0.782609 |
a4ab6ac3d387a5282baa0c7eabd009c1b6ad3a14 | 3,227 | py | Python | scripts/ais_liststations.py | rolker/noaadata | 052a4b7d634e0a3a92a348b543e5db536ae24f02 | [
"Apache-2.0"
] | 35 | 2015-02-15T17:23:00.000Z | 2022-01-27T01:49:43.000Z | scripts/ais_liststations.py | rolker/noaadata | 052a4b7d634e0a3a92a348b543e5db536ae24f02 | [
"Apache-2.0"
] | 2 | 2017-10-04T17:24:38.000Z | 2017-10-04T18:22:00.000Z | scripts/ais_liststations.py | rolker/noaadata | 052a4b7d634e0a3a92a348b543e5db536ae24f02 | [
"Apache-2.0"
] | 22 | 2015-02-08T13:29:58.000Z | 2022-03-09T03:03:16.000Z | #!/usr/bin/env python
"""List all of the AIS stations within a set of log files.
@license: Apache 2.0
"""
import os
import sys
def getStation(line, withR=False):
    """Extract the receive-station field from one raw !AIVDM NMEA line.

    The station is the first comma-separated field *after* the checksum
    field (the one containing '*'), starting with 'r' or 'b'.  Returns the
    station name without its leading r/b letter, the full field when
    withR is True, or None when the line is not !AIVDM or has no station.
    """
    fields = line.split(',')
    if fields[0] != '!AIVDM':
        return None
    past_checksum = False
    # Station fields can only appear after the 5th field and the checksum.
    for field in fields[5:]:
        if not field:
            continue
        if not past_checksum:
            # FIX/assumption carried over: any '*' marks the checksum field.
            if '*' in field:
                past_checksum = True
            continue
        if len(field) < 2 or field[0] not in ('r', 'b'):
            continue
        return field if withR else field[1:]
    return None
if __name__=='__main__':
    from optparse import OptionParser
    parser = OptionParser(usage="%prog [options] file1.ais [file2 ...]", version="%prog ")
    parser.add_option('-c','--count-for-station',dest='count_each_station',default=False,action='store_true',
                      help='Count the number of lines for each station')
    parser.add_option('-r','--without-r',dest='withR',default=True,action='store_false',
                      help='Do not keep the r in the station name')
    parser.add_option('-v','--verbose',dest='verbose',default=False,action='store_true',
                      help='Make the test output verbose')
    parser.add_option('--progress',dest='progress',default=False,action='store_true',
                      help='Print out the line count every n lines')
    (options,args) = parser.parse_args()

    verbose = options.verbose
    progress_interval = 1000000

    if options.count_each_station:
        # Counting mode: tally the number of lines seen per station.
        stations = {}
        for filename in args:
            for linenum,line in enumerate(open(filename)):
                if options.progress:
                    if linenum % progress_interval == 0:
                        sys.stderr.write('linenum: %d\n' % linenum)

                station = getStation(line,options.withR)
                if station:
                    if station not in stations:
                        stations[station]=1
                    else:
                        stations[station]+=1
        for station in stations:
            print station, stations[station]
    else:
        # Listing mode: collect the unique set of station names.
        stations = set()
        for filename in args:
            if options.verbose: print 'Processing file:',filename
            for linenum,line in enumerate(open(filename)):
                if options.progress:
                    if linenum % progress_interval == 0:
                        sys.stderr.write('linenum: %d\n' % linenum)
                station = getStation(line,options.withR)
                if None==station:
                    if verbose: print 'WARNING: no station for line',line
                    continue
                if verbose and station not in stations:
                    print 'New station:',station
                stations.add(station)

        for item in stations:
            print item
5e87c07f63cdda106f2b0b352b2b6e9b85f05d9d | 621 | py | Python | instagram_api/response/reels_tray_feed.py | Yuego/instagram_api | b53f72db36c505a2eb24ebac1ba8267a0cc295bb | [
"MIT"
] | 13 | 2019-08-07T21:24:34.000Z | 2020-12-12T12:23:50.000Z | instagram_api/response/reels_tray_feed.py | Yuego/instagram_api | b53f72db36c505a2eb24ebac1ba8267a0cc295bb | [
"MIT"
] | null | null | null | instagram_api/response/reels_tray_feed.py | Yuego/instagram_api | b53f72db36c505a2eb24ebac1ba8267a0cc295bb | [
"MIT"
] | null | null | null | from .mapper import ApiResponse, ApiResponseInterface
from .mapper.types import Timestamp, AnyType
from .model import Broadcast, PostLive, StoryTray, TraySuggestions
__all__ = ['ReelsTrayFeedResponse']
class ReelsTrayFeedResponseInterface(ApiResponseInterface):
story_ranking_token: str
broadcasts: [Broadcast]
tray: [StoryTray]
post_live: PostLive
sticker_version: int
face_filter_nux_version: int
stories_viewer_gestures_nux_eligible: bool
has_new_nux_story: bool
suggestions: [TraySuggestions]
class ReelsTrayFeedResponse(ApiResponse, ReelsTrayFeedResponseInterface):
pass
| 28.227273 | 73 | 0.798712 |
671b73326327f6d74c02b36cba4b67509b1b9507 | 27,931 | py | Python | pyubx2/ubxtypes_set.py | pjln/pyubx2 | 3d2f414f7f2c3d2e4d40177499228cf438ecf12e | [
"BSD-3-Clause"
] | null | null | null | pyubx2/ubxtypes_set.py | pjln/pyubx2 | 3d2f414f7f2c3d2e4d40177499228cf438ecf12e | [
"BSD-3-Clause"
] | null | null | null | pyubx2/ubxtypes_set.py | pjln/pyubx2 | 3d2f414f7f2c3d2e4d40177499228cf438ecf12e | [
"BSD-3-Clause"
] | null | null | null | # pylint: disable=unused-import
"""
UBX Protocol Input payload definitions
THESE ARE THE PAYLOAD DEFINITIONS FOR _SET_ MESSAGES _TO_ THE RECEIVER
NB: Attribute names must be unique within each message class/id
NB: Repeating groups must be defined as a tuple thus:
'group': ('numr', {dict})
where
- 'numr' is either:
a) an integer representing a fixed number of repeats e.g 32
b) a string representing the name of a preceding attribute
containing the number of repeats e.g. 'numCh'
c) 'None' for a 'variable by size' repeating group
(only one such group is permitted per message type)
- {dict} is the nested dictionary containing the repeating
attributes
Created on 27 Sep 2020
Information sourced from u-blox Interface Specifications © 2013-2021, u-blox AG
:author: semuadmin
"""
# pylint: disable=too-many-lines, line-too-long
from pyubx2.ubxtypes_core import (
C2,
C6,
C32,
I1,
I2,
I4,
R4,
R8,
U1,
U2,
U3,
U4,
U6,
U7,
U8,
U9,
U40,
U64,
X1,
X2,
X4,
)
UBX_PAYLOADS_SET = {
# AssistNow Aiding Messages: i.e. Ephemeris, Almanac, other A-GPS data input.
# Messages in the AID class are used to send GPS aiding data to the receiver
# AID messages are deprecated in favour of MGA messages in >=Gen8
"AID-ALM": {"svid": U4, "week": U4, "optBlock": ("None", {"dwrd": U4})},
"AID-AOP": {"gnssId": U1, "svId": U1, "reserved1": U2, "data": U64},
"AID-EPH": {
"svid": U4,
"how": U4,
"optBlock": (
"None",
{
"sf1d1": U4,
"sf1d2": U4,
"sf1d3": U4,
"sf1d4": U4,
"sf1d5": U4,
"sf1d6": U4,
"sf1d7": U4,
"sf1d8": U4,
"sf2d1": U4,
"sf2d2": U4,
"sf2d3": U4,
"sf2d4": U4,
"sf2d5": U4,
"sf2d6": U4,
"sf2d7": U4,
"sf2d8": U4,
"sf3d1": U4,
"sf3d2": U4,
"sf3d3": U4,
"sf3d4": U4,
"sf3d5": U4,
"sf3d6": U4,
"sf3d7": U4,
"sf3d8": U4,
},
),
},
"AID-HUI": {
"health": X4,
"utcA0": R8,
"utcA1": R8,
"utcTOW": I4,
"utcWNT": I2,
"utcLS": I2,
"utcWNF": I2,
"utcDNs": I2,
"utcLSF": I2,
"utcSpare": I2,
"klobA0": R4,
"klobA1": R4,
"klobA2": R4,
"klobA3": R4,
"klobB0": R4,
"klobB1": R4,
"klobB2": R4,
"klobB3": R4,
"flags": X4,
},
"AID-INI": {
"ecefXOrLat": I4,
"ecefYOrLon": I4,
"ecefZOrAlt": I4,
"posAcc": U4,
"tmCfg": X2,
"wn": U2,
"tow": U4,
"towNs": I4,
"tAccMs": U4,
"tAccNs": U4,
"clkDOrFreq": I4,
"clkDAccOrFreqAcc": U4,
"flags": X4,
},
# ********************************************************************
# Configuration Input Messages: i.e. Set Dynamic Model, Set DOP Mask, Set Baud Rate, etc..
# Messages in the CFG class are used to configure the receiver and read out current configuration values. Any
# messages in the CFG class sent to the receiver are either acknowledged (with message UBX-ACK-ACK) if
# processed successfully or rejected (with message UBX-ACK-NAK) if processing unsuccessfully.
"CFG-ANT": {"flags": X2, "pins": X2},
"CFG-BATCH": {
"version": U1,
"flags": X1,
"bufSize": U2,
"notifThrs": U2,
"pioId": U1,
"reserved0": U1,
},
"CFG-CFG": {"clearMask": X4, "saveMask": X4, "loadMask": X4, "deviceMask": X1},
"CFG-DAT": {
"majA": R8,
"flat": R8,
"dX": R4,
"dY": R4,
"dZ": R4,
"rotX": R4,
"rotY": R4,
"rotZ": R4,
"scale": R4,
},
"CFG-DGNSS": {
"dgnssMode": U1,
"reserved0": U3,
},
"CFG-DOSC": {
"version": U1,
"numOsc": U1,
"reserved1": U2,
"group": (
"numOsc",
{ # repeating group * numOsc
"oscId": U1,
"reserved2": U1,
"flags": X2,
"freq": U4,
"phaseOffset": I4,
"withTemp": U4,
"withAge": U4,
"timeToTemp": U2,
"reserved3": U2,
"gainVco": I4,
"gainUncertainty": U1,
"reserved4": U3,
},
),
},
"CFG-DYNSEED": {"version": U1, "reserved1": U3, "seedHi": U4, "seedLo": U4},
"CFG-ESFALG": {
"bitfield": U4,
"yaw": U4,
"pitch": I2,
"roll": I2,
},
"CFG-ESFA": {
"version": U1,
"reserved1": U9,
"accelRmsThdl": U1,
"frequency": U1,
"latency": U2,
"accuracy": U2,
"reserved2": U4,
},
"CFG-ESFG": {
"version": U1,
"reserved1": U7,
"tcTableSaveRate": U2,
"gyroRmsThdl": U1,
"frequency": U1,
"latency": U2,
"accuracy": U2,
"reserved2": U4,
},
"CFG-ESFWT": {
"version": U1,
"flags1": X1,
"flags2": X1,
"reserved1": U1,
"wtFactor": U4,
"wtQuantError": U4,
"wtCountMax": U4,
"wtLatency": U2,
"wtFrequency": U1,
"flags3": X1,
"speedDeadBand": U2,
"reserved2": U1,
},
"CFG-ESRC": {
"version": U1,
"numSources": U1,
"reserved1": U2,
"group": (
"numSources",
{ # repeating group * numSources
"extInt": U1,
"flags": X2,
"freq": U4,
"reserved2": U4,
"withTemp": U4,
"withAge": U4,
"timeToTemp": U2,
"maxDevLifeTim": U2,
"offset": I4,
"offsetUncertainty": U4,
"jitter": U4,
},
),
},
"CFG-FIXSEED": {
"version": U1,
"length": U1,
"reserved1": U2,
"seedHi": U4,
"seedLo": U4,
"group": ("length", {"classId": U1, "msgId": U1}), # repeating group * length
},
"CFG-GEOFENCE": {
"version": U1,
"numFences": U1,
"confLvl": U1,
"reserved1": U1,
"pioEnabled": U1,
"pinPolarity": U1,
"pin": U1,
"reserved2": U1,
"group": (
"numFences",
{"lat": I4, "lon": I4, "radius": U4}, # repeating group * numFences
),
},
"CFG-GNSS": {
"msgVer": U1,
"numTrkChHw": U1,
"numTrkChUse": U1,
"numConfigBlocks": U1,
"group": (
"numConfigBlocks",
{ # repeating group * numConfigBlocks
"gnssId": U1,
"resTrkCh": U1,
"maxTrkCh": U1,
"reserved1": U1,
"flags": X4,
},
),
},
"CFG-HNR": {
"highNavRate": U1,
"reserved1": U3,
},
"CFG-INF": {
"protocolID": U1,
"reserved1": U3,
"infMsgMaskDDC": X1,
"infMsgMaskUART1": X1,
"infMsgMaskUART2": X1,
"infMsgMaskUSB": X1,
"infMsgMaskSPI": X1,
"reserved2": X1,
},
"CFG-ITFM": {"config": X4, "config2": X4},
"CFG-LOGFILTER": {
"version": U1,
"flags": X1,
"minInterval": U2,
"timeThreshold": U2,
"speedThreshold": U2,
"positionThreshold": U4,
},
"CFG-MSG": {
"msgClass": U1,
"msgID": U1,
"rateDDC": U1,
"rateUART1": U1,
"rateUART2": U1,
"rateUSB": U1,
"rateSPI": U1,
"reserved": U1,
},
"CFG-NAV5": {
"mask": X2,
"dynModel": U1,
"fixMode": U1,
"fixedAlt": I4,
"fixedAltVar": U4,
"minElev": I1,
"drLimit": U1,
"pDop": U2,
"tDop": U2,
"pAcc": U2,
"tAcc": U2,
"staticHoldThresh": U1,
"dgpsTimeOut": U1,
"reserved2": U4,
"reserved3": U4,
"reserved4": U4,
},
"CFG-NAVX5": {
"version": U2,
"mask1": X2,
"mask2": X4,
"reserved0": U2,
"minSVs": U1,
"maxSVs": U1,
"minCNO": U1,
"reserved1": U1,
"iniFix3D": U1,
"reserved2": U2,
"ackAiding": U1,
"wknRollover": U2,
"sigAttenCompMode": U1,
"reserved3": U1,
"reserved4": U2,
"reserved5": U2,
"usePPP": U1,
"aopCfg": U1,
"reserved6": U2,
"aopOrbMaxErr": U2,
"reserved7": U4,
"reserved8": U3,
"useAdr": U1,
},
"CFG-NMEAvX": { # deprecated length 4
"filter": X1,
"nmeaVersion": U1,
"numSV": U1,
"flags": X1,
},
"CFG-NMEAv0": { # v0 deprecated length 12
"filter": X1,
"nmeaVersion": U1,
"numSV": U1,
"flags": X1,
"gnssToFilter": X4,
"svNumbering": U1,
"mainTalkerId": U1,
"gsvTalkerId": U1,
"version": U1,
},
"CFG-NMEA": { # preferred version length 20
"filter": X1,
"nmeaVersion": U1,
"numSV": U1,
"flags": X1,
"gnssToFilter": X4,
"svNumbering": U1,
"mainTalkerId": U1,
"gsvTalkerId": U1,
"version": U1,
"bdsTalkerId": C2,
"reserved1": U6,
},
"CFG-ODO": {
"version": U1,
"reserved1": U3,
"flags": U1,
"odoCfg": X1,
"reserved2": U6,
"cogMaxSpeed": U1,
"cogMaxPosAcc": U1,
"reserved3": U2,
"velLpGain": U1,
"cogLpGain": U1,
"reserved4": U2,
},
"CFG-PM2": {
"version": U1,
"reserved1": U1,
"reserved2": U1,
"reserved3": U1,
"flags": X4,
"updatePeriod": U4,
"searchPeriod": U4,
"gridOffset": U4,
"onTime": U2,
"minAcqTime": U2,
"reserved4": U2,
"reserved5": U2,
"reserved6": U4,
"reserved7": U4,
"reserved8": U1,
"reserved9": U1,
"reserved10": U2,
"reserved11": U4,
},
"CFG-PMS": {
"version": U1,
"powerSetupValue": U1,
"period": U2,
"onTime": U2,
"reserved1": U2,
},
"CFG-PRT": {
"portID": U1,
"reserved0": U1,
"txReady": X2,
"mode": X4,
"baudRate": U4,
"inProtoMask": X2,
"outProtoMask": X2,
"reserved4": U2,
"reserved5": U2,
},
"CFG-PWR": {"version": U1, "reserved1": U3, "state": U4},
"CFG-RATE": {"measRate": U2, "navRate": U2, "timeRef": U2},
"CFG-RINV": {"flags": X1, "group": ("None", {"data": U1})}, # repeating group
"CFG-RST": {"navBbrMask": X2, "resetMode": U1, "reserved1": U1},
"CFG-RXM": {"reserved1": U1, "lpMode": U1},
"CFG-SBAS": {
"mode": X1,
"usage": X1,
"maxSBAS": U1,
"scanmode2": X1,
"scanmode1": X4,
},
"CFG-SENIF": {
"type": U1,
"version": U1,
"flags": X1,
"pioConf": X2,
},
"CFG-SLAS": {
"mode": X1,
"reserved1": U3,
},
"CFG-SMGR": {
"minGNSSFix": U1,
"maxFreqChange": U2,
"maxPhaseCorrRate": U2,
"reserved1": U2,
"freqTolerance": U2,
"timeTolerance": U2,
"messageCfg": X2,
"maxSlewRate": U2,
"flags": X4,
},
"CFG-SPT": {
"version": U1,
"reserved0": U1,
"sensorId": U2,
"reserved1": U8,
},
"CFG-TMODE2": {
"timeMode": U1,
"reserved1": U1,
"flags": X2,
"ecefXOrLat": I4,
"ecefYOrLon": I4,
"ecefZOrAlt": I4,
"fixedPosAcc": U4,
"svinMinDur": U4,
"svinAccLimit": U4,
},
"CFG-TMODE3": {
"version": U1,
"reserved0": U1,
"flags": X2,
"ecefXOrLat": I4,
"ecefYOrLon": I4,
"ecefZOrAlt": I4,
"ecefXOrLatHP": I1,
"ecefYOrLonHP": I1,
"ecefZOrAltHP": I1,
"reserved1": U1,
"fixedPosAcc": U4,
"svinMinDur": U4,
"svinAccLimit": U4,
"reserved2": U8,
},
"CFG-TP5": {
"tpIdx": U1,
"reserved0": U1,
"reserved1": U2,
"antCableDelay": I2,
"rfGroupDelay": I2,
"freqPeriod": U4,
"freqPeriodLock": U4,
"pulseLenRatio": U4,
"pulseLenRatioLock": U4,
"userConfigDelay": I4,
"flags": X4,
},
"CFG-TXSLOT": {
"version": U1,
"enable": X1,
"refTp": U1,
"reserved1": U1,
"end1": U4,
"end2": U4,
"end3": U4,
},
"CFG-USB": {
"vendorID": U2,
"productID": U2,
"reserved1": U2,
"reserved2": U2,
"powerConsumpt": U2,
"flags": X2,
"vendorString": C32,
"productString": C32,
"serialNumber": C32,
},
"CFG-VALDEL": {
"version": U1, # = 0 no transaction, 1 with transaction
"layers": X1,
"transaction": X1, # if version = 1, else reserved
"reserved0": U1,
"group": ("None", {"keys": U4}), # repeating group
},
"CFG-VALSET": {
"version": U1, # = 0 no transaction, 1 with transaction
"layers": X1,
"transaction": U1, # if version = 1, else reserved
"reserved0": U1,
"group": ("None", {"cfgData": U1}), # repeating group
},
# ********************************************************************
# External Sensor Fusion Messages: i.e. External Sensor Measurements and Status Information.
# Messages in the ESF class are used to output external sensor fusion information from the receiver.
"ESF-MEAS": { # this version used when bit 3 of flags = 0
"timeTag": U4,
"flags": X2,
"id": U2,
"group": (
"None",
{ # repeating group * numMeas, which is bits 11..15 in flags
"data": X4,
},
),
},
"ESF-MEAS-CT": { # this version used when bit 3 of flags = 1
"timeTag": U4,
"flags": X2,
"id": U2,
"group": (
"ESF-MEAS-CT",
{ # repeating group * numMeas, which is bits 11..15 of flags
"data": X4,
},
),
"calibTtag": U4,
},
# ********************************************************************
# Logging Messages: i.e. Log creation, deletion, info and retrieval.
# Messages in the LOG class are used to configure and report status information of the logging feature.
"LOG-CREATE": {
"version": U1,
"logCfg": X1,
"reserved1": U1,
"logSize": U1,
"userDefinedSize": U4,
},
"LOG-ERASE": {},
"LOG-FINDTIME": {
"version": U1,
"type": U1,
"reserved0": U2,
"year": U2,
"month": U1,
"day": U1,
"hour": U1,
"minute": U1,
"second": U1,
"reserved1": U1,
},
"LOG-RETRIEVE": {
"startNumber": U4,
"entryCount": U4,
"version": U1,
"reserved": U3,
},
"LOG-RETRIEVEBATCH": {
"version": U1,
"flags": X1,
"reserved0": U2,
},
"LOG-STRING": {"group": ("None", {"bytes": U1})}, # repeating group
# ********************************************************************
# Multiple GNSS Assistance Messages: i.e. Assistance data for various GNSS.
# Messages in the MGA class are used for GNSS aiding information from and to the receiver.
"MGA-ANO": {
"type": U1,
"version": U1,
"svId": U1,
"gnssId": U1,
"year": U1,
"month": U1,
"day": U1,
"reserved1": U1,
"data": U64,
"reserved2": U4,
},
"MGA-BDS-EPH": {
"type": U1,
"version": U1,
"svId": U1,
"reserved1": U1,
"SatH1": U1,
"IODC": U1,
"a2": I2,
"a1": I4,
"a0": I4,
"toc": U4,
"TGD1": I2,
"URAI": U1,
"IODE": U1,
"toe": U4,
"sqrtA": U4,
"e": U4,
"omega": I4,
"Deltan": I2,
"IDOT": I2,
"M0": I4,
"Omega0": I4,
"OmegaDot": I4,
"i0": I4,
"Cuc": I4,
"Cus": I4,
"Crc": I4,
"Crs": I4,
"Cic": I4,
"Cis": I4,
"reserved2": U4,
},
"MGA-BDS-ALM": {
"type": U1,
"version": U1,
"svId": U1,
"reserved1": U1,
"Wna": U1,
"toa": U1,
"deltaI": I2,
"sqrtA": U4,
"e": U4,
"omega": I4,
"M0": I4,
"Omega0": I4,
"omegaDot": I4,
"a0": I2,
"a1": I2,
"reserved2": U4,
},
"MGA-BDS-HEALTH": {
"type": U1, # 0x04
"version": U1,
"reserved0": U2,
"grouphealthcode": (
30,
{
"healthCode": U2,
},
), # repeating group * 30
"reserved1": U4,
},
"MGA-BDS-UTC": {
"type": U1, # 0x05
"version": U1,
"reserved1": U2,
"a0UTC": I4,
"a1UTC": I4,
"dtLS": I1,
"reserved2": U1,
"wnRec": U1,
"wnLSF": U1,
"dN": U1,
"dtLSF": I1,
"reserved3": U2,
},
"MGA-BDS-IONO": {
"type": U1, # 0x06
"version": U1,
"reserved1": U2,
"alpha0": I1,
"alpha1": I1,
"alpha2": I1,
"alpha3": I1,
"beta0": I1,
"beta1": I1,
"beta2": I1,
"beta3": I1,
"reserved2": U4,
},
"MGA-FLASH-DATA": {
"type": U1,
"version": U1,
"sequence": U2,
"size": U2,
"group": ("size", {"data": U1}), # repeating group * size
},
"MGA-FLASH-STOP": {"type": U1, "version": U1},
"MGA-GAL-EPH": {
"type": U1,
"version": U1,
"svId": U1,
"reserved1": U1,
"iodNav": U2,
"deltaN": I2,
"m0": I4,
"e": U4,
"sqrtA": U4,
"omega0": I4,
"i0": I4,
"omega": I4,
"omegaDot": I4,
"iDot": I2,
"cuc": I2,
"cus": I2,
"crc": I2,
"crs": I2,
"cic": I2,
"cis": I2,
"toe": U2,
"af0": I4,
"af1": I4,
"af2": I1,
"sisaIndexE1E5b": U1,
"toc": U2,
"bgdE1E5b": I2,
"reserved2": U2,
"healthE1B": U1,
"dataValidityE1B": U1,
"healthE5b": U1,
"dataValidityE5b": U1,
"reserved3": U4,
},
"MGA-GAL-ALM": {
"type": U1,
"version": U1,
"svId": U1,
"reserved1": U1,
"ioda": U1,
"almWNa": U1,
"toa": U2,
"deltaSqrtA": I2,
"e": U2,
"deltaI": I2,
"omega0": I2,
"omegaDot": I2,
"omega": I2,
"m0": I2,
"af0": I2,
"af1": I2,
"healthE1B": U1,
"healthE5b": U1,
"reserved2": U4,
},
"MGA-GAL-TIMEOFFSET": {
"type": U1,
"version": U1,
"reserved1": U2,
"a0G": I2,
"a1G": I2,
"t0G": U1,
"wn0G": U1,
"reserved2": U2,
},
"MGA-GAL-UTC": {
"type": U1,
"version": U1,
"reserved1": U2,
"a0": I4,
"a1": I4,
"dtLS": I1,
"tot": U1,
"wnt": U1,
"wnLSF": U1,
"dN": U1,
"dTLSF": I1,
"reserved2": U2,
},
"MGA-GLO-EPH": {
"type": U1,
"version": U1,
"svId": U1,
"reserved1": U1,
"FT": U1,
"B": U1,
"M": U1,
"H": I1,
"x": I4,
"y": I4,
"z": I4,
"dx": I4,
"dy": I4,
"dz": I4,
"ddx": I1,
"ddy": I1,
"ddz": I1,
"tb": U1,
"gamma": I2,
"E": U1,
"deltaTau": I1,
"tau": I4,
"reserved2": U4,
},
"MGA-GLO-ALM": {
"type": U1,
"version": U1,
"svId": U1,
"reserved1": U1,
"N": U2,
"M": U1,
"C": U1,
"tau": I2,
"epsilon": U2,
"lambda": I4,
"deltaI": I4,
"tLambda": U4,
"deltaT": I4,
"deltaDT": I1,
"H": I1,
"omega": I2,
"reserved2": U4,
},
"MGA-GLO-TIMEOFFSET": {
"type": U1,
"version": U1,
"N": U2,
"tauC": I4,
"tauGps": I4,
"B1": I2,
"B2": I2,
"reserved1": U4,
},
"MGA-GPS-EPH": {
"type": U1,
"version": U1,
"svId": U1,
"reserved1": U1,
"fitInterval": U1,
"uraIndex": U1,
"svHealth": U1,
"tgd": I1,
"iodc": U2,
"toc": U2,
"reserved2": U1,
"af2": I1,
"af1": I2,
"af0": I4,
"crs": I2,
"deltaN": I2,
"m0": I4,
"cuc": I2,
"cus": I2,
"e": U4,
"sqrtA": U4,
"toe": U2,
"cic": I2,
"omega0": I4,
"cis": I2,
"crc": I2,
"i0": I4,
"omega": I4,
"omegaDot": I4,
"idot": I2,
"reserved3": U4,
},
"MGA-GPS-ALM": {
"type": U1,
"version": U1,
"svId": U1,
"svHealth": U1,
"e": U2,
"almWNa": U1,
"toa": U1,
"deltaI": I2,
"omegaDot": I2,
"sqrtA": U4,
"omega0": I4,
"omega": I4,
"m0": I4,
"af0": I2,
"af1": I2,
"reserved1": U4,
},
"MGA-GPS-HEALTH": {
"type": U1,
"version": U1,
"reserved0": U2,
"grouphealthcode": (
32,
{
"healthCode": U1,
},
), # repeating group * 32
"reserved1": U4,
},
"MGA-GPS-UTC": {
"type": U1,
"version": U1,
"reserved1": U2,
"utcA0": I4,
"utcA1": I4,
"utcDtLS": I1,
"utcTot": U1,
"utcWNt": U1,
"utcWNlsf": U1,
"utcDn": U1,
"utcDtLSF": I1,
"reserved2": U2,
},
"MGA-GPS-IONO": {
"type": U1,
"version": U1,
"reserved1": U2,
"ionoAlpha0": I1,
"ionoAlpha1": I1,
"ionoAlpha2": I1,
"ionoAlpha3": I1,
"ionoBeta0": I1,
"ionoBeta1": I1,
"ionoBeta2": I1,
"ionoBeta3": I1,
"reserved2": U4,
},
"MGA-INI-POS_XYZ": {
"type": U1,
"version": U1,
"reserved1": U2,
"ecefX": I4,
"ecefY": I4,
"ecefZ": I4,
"posAcc": U4,
},
"MGA-INI-POS_LLH": {
"type": U1,
"version": U1,
"reserved1": U2,
"lat": I4,
"lon": I4,
"alt": I4,
"posAcc": U4,
},
"MGA-INI-TIME_UTC": {
"type": U1,
"version": U1,
"ref": X1,
"leapSecs": I1,
"year": U2,
"month": U1,
"day": U1,
"hour": U1,
"minute": U1,
"second": U1,
"reserved1": U1,
"ns": U4,
"tAccS": U2,
"reserved2": U2,
"tAccNs": U4,
},
"MGA-INI-TIME_GNSS": {
"type": U1,
"version": U1,
"ref": X1,
"gnssId": U1,
"reserved1": U2,
"week": U2,
"tow": U4,
"ns": U4,
"tAccS": U2,
"reserved2": U2,
"tAccNs": U4,
},
"MGA-INI-CLKD": {
"type": U1,
"version": U1,
"reserved1": U2,
"clkD": I4,
"clkDAcc": U4,
},
"MGA-INI-FREQ": {
"type": U1,
"version": U1,
"reserved1": U1,
"flags": X1,
"freq": I4,
"freqAcc": U4,
},
"MGA-INI-EOP": {
"type": U1,
"version": U1,
"reserved1": U2,
"d2kRef": U2,
"d2kMax": U2,
"xpP0": I4,
"xpP1": I4,
"ypP0": I4,
"ypP1": I4,
"dUT1": I4,
"ddUT1": I4,
"reserved2": U40,
},
"MGA-QZSS-EPH": {
"type": U1,
"version": U1,
"svId": U1,
"reserved1": U1,
"fitInterval": U1,
"uraIndex": U1,
"svHealth": U1,
"tgd": I1,
"iodc": U2,
"toc": U2,
"reserved2": U1,
"af2": I1,
"af1": I2,
"af0": I4,
"crs": I2,
"deltaN": I2,
"m0": I4,
"cuc": I2,
"cus": I2,
"e": U4,
"sqrtA": U4,
"toe": U2,
"cic": I2,
"omega0": I4,
"cis": I2,
"crc": I2,
"i0": I4,
"omega": I4,
"omegaDot": I4,
"idot": I2,
"reserved3": U2,
},
"MGA-QZSS-ALM": {
"type": U1,
"version": U1,
"svId": U1,
"svHealth": U1,
"e": U2,
"almWNa": U1,
"toa": U1,
"deltaI": I2,
"omegaDot": I2,
"sqrtA": U4,
"omega0": I4,
"omega": I4,
"m0": I4,
"af0": I2,
"af1": I2,
"reserved1": U4,
},
"MGA-QZSS-HEALTH": {
"type": U1,
"version": U1,
"reserved0": U2,
"grouphealthcode": (
5,
{
"healthCode": U1,
},
), # repeating group * 5
"reserved1": U3,
},
# ********************************************************************
# Navigation Results Messages: i.e. Position, Speed, Time, Acceleration, Heading, DOP, SVs used.
# Messages in the NAV class are used to output navigation data such as position, altitude and velocity in a
# number of formats. Additionally, status flags and accuracy figures are output. The messages are generated with
# the configured navigation/measurement rate.
"NAV-RESETODO": {},
# ********************************************************************
# Receiver Manager Messages: i.e. Satellite Status, RTC Status.
# Messages in the RXM class are used to output status and result data from the Receiver Manager. The output
# rate is not bound to the navigation/measurement rate and messages can also be generated on events.
"RXM-PMREQ-S": {
"duration": U4,
"flags": X4,
}, # this appears to be a deprecated version
"RXM-PMREQ": {
"version": U1, # 0x00
"reserved0": U3,
"duration": U4,
"flags": X4,
"wakeupSources": X4,
},
# ********************************************************************
# Timing Messages: i.e. Time Pulse Output, Time Mark Results.
# Messages in the TIM class are used to output timing information from the receiver, like Time Pulse and Time
# Mark measurements.
"TIM-HOC": {"version": U1, "oscId": U1, "flags": U1, "reserved1": U1, "value": I4},
"TIM-VCOCAL": {
"type": U1,
"version": U1,
"oscId": U1,
"reserved1": U3,
"gainUncertainty": U2,
"gainVco": I4,
},
# ********************************************************************
# Firmware Update Messages: i.e. Memory/Flash erase/write, Reboot, Flash identification, etc..
# Messages in the UPD class are used to update the firmware and identify any attached flash device.
"UPD-SOS": {"cmd": U1, "reserved1": U3}, # Create or clear backup in flash
}
| 24.783496 | 116 | 0.41377 |
1ef809e6939644a0bdd9be8ac59cf33c010f0280 | 636 | py | Python | migrations/versions/a4f3c426417e_.py | apoorvkhare07/SUMSarizer | ff8264906c380b4d5e6a52a23040bb8bff361c92 | [
"MIT"
] | 9 | 2015-03-08T07:45:30.000Z | 2019-02-10T04:22:34.000Z | migrations/versions/a4f3c426417e_.py | apoorvkhare07/SUMSarizer | ff8264906c380b4d5e6a52a23040bb8bff361c92 | [
"MIT"
] | 45 | 2015-04-09T00:32:09.000Z | 2018-08-22T18:04:53.000Z | migrations/versions/a4f3c426417e_.py | apoorvkhare07/SUMSarizer | ff8264906c380b4d5e6a52a23040bb8bff361c92 | [
"MIT"
] | 13 | 2015-04-08T23:52:05.000Z | 2019-02-20T10:22:33.000Z | """empty message
Revision ID: a4f3c426417e
Revises: 46ae5e506646
Create Date: 2016-07-01 06:25:23.060945
"""
# revision identifiers, used by Alembic.
revision = 'a4f3c426417e'
down_revision = '46ae5e506646'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Apply this migration: drop the obsolete ``users.stormpath_id`` column."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('users', 'stormpath_id')
    ### end Alembic commands ###
def downgrade():
    """Revert this migration: re-add ``users.stormpath_id`` as a nullable varchar."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('users', sa.Column('stormpath_id', sa.VARCHAR(), autoincrement=False, nullable=True))
    ### end Alembic commands ###
| 23.555556 | 103 | 0.70283 |
5e41cc55bf954f8a3bec620d1f6880984878be91 | 1,386 | py | Python | redisai/convert.py | dagdelenmustafa/redisai-py | 18837ea67101946613e8e94ec113eababe49f2e1 | [
"BSD-3-Clause"
] | null | null | null | redisai/convert.py | dagdelenmustafa/redisai-py | 18837ea67101946613e8e94ec113eababe49f2e1 | [
"BSD-3-Clause"
] | null | null | null | redisai/convert.py | dagdelenmustafa/redisai-py | 18837ea67101946613e8e94ec113eababe49f2e1 | [
"BSD-3-Clause"
] | null | null | null | from typing import Union, ByteString, Sequence
from .utils import convert_to_num
from .constants import DType
from .containers import Tensor
try:
import numpy as np
except (ImportError, ModuleNotFoundError):
np = None
def from_numpy(tensor: np.ndarray) -> Tensor:
    """Wrap a user-supplied numpy array as a RedisAI `Tensor` in binary form.

    The array's dtype name is looked up in the `DType` enum and the raw
    buffer is shipped as a BLOB.
    """
    redisai_dtype = DType.__members__[str(tensor.dtype)]
    raw_blob = bytes(tensor.data)
    return Tensor(raw_blob, tensor.shape, redisai_dtype, 'BLOB')
def from_sequence(tensor: Sequence, shape: Union[list, tuple], dtype: DType) -> Tensor:
    """ Convert the `list`/`tuple` input from user to `Tensor` """
    # Values are shipped as-is in the textual 'VALUES' representation.
    return Tensor(tensor, shape, dtype, 'VALUES')
def to_numpy(value: ByteString, shape: Union[list, tuple], dtype: DType) -> np.ndarray:
    """Decode a BLOB reply from RedisAI into an `np.ndarray` of `shape`.

    RedisAI reports FLOAT/DOUBLE, which map to numpy's float32/float64;
    every other dtype name is just lowercased.
    """
    member_value = DType.__members__[dtype.lower()].value
    special = {'FLOAT': 'float32', 'DOUBLE': 'float64'}
    numpy_dtype = special.get(member_value, member_value.lower())
    return np.frombuffer(value, dtype=numpy_dtype).reshape(shape)
def to_sequence(value: list, shape: list, dtype: DType) -> Tensor:
    """Decode a VALUES reply from RedisAI into a `Tensor`.

    `convert_to_num` is called for its side effect on `value`; its return
    value is unused here.
    """
    member = DType.__members__[dtype.lower()]
    convert_to_num(member, value)
    return Tensor(value, tuple(shape), member, 'VALUES')
c241a91827b0590556d77fefcf98261516281f4e | 4,463 | py | Python | ktb/util.py | ABasharEter/KaggleToolbox | bef82f9ffbc1270299d2892aadc56817e2bdf501 | [
"MIT"
] | 1 | 2020-03-30T03:55:03.000Z | 2020-03-30T03:55:03.000Z | ktb/util.py | ABasharEter/KaggleToolbox | bef82f9ffbc1270299d2892aadc56817e2bdf501 | [
"MIT"
] | null | null | null | ktb/util.py | ABasharEter/KaggleToolbox | bef82f9ffbc1270299d2892aadc56817e2bdf501 | [
"MIT"
] | null | null | null | from typing import List, NoReturn, Union, Tuple, Optional, Text, Generic, Callable, Dict
import pandas as pd
import os
import pickle
import gzip
import numpy as np
import random as rn
from datetime import datetime
SEED = 777  # global RNG seed, applied at import time via seed_everything(SEED)
output_dir = "../Results"  # destination folder for write_submission() CSVs
def write_submission(df, cols=None):
    """Write `df` (optionally restricted to `cols`) to a timestamped
    submission CSV under `output_dir`, rounding floats to 4 decimals."""
    selected = df.columns if cols is None else cols
    stamp = datetime.strftime(datetime.now(), "%Y-%m-%d_%H-%M-%S")
    target = os.path.join(output_dir, f'submission-{stamp}.csv')
    df[selected].to_csv(target, index=False, float_format='%.4f')
def read_object(file):
    """Load a pickled object from a gzip-compressed file."""
    with gzip.open(file, "rb") as handle:
        return pickle.load(handle)
def write_object(file, obj):
    """Pickle `obj` and store it gzip-compressed at `file`."""
    with gzip.open(file, "wb") as handle:
        pickle.dump(obj, handle)
def cache_func(func, key):
    """Wrap `func` so its result is cached on disk.

    The cache entry lives at ``cache/<key><func.__name__>`` relative to the
    current working directory. On a hit the pickled result is returned; on a
    miss (or an unreadable cache entry) `func` runs and its result is saved.

    Fixes: the bare ``except:`` (which also swallowed KeyboardInterrupt) is
    narrowed to ``except Exception``; the pointless ``f"cache"`` literal and
    the exists/mkdir race are replaced with ``os.makedirs(exist_ok=True)``;
    the cache path is built once instead of re-formatted on every call.
    """
    os.makedirs("cache", exist_ok=True)
    cache_path = "cache/" + key + func.__name__

    def inner_func(*args, **kwargs):
        try:
            if os.path.exists(cache_path):
                return read_object(cache_path)
        except Exception:
            # Corrupt or unreadable cache entry: fall through and recompute.
            pass
        obj = func(*args, **kwargs)
        write_object(cache_path, obj)
        return obj
    return inner_func
def seed_everything(seed):
    """Seed Python's RNG, NumPy's RNG, and the hash seed for reproducibility."""
    os.environ['PYTHONHASHSEED'] = str(seed)
    rn.seed(seed)
    np.random.seed(seed)
def reduce_mem_usage(df: pd.DataFrame,
                     verbose: bool = True) -> pd.DataFrame:
    """Downcast each numeric column of `df` to the narrowest dtype whose
    range still covers the column's observed min/max, then optionally print
    the memory saving. The frame is modified in place and also returned.

    Note: only the value *range* is checked, so narrowing floats to
    float16/float32 can lose precision.
    """
    numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
    mem_before = df.memory_usage().sum() / 1024**2
    for col in df.columns:
        if df[col].dtypes not in numerics:
            continue
        lo, hi = df[col].min(), df[col].max()
        if str(df[col].dtypes)[:3] == 'int':
            # Smallest signed integer type whose range strictly contains
            # [lo, hi]; leave the column untouched if none qualifies.
            for candidate in (np.int8, np.int16, np.int32, np.int64):
                info = np.iinfo(candidate)
                if lo > info.min and hi < info.max:
                    df[col] = df[col].astype(candidate)
                    break
        else:
            # Try half then single precision, falling back to float64.
            for candidate in (np.float16, np.float32):
                info = np.finfo(candidate)
                if lo > info.min and hi < info.max:
                    df[col] = df[col].astype(candidate)
                    break
            else:
                df[col] = df[col].astype(np.float64)
    mem_after = df.memory_usage().sum() / 1024**2
    reduction = (mem_before - mem_after) / mem_before
    msg = f'Mem. usage decreased to {mem_after:5.2f} MB ({reduction * 100:.1f} % reduction)'
    if verbose:
        print(msg)
    return df
def maddest(d: Union[np.array, pd.Series, List], axis: Optional[int] = None) -> np.array:
    """Mean absolute deviation of `d` about its mean, taken along `axis`."""
    centered = np.absolute(d - np.mean(d, axis))
    return np.mean(centered, axis)
def batching(df : pd.DataFrame,
             batch_size : int,
             add_index : Optional[bool]=True) -> pd.DataFrame :
    """Add a 1-based 'batch_<size>' column grouping rows by index // batch_size,
    and (optionally) a 'batch_<size>_idx' within-batch index column.

    NOTE(review): because the batch id is 1-based, ``index - batch * batch_size``
    is negative for every row, and the subsequent uint16 cast wraps it to
    ``65536 - n``. Confirm downstream code expects these wrapped values before
    changing anything here.
    """
    df['batch_'+ str(batch_size)] = df.groupby(df.index//batch_size, sort=False)[df.columns[0]].agg(['ngroup']).values + 1
    df['batch_'+ str(batch_size)] = df['batch_'+ str(batch_size)].astype(np.uint16)
    if add_index:
        df['batch_' + str(batch_size) +'_idx'] = df.index - (df['batch_'+ str(batch_size)] * batch_size)
        df['batch_' + str(batch_size) +'_idx'] = df['batch_' + str(batch_size) +'_idx'].astype(np.uint16)
    return df
def flattern_values(obj, func=None):
    """Recursively flatten the values of nested dicts/lists into a flat list.

    Leaves are appended as-is; when `func` is given, each leaf is first mapped
    through `func` and its (possibly nested) result is flattened in turn.

    Fixes: the dict branch iterated ``obj.values`` (the bound method object)
    instead of calling ``obj.values()``, which raised TypeError for any dict
    input.
    """
    res = []
    if isinstance(obj, dict):
        for v in obj.values():
            res.extend(flattern_values(v, func))
    elif isinstance(obj, list):
        for v in obj:
            res.extend(flattern_values(v, func))
    else:
        if func is not None:
            # Flatten func's output without re-applying func to it.
            res.extend(flattern_values(func(obj), None))
        else:
            res.append(obj)
    return res
res = None
if isinstance(obj, dict):
res = {k:apply2values(v, func) for k,v in obj.items}
elif isinstance(obj, list):
res = [apply2values(v, func) for v in obj]
else:
res = func(obj)
return res
seed_everything(SEED)
| 34.596899 | 122 | 0.562402 |
0a530623e711c043164c93414fa68dec0613d147 | 909 | py | Python | src/data.py | Aradhya-Tripathi/deBlur | 6928b582b5c5ce4f681dfa5baa23f85d8e4d44c6 | [
"MIT"
] | 4 | 2020-12-18T20:52:05.000Z | 2021-04-27T20:15:27.000Z | src/data.py | Aradhya-Tripathi/deBlur | 6928b582b5c5ce4f681dfa5baa23f85d8e4d44c6 | [
"MIT"
] | null | null | null | src/data.py | Aradhya-Tripathi/deBlur | 6928b582b5c5ce4f681dfa5baa23f85d8e4d44c6 | [
"MIT"
] | null | null | null | from PIL import Image, ImageFilter
import pandas as pd
import numpy as np
from torch.utils.data import Dataset
import os
from torchvision import transforms
def get_paths():
    """List every entry inside the local `image/` folder, prefixed with the
    folder name so the results are usable as relative paths."""
    base = 'image/'
    return [base + name for name in os.listdir(base)]
class Data(Dataset):
    """Dataset yielding (degraded image, original image) tensor pairs.

    The "degraded" input is produced by shrinking each 128x128 image to
    40x40 (bilinear) and blowing it back up with nearest-neighbour, which
    simulates a blurry/low-resolution version of the target.
    """

    def __init__(self, paths):
        super().__init__()
        # Cap the dataset at the first 2000 files.
        self.paths = paths[:2000]
        self.trans = transforms.Compose([transforms.ToTensor()])

    def __len__(self):
        return len(self.paths)

    def __getitem__(self, xid):
        self.image = Image.open(self.paths[xid]).resize((128, 128))
        self.temp_img = self.image.resize((40, 40), Image.BILINEAR)
        self.xs_img = self.temp_img.resize(self.image.size, Image.NEAREST)
        return self.trans(self.xs_img), self.trans(self.image)
# data = Data(get_paths())
# img, label = data[500]
# label.show() | 24.567568 | 70 | 0.690869 |
ab1ae4c629f202f3849778bc415044a361d0dfa2 | 2,682 | py | Python | settings.py | codeforamerica/transparencyjobs | a9ffec28918cf5a1b18f15ef2b17c6aa2de3e070 | [
"BSD-3-Clause"
] | null | null | null | settings.py | codeforamerica/transparencyjobs | a9ffec28918cf5a1b18f15ef2b17c6aa2de3e070 | [
"BSD-3-Clause"
] | null | null | null | settings.py | codeforamerica/transparencyjobs | a9ffec28918cf5a1b18f15ef2b17c6aa2de3e070 | [
"BSD-3-Clause"
] | 1 | 2021-04-17T15:38:14.000Z | 2021-04-17T15:38:14.000Z | # Django settings for ppp project.
import os
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/New_York'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'gatekeeper.middleware.GatekeeperMiddleware',
)
ROOT_URLCONF = 'transparencyjobs.urls'
TEMPLATE_DIRS = (
os.path.join(PROJECT_ROOT, 'templates'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.markup',
'django.contrib.messages',
'django.contrib.admin',
'gatekeeper',
'transparencyjobs.jobs',
)
# Optional per-deployment overrides: if a local_settings.py is importable it
# may redefine any setting above; a missing file is silently tolerated.
try:
    from local_settings import *
except ImportError:
    pass
c1f2c98a882e1ff2ac052c015b6b3f8c03333231 | 875 | py | Python | Codefights/arcade/intro/level-4/16.Are-Similar/Python/test.py | RevansChen/online-judge | ad1b07fee7bd3c49418becccda904e17505f3018 | [
"MIT"
] | 7 | 2017-09-20T16:40:39.000Z | 2021-08-31T18:15:08.000Z | Codefights/arcade/intro/level-4/16.Are-Similar/Python/test.py | RevansChen/online-judge | ad1b07fee7bd3c49418becccda904e17505f3018 | [
"MIT"
] | null | null | null | Codefights/arcade/intro/level-4/16.Are-Similar/Python/test.py | RevansChen/online-judge | ad1b07fee7bd3c49418becccda904e17505f3018 | [
"MIT"
] | null | null | null | # Python3
from solution1 import areSimilar as f
qa = [
([1, 2, 3], [1, 2, 3], True),
([1, 2, 3], [2, 1, 3], True),
([1, 2, 2], [2, 1, 1], False),
([1, 1, 4], [1, 2, 3], False),
([1, 2, 3], [1, 10, 2], False),
([2, 3, 1], [1, 3, 2], True),
([2, 3, 9], [10, 3, 2], False),
([4, 6, 3], [3, 4, 6], False),
([832, 998, 148, 570, 533, 561, 894, 147, 455, 279], [832, 998, 148, 570, 533, 561, 455, 147, 894, 279], True),
([832, 998, 148, 570, 533, 561, 894, 147, 455, 279], [832, 570, 148, 998, 533, 561, 455, 147, 894, 279], False)
]
# Each qa row is (*inputs, expected): star-unpack the inputs into q and keep
# the final element as the expected answer.
for *q, a in qa:
    for i, e in enumerate(q):
        print('input{0}: {1}'.format(i + 1, e))
    ans = f(*q)
    # Compare the solution's output against the expected value and report.
    if ans != a:
        print(' [failed]')
        print(' output:', ans)
        print(' expected:', a)
    else:
        print(' [ok]')
        print(' output:', ans)
    print()
1c2c711f45577a76148ae8af191d115d86196cec | 8,486 | py | Python | client/filesystem.py | thatch/pyre-check | 31278b71081ec3a3636f0d94da91dc0e4273ece9 | [
"MIT"
] | null | null | null | client/filesystem.py | thatch/pyre-check | 31278b71081ec3a3636f0d94da91dc0e4273ece9 | [
"MIT"
] | null | null | null | client/filesystem.py | thatch/pyre-check | 31278b71081ec3a3636f0d94da91dc0e4273ece9 | [
"MIT"
] | null | null | null | # Copyright (c) 2016-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import errno
import fcntl
import functools
import logging
import os
import shutil
import subprocess
from contextlib import contextmanager
from typing import ContextManager, Dict, Generator, Iterable, List, Optional, Set
from .exceptions import EnvironmentException
LOG: logging.Logger = logging.getLogger(__name__)
class BuckBuilder:
    """Abstract interface for building Buck targets into analyzable output."""
    def build(self, targets: Iterable[str]) -> Iterable[str]:
        """
        Build the given targets, and return a list of output directories
        containing the target output.
        """
        raise NotImplementedError
def translate_path(root: str, path: str) -> str:
if os.path.isabs(path):
return path
translated = os.path.join(root, path)
if os.path.exists(translated):
return os.path.realpath(translated)
return path
def translate_paths(paths: Set[str], original_directory: str) -> Set[str]:
    """Rewrite relative `paths` so they resolve from the current working
    directory, assuming they were given relative to `original_directory`
    (which must be inside the cwd, otherwise the set is returned as-is)."""
    cwd = os.getcwd()
    if not original_directory.startswith(cwd):
        return paths
    prefix = os.path.relpath(original_directory, cwd)
    if not prefix:
        return paths
    return {translate_path(prefix, path) for path in paths}
def find_root(original_directory: str, target_file: str) -> Optional[str]:
    """Walk upward from `original_directory` and return the first ancestor
    directory that directly contains `target_file`, or None when the walk
    reaches / without a hit (the filesystem root itself is not checked)."""
    directory = os.path.abspath(original_directory)
    while directory != "/":
        if os.path.isfile(os.path.join(directory, target_file)):
            return directory
        directory = os.path.dirname(directory)
    return None
def exists(path: str) -> str:
    """Return `path` unchanged, raising ValueError unless it is a regular file."""
    if os.path.isfile(path):
        return path
    raise ValueError("%s is not a valid file" % path)
def is_parent(parent: str, child: str) -> bool:
    """True when `child` lies strictly inside the directory `parent`."""
    prefix = parent.rstrip(os.sep) + os.sep
    return child.startswith(prefix)
def find_paths_with_extensions(root: str, extensions: Iterable[str]) -> List[str]:
    """List all regular files and symlinks under `root` whose names end in
    one of `extensions` (given without the leading dot), using find(1).

    Returns absolute paths; propagates CalledProcessError if `find` fails.
    """
    root = os.path.abspath(root)  # So the reported paths are absolute too.
    name_tests: List[str] = []
    for extension in extensions:
        if name_tests:
            name_tests.append("-or")
        name_tests += ["-name", "*.{}".format(extension)]
    command = [
        "find",
        root,
        # Match any of the requested extensions ...
        "(",
        *name_tests,
        ")",
        # ... restricted to regular files or symlinks.
        "(",
        "-type",
        "f",
        "-or",
        "-type",
        "l",
        ")",
        "-print",
    ]
    raw = subprocess.check_output(command, stderr=subprocess.DEVNULL)
    output = raw.decode("utf-8").strip()
    return output.split("\n") if output else []
def find_python_paths(root: str) -> List[str]:
    """All .py/.pyi files under `root`; wraps `find` failures in a
    user-facing EnvironmentException."""
    try:
        return find_paths_with_extensions(root, ["py", "pyi"])
    except subprocess.CalledProcessError:
        raise EnvironmentException(
            "Pyre was unable to locate an analysis directory. "
            "Ensure that your project is built and re-run pyre.")
def is_empty(path: str) -> bool:
    """True if `path` exists and has size zero; a missing path is False."""
    try:
        size = os.stat(path).st_size
    except FileNotFoundError:
        return False
    return size == 0
def remove_if_exists(path: str) -> None:
    """Best-effort delete of `path`, whether it is a file or a directory
    tree; silently does nothing when it is absent."""
    for deleter in (os.remove, shutil.rmtree):
        try:
            deleter(path)
        except OSError:
            pass  # Wrong kind of path (or already gone) - try the next.
def _compute_symbolic_link_mapping(
    directory: str, extensions: Iterable[str]
) -> Dict[str, str]:
    """
    Given a shared analysis directory, produce a mapping from actual source files
    to files contained within this directory. Only includes files which have
    one of the provided extensions.

    Watchman watches actual source files, so when a change is detected to a
    file, this mapping can be used to identify what file changed from Pyre's
    perspective.

    On a `find` failure the error is logged and an empty mapping is returned.
    """
    symbolic_links = {}
    try:
        for symbolic_link in find_paths_with_extensions(directory, extensions):
            # Key: the link's resolved target; value: the link inside the
            # analysis directory.
            symbolic_links[os.path.realpath(symbolic_link)] = symbolic_link
    except subprocess.CalledProcessError as error:
        LOG.warning(
            "Exception encountered trying to find source files "
            "in the analysis directory: `%s`",
            error,
        )
        LOG.warning("Starting with an empty set of tracked files.")
    return symbolic_links
def _delete_symbolic_link(link_path: str) -> None:
    # Thin wrapper over os.unlink: removes the link itself, not its target.
    os.unlink(link_path)
def add_symbolic_link(link_path: str, actual_path: str) -> None:
    """Create (or refresh) a symlink at `link_path` pointing at
    `actual_path`, creating parent directories as needed.

    An existing link is replaced; any other OS error is logged.
    """
    parent = os.path.dirname(link_path)
    try:
        os.makedirs(parent)
    except OSError:
        pass  # Parent already exists (or cannot be created).
    try:
        os.symlink(actual_path, link_path)
    except OSError as error:
        if error.errno != errno.EEXIST:
            LOG.error(str(error))
        else:
            # Stale link already present: drop it and relink.
            os.unlink(link_path)
            os.symlink(actual_path, link_path)
@contextmanager
def acquire_lock(path: str, blocking: bool) -> Generator[Optional[int], None, None]:
    """Hold an exclusive fcntl lock on `path` for the duration of the block.

    Yields the locked file descriptor, or None when the lock file cannot be
    created. Raises OSError when `blocking` is False and the lock is held
    elsewhere.
    """
    LOG.debug("Trying to acquire lock on file %s", path)
    try:
        with open(path, "w+") as lockfile:
            flags = fcntl.LOCK_EX if blocking else fcntl.LOCK_EX | fcntl.LOCK_NB
            fcntl.lockf(lockfile.fileno(), flags)
            yield lockfile.fileno()
            fcntl.lockf(lockfile.fileno(), fcntl.LOCK_UN)
    except FileNotFoundError:
        LOG.debug(f"Unable to acquire lock because lock file {path} was not found")
        yield
@contextmanager
def do_nothing() -> Generator[None, None, None]:
    # No-op stand-in for acquire_lock() when locking is not needed.
    yield
def acquire_lock_if_needed(
    lock_path: str, blocking: bool, needed: bool
) -> ContextManager[Optional[int]]:
    """Return a real file lock when `needed`, else a no-op context manager."""
    return acquire_lock(lock_path, blocking) if needed else do_nothing()
class Filesystem:
    """File listing backed by the POSIX `find` utility."""

    def list(
        self, root: str, patterns: List[str], exclude: Optional[List[str]] = None
    ) -> List[str]:
        """
        Return the list of files that match any of the patterns within root.
        If exclude is provided, files that match an exclude pattern are omitted.

        Note: The `find` command does not understand globs properly.
        e.g. 'a/*.py' will match 'a/b/c.py'
        For this reason, avoid calling this method with glob patterns.
        """
        command = ["find", "."] + self._match_any(patterns)
        if exclude:
            command += ["-and", "!", *self._match_any(exclude)]
        completed = subprocess.run(command, stdout=subprocess.PIPE, cwd=root)
        return completed.stdout.decode("utf-8").split()

    def _match_any(self, patterns: List[str]) -> List[str]:
        # Build a find(1) predicate: ( -path ./p1 -or -path ./p2 ... )
        clauses: List[str] = []
        for pattern in patterns:
            if clauses:
                clauses.append("-or")
            clauses += ["-path", "./{}".format(pattern)]
        return ["(", *clauses, ")"]
class MercurialBackedFilesystem(Filesystem):
    """File listing backed by `hg files`, which is faster inside a
    mercurial checkout than walking with find(1)."""
    def list(
        self, root: str, patterns: List[str], exclude: Optional[List[str]] = None
    ) -> List[str]:
        """Return tracked files under `root` matching `patterns`, minus any
        matching `exclude`; raises EnvironmentException when hg is missing."""
        try:
            command = ["hg", "files"]
            for pattern in patterns:
                command += ["--include", pattern]
            if exclude:
                for pattern in exclude:
                    command += ["--exclude", pattern]
            return (
                subprocess.run(
                    command, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, cwd=root
                )
                .stdout.decode("utf-8")
                .split()
            )
        except FileNotFoundError:
            raise EnvironmentException("hg executable not found.")
@functools.lru_cache(1)
def get_filesystem() -> Filesystem:
    # Probe once (result cached): if `hg status` succeeds we are inside a
    # mercurial checkout and can use its faster listing; otherwise fall back
    # to the plain find(1)-based implementation.
    try:
        subprocess.check_output(["hg", "status"], stderr=subprocess.DEVNULL)
        return MercurialBackedFilesystem()
    except (subprocess.CalledProcessError, FileNotFoundError):
        return Filesystem()
| 30.415771 | 88 | 0.604054 |
3a421437c56640a221e090bcb7ad1b2fec1e05a0 | 79,441 | py | Python | python/ray/services.py | richard4912/ray | c5b88401938c6046f270b1e7f6f763540eb8e538 | [
"Apache-2.0"
] | 1 | 2021-02-08T13:40:58.000Z | 2021-02-08T13:40:58.000Z | python/ray/services.py | richard4912/ray | c5b88401938c6046f270b1e7f6f763540eb8e538 | [
"Apache-2.0"
] | null | null | null | python/ray/services.py | richard4912/ray | c5b88401938c6046f270b1e7f6f763540eb8e538 | [
"Apache-2.0"
] | 2 | 2018-09-04T21:00:36.000Z | 2019-04-03T06:34:26.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import binascii
import json
import logging
import multiprocessing
import os
import random
import resource
import shutil
import signal
import socket
import subprocess
import sys
import threading
import time
from collections import OrderedDict, namedtuple
from datetime import datetime
import redis
import pyarrow
# Ray modules
import ray.ray_constants
import ray.global_scheduler as global_scheduler
import ray.local_scheduler
import ray.plasma
PROCESS_TYPE_MONITOR = "monitor"
PROCESS_TYPE_LOG_MONITOR = "log_monitor"
PROCESS_TYPE_WORKER = "worker"
PROCESS_TYPE_RAYLET = "raylet"
PROCESS_TYPE_LOCAL_SCHEDULER = "local_scheduler"
PROCESS_TYPE_PLASMA_MANAGER = "plasma_manager"
PROCESS_TYPE_PLASMA_STORE = "plasma_store"
PROCESS_TYPE_GLOBAL_SCHEDULER = "global_scheduler"
PROCESS_TYPE_REDIS_SERVER = "redis_server"
PROCESS_TYPE_WEB_UI = "web_ui"
# This is a dictionary tracking all of the processes of different types that
# have been started by this services module. Note that the order of the keys is
# important because it determines the order in which these processes will be
# terminated when Ray exits, and certain orders will cause errors to be logged
# to the screen.
all_processes = OrderedDict(
[(PROCESS_TYPE_MONITOR, []), (PROCESS_TYPE_LOG_MONITOR, []),
(PROCESS_TYPE_WORKER, []), (PROCESS_TYPE_RAYLET, []),
(PROCESS_TYPE_LOCAL_SCHEDULER, []), (PROCESS_TYPE_PLASMA_MANAGER, []),
(PROCESS_TYPE_PLASMA_STORE, []), (PROCESS_TYPE_GLOBAL_SCHEDULER, []),
(PROCESS_TYPE_REDIS_SERVER, []), (PROCESS_TYPE_WEB_UI, [])], )
# True if processes are run in the valgrind profiler.
RUN_RAYLET_PROFILER = False
RUN_LOCAL_SCHEDULER_PROFILER = False
RUN_PLASMA_MANAGER_PROFILER = False
RUN_PLASMA_STORE_PROFILER = False
# Location of the redis server and module.
REDIS_EXECUTABLE = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
"core/src/common/thirdparty/redis/src/redis-server")
REDIS_MODULE = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
"core/src/common/redis_module/libray_redis_module.so")
# Location of the credis server and modules.
# credis will be enabled if the environment variable RAY_USE_NEW_GCS is set.
CREDIS_EXECUTABLE = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
"core/src/credis/redis/src/redis-server")
CREDIS_MASTER_MODULE = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
"core/src/credis/build/src/libmaster.so")
CREDIS_MEMBER_MODULE = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
"core/src/credis/build/src/libmember.so")
# Location of the raylet executables.
RAYLET_MONITOR_EXECUTABLE = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
"core/src/ray/raylet/raylet_monitor")
RAYLET_EXECUTABLE = os.path.join(
os.path.abspath(os.path.dirname(__file__)), "core/src/ray/raylet/raylet")
# ObjectStoreAddress tuples contain all information necessary to connect to an
# object store. The fields are:
# - name: The socket name for the object store
# - manager_name: The socket name for the object store manager
# - manager_port: The Internet port that the object store manager listens on
ObjectStoreAddress = namedtuple("ObjectStoreAddress",
["name", "manager_name", "manager_port"])
# Logger for this module. It should be configured at the entry point
# into the program using Ray. Ray configures it by default automatically
# using logging.basicConfig in its entry/init points.
logger = logging.getLogger(__name__)
def address(ip_address, port):
    """Join an IP/hostname and a port into the canonical "host:port" string."""
    return "{}:{}".format(ip_address, str(port))
def get_ip_address(address):
    """Extract the host part of a "host:port" string.

    Fixes: the type guard now uses isinstance (which also accepts str
    subclasses) instead of the exact-type comparison ``type(x) == str``.
    """
    assert isinstance(address, str), "Address must be a string"
    ip_address = address.split(":")[0]
    return ip_address
def get_port(address):
    """Return the integer port component of a "host:port" string.

    Raises a generic Exception when the address has no parseable port, to
    surface a readable message to the user.
    """
    try:
        return int(address.split(":")[1])
    except Exception:
        raise Exception("Unable to parse port from address {}".format(address))
def new_port():
    # Random high port in [10000, 65535]; no check that it is actually free,
    # so collisions with listening services are possible.
    return random.randint(10000, 65535)
def random_name():
    # Up-to-8-digit decimal string used as a unique-ish identifier.
    return str(random.randint(0, 99999999))
def kill_process(p):
    """Kill a process.

    Args:
        p: The process to kill.

    Returns:
        True if the process was killed successfully and false otherwise.
    """
    if p.poll() is not None:
        # The process has already terminated.
        return True
    if any([
            RUN_RAYLET_PROFILER, RUN_LOCAL_SCHEDULER_PROFILER,
            RUN_PLASMA_MANAGER_PROFILER, RUN_PLASMA_STORE_PROFILER
    ]):
        # Give process signal to write profiler data.
        os.kill(p.pid, signal.SIGINT)
        # Wait for profiling data to be written.
        time.sleep(0.1)

    # Allow the process one second to exit gracefully.
    p.terminate()
    # Watchdog: if p.wait() has not returned within one second, the timer
    # fires and force-kills the process so wait() can complete.
    timer = threading.Timer(1, lambda p: p.kill(), [p])
    try:
        timer.start()
        p.wait()
    finally:
        timer.cancel()

    if p.poll() is not None:
        return True
    # If the process did not exit within one second, force kill it.
    p.kill()
    if p.poll() is not None:
        return True
    # The process was not killed for some reason.
    return False
def cleanup():
    """When running in local mode, shutdown the Ray processes.

    This method is used to shutdown processes that were started with
    services.start_ray_head(). It kills all scheduler, object store, and
    worker processes that were started by this services module. Driver
    processes are started and disconnected by worker.py.
    """
    all_killed = True
    for process_type, processes in all_processes.items():
        # Kill every tracked process of this type.
        for process in processes:
            all_killed = kill_process(process) and all_killed
        # Forget the (now dead) processes of this type.
        all_processes[process_type] = []
    if not all_killed:
        logger.warning("Ray did not shut down properly.")
def all_processes_alive(exclude=None):
    """Check if all of the processes are still alive.

    Args:
        exclude: Don't check the processes whose types are in this list.
            Defaults to checking every process type.

    Returns:
        True if every non-excluded process is still running, False otherwise.
    """
    # Fix: avoid a mutable default argument (`exclude=[]`); treat None as
    # "exclude nothing".
    if exclude is None:
        exclude = []
    for process_type, processes in all_processes.items():
        # Note that p.poll() returns the exit code that the process exited
        # with, so an exit code of None indicates that the process is still
        # alive.
        processes_alive = [p.poll() is None for p in processes]
        if not all(processes_alive) and process_type not in exclude:
            logger.warning(
                "A process of type {} has died.".format(process_type))
            return False
    return True
def address_to_ip(address):
    """Convert a hostname to a numerical IP addresses in an address.

    This should be a no-op if address already contains an actual numerical IP
    address.

    Args:
        address: This can be either a string containing a hostname (or an IP
            address) and a port or it can be just an IP address.

    Returns:
        The same address but with the hostname replaced by a numerical IP
            address.
    """
    parts = address.split(":")
    resolved = socket.gethostbyname(parts[0])
    # Make sure localhost isn't resolved to the loopback ip.
    if resolved == "127.0.0.1":
        resolved = get_node_ip_address()
    parts[0] = resolved
    return ":".join(parts)
def get_node_ip_address(address="8.8.8.8:53"):
    """Determine the IP address of the local node.

    Args:
        address (str): The IP address and port of any known live service on
            the network you care about.

    Returns:
        The IP address of the current node.
    """
    ip_address, port = address.split(":")
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        # Connecting a UDP socket sends no packets, but it does make the OS
        # choose the outgoing interface; this raises an exception if there is
        # no internet connection.
        s.connect((ip_address, int(port)))
        node_ip_address = s.getsockname()[0]
    except Exception as e:
        node_ip_address = "127.0.0.1"
        # [Errno 101] Network is unreachable. Use getattr because not every
        # exception type carries an errno attribute.
        if getattr(e, "errno", None) == 101:
            try:
                # Try to get the node ip address from the host name instead.
                host_name = socket.getfqdn(socket.gethostname())
                node_ip_address = socket.gethostbyname(host_name)
            except Exception:
                pass
    finally:
        # Fix: the socket was previously leaked; always close it.
        s.close()
    return node_ip_address
def record_log_files_in_redis(redis_address, node_ip_address, log_files):
    """Record in Redis that a new log file has been created.

    This is used so that each log monitor can check Redis and figure out which
    log files it is responsible for monitoring.

    Args:
        redis_address: The address of the redis server.
        node_ip_address: The IP address of the node that the log file exists
            on.
        log_files: A list of file handles for the log files. If one of the
            file handles is None, we ignore it.
    """
    log_files = [log_file for log_file in log_files if log_file is not None]
    if not log_files:
        return
    # Fix: create the Redis client and compute the key once instead of once
    # per log file.
    redis_ip_address, redis_port = redis_address.split(":")
    redis_client = redis.StrictRedis(host=redis_ip_address, port=redis_port)
    # The name of the key storing the list of log filenames for this IP
    # address.
    log_file_list_key = "LOG_FILENAMES:{}".format(node_ip_address)
    for log_file in log_files:
        redis_client.rpush(log_file_list_key, log_file.name)
def create_redis_client(redis_address):
    """Create a Redis client.

    Args:
        redis_address: The "ip:port" address of the Redis server.

    Returns:
        A Redis client.
    """
    host, port = redis_address.split(":")
    # For this command to work, some other client (on the same machine
    # as Redis) must have run "CONFIG SET protected-mode no".
    return redis.StrictRedis(host=host, port=int(port))
def wait_for_redis_to_start(redis_ip_address, redis_port, num_retries=5):
    """Wait for a Redis server to be available.

    This is accomplished by creating a Redis client and sending a random
    command to the server until the command gets through.

    Args:
        redis_ip_address (str): The IP address of the redis server.
        redis_port (int): The port of the redis server.
        num_retries (int): The number of times to try connecting with redis.
            The client will sleep for one second between attempts.

    Raises:
        Exception: An exception is raised if we could not connect with Redis.
    """
    redis_client = redis.StrictRedis(host=redis_ip_address, port=redis_port)
    # Attempt an arbitrary command until it succeeds; each connection failure
    # consumes one retry. The for/else raises only when every attempt failed.
    for _ in range(num_retries):
        try:
            logger.info(
                "Waiting for redis server at {}:{} to respond...".format(
                    redis_ip_address, redis_port))
            redis_client.client_list()
        except redis.ConnectionError:
            # Wait a little bit before the next attempt.
            time.sleep(1)
            logger.info("Failed to connect to the redis server, retrying.")
        else:
            break
    else:
        raise Exception("Unable to connect to Redis. If the Redis instance is "
                        "on a different machine, check that your firewall is "
                        "configured properly.")
def _autodetect_num_gpus():
"""Attempt to detect the number of GPUs on this machine.
TODO(rkn): This currently assumes Nvidia GPUs and Linux.
Returns:
The number of GPUs if any were detected, otherwise 0.
"""
proc_gpus_path = "/proc/driver/nvidia/gpus"
if os.path.isdir(proc_gpus_path):
return len(os.listdir(proc_gpus_path))
return 0
def _compute_version_info():
    """Compute the versions of Python, pyarrow, and Ray.

    Returns:
        A (ray_version, python_version, pyarrow_version) tuple of strings.
    """
    python_version = ".".join(str(part) for part in sys.version_info[:3])
    return (ray.__version__, python_version, pyarrow.__version__)
def _put_version_info_in_redis(redis_client):
    """Store version information in Redis.

    This will be used to detect if workers or drivers are started using
    different versions of Python, pyarrow, or Ray.

    Args:
        redis_client: A client for the primary Redis shard.
    """
    version_json = json.dumps(_compute_version_info())
    redis_client.set("VERSION_INFO", version_json)
def check_version_info(redis_client):
    """Check if various version info of this process is correct.

    This will be used to detect if workers or drivers are started using
    different versions of Python, pyarrow, or Ray. If the version
    information is not present in Redis, then no check is done.

    Args:
        redis_client: A client for the primary Redis shard.

    Raises:
        Exception: An exception is raised if there is a version mismatch
            in Ray or Python. A pyarrow-only mismatch merely logs a warning.
    """
    redis_reply = redis_client.get("VERSION_INFO")
    # Don't do the check if there is no version information in Redis. This
    # is to make it easier to do things like start the processes by hand.
    if redis_reply is None:
        return
    # The cluster's versions, as stored by _put_version_info_in_redis.
    true_version_info = tuple(json.loads(ray.utils.decode(redis_reply)))
    version_info = _compute_version_info()
    if version_info != true_version_info:
        node_ip_address = ray.services.get_node_ip_address()
        error_message = ("Version mismatch: The cluster was started with:\n"
                         "    Ray: " + true_version_info[0] + "\n"
                         "    Python: " + true_version_info[1] + "\n"
                         "    Pyarrow: " + str(true_version_info[2]) + "\n"
                         "This process on node " + node_ip_address +
                         " was started with:" + "\n"
                         "    Ray: " + version_info[0] + "\n"
                         "    Python: " + version_info[1] + "\n"
                         "    Pyarrow: " + str(version_info[2]))
        # Only a mismatch in the first two entries (Ray, Python) is fatal;
        # a pyarrow mismatch just warns.
        if version_info[:2] != true_version_info[:2]:
            raise Exception(error_message)
        else:
            logger.warning(error_message)
def start_redis(node_ip_address,
                port=None,
                redis_shard_ports=None,
                num_redis_shards=1,
                redis_max_clients=None,
                use_raylet=False,
                redirect_output=False,
                redirect_worker_output=False,
                cleanup=True,
                protected_mode=False,
                use_credis=None):
    """Start the Redis global state store.

    Args:
        node_ip_address: The IP address of the current node. This is only used
            for recording the log filenames in Redis.
        port (int): If provided, the primary Redis shard will be started on
            this port.
        redis_shard_ports: A list of the ports to use for the non-primary
            Redis shards.
        num_redis_shards (int): If provided, the number of Redis shards to
            start, in addition to the primary one. The default value is one
            shard.
        redis_max_clients: If this is provided, Ray will attempt to configure
            Redis with this maxclients number.
        use_raylet: True if the new raylet code path should be used. This is
            not supported yet.
        redirect_output (bool): True if output should be redirected to a file
            and false otherwise.
        redirect_worker_output (bool): True if worker output should be
            redirected to a file and false otherwise. Workers will have access
            to this value when they start up.
        cleanup (bool): True if using Ray in local mode. If cleanup is true,
            then all Redis processes started by this method will be killed by
            services.cleanup() when the Python process that imported services
            exits.
        protected_mode: True if we should start the Redis server in protected
            mode, preventing clients on other machines from connecting.
        use_credis: If True, additionally load the chain-replicated libraries
            into the redis servers. Defaults to None, which means its value is
            set by the presence of "RAY_USE_NEW_GCS" in os.environ.

    Returns:
        A tuple of the address for the primary Redis shard and a list of
            addresses for the remaining shards.
    """
    redis_stdout_file, redis_stderr_file = new_log_files(
        "redis", redirect_output)
    if redis_shard_ports is None:
        redis_shard_ports = num_redis_shards * [None]
    elif len(redis_shard_ports) != num_redis_shards:
        raise Exception("The number of Redis shard ports does not match the "
                        "number of Redis shards.")
    if use_credis is None:
        use_credis = ("RAY_USE_NEW_GCS" in os.environ)
    # Start the primary Redis shard.
    if not use_credis:
        assigned_port, _ = _start_redis_instance(
            node_ip_address=node_ip_address,
            port=port,
            redis_max_clients=redis_max_clients,
            stdout_file=redis_stdout_file,
            stderr_file=redis_stderr_file,
            cleanup=cleanup,
            protected_mode=protected_mode)
    else:
        assigned_port, _ = _start_redis_instance(
            node_ip_address=node_ip_address,
            port=port,
            redis_max_clients=redis_max_clients,
            stdout_file=redis_stdout_file,
            stderr_file=redis_stderr_file,
            cleanup=cleanup,
            protected_mode=protected_mode,
            executable=CREDIS_EXECUTABLE,
            # It is important to load the credis module BEFORE the ray module,
            # as the latter contains an extern declaration that the former
            # supplies.
            modules=[CREDIS_MASTER_MODULE, REDIS_MODULE])
    if port is not None:
        assert assigned_port == port
    port = assigned_port
    redis_address = address(node_ip_address, port)
    # Fix: a single client to the primary shard suffices for all of the
    # configuration below (previously two identical clients were created).
    primary_redis_client = redis.StrictRedis(host=node_ip_address, port=port)
    # Store whether we're using the raylet code path or not.
    primary_redis_client.set("UseRaylet", 1 if use_raylet else 0)
    # Register the number of Redis shards in the primary shard, so that
    # clients know how many redis shards to expect under RedisShards.
    primary_redis_client.set("NumRedisShards", str(num_redis_shards))
    # Put the redirect_worker_output bool in the Redis shard so that workers
    # can access it and know whether or not to redirect their output.
    primary_redis_client.set("RedirectOutput", 1
                             if redirect_worker_output else 0)
    # Store version information in the primary Redis shard.
    _put_version_info_in_redis(primary_redis_client)
    # Start other Redis shards. Each Redis shard logs to a separate file,
    # prefixed by "redis-<shard number>".
    redis_shards = []
    for i in range(num_redis_shards):
        redis_stdout_file, redis_stderr_file = new_log_files(
            "redis-{}".format(i), redirect_output)
        if not use_credis:
            redis_shard_port, _ = _start_redis_instance(
                node_ip_address=node_ip_address,
                port=redis_shard_ports[i],
                redis_max_clients=redis_max_clients,
                stdout_file=redis_stdout_file,
                stderr_file=redis_stderr_file,
                cleanup=cleanup,
                protected_mode=protected_mode)
        else:
            assert num_redis_shards == 1, \
                "For now, RAY_USE_NEW_GCS supports 1 shard, and credis "\
                "supports 1-node chain for that shard only."
            redis_shard_port, _ = _start_redis_instance(
                node_ip_address=node_ip_address,
                port=redis_shard_ports[i],
                redis_max_clients=redis_max_clients,
                stdout_file=redis_stdout_file,
                stderr_file=redis_stderr_file,
                cleanup=cleanup,
                protected_mode=protected_mode,
                executable=CREDIS_EXECUTABLE,
                # It is important to load the credis module BEFORE the ray
                # module, as the latter contains an extern declaration that
                # the former supplies.
                modules=[CREDIS_MEMBER_MODULE, REDIS_MODULE])
        if redis_shard_ports[i] is not None:
            assert redis_shard_port == redis_shard_ports[i]
        shard_address = address(node_ip_address, redis_shard_port)
        redis_shards.append(shard_address)
        # Store redis shard information in the primary redis shard.
        primary_redis_client.rpush("RedisShards", shard_address)
    if use_credis:
        shard_client = redis.StrictRedis(
            host=node_ip_address, port=redis_shard_port)
        # Configure the chain state.
        primary_redis_client.execute_command("MASTER.ADD", node_ip_address,
                                             redis_shard_port)
        shard_client.execute_command("MEMBER.CONNECT_TO_MASTER",
                                     node_ip_address, port)
    return redis_address, redis_shards
def _make_temp_redis_config(node_ip_address):
"""Create a configuration file for Redis.
Args:
node_ip_address: The IP address of this node. This should not be
127.0.0.1.
"""
redis_config_name = "/tmp/redis_conf{}".format(random_name())
with open(redis_config_name, 'w') as f:
# This allows redis clients on the same machine to connect using the
# node's IP address as opposed to just 127.0.0.1. This is only relevant
# when the server is in protected mode.
f.write("bind 127.0.0.1 {}".format(node_ip_address))
return redis_config_name
def _start_redis_instance(node_ip_address="127.0.0.1",
                          port=None,
                          redis_max_clients=None,
                          num_retries=20,
                          stdout_file=None,
                          stderr_file=None,
                          cleanup=True,
                          protected_mode=False,
                          executable=REDIS_EXECUTABLE,
                          modules=None):
    """Start a single Redis server.

    Args:
        node_ip_address (str): The IP address of the current node. This is
            only used for recording the log filenames in Redis.
        port (int): If provided, start a Redis server with this port.
        redis_max_clients: If this is provided, Ray will attempt to configure
            Redis with this maxclients number.
        num_retries (int): The number of times to attempt to start Redis. If a
            port is provided, this defaults to 1.
        stdout_file: A file handle opened for writing to redirect stdout to.
            If no redirection should happen, then this should be None.
        stderr_file: A file handle opened for writing to redirect stderr to.
            If no redirection should happen, then this should be None.
        cleanup (bool): True if using Ray in local mode. If cleanup is true,
            then this process will be killed by services.cleanup() when the
            Python process that imported services exits.
        protected_mode: True if we should start the Redis server in protected
            mode. This will prevent clients on other machines from connecting
            and is only used when the Redis servers are started via ray.init()
            as opposed to ray start.
        executable (str): Full path to the redis-server executable.
        modules (list of str): A list of pathnames, pointing to the redis
            module(s) that will be loaded in this redis server. If None, load
            the default Ray redis module.

    Returns:
        A tuple of the port used by Redis and a handle to the process that was
            started. If a port is passed in, then the returned port value is
            the same.

    Raises:
        Exception: An exception is raised if Redis could not be started.
    """
    assert os.path.isfile(executable)
    if modules is None:
        modules = [REDIS_MODULE]
    for module in modules:
        assert os.path.isfile(module)
    counter = 0
    if port is not None:
        # If a port is specified, then try only once to connect.
        num_retries = 1
    else:
        port = new_port()
    if protected_mode:
        redis_config_filename = _make_temp_redis_config(node_ip_address)
    load_module_args = []
    for module in modules:
        load_module_args += ["--loadmodule", module]
    # Keep launching on fresh random ports until the server stays up or we
    # run out of retries.
    while counter < num_retries:
        if counter > 0:
            logger.warning("Redis failed to start, retrying now.")
        # Construct the command to start the Redis server.
        command = [executable]
        if protected_mode:
            command += [redis_config_filename]
        command += (
            ["--port", str(port), "--loglevel", "warning"] + load_module_args)
        p = subprocess.Popen(command, stdout=stdout_file, stderr=stderr_file)
        time.sleep(0.1)
        # Check if Redis successfully started (or at least if it the executable
        # did not exit within 0.1 seconds).
        if p.poll() is None:
            if cleanup:
                all_processes[PROCESS_TYPE_REDIS_SERVER].append(p)
            break
        # The server died immediately (most likely a port conflict); pick a
        # new random port and try again.
        port = new_port()
        counter += 1
    if counter == num_retries:
        raise Exception("Couldn't start Redis. Check log files: {} {}".format(
            stdout_file.name, stderr_file.name))
    # Create a Redis client just for configuring Redis.
    redis_client = redis.StrictRedis(host="127.0.0.1", port=port)
    # Wait for the Redis server to start.
    wait_for_redis_to_start("127.0.0.1", port)
    # Configure Redis to generate keyspace notifications. TODO(rkn): Change
    # this to only generate notifications for the export keys.
    redis_client.config_set("notify-keyspace-events", "Kl")
    # Configure Redis to not run in protected mode so that processes on other
    # hosts can connect to it. TODO(rkn): Do this in a more secure way.
    if not protected_mode:
        redis_client.config_set("protected-mode", "no")
    # If redis_max_clients is provided, attempt to raise the number of maximum
    # number of Redis clients.
    if redis_max_clients is not None:
        redis_client.config_set("maxclients", str(redis_max_clients))
    else:
        # If redis_max_clients is not provided, determine the current ulimit.
        # We will use this to attempt to raise the maximum number of Redis
        # clients.
        current_max_clients = int(
            redis_client.config_get("maxclients")["maxclients"])
        # The below command should be the same as doing ulimit -n.
        ulimit_n = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
        # The quantity redis_client_buffer appears to be the required buffer
        # between the maximum number of redis clients and ulimit -n. That is,
        # if ulimit -n returns 10000, then we can set maxclients to
        # 10000 - redis_client_buffer.
        redis_client_buffer = 32
        if current_max_clients < ulimit_n - redis_client_buffer:
            redis_client.config_set("maxclients",
                                    ulimit_n - redis_client_buffer)
    # Increase the hard and soft limits for the redis client pubsub buffer to
    # 128MB. This is a hack to make it less likely for pubsub messages to be
    # dropped and for pubsub connections to therefore be killed.
    cur_config = (redis_client.config_get("client-output-buffer-limit")[
        "client-output-buffer-limit"])
    cur_config_list = cur_config.split()
    assert len(cur_config_list) == 12
    cur_config_list[8:] = ["pubsub", "134217728", "134217728", "60"]
    redis_client.config_set("client-output-buffer-limit",
                            " ".join(cur_config_list))
    # Put a time stamp in Redis to indicate when it was started.
    redis_client.set("redis_start_time", time.time())
    # Record the log files in Redis.
    record_log_files_in_redis(
        address(node_ip_address, port), node_ip_address,
        [stdout_file, stderr_file])
    return port, p
def start_log_monitor(redis_address,
                      node_ip_address,
                      stdout_file=None,
                      stderr_file=None,
                      cleanup=True):
    """Start a log monitor process.

    Args:
        redis_address (str): The address of the Redis instance.
        node_ip_address (str): The IP address of the node that this log
            monitor is running on.
        stdout_file: A file handle opened for writing to redirect stdout to.
            If no redirection should happen, then this should be None.
        stderr_file: A file handle opened for writing to redirect stderr to.
            If no redirection should happen, then this should be None.
        cleanup (bool): True if using Ray in local mode. If cleanup is true,
            then this process will be killed by services.cleanup() when the
            Python process that imported services exits.
    """
    # Fix: the default was previously `cleanup=cleanup`, which captured the
    # module-level cleanup *function* as the default value. It only worked
    # because a function object is truthy. `True` matches the documented
    # default and every sibling start_* function.
    log_monitor_filepath = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "log_monitor.py")
    p = subprocess.Popen(
        [
            sys.executable, "-u", log_monitor_filepath, "--redis-address",
            redis_address, "--node-ip-address", node_ip_address
        ],
        stdout=stdout_file,
        stderr=stderr_file)
    if cleanup:
        all_processes[PROCESS_TYPE_LOG_MONITOR].append(p)
    record_log_files_in_redis(redis_address, node_ip_address,
                              [stdout_file, stderr_file])
def start_global_scheduler(redis_address,
                           node_ip_address,
                           stdout_file=None,
                           stderr_file=None,
                           cleanup=True):
    """Start a global scheduler process.

    Args:
        redis_address (str): The address of the Redis instance.
        node_ip_address: The IP address of the node that this scheduler will
            run on.
        stdout_file: A file handle opened for writing to redirect stdout to.
            If no redirection should happen, then this should be None.
        stderr_file: A file handle opened for writing to redirect stderr to.
            If no redirection should happen, then this should be None.
        cleanup (bool): True if using Ray in local mode. If cleanup is true,
            then this process will be killed by services.cleanup() when the
            Python process that imported services exits.
    """
    process = global_scheduler.start_global_scheduler(
        redis_address,
        node_ip_address,
        stdout_file=stdout_file,
        stderr_file=stderr_file)
    # Track the process so services.cleanup() can tear it down in local mode.
    if cleanup:
        all_processes[PROCESS_TYPE_GLOBAL_SCHEDULER].append(process)
    record_log_files_in_redis(redis_address, node_ip_address,
                              [stdout_file, stderr_file])
def start_ui(redis_address, stdout_file=None, stderr_file=None, cleanup=True):
    """Start a UI process.

    Args:
        redis_address: The address of the primary Redis shard.
        stdout_file: A file handle opened for writing to redirect stdout to.
            If no redirection should happen, then this should be None.
        stderr_file: A file handle opened for writing to redirect stderr to.
            If no redirection should happen, then this should be None.
        cleanup (bool): True if using Ray in local mode. If cleanup is true,
            then this process will be killed by services.cleanup() when the
            Python process that imported services exits.

    Returns:
        The URL of the web UI, or None (implicitly) if it failed to start.
    """
    # Fix: removed a dead `new_env = os.environ.copy()` that was immediately
    # recreated below before being used.
    notebook_filepath = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "WebUI.ipynb")
    # We copy the notebook file so that the original doesn't get modified by
    # the user.
    random_ui_id = random.randint(0, 100000)
    new_notebook_filepath = "/tmp/raylogs/ray_ui{}.ipynb".format(random_ui_id)
    new_notebook_directory = os.path.dirname(new_notebook_filepath)
    shutil.copy(notebook_filepath, new_notebook_filepath)
    # Find the first free port at or above 8888 for the notebook server.
    port = 8888
    while True:
        try:
            port_test_socket = socket.socket()
            port_test_socket.bind(("127.0.0.1", port))
            port_test_socket.close()
            break
        except socket.error:
            port += 1
    new_env = os.environ.copy()
    new_env["REDIS_ADDRESS"] = redis_address
    # We generate the token used for authentication ourselves to avoid
    # querying the jupyter server.
    token = ray.utils.decode(binascii.hexlify(os.urandom(24)))
    # The --ip=0.0.0.0 flag is intended to enable connecting to a notebook
    # running within a docker container (from the outside).
    command = [
        "jupyter", "notebook", "--no-browser", "--port={}".format(port),
        "--ip=0.0.0.0", "--NotebookApp.iopub_data_rate_limit=10000000000",
        "--NotebookApp.open_browser=False",
        "--NotebookApp.token={}".format(token)
    ]
    # If the user is root, add the --allow-root flag.
    if os.geteuid() == 0:
        command.append("--allow-root")
    try:
        ui_process = subprocess.Popen(
            command,
            env=new_env,
            cwd=new_notebook_directory,
            stdout=stdout_file,
            stderr=stderr_file)
    except Exception:
        logger.warning("Failed to start the UI, you may need to run "
                       "'pip install jupyter'.")
    else:
        if cleanup:
            all_processes[PROCESS_TYPE_WEB_UI].append(ui_process)
        webui_url = ("http://localhost:{}/notebooks/ray_ui{}.ipynb?token={}"
                     .format(port, random_ui_id, token))
        logger.info("\n" + "=" * 70)
        logger.info("View the web UI at {}".format(webui_url))
        logger.info("=" * 70 + "\n")
        return webui_url
def check_and_update_resources(resources, use_raylet):
    """Sanity check a resource dictionary and add sensible defaults.

    Args:
        resources: A dictionary mapping resource names to resource quantities.
        use_raylet: True if we are using the raylet code path and false
            otherwise.

    Returns:
        A new resource dictionary (the caller's dictionary is not mutated).

    Raises:
        Exception: If more GPUs are requested than CUDA_VISIBLE_DEVICES
            allows.
        ValueError: If a resource quantity is fractional, or (on the raylet
            path) exceeds the maximum allowed quantity.
    """
    if resources is None:
        resources = {}
    # Copy so the caller's dictionary is never mutated.
    resources = resources.copy()
    if "CPU" not in resources:
        # By default, use the number of hardware execution threads for the
        # number of cores.
        resources["CPU"] = multiprocessing.cpu_count()
    # See if CUDA_VISIBLE_DEVICES has already been set.
    gpu_ids = ray.utils.get_cuda_visible_devices()
    # Check that the number of GPUs that the local scheduler wants doesn't
    # exceed the amount allowed by CUDA_VISIBLE_DEVICES.
    if ("GPU" in resources and gpu_ids is not None
            and resources["GPU"] > len(gpu_ids)):
        raise Exception("Attempting to start local scheduler with {} GPUs, "
                        "but CUDA_VISIBLE_DEVICES contains {}.".format(
                            resources["GPU"], gpu_ids))
    if "GPU" not in resources:
        # Try to automatically detect the number of GPUs.
        resources["GPU"] = _autodetect_num_gpus()
        # Don't use more GPUs than allowed by CUDA_VISIBLE_DEVICES.
        if gpu_ids is not None:
            resources["GPU"] = min(resources["GPU"], len(gpu_ids))
    # Check types. Iterate values() directly since the keys are unused here,
    # and use a single isinstance with a tuple of types.
    for resource_quantity in resources.values():
        assert isinstance(resource_quantity, (int, float))
        if (isinstance(resource_quantity, float)
                and not resource_quantity.is_integer()):
            raise ValueError("Resource quantities must all be whole numbers.")
        if (use_raylet and
                resource_quantity > ray.ray_constants.MAX_RESOURCE_QUANTITY):
            raise ValueError("Resource quantities must be at most {}.".format(
                ray.ray_constants.MAX_RESOURCE_QUANTITY))
    return resources
def start_local_scheduler(redis_address,
                          node_ip_address,
                          plasma_store_name,
                          plasma_manager_name,
                          worker_path,
                          plasma_address=None,
                          stdout_file=None,
                          stderr_file=None,
                          cleanup=True,
                          resources=None,
                          num_workers=0):
    """Start a local scheduler process.

    Args:
        redis_address (str): The address of the Redis instance.
        node_ip_address (str): The IP address of the node that this local
            scheduler is running on.
        plasma_store_name (str): The name of the plasma store socket to
            connect to.
        plasma_manager_name (str): The name of the plasma manager socket to
            connect to.
        worker_path (str): The path of the script to use when the local
            scheduler starts up new workers.
        plasma_address: The address of the plasma store, if known.
        stdout_file: A file handle opened for writing to redirect stdout to.
            If no redirection should happen, then this should be None.
        stderr_file: A file handle opened for writing to redirect stderr to.
            If no redirection should happen, then this should be None.
        cleanup (bool): True if using Ray in local mode. If cleanup is true,
            then this process will be killed by services.cleanup() when the
            Python process that imported services exits.
        resources: A dictionary mapping the name of a resource to the
            available quantity of that resource.
        num_workers (int): The number of workers that the local scheduler
            should start.

    Return:
        The name of the local scheduler socket.
    """
    # Fill in resource defaults; this is the non-raylet code path.
    static_resources = check_and_update_resources(resources, False)
    logger.info("Starting local scheduler with the following resources: {}."
                .format(static_resources))
    local_scheduler_name, process = ray.local_scheduler.start_local_scheduler(
        plasma_store_name,
        plasma_manager_name,
        worker_path=worker_path,
        node_ip_address=node_ip_address,
        redis_address=redis_address,
        plasma_address=plasma_address,
        use_profiler=RUN_LOCAL_SCHEDULER_PROFILER,
        stdout_file=stdout_file,
        stderr_file=stderr_file,
        static_resources=static_resources,
        num_workers=num_workers)
    # Track the process so services.cleanup() can tear it down in local mode.
    if cleanup:
        all_processes[PROCESS_TYPE_LOCAL_SCHEDULER].append(process)
    record_log_files_in_redis(redis_address, node_ip_address,
                              [stdout_file, stderr_file])
    return local_scheduler_name
def start_raylet(redis_address,
                 node_ip_address,
                 plasma_store_name,
                 worker_path,
                 resources=None,
                 num_workers=0,
                 use_valgrind=False,
                 use_profiler=False,
                 stdout_file=None,
                 stderr_file=None,
                 cleanup=True):
    """Start a raylet, which is a combined local scheduler and object manager.

    Args:
        redis_address (str): The address of the Redis instance.
        node_ip_address (str): The IP address of the node that this local
            scheduler is running on.
        plasma_store_name (str): The name of the plasma store socket to
            connect to.
        worker_path (str): The path of the script to use when the local
            scheduler starts up new workers.
        resources: A dictionary mapping the name of a resource to the
            available quantity of that resource.
        num_workers (int): The number of workers to start initially.
        use_valgrind (bool): True if the raylet should be started inside
            of valgrind. If this is True, use_profiler must be False.
        use_profiler (bool): True if the raylet should be started inside
            a profiler. If this is True, use_valgrind must be False.
        stdout_file: A file handle opened for writing to redirect stdout to.
            If no redirection should happen, then this should be None.
        stderr_file: A file handle opened for writing to redirect stderr to.
            If no redirection should happen, then this should be None.
        cleanup (bool): True if using Ray in local mode. If cleanup is true,
            then this process will be killed by services.cleanup() when the
            Python process that imported services exits.

    Returns:
        The raylet socket name.
    """
    if use_valgrind and use_profiler:
        raise Exception("Cannot use valgrind and profiler at the same time.")
    static_resources = check_and_update_resources(resources, True)
    # Limit the number of workers that can be started in parallel by the
    # raylet. However, make sure it is at least 1.
    maximum_startup_concurrency = max(
        1, min(multiprocessing.cpu_count(), static_resources["CPU"]))
    # Format the resource argument in a form like 'CPU,1.0,GPU,0,Custom,3'.
    # Fix: iterate .items() instead of zipping .keys() with .values().
    resource_argument = ",".join([
        "{},{}".format(resource_name, resource_value)
        for resource_name, resource_value in static_resources.items()
    ])
    gcs_ip_address, gcs_port = redis_address.split(":")
    raylet_name = "/tmp/raylet{}".format(random_name())
    # Create the command that the Raylet will use to start workers.
    start_worker_command = ("{} {} "
                            "--node-ip-address={} "
                            "--object-store-name={} "
                            "--raylet-name={} "
                            "--redis-address={}".format(
                                sys.executable, worker_path, node_ip_address,
                                plasma_store_name, raylet_name, redis_address))
    command = [
        RAYLET_EXECUTABLE,
        raylet_name,
        plasma_store_name,
        node_ip_address,
        gcs_ip_address,
        gcs_port,
        str(num_workers),
        str(maximum_startup_concurrency),
        resource_argument,
        start_worker_command,
        "",  # Worker command for Java, not needed for Python.
    ]
    if use_valgrind:
        process = subprocess.Popen(
            [
                "valgrind", "--track-origins=yes", "--leak-check=full",
                "--show-leak-kinds=all", "--leak-check-heuristics=stdstring",
                "--error-exitcode=1"
            ] + command,
            stdout=stdout_file,
            stderr=stderr_file)
    elif use_profiler:
        process = subprocess.Popen(
            ["valgrind", "--tool=callgrind"] + command,
            stdout=stdout_file,
            stderr=stderr_file)
    else:
        process = subprocess.Popen(
            command, stdout=stdout_file, stderr=stderr_file)
    if cleanup:
        all_processes[PROCESS_TYPE_RAYLET].append(process)
    record_log_files_in_redis(redis_address, node_ip_address,
                              [stdout_file, stderr_file])
    return raylet_name
def start_plasma_store(node_ip_address,
                       redis_address,
                       object_manager_port=None,
                       store_stdout_file=None,
                       store_stderr_file=None,
                       manager_stdout_file=None,
                       manager_stderr_file=None,
                       objstore_memory=None,
                       cleanup=True,
                       plasma_directory=None,
                       huge_pages=False,
                       use_raylet=False):
    """This method starts an object store process.
    Args:
        node_ip_address (str): The IP address of the node running the object
            store.
        redis_address (str): The address of the Redis instance to connect to.
        object_manager_port (int): The port to use for the object manager. If
            this is not provided, one will be generated randomly.
        store_stdout_file: A file handle opened for writing to redirect stdout
            to. If no redirection should happen, then this should be None.
        store_stderr_file: A file handle opened for writing to redirect stderr
            to. If no redirection should happen, then this should be None.
        manager_stdout_file: A file handle opened for writing to redirect
            stdout to. If no redirection should happen, then this should be
            None.
        manager_stderr_file: A file handle opened for writing to redirect
            stderr to. If no redirection should happen, then this should be
            None.
        objstore_memory: The amount of memory (in bytes) to start the object
            store with. If None, a fraction of system memory is used.
        cleanup (bool): True if using Ray in local mode. If cleanup is true,
            then this process will be killed by services.cleanup() when the
            Python process that imported services exits.
        plasma_directory: A directory where the Plasma memory mapped files will
            be created.
        huge_pages: Boolean flag indicating whether to start the Object
            Store with hugetlbfs support. Requires plasma_directory.
        use_raylet: True if the new raylet code path should be used. This is
            not supported yet. When True, no plasma manager is started.
    Returns:
        A tuple of the Plasma store socket name, the Plasma manager socket
            name (None if use_raylet), and the plasma manager port (None if
            use_raylet).
    """
    if objstore_memory is None:
        # Compute a fraction of the system memory for the Plasma store to use.
        system_memory = ray.utils.get_system_memory()
        if sys.platform == "linux" or sys.platform == "linux2":
            # On linux we use /dev/shm, its size is half the size of the
            # physical memory. To not overflow it, we set the plasma memory
            # limit to 0.4 times the size of the physical memory.
            objstore_memory = int(system_memory * 0.4)
            # Compare the requested memory size to the memory available in
            # /dev/shm.
            shm_fd = os.open("/dev/shm", os.O_RDONLY)
            try:
                shm_fs_stats = os.fstatvfs(shm_fd)
                # The value shm_fs_stats.f_bsize is the block size and the
                # value shm_fs_stats.f_bavail is the number of available
                # blocks.
                shm_avail = shm_fs_stats.f_bsize * shm_fs_stats.f_bavail
                if objstore_memory > shm_avail:
                    logger.warning(
                        "Warning: Reducing object store memory because "
                        "/dev/shm has only {} bytes available. You may be "
                        "able to free up space by deleting files in "
                        "/dev/shm. If you are inside a Docker container, "
                        "you may need to pass an argument with the flag "
                        "'--shm-size' to 'docker run'.".format(shm_avail))
                    objstore_memory = int(shm_avail * 0.8)
            finally:
                os.close(shm_fd)
        else:
            objstore_memory = int(system_memory * 0.8)
    # Start the Plasma store. NOTE: this must use true division; integer
    # division (//) would make the {0:.2f} format report "0.00 GB" for any
    # store smaller than 1 GB.
    logger.info("Starting the Plasma object store with {0:.2f} GB memory."
                .format(objstore_memory / 10**9))
    plasma_store_name, p1 = ray.plasma.start_plasma_store(
        plasma_store_memory=objstore_memory,
        use_profiler=RUN_PLASMA_STORE_PROFILER,
        stdout_file=store_stdout_file,
        stderr_file=store_stderr_file,
        plasma_directory=plasma_directory,
        huge_pages=huge_pages)
    # Start the plasma manager (the raylet code path has no separate manager).
    if not use_raylet:
        if object_manager_port is not None:
            (plasma_manager_name, p2,
             plasma_manager_port) = ray.plasma.start_plasma_manager(
                 plasma_store_name,
                 redis_address,
                 plasma_manager_port=object_manager_port,
                 node_ip_address=node_ip_address,
                 num_retries=1,
                 run_profiler=RUN_PLASMA_MANAGER_PROFILER,
                 stdout_file=manager_stdout_file,
                 stderr_file=manager_stderr_file)
            # The manager must have bound exactly the requested port.
            assert plasma_manager_port == object_manager_port
        else:
            (plasma_manager_name, p2,
             plasma_manager_port) = ray.plasma.start_plasma_manager(
                 plasma_store_name,
                 redis_address,
                 node_ip_address=node_ip_address,
                 run_profiler=RUN_PLASMA_MANAGER_PROFILER,
                 stdout_file=manager_stdout_file,
                 stderr_file=manager_stderr_file)
    else:
        plasma_manager_port = None
        plasma_manager_name = None
    if cleanup:
        all_processes[PROCESS_TYPE_PLASMA_STORE].append(p1)
    record_log_files_in_redis(redis_address, node_ip_address,
                              [store_stdout_file, store_stderr_file])
    if not use_raylet:
        if cleanup:
            all_processes[PROCESS_TYPE_PLASMA_MANAGER].append(p2)
        record_log_files_in_redis(redis_address, node_ip_address,
                                  [manager_stdout_file, manager_stderr_file])
    return ObjectStoreAddress(plasma_store_name, plasma_manager_name,
                              plasma_manager_port)
def start_worker(node_ip_address,
                 object_store_name,
                 object_store_manager_name,
                 local_scheduler_name,
                 redis_address,
                 worker_path,
                 stdout_file=None,
                 stderr_file=None,
                 cleanup=True):
    """Launch a single Python worker process.

    Args:
        node_ip_address (str): The IP address of the node that this worker is
            running on.
        object_store_name (str): The name of the object store.
        object_store_manager_name (str): The name of the object store manager.
        local_scheduler_name (str): The name of the local scheduler.
        redis_address (str): The address that the Redis server is listening on.
        worker_path (str): The path of the source code which the worker process
            will run.
        stdout_file: A file handle opened for writing to redirect stdout to. If
            no redirection should happen, then this should be None.
        stderr_file: A file handle opened for writing to redirect stderr to. If
            no redirection should happen, then this should be None.
        cleanup (bool): True if using Ray in local mode. If cleanup is true,
            then this process will be killed by services.cleanup() when the
            Python process that imported services exits. This is True by
            default.
    """
    # Run the worker with unbuffered output (-u) so its logs appear promptly.
    worker_args = [
        "--node-ip-address={}".format(node_ip_address),
        "--object-store-name={}".format(object_store_name),
        "--object-store-manager-name={}".format(object_store_manager_name),
        "--local-scheduler-name={}".format(local_scheduler_name),
        "--redis-address={}".format(str(redis_address)),
    ]
    worker_process = subprocess.Popen(
        [sys.executable, "-u", worker_path] + worker_args,
        stdout=stdout_file,
        stderr=stderr_file)
    if cleanup:
        all_processes[PROCESS_TYPE_WORKER].append(worker_process)
    record_log_files_in_redis(redis_address, node_ip_address,
                              [stdout_file, stderr_file])
def start_monitor(redis_address,
                  node_ip_address,
                  stdout_file=None,
                  stderr_file=None,
                  cleanup=True,
                  autoscaling_config=None):
    """Run the Python monitor process that watches the other processes.

    Args:
        redis_address (str): The address that the Redis server is listening on.
        node_ip_address: The IP address of the node that this process will run
            on.
        stdout_file: A file handle opened for writing to redirect stdout to. If
            no redirection should happen, then this should be None.
        stderr_file: A file handle opened for writing to redirect stderr to. If
            no redirection should happen, then this should be None.
        cleanup (bool): True if using Ray in local mode. If cleanup is true,
            then this process will be killed by services.cleanup() when the
            Python process that imported services exits. This is True by
            default.
        autoscaling_config: path to autoscaling config file.
    """
    # monitor.py lives next to this module.
    monitor_path = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "monitor.py")
    command = [
        sys.executable,
        "-u",
        monitor_path,
        "--redis-address=" + str(redis_address),
    ]
    if autoscaling_config:
        command += ["--autoscaling-config=" + str(autoscaling_config)]
    monitor_process = subprocess.Popen(
        command, stdout=stdout_file, stderr=stderr_file)
    if cleanup:
        all_processes[PROCESS_TYPE_MONITOR].append(monitor_process)
    record_log_files_in_redis(redis_address, node_ip_address,
                              [stdout_file, stderr_file])
def start_raylet_monitor(redis_address,
                         stdout_file=None,
                         stderr_file=None,
                         cleanup=True):
    """Run the raylet monitor binary that watches the other processes.

    Args:
        redis_address (str): The address that the Redis server is listening on.
        stdout_file: A file handle opened for writing to redirect stdout to. If
            no redirection should happen, then this should be None.
        stderr_file: A file handle opened for writing to redirect stderr to. If
            no redirection should happen, then this should be None.
        cleanup (bool): True if using Ray in local mode. If cleanup is true,
            then this process will be killed by services.cleanup() when the
            Python process that imported services exits. This is True by
            default.
    """
    # The monitor connects directly to the GCS (primary Redis) host and port.
    gcs_host, gcs_port = redis_address.split(":")
    monitor_process = subprocess.Popen(
        [RAYLET_MONITOR_EXECUTABLE, gcs_host, gcs_port],
        stdout=stdout_file,
        stderr=stderr_file)
    if cleanup:
        all_processes[PROCESS_TYPE_MONITOR].append(monitor_process)
def start_ray_processes(address_info=None,
                        node_ip_address="127.0.0.1",
                        redis_port=None,
                        redis_shard_ports=None,
                        num_workers=None,
                        num_local_schedulers=1,
                        object_store_memory=None,
                        num_redis_shards=1,
                        redis_max_clients=None,
                        redis_protected_mode=False,
                        worker_path=None,
                        cleanup=True,
                        redirect_worker_output=False,
                        redirect_output=False,
                        include_global_scheduler=False,
                        include_log_monitor=False,
                        include_webui=False,
                        start_workers_from_local_scheduler=True,
                        resources=None,
                        plasma_directory=None,
                        huge_pages=False,
                        autoscaling_config=None,
                        use_raylet=False):
    """Helper method to start Ray processes.
    Args:
        address_info (dict): A dictionary with address information for
            processes that have already been started. If provided, address_info
            will be modified to include processes that are newly started.
        node_ip_address (str): The IP address of this node.
        redis_port (int): The port that the primary Redis shard should listen
            to. If None, then a random port will be chosen. If the key
            "redis_address" is in address_info, then this argument will be
            ignored.
        redis_shard_ports: A list of the ports to use for the non-primary Redis
            shards.
        num_workers (int): The number of workers to start.
        num_local_schedulers (int): The total number of local schedulers
            required. This is also the total number of object stores required.
            This method will start new instances of local schedulers and object
            stores until there are num_local_schedulers existing instances of
            each, including ones already registered with the given
            address_info.
        object_store_memory: The amount of memory (in bytes) to start the
            object store with.
        num_redis_shards: The number of Redis shards to start in addition to
            the primary Redis shard.
        redis_max_clients: If provided, attempt to configure Redis with this
            maxclients number.
        redis_protected_mode: True if we should start Redis in protected mode.
            This will prevent clients from other machines from connecting and
            is only done when Redis is started via ray.init().
        worker_path (str): The path of the source code that will be run by the
            worker.
        cleanup (bool): If cleanup is true, then the processes started here
            will be killed by services.cleanup() when the Python process that
            called this method exits.
        redirect_worker_output: True if the stdout and stderr of worker
            processes should be redirected to files.
        redirect_output (bool): True if stdout and stderr for non-worker
            processes should be redirected to files and false otherwise.
        include_global_scheduler (bool): If include_global_scheduler is True,
            then start a global scheduler process.
        include_log_monitor (bool): If True, then start a log monitor to
            monitor the log files for all processes on this node and push their
            contents to Redis.
        include_webui (bool): If True, then attempt to start the web UI. Note
            that this is only possible with Python 3.
        start_workers_from_local_scheduler (bool): If this flag is True, then
            start the initial workers from the local scheduler. Else, start
            them from Python.
        resources: A dictionary mapping resource name to the quantity of that
            resource.
        plasma_directory: A directory where the Plasma memory mapped files will
            be created.
        huge_pages: Boolean flag indicating whether to start the Object
            Store with hugetlbfs support. Requires plasma_directory.
        autoscaling_config: path to autoscaling config file.
        use_raylet: True if the new raylet code path should be used. This is
            not supported yet.
    Returns:
        A dictionary of the address information for the processes that were
            started.
    """
    logger.info(
        "Process STDOUT and STDERR is being redirected to /tmp/raylogs/.")
    if resources is None:
        resources = {}
    # Normalize `resources` into a list with one resource dict per local
    # scheduler, so the loops below can index it by scheduler.
    if not isinstance(resources, list):
        resources = num_local_schedulers * [resources]
    # Decide how many workers each local scheduler should get: either the
    # explicit num_workers, or the CPU count from each resource dict (falling
    # back to the machine's CPU count).
    if num_workers is not None:
        workers_per_local_scheduler = num_local_schedulers * [num_workers]
    else:
        workers_per_local_scheduler = []
        for resource_dict in resources:
            cpus = resource_dict.get("CPU")
            workers_per_local_scheduler.append(cpus if cpus is not None else
                                               multiprocessing.cpu_count())
    if address_info is None:
        address_info = {}
    address_info["node_ip_address"] = node_ip_address
    if worker_path is None:
        # Default worker script shipped alongside this module.
        worker_path = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            "workers/default_worker.py")
    # Start Redis if there isn't already an instance running. TODO(rkn): We are
    # suppressing the output of Redis because on Linux it prints a bunch of
    # warning messages when it starts up. Instead of suppressing the output, we
    # should address the warnings.
    redis_address = address_info.get("redis_address")
    redis_shards = address_info.get("redis_shards", [])
    if redis_address is None:
        redis_address, redis_shards = start_redis(
            node_ip_address,
            port=redis_port,
            redis_shard_ports=redis_shard_ports,
            num_redis_shards=num_redis_shards,
            redis_max_clients=redis_max_clients,
            use_raylet=use_raylet,
            redirect_output=True,
            redirect_worker_output=redirect_worker_output,
            cleanup=cleanup,
            protected_mode=redis_protected_mode)
        address_info["redis_address"] = redis_address
        # Brief pause, presumably to give Redis time to come up before other
        # processes connect to it -- TODO confirm this is still needed.
        time.sleep(0.1)
    # Start monitoring the processes.
    monitor_stdout_file, monitor_stderr_file = new_log_files(
        "monitor", redirect_output)
    start_monitor(
        redis_address,
        node_ip_address,
        stdout_file=monitor_stdout_file,
        stderr_file=monitor_stderr_file,
        cleanup=cleanup,
        autoscaling_config=autoscaling_config)
    if use_raylet:
        start_raylet_monitor(
            redis_address,
            stdout_file=monitor_stdout_file,
            stderr_file=monitor_stderr_file,
            cleanup=cleanup)
    if redis_shards == []:
        # Get redis shards from primary redis instance.
        redis_ip_address, redis_port = redis_address.split(":")
        redis_client = redis.StrictRedis(
            host=redis_ip_address, port=redis_port)
        redis_shards = redis_client.lrange("RedisShards", start=0, end=-1)
        # Redis returns bytes; decode each shard address to a str.
        redis_shards = [ray.utils.decode(shard) for shard in redis_shards]
        address_info["redis_shards"] = redis_shards
    # Start the log monitor, if necessary.
    if include_log_monitor:
        log_monitor_stdout_file, log_monitor_stderr_file = new_log_files(
            "log_monitor", redirect_output=True)
        start_log_monitor(
            redis_address,
            node_ip_address,
            stdout_file=log_monitor_stdout_file,
            stderr_file=log_monitor_stderr_file,
            cleanup=cleanup)
    # Start the global scheduler, if necessary. The raylet code path has its
    # own scheduling and does not use the global scheduler.
    if include_global_scheduler and not use_raylet:
        global_scheduler_stdout_file, global_scheduler_stderr_file = (
            new_log_files("global_scheduler", redirect_output))
        start_global_scheduler(
            redis_address,
            node_ip_address,
            stdout_file=global_scheduler_stdout_file,
            stderr_file=global_scheduler_stderr_file,
            cleanup=cleanup)
    # Initialize with existing services.
    if "object_store_addresses" not in address_info:
        address_info["object_store_addresses"] = []
    object_store_addresses = address_info["object_store_addresses"]
    if "local_scheduler_socket_names" not in address_info:
        address_info["local_scheduler_socket_names"] = []
    local_scheduler_socket_names = address_info["local_scheduler_socket_names"]
    if "raylet_socket_names" not in address_info:
        address_info["raylet_socket_names"] = []
    raylet_socket_names = address_info["raylet_socket_names"]
    # Get the ports to use for the object managers if any are provided.
    object_manager_ports = (address_info["object_manager_ports"] if
                            "object_manager_ports" in address_info else None)
    # Normalize to one (possibly None) port per local scheduler.
    if not isinstance(object_manager_ports, list):
        object_manager_ports = num_local_schedulers * [object_manager_ports]
    assert len(object_manager_ports) == num_local_schedulers
    # Start any object stores that do not yet exist.
    for i in range(num_local_schedulers - len(object_store_addresses)):
        # Start Plasma.
        plasma_store_stdout_file, plasma_store_stderr_file = new_log_files(
            "plasma_store_{}".format(i), redirect_output)
        plasma_manager_stdout_file, plasma_manager_stderr_file = new_log_files(
            "plasma_manager_{}".format(i), redirect_output)
        object_store_address = start_plasma_store(
            node_ip_address,
            redis_address,
            object_manager_port=object_manager_ports[i],
            store_stdout_file=plasma_store_stdout_file,
            store_stderr_file=plasma_store_stderr_file,
            manager_stdout_file=plasma_manager_stdout_file,
            manager_stderr_file=plasma_manager_stderr_file,
            objstore_memory=object_store_memory,
            cleanup=cleanup,
            plasma_directory=plasma_directory,
            huge_pages=huge_pages,
            use_raylet=use_raylet)
        object_store_addresses.append(object_store_address)
        time.sleep(0.1)
    if not use_raylet:
        # Start any local schedulers that do not yet exist.
        for i in range(
                len(local_scheduler_socket_names), num_local_schedulers):
            # Connect the local scheduler to the object store at the same
            # index.
            object_store_address = object_store_addresses[i]
            plasma_address = "{}:{}".format(node_ip_address,
                                            object_store_address.manager_port)
            # Determine how many workers this local scheduler should start.
            if start_workers_from_local_scheduler:
                num_local_scheduler_workers = workers_per_local_scheduler[i]
                # Zero the count so the Python-side worker loop below does not
                # start these workers a second time.
                workers_per_local_scheduler[i] = 0
            else:
                # If we're starting the workers from Python, the local
                # scheduler should not start any workers.
                num_local_scheduler_workers = 0
            # Start the local scheduler. Note that if we do not wish to
            # redirect the worker output, then we cannot redirect the local
            # scheduler output.
            local_scheduler_stdout_file, local_scheduler_stderr_file = (
                new_log_files(
                    "local_scheduler_{}".format(i),
                    redirect_output=redirect_worker_output))
            local_scheduler_name = start_local_scheduler(
                redis_address,
                node_ip_address,
                object_store_address.name,
                object_store_address.manager_name,
                worker_path,
                plasma_address=plasma_address,
                stdout_file=local_scheduler_stdout_file,
                stderr_file=local_scheduler_stderr_file,
                cleanup=cleanup,
                resources=resources[i],
                num_workers=num_local_scheduler_workers)
            local_scheduler_socket_names.append(local_scheduler_name)
        # Make sure that we have exactly num_local_schedulers instances of
        # object stores and local schedulers.
        assert len(object_store_addresses) == num_local_schedulers
        assert len(local_scheduler_socket_names) == num_local_schedulers
    else:
        # Start any raylets that do not exist yet.
        for i in range(len(raylet_socket_names), num_local_schedulers):
            raylet_stdout_file, raylet_stderr_file = new_log_files(
                "raylet_{}".format(i), redirect_output=redirect_worker_output)
            address_info["raylet_socket_names"].append(
                start_raylet(
                    redis_address,
                    node_ip_address,
                    object_store_addresses[i].name,
                    worker_path,
                    resources=resources[i],
                    num_workers=workers_per_local_scheduler[i],
                    stdout_file=raylet_stdout_file,
                    stderr_file=raylet_stderr_file,
                    cleanup=cleanup))
    if not use_raylet:
        # Start any workers that the local scheduler has not already started.
        for i, num_local_scheduler_workers in enumerate(
                workers_per_local_scheduler):
            object_store_address = object_store_addresses[i]
            local_scheduler_name = local_scheduler_socket_names[i]
            for j in range(num_local_scheduler_workers):
                worker_stdout_file, worker_stderr_file = new_log_files(
                    "worker_{}_{}".format(i, j), redirect_output)
                start_worker(
                    node_ip_address,
                    object_store_address.name,
                    object_store_address.manager_name,
                    local_scheduler_name,
                    redis_address,
                    worker_path,
                    stdout_file=worker_stdout_file,
                    stderr_file=worker_stderr_file,
                    cleanup=cleanup)
                workers_per_local_scheduler[i] -= 1
        # Make sure that we've started all the workers.
        assert (sum(workers_per_local_scheduler) == 0)
    # Try to start the web UI.
    if include_webui:
        ui_stdout_file, ui_stderr_file = new_log_files(
            "webui", redirect_output=True)
        address_info["webui_url"] = start_ui(
            redis_address,
            stdout_file=ui_stdout_file,
            stderr_file=ui_stderr_file,
            cleanup=cleanup)
    else:
        address_info["webui_url"] = ""
    # Return the addresses of the relevant processes.
    return address_info
def start_ray_node(node_ip_address,
                   redis_address,
                   object_manager_ports=None,
                   num_workers=0,
                   num_local_schedulers=1,
                   object_store_memory=None,
                   worker_path=None,
                   cleanup=True,
                   redirect_worker_output=False,
                   redirect_output=False,
                   resources=None,
                   plasma_directory=None,
                   huge_pages=False,
                   use_raylet=False):
    """Start the Ray processes for a single (non-head) node.

    This assumes that the Ray processes on some master node have already been
    started.

    Args:
        node_ip_address (str): The IP address of this node.
        redis_address (str): The address of the Redis server.
        object_manager_ports (list): A list of the ports to use for the object
            managers. There should be one per object manager being started on
            this node (typically just one).
        num_workers (int): The number of workers to start.
        num_local_schedulers (int): The number of local schedulers to start.
            This is also the number of plasma stores and plasma managers to
            start.
        object_store_memory (int): The maximum amount of memory (in bytes) to
            let the plasma store use.
        worker_path (str): The path of the source code that will be run by the
            worker.
        cleanup (bool): If cleanup is true, then the processes started here
            will be killed by services.cleanup() when the Python process that
            called this method exits.
        redirect_worker_output: True if the stdout and stderr of worker
            processes should be redirected to files.
        redirect_output (bool): True if stdout and stderr for non-worker
            processes should be redirected to files and false otherwise.
        resources: A dictionary mapping resource name to the available quantity
            of that resource.
        plasma_directory: A directory where the Plasma memory mapped files will
            be created.
        huge_pages: Boolean flag indicating whether to start the Object
            Store with hugetlbfs support. Requires plasma_directory.
        use_raylet: True if the new raylet code path should be used. This is
            not supported yet.

    Returns:
        A dictionary of the address information for the processes that were
            started.
    """
    # Seed the address info with the (already running) Redis server so that
    # start_ray_processes does not try to start a new one.
    return start_ray_processes(
        address_info={
            "redis_address": redis_address,
            "object_manager_ports": object_manager_ports,
        },
        node_ip_address=node_ip_address,
        num_workers=num_workers,
        num_local_schedulers=num_local_schedulers,
        object_store_memory=object_store_memory,
        worker_path=worker_path,
        include_log_monitor=True,
        cleanup=cleanup,
        redirect_worker_output=redirect_worker_output,
        redirect_output=redirect_output,
        resources=resources,
        plasma_directory=plasma_directory,
        huge_pages=huge_pages,
        use_raylet=use_raylet)
def start_ray_head(address_info=None,
                   node_ip_address="127.0.0.1",
                   redis_port=None,
                   redis_shard_ports=None,
                   num_workers=0,
                   num_local_schedulers=1,
                   object_store_memory=None,
                   worker_path=None,
                   cleanup=True,
                   redirect_worker_output=False,
                   redirect_output=False,
                   start_workers_from_local_scheduler=True,
                   resources=None,
                   num_redis_shards=None,
                   redis_max_clients=None,
                   redis_protected_mode=False,
                   include_webui=True,
                   plasma_directory=None,
                   huge_pages=False,
                   autoscaling_config=None,
                   use_raylet=False):
    """Start the Ray head-node processes in local mode.

    Args:
        address_info (dict): A dictionary with address information for
            processes that have already been started. If provided, address_info
            will be modified to include processes that are newly started.
        node_ip_address (str): The IP address of this node.
        redis_port (int): The port that the primary Redis shard should listen
            to. If None, then a random port will be chosen. If the key
            "redis_address" is in address_info, then this argument will be
            ignored.
        redis_shard_ports: A list of the ports to use for the non-primary Redis
            shards.
        num_workers (int): The number of workers to start.
        num_local_schedulers (int): The total number of local schedulers
            required. This is also the total number of object stores required.
            This method will start new instances of local schedulers and object
            stores until there are at least num_local_schedulers existing
            instances of each, including ones already registered with the given
            address_info.
        object_store_memory: The amount of memory (in bytes) to start the
            object store with.
        worker_path (str): The path of the source code that will be run by the
            worker.
        cleanup (bool): If cleanup is true, then the processes started here
            will be killed by services.cleanup() when the Python process that
            called this method exits.
        redirect_worker_output: True if the stdout and stderr of worker
            processes should be redirected to files.
        redirect_output (bool): True if stdout and stderr for non-worker
            processes should be redirected to files and false otherwise.
        start_workers_from_local_scheduler (bool): If this flag is True, then
            start the initial workers from the local scheduler. Else, start
            them from Python.
        resources: A dictionary mapping resource name to the available quantity
            of that resource.
        num_redis_shards: The number of Redis shards to start in addition to
            the primary Redis shard.
        redis_max_clients: If provided, attempt to configure Redis with this
            maxclients number.
        redis_protected_mode: True if we should start Redis in protected mode.
            This will prevent clients from other machines from connecting and
            is only done when Redis is started via ray.init().
        include_webui: True if the UI should be started and false otherwise.
        plasma_directory: A directory where the Plasma memory mapped files will
            be created.
        huge_pages: Boolean flag indicating whether to start the Object
            Store with hugetlbfs support. Requires plasma_directory.
        autoscaling_config: path to autoscaling config file.
        use_raylet: True if the new raylet code path should be used. This is
            not supported yet.

    Returns:
        A dictionary of the address information for the processes that were
            started.
    """
    # Default to a single extra Redis shard when unspecified.
    if num_redis_shards is None:
        num_redis_shards = 1
    return start_ray_processes(
        address_info=address_info,
        node_ip_address=node_ip_address,
        redis_port=redis_port,
        redis_shard_ports=redis_shard_ports,
        num_workers=num_workers,
        num_local_schedulers=num_local_schedulers,
        object_store_memory=object_store_memory,
        worker_path=worker_path,
        cleanup=cleanup,
        redirect_worker_output=redirect_worker_output,
        redirect_output=redirect_output,
        include_global_scheduler=True,
        include_log_monitor=True,
        include_webui=include_webui,
        start_workers_from_local_scheduler=start_workers_from_local_scheduler,
        resources=resources,
        num_redis_shards=num_redis_shards,
        redis_max_clients=redis_max_clients,
        redis_protected_mode=redis_protected_mode,
        plasma_directory=plasma_directory,
        huge_pages=huge_pages,
        autoscaling_config=autoscaling_config,
        use_raylet=use_raylet)
def try_to_create_directory(directory_path):
    """Attempt to create a directory that is globally readable/writable.
    Args:
        directory_path: The path of the directory to create.
    """
    # Local import: `os.errno` was never a documented attribute and raises
    # AttributeError on Python >= 3.7, so use the errno module directly.
    import errno
    if not os.path.exists(directory_path):
        try:
            os.makedirs(directory_path)
        except OSError as e:
            # Another process may have created the directory between our
            # existence check and makedirs; that is harmless. Re-raise
            # anything else.
            if e.errno != errno.EEXIST:
                raise
            logger.warning(
                "Attempted to create '{}', but the directory already "
                "exists.".format(directory_path))
        # Change the log directory permissions so others can use it. This is
        # important when multiple people are using the same machine.
        os.chmod(directory_path, 0o0777)
def new_log_files(name, redirect_output):
    """Generate partially randomized filenames for log files.

    Args:
        name (str): descriptive string for this log file.
        redirect_output (bool): True if files should be generated for logging
            stdout and stderr and false if stdout and stderr should not be
            redirected.

    Returns:
        If redirect_output is true, this will return a tuple of two
            filehandles. The first is for redirecting stdout and the second is
            for redirecting stderr. If redirect_output is false, this will
            return a tuple of two None objects.
    """
    if not redirect_output:
        return None, None
    # All process log files live under a shared directory.
    logs_dir = "/tmp/raylogs"
    try_to_create_directory(logs_dir)
    # Create another directory that will be used by some of the RL algorithms.
    try_to_create_directory("/tmp/ray")
    # Randomize the filename so concurrent processes do not collide.
    stamp = datetime.today().strftime("%Y-%m-%d_%H-%M-%S")
    suffix = "{}-{}-{:05d}".format(name, stamp, random.randint(0, 10000))
    stdout_path = "{}/{}.out".format(logs_dir, suffix)
    stderr_path = "{}/{}.err".format(logs_dir, suffix)
    # buffering=1 selects line buffering so log lines appear promptly.
    stdout_handle = open(stdout_path, "a", buffering=1)
    stderr_handle = open(stderr_path, "a", buffering=1)
    return stdout_handle, stderr_handle
| 42.779214 | 79 | 0.641004 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.