content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def update_action_state():
""" :type action: dart.model.action.Action """
# we receive a list of {action_id, action_status, workflow_instance_id/status}
# We will update the database for each such entry
try:
action_status_updates = request.get_json()
_logger.info("AWS_Batch: extracted j... | 5,357,800 |
def page_not_found():
    """Render the error page shown to users who are not logged in.

    :return: Rendered HTML for the error page.
    """
    message = 'You must be logged in to view this page.'
    return render_template('error.html', error=message)
def mtf_toy_model_parallel():
    """Hyperparameters for the model-parallel toy configuration."""
    hp = mtf_toy_base()
    hp.add_hparam("layout", "hidden:0")
    return hp
def evaluate_model(model, X_test, Y_test, category_names,):
"""used to evaluate given model by using
confusion matrix and classification report
args:
- model (sklearn model)
- X_test
- Y_test
- category_names: list of 36 category names
returns:
None
"""
# predict
y_pre... | 5,357,803 |
def test_insertion(empty_stack):
    """Pushing a value onto an empty stack stores it and bumps ._size."""
    assert empty_stack.top is None
    pushed = empty_stack.push(1)
    assert pushed.val == 1
    assert empty_stack._size == 1
def to_dataframe(sas7bdat_file: Union[str, Path]) -> pd.DataFrame:
"""Converts a sas7bdat and/or xpt file into a pandas dataframe.
args:
sas7bdat_file: The name, including the path, for the sas7bdat file.
return:
A pandas dataframe containing the data from the sas7bdat file.
"""
df... | 5,357,805 |
def ShrinkBE(slack):
""" shrinks quota for all BE workloads by be_shrink_rate
"""
be_shrink_ratio = st.params['quota_controller']['BE_shrink_ratio']
be_shrink_rate = 1 + be_shrink_ratio * slack
min_be_quota = int(st.node.cpu * 100000 * st.params['quota_controller']['min_be_quota'])
max_be_quota = int(st.nod... | 5,357,806 |
def display_candle(
data: pd.DataFrame,
to_symbol: str,
from_symbol: str,
ma: Optional[Iterable[int]] = None,
external_axes: Optional[List[plt.Axes]] = None,
):
"""Show candle plot for fx data.
Parameters
----------
data : pd.DataFrame
Loaded fx historical data
to_symbol... | 5,357,807 |
def tensor_imshow(inp, title=None, **kwargs):
"""Imshow for Tensor."""
inp = inp.numpy().transpose((1, 2, 0))
# Mean and std for ImageNet
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
inp = std * inp + mean
inp = np.clip(inp, 0, 1)
plt.imshow(inp, **kwargs)... | 5,357,808 |
def shift_scale_rmsf(rmsf_double, phi, cellsize, ccomp, faraday_peak):
"""Shift and scale the RMSF, to the parameters of the found clean component.
Args:
rmsf_double (numpy array): double sized array of complex point spread
function values in Faraday space.
phi (numpy array): array of Fara... | 5,357,809 |
def displayCards(cards):
"""Display all cards in the cards list"""
rows = ['', '', '', '', '']
for i, card in enumerate(cards):
rows[0] += ' ___ '
if card == BACKSIDE:
rows[1] += '|## | '
rows[2] += '|###| '
rows[3] += '|_##| '
else:
... | 5,357,810 |
def region_filter(annos, annotation):
"""filter for Region annotations.
The 'time' parameter can match either 'time' or 'timeEnd' parameters.
"""
result = []
for anno in annos:
time = annotation.get("time")
timeEnd = annotation.get("timeEnd")
for key in ['text', 'tags']:
... | 5,357,811 |
def estimator_mixt_default(sample):
"""Default estimator of mixture distribution
This estimator returns tuple with two non-overlaping parts of `sample`
which are estimated to come from continuous and discrete parts of mixture
distribution. Estimation is done by deciding sample element to be from
di... | 5,357,812 |
def modularity_clustering(graph, size_cutoff=10, deg_cutoff=0.5,
callback=None):
"""
Use the Clauset-Newman-Moore greedy modularity maximization
algorithm to partition the TN93 pairwise graph into communities.
Modularity quantifies the density of edges at the periphery of
... | 5,357,813 |
def get_post_processors():
"""
Loads post processors by inspecting members of the 'post_processors' package.
"""
post_processor_classes = []
for _, member in inspect.getmembers(post_processors):
if inspect.isclass(member):
post_processor_classes.append(member)
return post_pr... | 5,357,814 |
def apply_array_pars(arr_par_file="arr_pars.csv"):
""" a function to apply array-based multipler parameters.
Args:
arr_par_file (`str`): path to csv file detailing parameter array multipliers.
This file is written by PstFromFlopy.
Note:
Used to implement the parameterization co... | 5,357,815 |
def vulcanize(name: str) -> str:
"""Add prefixes to names that are similar to the prefixes seen
in Vulcan characters in the Star Trek™ franchise.
:param name: The name to modify.
:return: A :class:str object.
:rtype: str
Usage:
>>> # Seed the RNG to make the example predic... | 5,357,816 |
def intersperse(iterable, element):
    """Generator yielding all elements of `iterable`, but with `element`
    inserted between each two consecutive elements.

    An empty `iterable` yields nothing. (The original called a bare
    ``next(iterable)``, which on an empty input raises StopIteration
    inside a generator and, under PEP 479, surfaces as RuntimeError.)
    """
    iterator = iter(iterable)
    try:
        yield next(iterator)
    except StopIteration:
        # Empty input: yield nothing instead of crashing.
        return
    for item in iterator:
        yield element
        yield item
def B(j, p, x, knots):
""" Compute B-splines using recursive definition. """
if p == 0:
if knots[j] <= x < knots[j+1]:
return 1.0
else:
return 0.0
else:
left = special_div((x-knots[j])*B(j,p-1,x,knots), knots[j+p]-knots[j])
right = special_div(... | 5,357,818 |
def not_falsy(item: T, item_name: str) -> T:
"""
Check if a value is falsy and throw an exception if so.
:param item: the item to check for falsiness.
:param item_name: the name of the item to include in any exception.
:raises ValueError: if the item is falsy.
:returns: the item.
"""
if ... | 5,357,819 |
def _apply_limit_abs_unit(x, lim, unit):
"""Return one limit with applied unit(abs(x)). See get_limits."""
if unit is None:
return lim
unit = unit.lower()
if unit == 'near':
return lim * np.nanmin(np.abs(x))
if unit == 'far':
return lim * np.nanmax(np.abs(x))
elif unit ==... | 5,357,820 |
def load_fits(name):
""" Open a fits file image
Inputs:
name: name of the .fits file (str).
Output:
image:
"""
while True:
try:
file = fits.open(name)
image = file.copy()
return image, name
except FileNotFoundError:
prin... | 5,357,821 |
def run_flow(command, contents):
"""Run Flow command on a given contents."""
read, write = os.pipe()
os.write(write, str.encode(contents))
os.close(write)
try:
output = subprocess.check_output(
command, stderr=subprocess.STDOUT, stdin=read
)
decoded_output = out... | 5,357,822 |
async def __fetch_img_data(session: ClientSession, tile_data, image_tiles) -> None:
"""Get a cv2 image from a URL and insert it into the full-size image."""
start_y = tile_data['y']
end_y = start_y + tile_data['height']
start_x = tile_data['x']
end_x = start_x + tile_data['width']
async with se... | 5,357,823 |
def cleanup():
"""Clean up resoruces in use by implementation.
Clean up any resources that have been allocated by the RPC implementation.
This is typically open connections to a messaging service. This function
would get called before an application using this API exits to allow
connections to get... | 5,357,824 |
def plot_part1(avg_face, face_hog):
"""plot average face and hog representatitons of face."""
plt.subplot(1, 2, 1)
plt.imshow(avg_face)
plt.axis('off')
plt.title('average face image')
plt.subplot(1, 2, 2)
plt.imshow(face_hog)
plt.title('hog representation of face')
plt.axis('off')
... | 5,357,825 |
def social_bonus_count(user, count):
    """Return True if the user received at least `count` social bonuses.

    NOTE(review): the original docstring said "equals to count", but the
    comparison is ``>=`` — documented here as the code actually behaves.
    """
    return user.actionmember_set.filter(social_bonus_awarded=True).count() >= count
def del_category_tag_lib(self,c_uuid,t_uuid):
"""04删除便签或分类"""
if c_uuid:
category = Category.by_uuid(c_uuid)
if category is None:
flash(self, '分类不存在', 'error')
return {'status':False}
if category.articles:
flash(self,'分类下面有文章,请先删除文章','error')... | 5,357,827 |
def get_distance_metres(aLocation1, aLocation2):
"""
Returns the ground distance in metres between two `LocationGlobal` or `LocationGlobalRelative` objects.
This method is an approximation, and will not be accurate over large distances and close to the
earth's poles. It comes from the ArduPilot test co... | 5,357,828 |
def pd_series_overload(data=None, index=None, dtype=None, name=None, copy=False, fastpath=False):
"""
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: pandas.Series
Limitations
-----------
- Parameters ``dtype`` and ``copy`` are currently... | 5,357,829 |
def get_export_summary(results):
"""Prints to screen the exporting results of example programs.
Args:
results - results of the compilation stage. which is the output of and export_repos()
Returns: Numbers of failed results
"""
pass_table = PrettyTable()
pass_table.field_names = ["EXAM... | 5,357,830 |
def _misfitfunc(data, predicted):
"""
Calculate the total data misfit function between the observed and predicted
data.
"""
result = 0.
for d, p, in zip(data, predicted):
residuals = d.observed - p
result += sqrt(numpy.dot(d.weights*residuals, residuals))/d.norm
return result | 5,357,831 |
def lammps_calc_rdf(job):
"""Create an rdf from the gsd file using Freud analysis scripts."""
import mbuild as mb
import MDAnalysis as mda
traj = mda.coordinates.XTC.XTCReader("prod.xtc")
top = mda.topology.LAMMPSParser.DATAParser("box.lammps")
u = mda.Universe(top, traj)
u.trajectory.next(... | 5,357,832 |
def stream_doi(app, doi):
"""Returns tuple of URL string and a urlopen() return value."""
apikey = app.cfg.get_or_die('api-keys', 'crossref')
url = ('http://crossref.org/openurl/?id=%s&noredirect=true&pid=%s&'
'format=unixref' % (wu.urlquote(doi), wu.urlquote(apikey)))
return url, wu.urlopen... | 5,357,833 |
def system_resource_repo(class_ini, class_configurator): # redefining ini, class_configurator pylint: disable=W0621
"""
Like `resource_repo`, but yields the system repository instead of the
default repository.
"""
with ResourceCreatorContextManager(class_ini, class_configurator,
... | 5,357,834 |
def do_evaluation(
*,
input_path,
training_path: Optional[str] = None,
testing_path: Optional[str] = None,
method,
prediction_task,
dimensions: int = 300,
number_walks: int = 8,
walk_length: int = 8,
window_size: int = 4,
p: float = 1.5,
q: float = 2.1,
alpha: float =... | 5,357,835 |
def test_structure_fatal_deformities(good_structure, deformity):
"""Make specific checks upon performing single invalidating deformations
of the data of a good structure.
"""
import re
if deformity is None:
return StructureResource(**good_structure)
deformity, message = deformity
... | 5,357,836 |
def displayTCP(tcp):
"""Display the TCP header."""
print "[TCP Header]"
print "\t Source Port: " + str(tcp.sport)
print "\t Destination Port: " + str(tcp.dport)
print "\t Sequence Number: " + str(tcp.seq)
print "\t Acknowledgment Number: " + str(tcp.ack)
print "\t Data Offset: " + str(tcp.d... | 5,357,837 |
def edit_user():
    """Return the information of the user that is about to be edited.

    Reads the user id from the JSON request body and responds with the
    user's account, name and role_id.
    """
    data = request.json
    user_id = data.get('id')
    _edit = User.query.filter_by(id=user_id).first()
    if _edit is None:
        # Guard against an unknown id: .first() returns None, and the
        # original code then raised AttributeError on attribute access.
        return jsonify({'data': {}, 'status': 0})
    _data = {'account': _edit.account, 'name': _edit.name, 'role_id': _edit.role_id}
    return jsonify({'data': _data, 'status': 1})
def fast_infer(test_data, trg_idx2word):
"""
Inference by beam search decoder based solely on Fluid operators.
"""
place = fluid.CUDAPlace(0) if InferTaskConfig.use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
out_ids, out_scores = fast_decoder(
ModelHyperParams.src_vocab_size,... | 5,357,839 |
def transactions(request):
"""See all transactions that have been contained in blocks."""
vote_list = Vote.objects.all().order_by('timestamp')
paginator = Paginator(vote_list, 100, orphans=20, allow_empty_first_page=True)
page = request.GET.get('page')
votes = paginator.get_page(page)
hashes =... | 5,357,840 |
def evaluate_all_models():
"""Evalate all models via cross validation."""
# retrieve data from model
x = get_feature_matrix()
y = get_labels()
# evaluate models
try:
for name in classifiers.keys():
evaluate_model(name, classifiers[name], x, y)
evaluate_model('VotingCl... | 5,357,841 |
def adjust_images():
"""Adjust image colors."""
albums = get_albums()
for album in albums:
path = ADJUSTED_DIR / album[0]['album']
path.mkdir(exist_ok=True)
with multiprocessing.Pool(processes=PROCESSES) as pool:
results = [pool.apply_async(adjust_album, (a,)) for a in albums]
... | 5,357,842 |
def count(pred_tokens, gold_tokens, ngram, result):
"""
count
"""
cover_count, total_count = result
pred_dict = get_dict(pred_tokens, ngram)
gold_dict = get_dict(gold_tokens, ngram)
cur_cover_count = 0
cur_total_count = 0
for token, freq in pred_dict.items():
if gold_dict.get... | 5,357,843 |
def elections_vote_places_geo(source="xd", folder=".", fLOG=noLOG):
"""
Retrieves data vote places (bureaux de vote in French)
with geocodes.
@param source should be None unless you want to use the backup plan ("xd")
@param folder where to download
@param fLOG logging functi... | 5,357,844 |
def erfc(
x: oneflow._oneflow_internal.BlobDesc, name: Optional[str] = None
) -> oneflow._oneflow_internal.BlobDesc:
"""This operator computes the :math:`1-erf(x)`, for more details of `erf` function
please refer to `math.erf`.
Args:
x (oneflow._oneflow_internal.BlobDesc): A Blob
name (... | 5,357,845 |
def ner_manual_tokenizers_bert(
dataset: str,
source: Union[str, Iterable[dict]],
loader: Optional[str] = None,
label: Optional[List[str]] = None,
tokenizer_vocab: Optional[str] = None,
lowercase: bool = False,
hide_special: bool = False,
hide_wp_prefix: bool = False,
) -> Dict[str, Any]... | 5,357,846 |
def print_image(image, position, scale=1):
"""
Affiche une image à une position donnée.
Parameters
----------
image : string
Le chemin vers l'image dans les fichiers
position : int * int
Les coordonnées de l'image
scale : int
Le facteur d'échelle
"""
img = ut... | 5,357,847 |
def test_nested_simple_condition() -> None:
"""
Iterates and maps expressions over a complex Condition:
(A=B OR A=B) AND (A=B OR A=B)
"""
c1 = Column(None, "t1", "c1")
c2 = Column(None, "t1", "c2")
co1 = binary_condition(None, ConditionFunctions.EQ, c1, c2)
c3 = Column(None, "t1", "c1"... | 5,357,848 |
def test_buy_one_investor(chain: TesterChain, web3: Web3, ico: Contract, uncapped_token: Contract, customer: str, preico_token_price, preico_starts_at, team_multisig):
"""Can buy when crowdsale is running."""
original_balance = web3.eth.getBalance(team_multisig)
wei_value = to_wei(1, "ether")
buys_toke... | 5,357,849 |
def draw_list(draw_info, color_positions={}, clear_bg=False):
"""
Find it's height width and then five them diffrent shades to see them properly
"""
lst = draw_info.lst
if clear_bg:
clear_rect = (draw_info.SIDE_PAD//2, draw_info.TOP_PAD,
draw_info.width - draw_info.SID... | 5,357,850 |
def print_file_cats(lines, category, categorizations):
"""
Debug printing
:param lines:
:param category:
:return:
"""
print(category)
for line_number, line in enumerate(lines):
stripped_line = line.strip("\n")
# Wow if I had assigned a doc string to the variable then I ... | 5,357,851 |
def get_file_manager(ext=None, callback=None):
"""Get file manager.
Context manager to temporarily set `ext` and `callback` of file_manager.
"""
app = MDApp.get_running_app()
if getattr(app, "file_manager") is None:
app.file_manager = MDFileManager()
file_manager = app.file_manager
... | 5,357,852 |
def _deep_setattr(obj, key, val):
"""
Set an attribute `key` on the object. If any of the prefix attributes do
not exist, they are set to :class:`~pyro.nn.PyroModule`.
"""
def _getattr(obj, attr):
obj_next = getattr(obj, attr, None)
if obj_next is not None:
return obj_ne... | 5,357,853 |
def rename_columns(data_dict: dict, file_out: str):
"""
Given a GRASP input excel file, renames the columns and index names, so that these are standardize and don't cause
problems with functions in this package.
If the number of columns in the given excel file sheet is less than it should be, it adds c... | 5,357,854 |
def sunnynet_train_model(train_path, save_folder, save_file, model_type='SunnyNet_3x3',
loss_function='MSELoss', alpha=1e-3, cuda=True):
"""
Trains a SunnyNet neural network model to be used to predict non-LTE populations.
Needs a "train" file prepared with build_training_set(). Com... | 5,357,855 |
def icon16(self):
    """Switch icons to the 16px size by delegating to iconResize(16).

    Logs the call via qDebug for tracing.
    """
    qDebug("icon16()")
    self.iconResize(16)
def inverse_max_dcg(labels,
gain_fn=lambda labels: tf.pow(2.0, labels) - 1.,
rank_discount_fn=lambda rank: 1. / tf.math.log1p(rank),
topn=None):
"""Computes the inverse of max DCG.
Args:
labels: A `Tensor` with shape [batch_size, list_size]. Each valu... | 5,357,857 |
def get(url):
"""
用 GET 请求 url 并返回响应,对301进行了处理
:param url:
:return:status_code, headers, body
"""
protocol, host, port, path = parsed_url(url)
s = socket_by_protocol(protocol)
s.connect((host, port))
request = 'GET {} HTTP/1.1\r\nhost: {}\r\nConnection: close\r\n\r\n'.format(path, ... | 5,357,858 |
def make_rule(frontier_pair, amr, tree, align, next_index):
"""
Creates a new rule with the given parts, and collapses these parts in the
original graph and tree.
"""
constituent, amr_fragment = frontier_pair
outside_edges = [e for e in amr.triples() if e not in amr_fragment.triples()]
root_label = amr_... | 5,357,859 |
def NotEqual(data1, data2, target=utils.CCE):
"""
check whether data1 notequals to data2.
Args:
data1 (tvm.tensor.Tensor): Tensor.
data2 (tvm.tensor.Tensor): Tensor.
Returns:
tvm.tensor.Tensor. If data1 notequal to data2 return True, else return False.
Supported Platfo... | 5,357,860 |
def read_labels(labels_path):
    """Read a list of labels (one per line) from a file.

    The file is opened in binary mode, so the returned labels are
    ``bytes`` objects with surrounding whitespace stripped.
    """
    with open(labels_path, 'rb') as handle:
        return [line.strip() for line in handle]
def test_double_binding_raises(binder_and_pool: Tuple[FunctionBinder, MockConnectionPool]):
"""Tests that binding a function more than once results in an error."""
binder, _ = binder_and_pool
match = "has already been bounded by"
with pytest.raises(FunctionAlreadyBoundError, match=match):
@bin... | 5,357,862 |
def ensure_hours_unique(sender, instance, raw, using, update_fields, **kwargs):
"""Some DB's don't consider multiple rows which contain the same columns
and include null to violate unique contraints so we do our own check"""
if instance.id is None:
try:
Hours.objects.get(service_event=i... | 5,357,863 |
def targeted_neurogenesis(weights, n_replace, targeted_portion, is_training):
"""
Takes a weight matrix and applied targetted dropout based on weight
importance (From Gomez et al. 2019; https://for.ai/blog/targeted-dropout/)
Args:
weights - the input by ouput matrix of weights
dropout_r... | 5,357,864 |
def calc_distance_two_points(long_from, lat_from, long_to, lat_to):
"""Calculate distance between two points
Parameters
----------
long_from : float
Longitute coordinate from point
lat_from : float
Latitute coordinate from point
long_to : float
Longitute coordinate to po... | 5,357,865 |
def modified_precision(reference_max_counts, hypothesis, n):
"""
Calculate modified ngram precision.
The normal precision method may lead to some wrong translations with
high-precision, e.g., the translation, in which a word of reference
repeats several times, has very high precision.
This fun... | 5,357,866 |
def bin_spectrum(bin_width, wavelength, doppler_shift, flux, flux_uncertainty,
final_uncertainty='combine'):
"""
Args:
wavelength:
doppler_shift:
flux:
flux_uncertainty:
Returns:
"""
bw = bin_width
wv = wavelength
ds = doppler_shift
f =... | 5,357,867 |
def ppo(env_fn,
# by default, use the neural network mlp we define in core
actor_critic=core.mlp_actor_critic,
ac_kwargs=dict(),
seed=0,
steps_per_epoch=4000,
epochs=50,
gamma=0.99,
clip_ratio=0.2,
pi_lr=3e-4,
vf_lr=1e-3,
train_pi_iters=80,
train_v_iters=80,
lam=0.97,
max_ep_len=1000,
targ... | 5,357,868 |
def test_double_smirks():
"""Test filtering based on 2 different smirks patterns."""
molecules = []
for i in [0, 1, 2, 3, 5]:
molecules.append(
Molecule.from_file(get_data_file_path(f'1-validate_and_assign_graphs_and_confs/BBB-0000{i}-00.sdf'), "sdf"))
# filter P should only be one m... | 5,357,869 |
def scrub(old_fs: Vfs, new_fs: Vfs) -> Vfs:
"""Try to eliminate files which were previously installed but are no longer used."""
old_fs = old_fs.copy()
new_fs = new_fs.copy()
# Look for files in the old log which are no longer present in the new log
for txn in old_fs._log:
if txn[0] == "li... | 5,357,870 |
def test_renamed_objects(pipeline, clean_db):
"""
Verify that we can dump and restore renamed CQs and streams
"""
pipeline.create_stream('s', x='int')
q = """
SELECT x, count(*) FROM s GROUP BY x;
"""
pipeline.create_cv('cv_0', q)
q = """
SELECT (new).x, combine((delta).count) AS count FROM output... | 5,357,871 |
def get_filename(row):
"""
Assembles the name of the feature file.
Parameters
----------
row : pandas.Series
A row fom the sequence dataframe. Must have the following index values:
"sample_name", "inj_number", "batch_name", "acquisition_date_and_time".
Returns
-------
f... | 5,357,872 |
def create_plotly_trace(data_x, data_y, namexy, chosen_mode='lines', use_gl = True, swap_xy = False):
"""
Создание одного trace по данным
:param data_x: данные для оси x
:param data_y: данные для оси y
:param namexy: название для trace
:param chosen_mode: настройка отображения 'lines', 'marker... | 5,357,873 |
def read_metadata() -> dict:
    """Load and return the raw metadata parsed from the metadata file."""
    path = metadata_path().resolve()
    with open(path, "r") as stream:
        return yaml.safe_load(stream)
def plot_plaid_contrast_tuning(bf_indices, base_contrasts, mask_contrasts, base_orientations,
mask_orientations, test_responses):
"""
Plot responses to orthogonal plaid stimulus at different base and mask contrasts
Inputs:
bf_indices: [list or array] of neuron indices to use
all indices should be less... | 5,357,875 |
def new_token():
    """
    Issue a new access token for the current user.
    Requires basic auth with nickname and password.
    """
    token = generate_token(g.current_user['id'])
    return jsonify({'token': token})
def get_room_info(room_real_id: int, verify: utils.Verify = None, cookies = None):
"""
获取直播间信息(标题,简介等)
:param room_real_id: 真实房间ID
:param verify:
:return:
"""
if verify is None:
verify = utils.Verify()
api = API["live"]["info"]["room_info"]
if cookies is None:
resp =... | 5,357,877 |
def sweep_deposit_eth_accounts_balances():
"""做以太充值账户的归账操作"""
# 计算合约的一次转账操作需要的gas(可以估计一个固定值)
token_contract_addr = app.config['BLOCKLINK_ERC20_CONTRACT_ADDRESS']
gas_limit = 100000 # TODO: 不同token合约可能需要不同的gas_limit
gas_price = 1 * (10**9)
encrypt_password = app.config['ETH_ENCRYPT_PASSWORD'].en... | 5,357,878 |
def parse_raw(setup, id=None, first_line_is_header=(-1,0,1)):
"""Used in conjunction with lazy_import and parse_setup in order to make alterations
before parsing.
Parameters
----------
setup : dict
Result of h2o.parse_setup
id : str, optional
An id for the frame.
first_line_is_header ... | 5,357,879 |
def import_year(year: int = None) -> bool:
"""Downloads, extracts and imports the Losungen of a given year.
The year defaults to the next year."""
session: Session = SessionMaker()
repo = TagesLosungRepository(session)
year = datetime.date.today().year + 1 if year is None else year
losungen = re... | 5,357,880 |
def isURL(url: str) -> bool:
    """Return True when `url` is a non-None string matching the URL regex."""
    if url is None:
        return False
    return re.match(urlregex, url) is not None
def rmse(Y_true, Y_hat):
"""
returns root mean squared error
Args:
Y_true : true outputs [N,(1)]
Y_hat : predicted outputs [N, (1)]
"""
if Y_true.ndim == 2:
Y_true = Y_true[:, 0]
if Y_hat.ndim == 2:
Y_hat = Y_hat[:, 0]
return np.sqrt(np.mean((Y_tru... | 5,357,882 |
def getAllDescWords(itemList):
    """Return the deduplicated "description words" of every item in itemList."""
    descWords = set()
    for item in set(itemList):  # each item contributes once
        descWords.update(NYCitems[item][DESCWORDS])
    return list(descWords)
def plot_probe_trajectory_histology(
x, y, subject_ID, axc, axs,
provenance = 'Planned',
project = 'ibl_neuropixel_brainwide_01',
gr_percentile_min=0.2, rd_percentile_min=1, rd_percentile_max=99.99,
font_size = 8, label_size = 8 ):
"""Plot slices of Histology data along th... | 5,357,884 |
def create_constrained_mechanical_system_from_component(structural_component, constant_mass=False,
constant_damping=False, constraint_formulation='boolean',
**formulation_options):
"""
Create a mechan... | 5,357,885 |
def parse_certificate_issuer_id(id):
    """
    Parse a certificate-issuer resource id by delegating to parse_object_id
    with the 'certificates/issuers' collection type.

    :param id: The resource collection type.
    :type id: str
    :rtype: KeyVaultId
    """
    return parse_object_id('certificates/issuers', id)
def create_schema(
url: str,
schema: str,
dbname: str = None
):
"""
Create a schema in the database.
:param url: the database URL
:param schema: the name of the schema
:param dbname: the name of the database
"""
# Figure out what database we're looking for.
_dbna... | 5,357,887 |
def normalize_whitespace(
text, no_line_breaks=False, strip_lines=True, keep_two_line_breaks=False
):
"""
Given ``text`` str, replace one or more spacings with a single space, and one
or more line breaks with a single newline. Also strip leading/trailing whitespace.
"""
if strip_lines:
t... | 5,357,888 |
def nancumprod(x1, **kwargs):
"""
Return the cumulative product of array elements over a given axis treating Not a Numbers (NaNs) as one.
For full documentation refer to :obj:`numpy.nancumprod`.
Limitations
-----------
Parameter ``x`` is supported as :obj:`dpnp.ndarray`.
Keyword ar... | 5,357,889 |
def _attr_manually_specified_tests_get_errors(
yaml_path: str,
yaml_entry: Dict[str, Any],
tag: str,
attr: str,
grep_tags: List[str]
) -> List[str]:
"""Report incorrect manually-specified test attributes
This function ensures that manually-specified
tests refer to files that actually ex... | 5,357,890 |
def error(data, mn, mx, confidence):
"""
Compute the error components.
:param data: the collected data.
:param mn: the critical value (minimum).
:param mx: the critical value (maximum).
:param confidence: the confidence level.
:return: (Dict) the dictionary of errors.
"""
return erru... | 5,357,891 |
def retry_connection(f):
"""Decorator. Recconect on failure.
"""
def retry(*args, **kwargs):
seconds_to_retry = 5
success = False
while (not success):
try:
result = f(*args, **kwargs)
success = True
return result
... | 5,357,892 |
def shift_mean(x_mod, x_org):
"""
Shift the mean value of `x_mod` such that it equals the mean of `x_org`.
Parameters
----------
x_org : ndarray
The array which hold the "true" mean value.
x_mod : ndarray
The modified copy of `x_org` which must have its mean value shifted.
... | 5,357,893 |
def conversation_detail(request, pk):
"""
Retrieve, update or delete a conversation.
"""
try:
conversation = Conversation.objects.get(pk=pk)
except Conversation.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.method == 'GET':
serializer = Conv_... | 5,357,894 |
def PromptToEnableApi(project, service_token, exception,
is_batch_request=False):
"""Prompts to enable the API and throws if the answer is no.
Args:
project (str): The project that the API is not enabled on.
service_token (str): The service token of the API to prompt for.
exceptio... | 5,357,895 |
def purchase_products(product_id):
"""Purchase a product"""
app.logger.info("Request to purchase product with id %s", product_id)
check_content_type("application/json")
product = Product.find(product_id)
if not product:
abort(
status.HTTP_404_NOT_FOUND, "product with id '{}' was ... | 5,357,896 |
def process_alerts(data):
"""
Returns a Pandas DataFrame from the API call.
:return: A pandas DataFrame.
"""
data_dicts = data.get("data", [])
lines = []
for data_dict in data_dicts:
data_dict["alertDescription"] = helper.extract_json_field(
data_dict.get("alertProps", {... | 5,357,897 |
def validate(df):
"""Validate the timeseries dataframe
"""
err_msgs = []
warn_msgs = []
# check column names
for col in EXP_COLS:
if col not in df:
err_msgs.append(f"**{col}** column missing")
msgs = {
"errors": err_msgs,
"warnings": warn_msgs
}
... | 5,357,898 |
def _gcs_uri_rewriter(raw_uri):
"""Rewrite GCS file paths as required by the rewrite_uris method.
The GCS rewriter performs no operations on the raw_path and simply returns
it as the normalized URI. The docker path has the gs:// prefix replaced
with gs/ so that it can be mounted inside a docker image.
Args:... | 5,357,899 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.