Dataset columns:
query: stringlengths 9 to 9.05k
document: stringlengths 10 to 222k
negatives: listlengths 19 to 20
metadata: dict
Write the concordance entries to the output file (filename). See sample output files for format.
def write_concordance(self, filename):
    all_keys = self.concordance_table.get_all_keys()
    lines = []
    for key in all_keys:
        line = key + ":"
        values = self.concordance_table.get_value(key)
        if values is not None:
            for value in values:
                line += " " + str(value)
        line += "\n"
        lines.append(line)
    out_file = open(filename, "w+")
    for line in lines:
        out_file.write(line)
    out_file.close()
[ "def write_concordance(self, filename):\r\n key_list = self.concordance_table.get_all_keys()\r\n key_list.sort()\r\n write_text = ''\r\n for x in range(0,len(key_list)):\r\n values = self.concordance_table.get_value(key_list[x])\r\n values_str = ''\r\n fo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Builds a k-factor circulant matrix (a matrix with the structure of circulant matrices, but with the entries above the diagonal multiplied by the same factor). The matrix is stored in memory.
import numpy as np
from numpy import tri
from scipy.linalg import circulant

def factor_circulant_matrix(x, k):
    n = len(x)
    return circulant(x) * (tri(n, n, 0) + k * np.transpose(tri(n, n, -1)))
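A minimal usage sketch (hypothetical values, assuming the imports above): for x = [1, 2, 3] and k = 2, the entries above the diagonal are doubled.

A = factor_circulant_matrix(np.array([1.0, 2.0, 3.0]), 2)
# circulant([1, 2, 3]) is [[1, 3, 2], [2, 1, 3], [3, 2, 1]]; scaling the
# strictly upper triangle by k=2 gives [[1, 6, 4], [2, 1, 6], [3, 2, 1]].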
[ "def generate_k_circulant(n: int, k: int):\n return nx.to_numpy_matrix(\n nx.generators.classic.circulant_graph(n, list(range(1, k + 1))),\n dtype=np.int64,\n )", "def _calc_k_matrix(self):\n el_len = self.coord_electrode.size\n h = float(np.diff(self.coord_electr...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the matrix-vector product y = Cu, where C is a k-factor circulant matrix. All matrices are real.
import numpy as np
from numpy import real
from numpy.fft import fft, ifft  # FFT import assumed; the original module may use scipy's fft instead

def factor_circulant_multiplication(u, x, k=1):
    n = len(u)
    D_k = (k**(1/n))**np.arange(0, n)
    Lambda = fft(D_k*x)
    return (1/D_k)*real(ifft(Lambda*fft(D_k*u)))  # y
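A short usage sketch (hypothetical values). With k=1 the matrix is an ordinary circulant, so the FFT route reduces to circular convolution in O(n log n):

u = np.array([1.0, 2.0, 3.0, 4.0])  # input vector
x = np.array([4.0, 3.0, 2.0, 1.0])  # first column of C
y = factor_circulant_multiplication(u, x, k=1)
# For k=1 this should match the dense product circulant(x) @ u
# up to floating-point round-off.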
[ "def scalarMultiplication(self,c):\n matrixResult = [[complex.ComplexNumber(0,0) for x in range(self.m)] for y in range(self.n)] \n for i in range (self.m):\n for j in range (self.n):\n matrixResult[i][j]=self.mtx[i][j].multiplication(c)\n matResult = Matrix(matrixResu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Solves Tx = b using the Levinson algorithm, where T is a positive-definite symmetric Toeplitz matrix and b is a real vector.
from numpy import zeros, dot

def levinson(r, b):
    n = len(b)
    y = zeros((n,))
    x = zeros((n,))
    # normalize the system so that the T matrix has diagonal of ones
    r_0 = r/r[0]
    b_0 = b/r[0]
    if n == 1:
        return b_0
    y[0] = -r_0[1]
    x[0] = b_0[0]
    beta = 1
    alpha = -r_0[1]
    for k in range(0, n-1):
        beta = (1 - alpha*alpha)*beta
        mu = (b_0[k+1] - dot(r_0[1:k+2], x[k::-1])) / beta
        x[0:k+1] = x[0:k+1] + mu*y[k::-1]
        x[k+1] = mu
        if k < n-2:
            alpha = -(r_0[k+2] + dot(r_0[1:k+2], y[k::-1]))/beta
            y[0:k+1] = y[0:k+1] + alpha * y[k::-1]
            y[k+1] = alpha
    return x
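A small worked example (hypothetical values): the first column [4, 2, 1] defines a diagonally dominant, hence positive-definite, symmetric Toeplitz matrix.

import numpy as np
from scipy.linalg import toeplitz

r = np.array([4.0, 2.0, 1.0])  # first column (and row) of T
b = np.array([1.0, 2.0, 3.0])
x = levinson(r, b)
# x should agree with the dense solve np.linalg.solve(toeplitz(r), b),
# at O(n^2) cost instead of O(n^3).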
[ "def Backward_Euler_solver(func, mx, mt, L, T, kappa, u_0, u_T, bCond):\n x,_ = xt_points(mx, mt, L, T)\n u_j = U(func, x, L)\n u_jp1 = np.zeros(len(u_j))\n A_BE = tridiag_A(mx, mt, L, T, kappa)\n\n # Solve the PDE: loop over all time points\n for n in range(1, mt+1):\n # Backward Euler sch...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the log determinant of a positive-definite symmetric Toeplitz matrix. The determinant is computed recursively. The intermediate solutions of the Levinson recursion are exploited.
import numpy as np
from numpy import zeros, dot

def toeplitz_slogdet(r):
    n = len(r)
    r_0 = r[0]
    r = np.concatenate((r, np.array([r_0])))
    r /= r_0  # normalize the system so that the T matrix has diagonal of ones
    logdet = n*np.log(np.abs(r_0))
    sign = np.sign(r_0)**n
    if n == 1:
        return (sign, logdet)
    # from now on this is a modification of the Levinson algorithm
    y = zeros((n,))
    x = zeros((n,))
    b = -r[1:n+1]
    r = r[:n]
    y[0] = -r[1]
    x[0] = b[0]
    beta = 1
    alpha = -r[1]
    d = 1 + dot(-b[0], x[0])
    sign *= np.sign(d)
    logdet += np.log(np.abs(d))
    for k in range(0, n-2):
        beta = (1 - alpha*alpha)*beta
        mu = (b[k+1] - dot(r[1:k+2], x[k::-1])) / beta
        x[0:k+1] = x[0:k+1] + mu*y[k::-1]
        x[k+1] = mu
        d = 1 + dot(-b[0:k+2], x[0:k+2])
        sign *= np.sign(d)
        logdet += np.log(np.abs(d))
        if k < n-2:
            alpha = -(r[k+2] + dot(r[1:k+2], y[k::-1]))/beta
            y[0:k+1] = y[0:k+1] + alpha * y[k::-1]
            y[k+1] = alpha
    return (sign, logdet)
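A quick consistency sketch (hypothetical values): numpy's dense slogdet should return the same pair, just without the Toeplitz speed-up.

import numpy as np
from scipy.linalg import toeplitz

r = np.array([4.0, 2.0, 1.0])
sign, logdet = toeplitz_slogdet(r)
# Expected to match np.linalg.slogdet(toeplitz(r)) up to round-off.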
[ "def fast_logdet(matrix):\n sign, ld = np.linalg.slogdet(matrix)\n if not sign > 0:\n return -np.inf\n return ld", "def log_abs_det_jacobian(self, z):\n pre_u = self.u_ + self.u\n pre_w = self.w_ + self.w\n a = F.softplus(self.a + self.inv)\n w = F.softmax(pre_w, dim=3)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Preprocessing needed for toeplitz_inverse_multiplication()
import numpy as np
from numpy.fft import fft  # FFT import assumed, as in the sibling routines

def toeplitz_inverse_multiplication_prep(T_column):
    phi = 1
    psi = 2
    assert phi != 0
    assert psi != 0
    assert phi != psi
    n = len(T_column)
    x = levinson(T_column, np.concatenate((np.array([1]), np.zeros((n-1,)))))
    y = levinson(T_column, np.concatenate((np.zeros((n-1,)), np.array([1]))))
    x_0 = x[0]
    D_phi = (phi**(1/n))**np.arange(0, n)
    D_psi = (psi**(1/n))**np.arange(0, n)
    Lambda_1 = fft(D_psi*x)
    Lambda_2 = fft(D_phi*np.concatenate(([phi*y[-1]], y[0:-1])))
    Lambda_3 = fft(D_psi*np.concatenate(([psi*y[-1]], y[0:-1])))
    Lambda_4 = fft(D_phi*x)
    return (x_0, phi, psi, D_phi, D_psi, Lambda_1, Lambda_2, Lambda_3, Lambda_4)
[ "def transformPreMultiply(*args):\n return _almathswig.transformPreMultiply(*args)", "def bd_toeplitz_inverse_multiplication(u, *arrs):\n \n y = zeros(shape(u))\n n_start = 0\n n_end = 0\n for t in arrs:\n n_start = n_end\n n_end += len(t[3]) # len(t[3]) is the length of the block\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Matrix multiplication with the inverse of a block-diagonal matrix having Toeplitz blocks: y = T^{-1} u. Analogous to toeplitz_inverse_multiplication().
from numpy import zeros, shape

def bd_toeplitz_inverse_multiplication(u, *arrs):
    y = zeros(shape(u))
    n_start = 0
    n_end = 0
    for t in arrs:
        n_start = n_end
        n_end += len(t[3])  # len(t[3]) is the length of the block
        y[n_start:n_end] = toeplitz_inverse_multiplication(u[n_start:n_end], *t)
    assert len(y) == n_end
    return y
[ "def chol_inverse_diag(t):\n (uu, nrows) = t.shape\n B = np.zeros((uu, nrows), dtype=\"float64\")\n B[1, nrows - 1] = 1.0 / t[1, nrows - 1] ** 2\n B[0, nrows - 1] = -t[0, nrows - 1] * B[1, nrows - 1] / t[1, nrows - 2]\n for j in reversed(range(nrows - 1)):\n tjj = t[1, j]\n B[1, j] = (1...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse a single line of csv-to-arrow output. Raise RuntimeError if a line cannot be parsed. (We can't recover from that because we don't know what's happening.)
def _parse_csv_to_arrow_warning(line: str) -> I18nMessage:
    for pattern, builder in _ERROR_PATTERNS:
        match = pattern.match(line)
        if match:
            return builder(**match.groupdict())
    raise RuntimeError("Could not parse csv-to-arrow output line: %r" % line)
[ "def __parse_csv_line(self, csv_line):\n if Case.label_column == -1:\n raise Exception(\"Cannot parse CSV file until properties of file have been specified to the Case class\")\n\n # Loop through each comma-separated item in the line, after first truncating the newline from the end\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return true if we should fast-skip converting a pa.Array. The _true_ reason for this function is to test whether an Array contains "Inf" or "NaN". A number-conversion library will parse those. But _this_ library is for Workbench, and Workbench doesn't support NaN/Inf. So this function helps us decide _not_ to auto-convert a column when the intent isn't perfectly clear. Assume `arr` is of type `utf8` or a dictionary of `utf8`. Assume there are no gaps hidden in null values in the buffer. (It's up to the caller to prove this.)
import array
import sys

import pyarrow

def _utf8_chunk_may_contain_inf_or_nan(chunk: pyarrow.Array) -> bool:
    _, offsets_buf, data_buf = chunk.buffers()
    offsets = array.array("i")
    assert offsets.itemsize == 4
    offsets.frombytes(offsets_buf)
    if sys.byteorder != "little":
        offsets.byteswap()  # pyarrow is little-endian
    offset0 = offsets[chunk.offset]
    offsetN = offsets[chunk.offset + len(chunk)]  # len(offsets) == 1 + len(chunk)
    b = data_buf[offset0:offsetN].to_pybytes()
    return SCARY_BYTE_REGEX.search(b) is not None
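A small illustration of the buffer layout this relies on (hypothetical array):

import pyarrow

arr = pyarrow.array(["1.5", "NaN", "2"])
# arr.buffers() -> (validity, offsets, data); the offsets [0, 3, 6, 7]
# index into the data bytes b"1.5NaN2", so slicing from offsets[0] to
# offsets[3] recovers the concatenated text that SCARY_BYTE_REGEX scans.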
[ "def asarray_chkfinite(a):\n a = asarray(a)\n if (a.dtype.char in typecodes['AllFloat']) \\\n and (_nx.isnan(a).any() or _nx.isinf(a).any()):\n raise ValueError, \"array must not contain infs or NaNs\"\n return a", "def contains_inf(arr, node=None, var=None):\n if not _is_numeric_valu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update the config information with new dropout values.
def update_dropout(info, dropout, dropout_type, prop_name):
    if dropout_type == "schnet_dropout":
        info["model_params"]["schnet_dropout"] = dropout
    elif dropout_type == "chemprop_dropout":
        info["model_params"]["cp_dropout"] = dropout
    elif dropout_type == "readout_dropout":
        # if it's in the readout layers, find the dropout
        # layers in the readout dictionary and update them
        readout = info["model_params"]["readoutdict"]
        layer_dics = readout[prop_name]
        for layer_dic in layer_dics:
            if layer_dic["name"] == "Dropout":
                layer_dic["param"]["p"] = dropout
        info["model_params"]["readoutdict"] = {prop_name: layer_dics}
    elif dropout_type == "attention_dropout":
        info["model_params"]["boltzmann_dict"]["dropout_rate"] = dropout
    else:
        info["model_params"][dropout_type] = dropout
[ "def conf_update(self):\n pass", "def update(self):\n self.save_config_file()", "def update_config(self, config):\n self.config = config\n self.rate_dropout = nn.Dropout(config.DROPOUT_RATES)\n self.pos_encoder.update_config(config)\n self.transformer_encoder.update_con...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update the config information with the number of attention heads.
def update_heads(info, heads):
    info["model_params"]["boltzmann_dict"]["num_heads"] = heads
    # Concatenate the fingerprints produced by the different heads
    info["model_params"]["boltzmann_dict"]["head_pool"] = "concatenate"
    readoutdict = info["model_params"]["readoutdict"]
    feat_dim = info["model_params"]["mol_basis"]
    for key, lst in readoutdict.items():
        for i, dic in enumerate(lst):
            if "param" in dic and "in_features" in dic.get("param", {}):
                # make sure that the input dimension to the readout is equal to
                # `heads * feat_dim`, where `feat_dim` is the feature dimension
                # produced by each head
                readoutdict[key][i]["param"]["in_features"] = feat_dim * heads
                break
    info["model_params"]["readoutdict"] = readoutdict
[ "def increment_config_version(self):\n self.config_version += 1\n if self.config_version > MAX_CONFIG_VERSION:\n self.config_version = 1", "def _make_attention(self):\n return self.config.attention_cls(\n num_heads=self.config.num_heads,\n dtype=self.config.dtype,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update a general parameter that's in the main info dictionary.
def update_general(info, key, val):
    info["model_params"][key] = val
[ "def update_parameter(self, param, val, force=False):\n self._update_dict[param] = val\n if force:\n self._cur_val[param] = None", "def update_params(self):", "def updateParam(self, name, value):\n params = self.params\n params[name]['value'] = value\n self.params =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Construct generalized extreme value distribution. The parameters `loc`, `scale`, and `concentration` must be shaped in a way that supports broadcasting (e.g. `loc + scale + concentration` is valid).
def __init__(self,
             loc,
             scale,
             concentration,
             validate_args=False,
             allow_nan_stats=True,
             name='GeneralizedExtremeValue'):
    parameters = dict(locals())
    with tf.name_scope(name) as name:
        dtype = dtype_util.common_dtype([loc, scale, concentration],
                                        dtype_hint=tf.float32)
        loc = tensor_util.convert_nonref_to_tensor(
            loc, name='loc', dtype=dtype)
        scale = tensor_util.convert_nonref_to_tensor(
            scale, name='scale', dtype=dtype)
        concentration = tensor_util.convert_nonref_to_tensor(
            concentration, name='concentration', dtype=dtype)
        dtype_util.assert_same_float_dtype([loc, scale, concentration])
        # Positive scale is asserted by the incorporated GEV bijector.
        self._gev_bijector = gev_cdf_bijector.GeneralizedExtremeValueCDF(
            loc=loc, scale=scale, concentration=concentration,
            validate_args=validate_args)
        # Because the uniform sampler generates samples in `[0, 1)` this would
        # cause samples to lie in `(inf, -inf]` instead of `(inf, -inf)`. To fix
        # this, we use `np.finfo(dtype_util.as_numpy_dtype(self.dtype)).tiny`
        # because it is the smallest, positive, 'normal' number.
        super(GeneralizedExtremeValue, self).__init__(
            distribution=uniform.Uniform(
                low=np.finfo(dtype_util.as_numpy_dtype(dtype)).tiny,
                high=tf.ones([], dtype=dtype),
                allow_nan_stats=allow_nan_stats),
            # The GEV bijector encodes the CDF function as the forward,
            # and hence needs to be inverted.
            bijector=invert_bijector.Invert(
                self._gev_bijector, validate_args=validate_args),
            parameters=parameters,
            name=name)
[ "def gaussian(mu, wid, x):\n return np.exp(-((x - mu) / (0.6005612 * wid))**2)", "def func_full_exp(x, c1, c2, c3, c4, c5, c6, c7):\n x = np.power(10, x)\n thermalCore = c1 * np.sqrt(x) * np.exp(-c2 * x)\n a = map(lambda y: 0 if y < c5 else 1, x)\n b = map(lambda y: 0 if y < c6 else 1, x)\n #b1 ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Construct Artillery YAML configuration
def set_yaml_config(self) -> None:
    # LT-248: We can pick Artillery Phase configuration from conf file
    self.yaml_config = {
        "config": {
            "target": self.get_swagger_url(),
            "processor": f"./{self.OUT_FILE}",
            "phases": [
                {
                    "duration": settings.DURATION or 1,
                    "arrivalRate": settings.SPAWN_RATE or 1
                }
            ]
        },
        "scenarios": self.task_set.yaml_flow
    }
[ "def setupFromYml(self, yml):", "def __build_yaml(self):\n \n with open(self.mainConfigFile, \"r\") as f:\n self.configFiles = yaml.safe_load(f)\n\n self.yamlStream = \"# \" + self.find_file(self.configFiles['head'])+'\\n'\n with open(self.find_file(self.configFiles['head'])...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tell if a person is allergic to the given allergen.
def is_allergic_to(self, allergen):
    return allergen in self.list
[ "def is_allergen(self, is_allergen):\n\n self._is_allergen = is_allergen", "def is_girl(self):\n if self.gneder == self.GIRL: return True;", "def in_garden(obj):\n print(\"Searching the garden's random objects\")\n return obj in _random_objects", "def isrelatierekening(self, rekening):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This returns a single entry corresponding to the Directory Entity referred to by FolderEntityData. The returned string is given below (between Start and End) Start
def getFolderEntry(FolderEntityData):
    if FolderEntityData.Type not in ['IntermediateDir', 'ExperimentDir']:
        errprint('\nThe given EntityData does not represent the data of a directory')
        raise ValueError
    OutputLines = []
    OutputLines.append("FolderID : {UID}".format(UID=FolderEntityData.ID))
    OutputLines.append("ParentFolderID : {UID}".format(UID=FolderEntityData.ParentID))
    OutputLines.append("FolderType : {Type}".format(Type=FolderEntityData.Type))
    OutputLines.append("FolderTitle : {Title}".format(Title=FolderEntityData.Title))
    OutputLines.append("FolderDescription: |-2")
    OutputLines += [" " + Line for Line in FolderEntityData.Description.splitlines()]
    OutputLines.append("")
    return "\n".join(OutputLines)
[ "def getFolderItemName(self) -> unicode:\n ...", "def folder_key(title,folder_name=DEFAULT_FOLDER_NAME):\n #parameter order is reversed because of kwargs necessities :(\n #i dont use this atm\n return ndb.Key('Folder', folder_name,'File',title)", "def folder_key(self):\n return self._fold...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This returns a single entry corresponding to the Experiment Entity referred to by ExpEntityData. The returned string is given below (between Start and End) Start
def getExperimentEntry(ExpEntityData):
    # Validate that ExpEntityData actually corresponds to an Experiment Entity
    if ExpEntityData.Type != 'Experiment':
        errprint("\nThe Entity Data does not represent the data of an experiment")
        raise ValueError
    OutputLines = []
    OutputLines.append("")
    OutputLines.append("- ID : {ID}".format(ID=ExpEntityData.ID))
    OutputLines.append(" Title : {Title}".format(Title=ExpEntityData.Title))
    OutputLines.append(" Description: |-2")
    OutputLines += [" " + Line for Line in ExpEntityData.Description.splitlines()]
    OutputLines.append("")
    OutputLines.append(
        "{0:#<100}".format("## End of Experiment {UID} ".format(UID=ExpEntityData.ID)))
    return "\n".join(OutputLines)
[ "def entity_description(self, eid):\n entities = self._load_entities()\n return entities[eid][\"description\"]", "def getEntity(self):\n\n fid = file(self.filename)\n entityre = re.compile(\"entity (\\w+) is\", re.IGNORECASE)\n\n matches = entityre.search(fid.read())\n se...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get all the employees out of the database
def get_employees(self):
    from Employee import Employee
    cursor = self.dbconnect.get_cursor()
    cursor.execute('select * from employee')
    employees = list()
    for row in cursor:
        employee = Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8])
        employees.append(employee)
    return employees
[ "def get_employees():\n employees = list()\n try:\n connection = DBConnection.getConnection()\n cursor = connection.cursor()\n cursor.execute(\"select * from employee;\")\n rows = cursor.fetchall()\n connection.commit()\n for data in ro...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
this function gets all the admins from the database
def get_admins(self):
    from Employee import Employee
    admins = list()
    cursorRoles = self.dbconnect.get_cursor()
    cursorRoles.execute('select * from employeeRoles where role=\'admin\'')
    for row in cursorRoles:
        admins.append(self.get_employee(row[0]))
    return admins
[ "def get_all_administrators():\n return User.objects.filter(groups__name=\"administrators\")", "def get_admins():\n users = get_users()\n admins = []\n for user in users:\n if user[\"approval_level\"] == \"admin\":\n admins.append(user)\n\n return admins", "def get_admins(name):...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
gets a single employee out of the database by id
def get_employee(self, id):
    from Employee import Employee
    cursor = self.dbconnect.get_cursor()
    cursor.execute('SELECT * FROM employee WHERE employeeID=%s ', (id,))
    row = cursor.fetchone()
    return Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8])
[ "def get(self, id):\n resultado = EmployeeModel.query.filter_by(employee_id=id).first()\n if resultado:\n return resultado\n api.abort(404)", "def get(id_: int):\n logger.debug('Retrieving employee by id %i.', id_)\n try:\n query = db.session.query(Employee)\n e...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
gets a single employee out of the database by name
def get_employeeOnName(self, name):
    from Employee import Employee
    cursor = self.dbconnect.get_cursor()
    cursor.execute('SELECT * FROM employee WHERE name=%s ', (name,))
    if cursor.rowcount != 0:
        row = cursor.fetchone()
        return Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8])
    else:
        return None
[ "def get_employee_by_name(self, name):\n cursor = self.dbconnect.get_cursor()\n cursor.execute('SELECT id, name, email, office, extra_info, picture_location, research_group, title, is_external,'\n ' is_admin, is_active FROM employee WHERE name=%s', (name,))\n row = cursor....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
adds an employee to the database
def add_employee(self, empl):
    cursor = self.dbconnect.get_cursor()
    try:
        cursor.execute('INSERT INTO employee values(default,%s,%s,%s,%s,%s,%s,%s,%s)',
                       (empl.name, empl.email, empl.office, empl.research_group, empl.title,
                        empl.internOrExtern, empl.active, empl.promotor))
        cursor.execute('SELECT LASTVAL()')
        eid = cursor.fetchone()[0]
        empl.id = eid  # get id and return updated object
        self.dbconnect.commit()
    except(Exception, self.dbconnect.get_error()) as error:
        self.dbconnect.rollback()
        raise Exception('\nUnable to save Employee!\n(%s)' % (error))
[ "def add_employee(self, obj):\n cursor = self.dbconnect.get_cursor()\n try:\n cursor.execute('INSERT INTO employee(id, name, email, office, extra_info, picture_location, research_group, '\n 'title, is_external, is_admin, is_active) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
adds a role to an employee
def add_employeeRole(self, id, role):
    cursor = self.dbconnect.get_cursor()
    try:
        cursor.execute('INSERT INTO employeeRoles values(%s,%s)', (id, role))
        # get id and return updated object
        self.dbconnect.commit()
    except(Exception, self.dbconnect.get_error()) as error:
        self.dbconnect.rollback()
        raise Exception('\nUnable to save EmployeeRole!\n(%s)' % (error))
[ "def test_add_role(self):\n pass", "def add_role(self, role):\n self.roles.add(unicode(role))", "async def add_role(\n client,\n event,\n user: ('user', 'User to add role to'),\n role: ('role', 'The role to give'),\n):\n # Check for permissions\n if not event.user_permissions.can...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
gets all the roles of an employee
def get_employeeRoles(self, id):
    cursor = self.dbconnect.get_cursor()
    cursor.execute('select * from employeeRoles where employee=%s', (id,))
    roles = list()
    for row in cursor:
        roles.append(row[1])
    return roles
[ "def get_roles(role):", "def get_roles(self) -> List[RoleObj]:\n ...", "def get_roles(self, principal_id):", "def _get_roles(self):\n return api.tuskar.OvercloudRole.list(self.request)", "def get_roles():\n\n # Get instance of RolesOperations Class\n roles_operations = RolesOpera...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
changes the data of an employee
def change_employee(self, employee):
    cursor = self.dbconnect.get_cursor()
    try:
        if employee.id is None:
            raise Exception('no id given')
        cursor.execute('select * from employee where employeeID=%s', (str(employee.id),))
        if cursor.rowcount == 0:
            raise Exception('no employee found with that id')
        cursor.execute(
            'update employee set name= %s,email= %s,office= %s,title= %s,INTernORextern= %s,active= %s,promotor= %s where employeeID=%s',
            (employee.name, employee.email, employee.office, employee.title,
             employee.internOrExtern, employee.active, employee.promotor, employee.id))
        self.dbconnect.commit()
    except:
        self.dbconnect.rollback()
        raise Exception('unable to change employee')
[ "def updateEmp(self, data, new_data, field):\n self.__data.updateEmp(data, new_data, field)", "def update_employee(employee):\n if employee == None:\n raise DataLayerError(message=\"Employee Required\")\n if not isinstance(employee, Employee):\n raise DataLayerError(\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get all the projects of an employee. IMPORTANT: not all fields will be completed, only the fields in the project table and those of the activeYears
def get_employeeProjects(self, id):
    from Project import Project
    cursor = self.dbconnect.get_cursor()
    cursor.execute('select project from projectpromotor where employee=%s', (id,))
    projectsId = list()
    for row in cursor:
        projectsId.append(row[0])
    projects = list()
    for projId in projectsId:
        cursor.execute('select * from project where projectID=%s', (projId,))
        # returns exactly one row from the table
        row = cursor.fetchone()
        project = Project(row[0], row[1], row[2], row[3])
        cursor.execute('select year from projectYearConnection where projectID=%s', (projId,))
        years = list()
        for row in cursor:
            years.append(row[0])
        project.activeYear = years
        projects.append(project)
    return projects
[ "def getProjects(self):\n getParams = {'active':1,'account__active':1}\n return self._getJSONResponse('project', getParams)", "def get_projects():\n if current_user.get_id() is None:\n return\n with database.engine.begin() as connection:\n result = connection.execute(select(\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The Simple Moving Average (SMA) is calculated by adding the price of an instrument over a number of time periods and then dividing the sum by the number of time periods. The SMA is basically the average price of the given time period, with equal weighting given to the price of each period.

Simple Moving Average: SMA = Sum(Price, n) / n
def SimpleMovingAverage(self, timeperiod=14):
    return ta.SMA(self.data.close, timeperiod)
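A standalone sketch of the same formula with hypothetical prices (no TA-Lib dependency):

prices = [10.0, 11.0, 12.0, 13.0, 14.0]
n = len(prices)
sma = sum(prices) / n  # (10 + 11 + 12 + 13 + 14) / 5 = 12.0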
[ "def SMA(values, n):\n return pd.Series(values).rolling(n).mean()", "def SMA(df, time_period=30):\n close = df['close']\n return talib.SMA(close, timeperiod=time_period)", "def sma(df_prices, i_period):\r\n\r\n i_len = len(df_prices)\r\n assert i_len >= i_period\r\n\r\n df_sma = pd.rolling_mea...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Average True Range is a lagging indicator, used to provide insights into volatility.
def AverageTrueRange(self, timeperiod=14):
    return ta.ATR(self.data.high, self.data.low, self.data.close, timeperiod)
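For reference, a sketch of the true range that ATR averages (hypothetical bar values; this is the standard textbook definition, not taken from this codebase):

prev_close = 10.0
high, low = 11.0, 9.5
true_range = max(high - low, abs(high - prev_close), abs(low - prev_close))
# ATR is then a moving average (here 14 periods) of the true range series.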
[ "def compute_average_true_ranges(context):\n if context.is_debug:\n start_time = time()\n \n rolling_window = 21\n moving_average = 20\n \n for market in context.prices.items:\n context.average_true_range[market] = ATR(\n context.prices[market].high[-rolling_window:],\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Starting at the current column header, shift to the right col_shift times
def get_header(col_current, col_shift):
    header = col_current
    for i in range(col_shift):
        header = header.right
    return header
[ "def _pad_columns(self):\n previous_row = self.previous\n\n if previous_row is None:\n # Start of layout; don't need to pad columns\n return\n\n while len(previous_row.end) < len(self.start):\n previous_row.end.append(set())\n\n while len(previous_row.end...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove the specified column header from the header chain. All rows that appear in this column are also removed.
def remove_col(self, col_header):
    # Remove the column header from the header chain
    col_header.right.left = col_header.left
    col_header.left.right = col_header.right
    # Loop down through the column and remove the rows
    cell = col_header.down
    while cell != col_header:
        row_cell = cell.right
        # Move through all cells in this row and update their up/down links
        while row_cell != cell:
            row_cell.down.up = row_cell.up
            row_cell.up.down = row_cell.down
            row_cell.header.sum -= 1
            # Move on to the next cell in the row
            row_cell = row_cell.right
        # Move on to the next row
        cell = cell.down
[ "def unremove_col(self, col_header):\n # Add the column head back into the chain\n col_header.right.left = col_header\n col_header.left.right = col_header\n # Loop up through the column and add the rows back in\n # Doing this in exactly the reverse order of the removing ensures th...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds the specified column header back into the header chain. Also adds back in all rows that this column removed.
def unremove_col(self, col_header):
    # Add the column head back into the chain
    col_header.right.left = col_header
    col_header.left.right = col_header
    # Loop up through the column and add the rows back in
    # Doing this in exactly the reverse order of the removing ensures that we return
    # to the state we were in before the removal
    cell = col_header.up
    while cell != col_header:
        row_cell = cell.left
        # Move through all cells in this row and update their up/down links
        while row_cell != cell:
            row_cell.down.up = row_cell
            row_cell.up.down = row_cell
            row_cell.header.sum += 1
            # Move on to the next cell in the row
            row_cell = row_cell.left
        # Move on to the next row
        cell = cell.up
[ "def remove_col(self, col_header):\n # Remove the column header from the header chain\n col_header.right.left = col_header.left\n col_header.left.right = col_header.right\n # Loop down through the column and remove the rows\n cell = col_header.down\n while cell != col_heade...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find the column that has the minimum number of cells in it, to minimize branching. Returning a column with 0 cells in it is OK; this gets dealt with in the solving loop.
def get_minimum_column(self):
    min_col = self.root.right
    current_col = min_col.right
    while current_col != self.root:
        if current_col.sum < min_col.sum:
            min_col = current_col
        # Move on to the next column
        current_col = current_col.right
    return min_col
[ "def smallest_column_summand(self):\n def summands_domains_sizes(col):\n return prod( [len(x.domain) for x in col[:-1]] )\n\n col_uninstans = [(self.cols[indx], summands_domains_sizes(self.cols[indx]), indx)\n for indx in range(len(self.cols)) if summands_domains_siz...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This method swaps out the numpy instance in the module, should it have one, for the one in the fake instance we have here.
def _swap_numpy(self, module):
    # Check to make sure this is not one of the string options from the YAML
    if not isinstance(module, str):
        if hasattr(module, 'numpy'):  # Check if it has a self.numpy object
            # TODO: Replace this with the correct variable
            module.numpy = self.fake.numpy  # Swap out with the class's instance of numpy
    return module  # Return out the mutated module
[ "def test_inplace_set_value(self):\r\n dtype = self.dtype\r\n if dtype is None:\r\n dtype = theano.config.floatX\r\n\r\n shp = (100/4,1024)#100KB\r\n\r\n x = numpy.zeros(shp, dtype=dtype)\r\n x = self.cast_value(x)\r\n x_shared = self....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This method injects the providers into the faker instance.
def add_providers(self):
    str_providers = PROVIDERS[0]   # Providers, called by name
    live_providers = PROVIDERS[1]  # Providers, provided as a live module
    for providers in PROVIDERS:  # Iterate over the types of providers
        for provider in providers:  # Iterate over all the methods
            # Inject those into faker, and swap the numpy instance
            self.fake.add_faker(self._swap_numpy(provider[0]), provider[1])
[ "def providers(self):\n return [self.fake_provider]", "def setup_provider(self):\n pass", "def faker() -> Faker:\n\n return Faker()", "def fake_init():\n return Faker()", "def create_providers(cls) -> Iterable['BaseProvider']:\n return []", "def test_faker_customization(self):\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a map of duplicates and probabilities according to a pdf, i.e. uniform, and store for reuse on each original event. Current version taken directly from FEBRL; needs review because the number of duplicates stored starts at 2?
def generate_duplicate_pdf(self):
    num_dup = 1
    prob_sum = 0.0
    prob_list = [(num_dup, prob_sum)]
    max_dups = self.duplicate_cfg["Max_duplicate"]
    uniform_val = 1.0 / float(max_dups)
    for i in range(max_dups - 1):
        num_dup += 1
        prob_list.append((num_dup, uniform_val + prob_list[-1][1]))
    return prob_list
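For a hypothetical configuration with Max_duplicate = 4, the cumulative list comes out as:

# uniform_val = 1.0 / 4 = 0.25, so the method returns
# [(1, 0.0), (2, 0.25), (3, 0.5), (4, 0.75)]
# i.e. cumulative probabilities keyed by duplicate count; note the first
# non-zero probability is attached to num_dup = 2, per the docstring's caveat.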
[ "def field_pdf(self):\n num_dup = 1\n prob_sum = 0.0\n prob_list = [(num_dup, prob_sum)]\n max_dups = self.duplicate_cfg[\"Max_duplicate\"]\n uniform_val = 1.0 / float(max_dups)\n self.__logger.debug(\"Maximum number of duplicatesi %d\", max_dups)\n for i in range(ma...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determines whether the original record will be duplicated. Gets the maximum number of duplicated records to generate.
def expect_duplicate(self):
    # Reset everything for this record
    self._expect_duplicate = False
    self.__dupcntr = 0
    self.__maxdup = 0
    # Get the probability to generate duplicate for next record
    if self.fake.random.random() < self.duplicate_cfg["Prob_duplicate"]:
        self._expect_duplicate = True
        self.__maxdup = self.random_select_ndups()
    else:
        self._expect_duplicate = False
        self.__maxdup = 0
    self.__logger.debug("expect_duplicate ndups: %d", self.__maxdup)
[ "def is_duplicate(self):\n return bool(self.duplicated)", "def is_duplicated_data_page(self, new_record_set):\n print(\"def is_duplicated_data_page \" + TimeStamp.timestamp()) #Elina 08-12-2020\n previous_page_links = [old_record for old_record in self.previous_page_of_records]\n\n def...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate the predictions of the original model on training and validation datasets. The original model is also trained if train = True.
def generate_original_preds(train=True):
    x_train, y_train, x_val, y_val, id_to_word = load_data()
    model = create_original_model()
    if train:
        filepath = "models/original.hdf5"
        checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1,
                                     save_best_only=True, mode='max')
        callbacks_list = [checkpoint]
        model.fit(x_train, y_train, validation_data=(x_val, y_val),
                  callbacks=callbacks_list, epochs=epochs, batch_size=batch_size)
    model.load_weights('./models/original.hdf5', by_name=True)
    pred_train = model.predict(x_train, verbose=1, batch_size=1000)
    pred_val = model.predict(x_val, verbose=1, batch_size=1000)
    if not train:
        print('The val accuracy is {}'.format(calculate_acc(pred_val, y_val)))
        print('The train accuracy is {}'.format(calculate_acc(pred_train, y_train)))
    np.save('data/pred_train.npy', pred_train)
    np.save('data/pred_val.npy', pred_val)
[ "def predict(self, X_train, y_train, test_predict=False):\n self.plot_sample = self.data_loader.get_plot_sample(X_train, y_train)\n self.number = int(math.sqrt(self.data_loader.square_number))\n\n for self.model in self.models:\n self.model.fit(X_train, y_train)\n self.mod...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The managed object reference ID of the root resource pool for the cluster.
def resource_pool_id(self) -> str:
    return pulumi.get(self, "resource_pool_id")
[ "def pool_id(self):\n return self._pool_id", "def instance_pool_id(self) -> str:\n return pulumi.get(self, \"instance_pool_id\")", "def cluster_resource_id(self) -> str:\n return pulumi.get(self, \"cluster_resource_id\")", "def managed_object_id(self):\n o = self._data[\"managed_ob...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The `ComputeCluster` data source can be used to discover the ID of a cluster in vSphere. This is useful to fetch the ID of a cluster that you want to use for virtual machine placement via the `VirtualMachine` resource, allowing you to specify the cluster's root resource pool directly versus using the alias available through the `ResourcePool` data source.

> You may also wish to see the `ComputeCluster` resource for more information about clusters and how to manage the resource in this provider.

Example Usage

```python
import pulumi
import pulumi_vsphere as vsphere

datacenter = vsphere.get_datacenter(name="dc01")
compute_cluster = vsphere.get_compute_cluster(name="cluster01",
                                              datacenter_id=datacenter.id)
```
def get_compute_cluster(datacenter_id: Optional[str] = None,
                        name: Optional[str] = None,
                        opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetComputeClusterResult:
    __args__ = dict()
    __args__['datacenterId'] = datacenter_id
    __args__['name'] = name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('vsphere:index/getComputeCluster:getComputeCluster', __args__, opts=opts, typ=GetComputeClusterResult).value
    return AwaitableGetComputeClusterResult(
        datacenter_id=pulumi.get(__ret__, 'datacenter_id'),
        id=pulumi.get(__ret__, 'id'),
        name=pulumi.get(__ret__, 'name'),
        resource_pool_id=pulumi.get(__ret__, 'resource_pool_id'))
[ "def get_cluster_id(options):\n cluster = options.cluster\n datacenter = get_datacenter(options)\n for item in datacenter.hostFolder.childEntity:\n if (item.name == cluster):\n return item._GetMoId()", "def get_cluster_by_id(self, context, cluster_id):", "def cluster_id(self):\n n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test addition for Complex with Complex, complex, int and float
def test_add():
    z = Complex(1, -2)
    w = Complex(1, 1)
    assert (z + w) == Complex(2, -1)
    assert (z + (1+1j)) == Complex(2, -1)
    assert (z + 2) == Complex(3, -2)
    assert (z + 2.0) == Complex(3, -2)
[ "def complex_sum(c_1,c_2):\n return c_1 + c_2", "def __add__(self, other):\n if isinstance(other, float) or isinstance(other, int):\n return Complex(self._reNum + other, self._imNum)\n if isinstance(other, complex):\n return Complex(self._reNum + other.real, self._imNum + ot...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test subtraction for Complex with Complex, complex, int and float
def test_sub():
    z = Complex(1, -2)
    w = Complex(1, 1)
    assert (z - w) == Complex(0, -3)
    assert (z - (1+1j)) == Complex(0, -3)
    assert (z - 2) == Complex(-1, -2)
    assert (z - 2.0) == Complex(-1, -2)
[ "def complex_difference(c_1,c_2):\n return c_1 - c_2", "def test_complex_number():\r\n number1 = ComplexNumber(1, -2)\r\n number2 = ComplexNumber(7.2, 0)\r\n\r\n assert number1.real_part == 1\r\n assert number1.imaginary_part == -2\r\n\r\n assert number2.real_part == 7.2\r\n assert number2.im...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute LDA model & find perplexity, save topics list for coherence calc
def lda_models(doc_term_matrix, n_topics, vectorizer, rand_start):
    perplexity_values = []
    lda_time = []
    topics_list = []
    i = rand_start
    for num_topics in n_topics:
        # create model
        t1 = time.time()
        lda_model = LatentDirichletAllocation(n_components=num_topics,
                                              doc_topic_prior=1/num_topics,
                                              topic_word_prior=0.1,
                                              n_jobs=39,
                                              random_state=i)
        lda_model.fit_transform(doc_term_matrix)
        t2 = time.time()
        lda_time.append(t2 - t1)
        print(f" Model time: {t2 - t1}", flush=True)
        # compute perplexity
        perplexity_values.append(lda_model.bound_)
        # create list of topics
        topics = list_topics(lda_model.components_, vectorizer, top_n=10)
        topics_list.append(topics)
        # output completion message
        i = i + 1
        print('Number of topics =', num_topics, "complete.", flush=True)
    return perplexity_values, lda_time, topics_list
[ "def docs_to_topic(X):\n\n#---------------------------\n def step(k):\n # TODO prior\n #lda = LdaModel(corpus=X, num_topics = k, alpha=prior, random_state=0)\n lda = LdaModel(corpus=X, num_topics = k, random_state=0)\n preds = lda[X]\n argmax = [ max(topics, key=itemgetter(1))[...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Workaround for manage.py migrate complications: run syncdb in case it's our first run, so we make sure the south_migrationhistory table is created; run migrate to apply the latest migrations; then run syncdb again to populate contrib.auth.models.
def smart_syncdb_migrate(self):
    local('python manage.py syncdb')
    local('python manage.py migrate')
    local('python manage.py syncdb --all')
[ "def migrate():\n puts(yellow(\"Run South migrations\"))\n django_manage('migrate')", "def post_migrations(self):", "def migrate():\n run(\"./manage.py migrate\")", "def migrate(self):\n\tpass", "def migrate_database(self):\n\n self.db.migrate_database()", "def syncdb():\n command(\"syn...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
ssum([1,2,3]) 6
ssum([2,3]) 5
ssum([3]) 3
ssum([]) 0
def ssum(L: list) -> int:
    return 0 if not L else L[0] + ssum(L[1:])
[ "def total(ls):\n if ls == []:\n return 0\n return ls[0] + total(ls[1:])", "def zero_sum(list):\n if not list:\n return 0\n else:\n return sum(list)", "def sum(numbers):", "def mysum(items) :", "def example1(S):\n n = len(S)\n total = 0\n for j in range(n): ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
hello hell hel he h

Below are the call-stack values for the commented-out line (otherwise the "tree" flips over):
pars_str1(hello) '\nh\nhe\nhel\nhell\nhello'
pars_str1(hell) '\nh\nhe\nhel\nhell'
pars_str1(hel) '\nh\nhe\nhel'
pars_str1(he) '\nh\nhe'
pars_str1(h) '\nh'
pars_str1() ''

For the uncommented line the "tree" is turned the other way around:
pars_str1(hello) 'hello\nhell\nhel\nhe\nh\n'
pars_str1(hell) 'hell\nhel\nhe\nh\n'
pars_str1(hel) 'hel\nhe\nh\n'
pars_str1(he) 'he\nh\n'
pars_str1(h) 'h\n'
pars_str1() ''
def pars_str1(stroka: str) -> str:
    if stroka:
        return f'{stroka}\n{pars_str1(stroka[:-1])}'
        # return f'{pars_str1(stroka[:-1])}\n{stroka}'
    return ''
[ "def str_pre_regexp(self, anystring, debug_lvl = 0):\n\t\t\n\t\tstrlen = len(anystring)\n\t\t\n\t\t# A) préparation du contenu\n\t\t# --------------------------\n\t\tsubtokens = re_TOUS.findall(anystring)\n\t\t\n\t\t# £TODO now read those params in conf\n\t\tdo_cesure=True\n\t\tdo_espace=True\n\t\tdo_newline=True\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
print_stars(5) '*\n*\n*\n*\n*\n'
print_stars(4) '*\n*\n*\n*\n'
print_stars(3) '*\n*\n*\n'
print_stars(2) '*\n*\n'
print_stars(1) '*\n'
print_stars(0) ''
def print_stars(N: int) -> str:
    # if N:
    #     return f'*\n{print_stars(N-1)}'
    # return ''
    return '' if not N else f'*\n{print_stars(N-1)}'
[ "def print_stars():\n for i in range(2):\n for j in range(35):\n print(\"*\", end = '')\n print('')", "def star():\n print('*', end='')", "def starry_box(phrase):\n numStars = len(phrase) + 4\n print '*' * numStars\n print '*', phrase, '*'\n print '*' * num...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Assert that the first (leftmost) protocol value is correctly fetched from the X-Forwarded-Proto header.
def test_get_protocol_with_more_than_one_value():
    request = Mock(
        headers={"X-Forwarded-Proto": "https,http,http"},
        protocol="http",
    )
    expected = "https"
    protocol = get_browser_protocol(request)
    assert expected == protocol
[ "def test_host_header_set_ok(self):\n requests = [\n \"GET / HTTP/1.1\\r\\nHost: tempesta-tech.com:80\\r\\n\\r\\n\",\n \"GET / HTTP/1.1\\r\\nHost: tempesta-tech.com \\r\\n\\r\\n\",\n \"GET http://tempesta-tech.com/ HTTP/1.1\\r\\nHost: tempesta-tech.com\\r\\n\\r\\n\",\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extract metadata like original image name and crop position from the given file name. Change this function to use a different file name pattern.
import os
from collections import namedtuple

def get_metadata_from_filename(file_name: str) -> namedtuple:
    if os.path.isabs(file_name):
        file_name = os.path.basename(file_name)
    original_image_name = file_name.split('-')[0]
    x_pos = int(file_name.split('.')[-2].split('+')[-2:][0])
    Metadata = namedtuple('Metadata', ['original_image_name', 'x_pos'])
    return Metadata(original_image_name, x_pos)
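A worked example with a hypothetical file name matching the pattern above ('<original>-<suffix>.+<x>+<y>.<ext>'):

meta = get_metadata_from_filename('scan01-crop.+100+200.png')
# file_name.split('-')[0]        -> 'scan01'
# file_name.split('.')[-2]       -> '+100+200'
# .split('+')[-2:][0]            -> '100'
# so meta == Metadata('scan01', 100)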
[ "def parseFilename(fileName):\n # regex to match names like Axis-BaldCA_2018-05-29T16_02_30_129496.jpg\n # and bm-n-mobo-c__2017-06-25z11;53;33.jpg\n regexExpanded = '([A-Za-z0-9-_]+[^_])_+(\\d{4}-\\d\\d-\\d\\d)T(\\d\\d)[_;](\\d\\d)[_;](\\d\\d)'\n # regex to match diff minutes spec for subtracted images...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Insert the crop represented by file_name into this image.
def insert(self, file_path: str, annot_type: str) -> None:
    if self._valid_file_name_regex.match(os.path.basename(file_path)) is None:
        raise ValueError(f'Illegal file name: {os.path.basename(file_path)}')
    x_pos = get_metadata_from_filename(file_path).x_pos
    if x_pos in self._x_positions:
        col = self._cols[x_pos]
    else:
        col = Column()
        self._x_positions.append(x_pos)
        self._x_positions.sort()
    col.insert(Crop(file_path, annot_type))
    self._cols[x_pos] = col
    self.n_cols = len(self._cols)
[ "def _add_crop(self, crop_name, parameters):\n variety_sets = parameters[\"CropParameters\"][\"Varieties\"]\n self._store[crop_name] = variety_sets", "def crop_image(self, image):\n\n pass", "def crop_to_hint(image_file):\n # [START crop_to_hint]\n vects = get_crop_hint(image_file)\n\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove unlabelled columns in [start-col_width, end+col_width].
def _remove_overlaps(self, start, end) -> int:
    start = self._x_positions[start % self.n_cols]
    end = self._x_positions[int(end) % self.n_cols]
    n_removed = 0
    for x, col in self._cols.items():
        if start - self.col_width <= x <= start or end <= x <= end + self.col_width:
            if col.label is None:
                n_removed += col.mark_as('ignore')
    return n_removed
[ "def get_cols_drop():", "def CleanUp(self):\n blankColumnPattern = re.compile('^-*$')\n blankColumns = []\n for columnIndex in range(self.alignment.get_alignment_length() - 1):\n columnValues = self.alignment[:,columnIndex]\n match = blankColumnPattern.search(columnValue...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return index of first unlabelled column after x.
def _next_unlabelled_col(self, x):
    for i in range(self.n_cols):
        idx = (x + i) % self.n_cols
        x_current = self._x_positions[idx]
        if self._cols[x_current].label is None:
            return idx
[ "def XToCol(self, x):\r\n \r\n colLeft = 0\r\n numColumns = self.GetColumnCount()\r\n for col in xrange(numColumns):\r\n \r\n if not self.IsColumnShown(col):\r\n continue \r\n\r\n column = self.GetColumn(col)\r\n\r\n if x < (colLeft ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Move the file associated with this crop to the directory path/annot_type, where annot_type is this crop's annotation type.
def move_to(self, path: str) -> None:
    self._new_path = os.path.join(path, self.annot_type,
                                  os.path.basename(self._file_path))
    os.rename(self._file_path, self._new_path)
    self._file_was_moved = True
[ "def moveFile(self, srcPath):\n # Gets the classification for the file type of the path moved\n classification = self.classifyFile(srcPath)\n\n if classification:\n # Gets the output path given the file type\n newPath = self.outPaths[classification][\"outPath\"] + srcPath....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Undo a former file movement by moving the file back to its origin.
def move_back(self) -> None:
    if self._file_was_moved:
        os.rename(self._new_path, self._file_path)
[ "def undo(backup):\r\n backup.load_backup()\r\n backup.undo_moves()", "def undo():\n\n try:\n my_file.undo()\n except FileNotFoundError:\n print('No file has been read yet')\n except Exception:\n print('You must make an edit to undo')", "def undo():", "def undo_moves(self):...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Mark this column with the provided label. Returns number of labelled crops.
def mark_as(self, label: str) -> int:
    self.label = label
    return len(self._content) // len(ANNOTATIONS)
[ "def mark_label(self, label):\n\t\tcore.BNLowLevelILMarkLabel(self.handle, label.handle)", "def get_count_by_label(self, label=None):\n if label is None:\n return len(self.data)\n else:\n return sum(1 for d in self.data if d.pred == label)", "def inc_label(self):\n sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Move all files of this column to the corresponding directory, if this column is not labeled to be ignored. Returns number of files moved.
def move(self, dry_run: bool) -> int:
    if self.label == 'ignore':
        return 0
    file_counter = 0
    for crop in self._content:
        if not dry_run:
            crop.move_to(self.label)
        file_counter += 1
    return file_counter
[ "def _move_files(self):\n self._move_directory(self._origin, self._destination)\n for directory in self._filesystem.listdir(self._filesystem.join(self._layout_tests_root, PLATFORM_DIRECTORY)):\n self._move_directory(self._filesystem.join(PLATFORM_DIRECTORY, directory, self._origin),\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create metrics of gauge type for filesystem replica link lag, with the local filesystem name, replication direction, remote array name, remote filesystem name and replication status as labels.
def _replica_links_lag(self):
    for f in self.fb.get_filesystem_replica_links():
        self.replica_links_lag.add_metric(
            [f.local_file_system.name, f.direction, f.remote.name,
             f.remote_file_system.name, f.status],
            -1 if f.lag is None else f.lag)
[ "def collect_metrics():\n p = os.path.join(os.sep, \"mnt\", \"glusterfs\")\n mount_stats = os.statvfs(p)\n # block size * total blocks\n total_space = mount_stats.f_blocks * mount_stats.f_bsize\n free_space = mount_stats.f_bfree * mount_stats.f_bsize\n # capsize only operates on i64 values\n us...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Builds and sends an embed message with new commits information.
async def process_push_hook(push: models.PushHook):
    repository = push.repository
    project = push.project
    commit_str = "commit" if push.total_commits_count == 1 else "commits"
    # Show link to commit compare if there's more than one commit
    if push.total_commits_count > 1:
        embed_url = f"{repository.homepage}/compare/{push.before[:7]}...{push.after[:7]}"
    else:
        embed_url = f"{repository.homepage}/commit/{push.after[:7]}"
    if push.before == EMPTY_COMMIT:
        embed = discord.Embed(title=f"[{project.namespace}/{project.name}] New branch created {push.branch}",
                              url=embed_url, colour=discord.Colour.light_grey())
        embed.set_author(name=push.user_name, icon_url=push.user_avatar)
        await send_message(None, embed=embed, avatar_url=push.project.avatar_url)
    elif push.after == EMPTY_COMMIT:
        embed = discord.Embed(title=f"[{project.namespace}/{project.name}] Branch deleted {push.branch}",
                              url=embed_url, colour=discord.Colour.light_grey())
        embed.set_author(name=push.user_name, icon_url=push.user_avatar)
        await send_message(None, embed=embed, avatar_url=push.project.avatar_url)
    # If there are no commits, do not show a message
    if not push.total_commits_count:
        return
    embed = discord.Embed(title=f"[{project.namespace}/{project.name}:{push.branch}] "
                                f"{push.total_commits_count} new {commit_str}",
                          url=embed_url, colour=discord.Colour.blurple())
    embed.set_author(name=push.user_name, icon_url=push.user_avatar)
    embed.description = ""
    for commit in push.commits:
        message = commit.message.splitlines()[0]
        embed.description += f"[`{commit.id[:7]}`]({commit.url}) {message} - {commit.author.name}\n"
    print("Sending push message")
    await send_message(None, embed=embed, avatar_url=push.project.avatar_url)
[ "def command(self, bot, comm, groups):\n commit_message = self.plugin.get_commit_message()\n bot.reply(comm, u'{user}: {msg}', kwvars={'msg': commit_message})", "def commit(self, msg=None):\n self.log.debug(\"committing in git: %s\" % msg)\n completemsg = \"EasyBuild-commit fro...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Builds and sends an embed message with notes information.
async def process_note_hook(data: models.NoteHook):
    note = data.note
    user = data.user
    project = data.project
    colour = discord.Colour.greyple()
    embed = discord.Embed(url=note.url, description=note.description, colour=colour)
    embed.set_author(name=user.username, icon_url=user.avatar_url)
    if data.issue:
        issue = data.issue
        embed.title = f"[{project.namespace}/{project.name}] New comment on issue #{issue.iid}: {issue.title}"
    if data.commit:
        commit = data.commit
        embed.title = f"[{project.namespace}/{project.name}] New comment on commit `{commit.id[:7]}`"
    if data.merge_request:
        merge = data.merge_request
        embed.title = f"[{project.namespace}/{project.name}] New comment on merge request !{merge.iid}: {merge.title}"
    await send_message(None, embed=embed)
[ "def CreateNote(self):", "async def note(self, ctx):\n note_embed = discord.Embed(color=discord.Color.blurple())\n note_embed.add_field(name=\"__**Please Note**__\", value=RULES_NOTE)\n await ctx.send(embed=note_embed)", "async def build_embed(cls, entity, client, event, message_jump_url, d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Builds and sends an embed message with merge request information.
async def process_merge_request_hook(data: models.MergeRequestHook):
    project = data.project
    merge = data.merge_request
    user = data.user
    description = ""
    action = "Issue updated"
    colour = discord.Colour.light_grey()
    if merge.action == "open":
        action = "Merge request opened"
        description = merge.description
        colour = discord.Colour.dark_green()
    elif merge.action == "close":
        action = "Merge request closed"
        colour = discord.Colour.dark_grey()
    embed = discord.Embed(title=f"[{project.namespace}/{project.name}] {action}: !{merge.iid} {merge.title}",
                          url=merge.url, description=description, colour=colour)
    embed.set_author(name=user.username, icon_url=user.avatar_url)
    embed.set_footer(text=f"{merge.source_branch} → {merge.target_branch}")
    await send_message(None, embed=embed)
[ "def build_commit_msg(author, reviewers, source_branch, target_branch,\n commit_message, mp_web_link):\n return \"Merge {} into {} [a={}] [r={}]\\n\\n{}\\n\\nMP: {}\".format(\n source_branch, target_branch, author,\n reviewers, commit_message, mp_web_link)", "async def build_e...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function that represents the window in which Character Mods can be applied.
def chars_window():
    path_dir = r'Sor_Mods_Storage\chars'
    char_mods_dict = sor_module.list_char_mods(path_dir=path_dir)
    # Loading Images to screen
    chars = tk.Toplevel()
    mainTitleImg = ImageTk.PhotoImage(Image.open(r'img/axel_daniel2221.png'))
    imgRandom_label = tk.Label(chars, image=mainTitleImg)
    title = tk.Label(chars, text="Characters Mods")
    comboBox_chars = ttk.Combobox(chars, values=list(char_mods_dict.keys()))

    def apply_char_mod():
        char_selected = comboBox_chars.get()
        result_window = tk.Toplevel()
        value = ''
        if char_selected == '':
            value = f'{value} Please Select a Mod to Apply!'
        else:
            sor_module.apply_mod(mod_dir=path_dir, mod=char_selected, type='chars')
            value = f'Character Mod {char_selected} applied!'
        result_label = tk.Label(result_window, text=value)
        result_label.pack()

    btn_apply = tk.Button(chars, text='Apply', command=apply_char_mod)
    title.grid(row=0, column=0)
    comboBox_chars.grid(row=1, column=0)
    imgRandom_label.grid(row=1, column=1)
    btn_apply.grid(row=2, column=0)
[ "def extend_window(self):\r\n # create a regular expression pattern to find the beginning/end of the sentence\r\n left_pattern = re.compile(r'[A-ZА-Яa-zа-я] [.!?]')\r\n right_pattern = re.compile(r'[.!?] [A-ZА-Яa-zа-я]')\r\n leftcontext = self.string[:self.start+1][::-1]\r\n r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function that represents the window in which Enemy Mods can be applied.
def enemy_window():
    path_dir = r'Sor_Mods_Storage\enemies'
    enemy_mods_dict = sor_module.list_char_mods(path_dir=path_dir)
    # Loading Images to screen
    enemies = tk.Toplevel()
    mainTitleImg = ImageTk.PhotoImage(Image.open(r'img/axel_daniel2221.png'))
    imgRandom_label = tk.Label(enemies, image=mainTitleImg)
    title = tk.Label(enemies, text="Enemies Mods")
    comboBox_enemies = ttk.Combobox(enemies, values=list(enemy_mods_dict.keys()))

    def apply_enemy_mod():
        char_selected = comboBox_enemies.get()
        result_window = tk.Toplevel()
        value = ''
        if char_selected == '':
            value = f'{value} Please Select a Mod to Apply!'
        else:
            sor_module.apply_mod(mod_dir=path_dir, mod=char_selected, type='enemies')
            value = f'Enemy Mod {char_selected} applied!'
        result_label = tk.Label(result_window, text=value)
        result_label.pack()

    btn_apply = tk.Button(enemies, text='Apply', command=apply_enemy_mod)
    title.grid(row=0, column=0)
    comboBox_enemies.grid(row=1, column=0)
    imgRandom_label.grid(row=1, column=1)
    btn_apply.grid(row=2, column=0)
[ "def render_to_window(self):", "def get_window(self): # real signature unknown; restored from __doc__\n pass", "def brain_window(volumenode):\n displaynode = volumenode.GetDisplayNode()\n displaynode.AutoWindowLevelOff()\n displaynode.SetWindowLevel(100, 50)", "def get_classroom_challenge_wind...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function that represents the window in which Stage Mods can be applied.
def stage_window():
    path_dir = r'Sor_Mods_Storage\stages'
    stage_mods_dict = sor_module.list_char_mods(path_dir=path_dir)

    # Loading Images to screen
    stages = tk.Toplevel()
    mainTitleImg = ImageTk.PhotoImage(Image.open(r'img/axel_daniel2221.png'))
    imgRandom_label = tk.Label(stages, image=mainTitleImg)
    title = tk.Label(stages, text="Stage Mods")
    comboBox_chars = ttk.Combobox(stages, values=list(stage_mods_dict.keys()))

    def apply_stage_mod():
        stage_selected = comboBox_chars.get()
        result_window = tk.Toplevel()
        value = ''
        if stage_selected == '':
            value = f'{value} Please Select a Stage Mod to Apply!'
        else:
            sor_module.apply_mod(mod_dir=path_dir, mod=stage_selected, type='stages')
            value = f'Stage Mod {stage_selected} applied!'
        result_label = tk.Label(result_window, text=value)
        result_label.pack()

    btn_apply = tk.Button(stages, text='Apply', command=apply_stage_mod)

    title.grid(row=0, column=0)
    comboBox_chars.grid(row=1, column=0)
    imgRandom_label.grid(row=1, column=1)
    btn_apply.grid(row=2, column=0)
[ "def get_window(self): # real signature unknown; restored from __doc__\n pass", "def box_window(**kwargs):\n return _fixed_window(**kwargs)", "def brain_window(volumenode):\n displaynode = volumenode.GetDisplayNode()\n displaynode.AutoWindowLevelOff()\n displaynode.SetWindowLevel(100, 50)", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Delete the specified intent from your account.
def delete_intent(intent_name):
    try:
        client.get_intent(
            name=intent_name,
            version='$LATEST'  # get_intent takes `version` (not versionOrAlias; cf. get_intent_configuration below)
        )
        answer = input("Do you want to delete %s from your account (Y/y for YES, other NO):" % (intent_name))
        if answer in ['Y', 'y']:
            client.delete_intent(
                name=intent_name
            )
            print("You chose to delete the intent %s, deleted..." % (intent_name))
        else:
            print("You chose not to delete the intent %s, exiting..." % (intent_name))
    except:
        print("There is no intent called %s, exiting..." % (intent_name))
    return
[ "def delete_intent(project_id, intent_id):\n\n intents_client = dialogflow.IntentsClient()\n intent_path = intents_client.intent_path(project_id, intent_id)\n intents_client.delete_intent(request={\"name\": intent_path})", "def delete_intent(project_id, intent_id):\n\n intents_client = dialogflow.Inte...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
demo function to get the intent's latest configuration
def get_intent_configuration(intent_name, version="$LATEST"):
    response = client.get_intent(
        name=intent_name,
        version=version
    )
    return response
[ "def get_config (self):\n # ret = core.adaptation.controller_adapter.domains.components[\n # 'OPENSTACK'].rest_adapter.get_config()\n # print \"Return: \", ret\n # print core.adaptation.controller_adapter.domains.components[\n # 'OPENSTACK'].rest_adapter._response.text\n pass", "def get_conf...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A helper function to print the intent information in a readable format.
def format_print_jobs(intent):
    print("\nintentName: %s" % (intent['name']))
    for k, v in intent.items():
        if k != 'name':
            print("\t" + str(k) + ": " + str(v))
[ "def print_help_classify():", "def cmd_info(self):\n self.cmd_author()\n self.cmd_date()\n log = self.get_log() or ''\n print(len(log))\n print(log)", "def print_animal_info(self):", "def print_meta(self, info):\n pass", "def print_standout(info):\n sys.stdout.write(\"Info: %s\"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Crawls all requested bug data and bug IDs. Saves them in files (bugIDListP.pickle, bugIDList.csv, bugsData.txt) and/or MongoDB collections (BugIDs, BugsData), depending on which are given at initialization.
def get_all_bugs(self) -> List:
    # starting point
    offset = 0
    # list for all bugs
    resultBugList = []
    # list for bug IDs
    bugIDList = []
    # checks if there are still results returned
    notEmpty = True

    # queries in 500 bug steps until the result list is empty
    while notEmpty:
        print("entered")
        # interpretation of result as list plus formatting for eval errors
        result = ast.literal_eval(self.session.get(self.bugURL + "&offset=" + str(offset)).text.
                                  replace('true', 'True').replace('false', 'False').replace('null', 'None'))["bugs"]
        # checks if the query needs to be set again with a new offset
        if result:
            resultBugList += result
        else:
            notEmpty = False

        # gets the ID out of all comments
        partList = [bug["id"] for bug in result]
        bugIDList += partList

        # sets new starting point
        offset += 500

    # inserts bug ids and bugs into db if given one
    if self.mongoDB:
        for id in bugIDList:
            self.mongoDB["BugIDs"].insert_one({"ID": id})
        self.mongoDB["BugsData"].insert_many(resultBugList)

    # creates files for bug ids and bugs if given a folder
    if self.folder:
        # saves bug list as python object
        with open(self.folderpath + "bugIDListP.pickle", "wb") as a:
            pickle.dump(bugIDList, a)
        # saves bug list as csv
        with open(self.folderpath + "bugIDList.csv", "w") as b:
            for id in bugIDList:
                b.write(str(id) + "\n")
        with open(self.folderpath + "bugsData.txt", "w") as c:
            for bug in resultBugList:
                c.write(str(bug) + "\n")

    # returns List object for further processing
    return bugIDList
[ "def run(self, bugzilla_loc, first_bug=1, last_bug=52535, outdir=\"bugs\", outfile=\"bugs.json\"):\n\t\ttry:\n\t\t\tos.makedirs(outdir)\n\t\texcept:\n\t\t\tpass\n\t\tdef get_bug_path(bid):\n\t\t\treturn os.path.join(outdir, \"%s.json\" % (bid))\n\n\t\tfor bug in range(first_bug, last_bug):\n\t\t\tlog.info(\"Grab bu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Crawls for all comments belonging to the bugs in the BugIDList.
def get_all_comments(self, idList: Union[List, str]) -> None:
    # loads pickle list if it is one
    if type(idList) == str and ".pickle" in idList:
        print("pickle load")
        with open(idList, "rb") as f:
            idList = pickle.load(f)
    elif type(idList) == str:
        print("Error: Buglist parameter seems to be neither a List object nor the name of a pickle file "
              "(needs to contain .pickle).")

    # goes through idList
    for id in tqdm(idList):
        # performs request and replaces troublesome parts
        commentsString = self.session.get(self.commentURL.format(id)).text.\
            replace('true', 'True').replace('false', 'False').replace('null', 'None')
        # gets only the comments
        commentsDict = ast.literal_eval(commentsString)["bugs"][str(id)]["comments"]

        # enters comments into db or file if there are any comments for the id
        if commentsDict:
            if self.mongoDB:
                self.mongoDB["Comments"].insert_many(commentsDict)
            if self.folder:
                with open(self.folderpath + "Bugzilla_Comments.txt", 'a') as f:
                    f.write(str(commentsDict) + "\n")
[ "def get_all_comments(self):\n\n scores = []\n posts = [p[1] for p in self.posts]\n total_comments = \"\"\n\n i = j = 1\n # creates a long text block with all the comments from a single post that is either saved in a corpus file or\n # returned in terminal\n for p in...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Crawls for all comments belonging to the bugs in the BugIDList utilizing parallelization.
def get_all_comments_mp(self, idList: Union[List, str], workers: int = 10) -> None:
    # loads pickle list if it is one (parameter renamed from `list`, which shadowed the builtin)
    if type(idList) == str and ".pickle" in idList:
        print("pickle load")
        with open(idList, "rb") as f:
            idList = pickle.load(f)
    elif type(idList) == str:
        print("Error: Buglist parameter seems to be neither a List object nor the name of a pickle file "
              "(needs to contain .pickle).")

    # gets workers and splits list into chunks fitting the worker amount
    pool = Pool(workers)
    idList = np.array(idList)
    lists = np.array_split(idList, workers)

    # each worker crawls for comments
    for sub_list in lists:
        print(sub_list)
        pool.apply_async(self.get_all_comments, (sub_list,))

    pool.close()
    pool.join()
[ "def get_all_comments(self):\n\n scores = []\n posts = [p[1] for p in self.posts]\n total_comments = \"\"\n\n i = j = 1\n # creates a long text block with all the comments from a single post that is either saved in a corpus file or\n # returned in terminal\n for p in...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Download sequencing file from SRA archive. Requires a local install of SRA tools in PATH. Requires verification of filenames and paths.
def download_SRA(SRA):
    print("Downloading SRA archive")
    output = subprocess.run(['prefetch', '-f', 'yes', SRA], stderr=subprocess.STDOUT)

    print("Extracting FASTQ data")
    output = subprocess.run(['fastq-dump', '--gzip', NCBI_DIR + SRA + '.sra'], stderr=subprocess.STDOUT)
[ "def get_sra(accession, temp_folder):\n\n set_up_sra_cache_folder(temp_folder)\n\n logging.info(\"Downloading {} from SRA\".format(accession))\n\n local_path = os.path.join(temp_folder, accession + \".fastq\")\n logging.info(\"Local path: {}\".format(local_path))\n\n # Download via fastq-dump\n lo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Maps reads (Bowtie against rRNA contaminants, then STAR against the reference genome) to extract uniquely mapped reads.
def map_reads(SRA):
    # 1. bowtie to rRNA
    print("Bowtie alignment on contaminant RNA...")
    cmd_bowtie = 'bowtie' + ' ' + '-a' + ' ' + '-p6' + ' ' + '-S' + ' ' + '--un' + ' ' + TMP_DIR+SRA+'_rrnaUnmapped.fastq' + ' ' + BOWTIE_DIR+'/rRNA' + ' ' + TMP_DIR+SRA+'_trimmed.fastq' + ' ' + '|' + ' ' + 'samtools view -@ 6 -bS' + ' ' + '>' + TMP_DIR+SRA+'_trimmed_rrna.bam'
    output = subprocess.run(cmd_bowtie, shell=True)

    # 2. STAR to ref genome
    print("STAR alignment to yeast genome...")
    cmd_STAR = 'STAR --outSAMtype BAM Unsorted --runThreadN 6 --winAnchorMultimapNmax 200 --seedSearchStartLmax 15 --genomeDir' + ' ' + STAR_DIR + ' ' + '--readFilesIn' + ' ' + TMP_DIR+SRA+'_rrnaUnmapped.fastq' + ' ' + '--outFileNamePrefix' + ' ' + TMP_DIR+SRA+'_STAR_'
    output = subprocess.run(cmd_STAR, shell=True)

    # 3. Samtools keep uniquely mapped reads and sort
    print("Samtools to keep uniquely mapped reads and sort...")
    cmd_samtools1 = 'samtools view -@ 6 -b -q 255 -o' + ' ' + TMP_DIR+SRA+'_yeast_uniqueMapped_reads.bam' + ' ' + TMP_DIR+SRA+'_STAR_Aligned.out.bam'
    output = subprocess.run(cmd_samtools1, shell=True)

    cmd_samtools2 = 'samtools sort -@ 6 -o' + ' ' + TMP_DIR+SRA+'_yeast_uniqueMapped_reads_sorted.bam' + ' ' + TMP_DIR+SRA+'_yeast_uniqueMapped_reads.bam'
    output = subprocess.run(cmd_samtools2, shell=True)

    cmd_samtools3 = 'samtools index' + ' ' + TMP_DIR+SRA+'_yeast_uniqueMapped_reads_sorted.bam'
    output = subprocess.run(cmd_samtools3, shell=True)
[ "def find_read_candidates(self, read):\n self.read_allele_dictionary = {}\n ref_alignment_start = read.reference_start\n ref_alignment_stop = self.get_read_stop_position(read)\n # if the region has reached a very high coverage, we are not going to parse through all the reads\n if ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Wrapper to run scikit-ribo from the same pipeline. Requires a local install of the modified scikit-ribo toolbox and of all dependencies of the scikit-ribo environment (see conda environment file).
def run_scikit_ribo(SRA, genome_fasta, genome_gtf):
    # 3. Scikit-ribo index
    print("Building scikit-ribo index")
    if not os.path.exists(SCIKIT_DIR):
        os.mkdir(SCIKIT_DIR)
    cmd_scikit = 'python' + ' ' + SCIKIT_PATH + 'scikit-ribo-build.py' + ' ' + '-g' + ' ' + genome_gtf + ' ' + '-f' + ' ' + genome_fasta + ' ' + '-p' + ' ' + SRA + ' ' + '-o' + SCIKIT_DIR
    output = subprocess.run(cmd_scikit, shell=True)

    print("scikit-ribo-run.py...")
    cmd_scikit = 'python' + ' ' + SCIKIT_PATH + 'scikit-ribo-run.py' + ' ' + '-i' + ' ' + TMP_DIR+SRA+'_yeast_uniqueMapped_reads_sorted.bam' + ' ' + '-f' + ' ' + SCIKIT_DIR + ' ' + '-p' + ' ' + SRA + ' ' + '-o' + ' ' + 'TMP/scikit_'+SRA
    output = subprocess.run(cmd_scikit, shell=True)

    print("plot_ribo_density_dict.py...")
    cmd_scikit = 'python' + ' ' + SCIKIT_PATH + 'plot_ribo_density_dict_noCDT.py' + ' ' + '-i' + ' ' + TMP_DIR+'scikit_'+SRA+'/riboseq_input.txt' + ' ' + '-g' + ' ' + 'all' + ' ' + '-o' + ' ' + TMP_DIR+'scikit_'+SRA  # +'_profiles'
    output = subprocess.run(cmd_scikit, shell=True)
[ "def lab(session):\n session.install(\"-r\", \"requirements.txt\")\n session.run(\"jupyter\", \"lab\")", "def test_toolchain_bootstrap(orchestra: OrchestraShim):\n # Print unsolved graph\n orchestra(\"graph\", \"-b\", \"gcc\")\n\n # Print solved graph\n orchestra(\"graph\", \"-b\", \"-s\", \"gcc...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a dictionary with strand orientations as values and gene IDs as keys.
def gather_strand_by_geneID_dict(genome_gtf):
    strand_by_geneID_dict = {}
    with open(genome_gtf) as f:
        for line in f:
            current_line = line.split('\t')
            if current_line[2] == "CDS":
                current_orf = current_line[8].split(';')[2].split()[1].strip('\"')
                current_strand = current_line[6]
                strand_by_geneID_dict[current_orf] = current_strand
    return strand_by_geneID_dict
[ "def _get_gene_map(self) -> OrderedDict:\n if \"gene\" not in self.data:\n return OrderedDict()\n\n genes: OrderedDict = OrderedDict()\n for idx, genestr in self.data[\"gene\"].items():\n if pd.isnull(genestr):\n continue\n for gene in genestr.spl...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determine relevant entries in crkeng.xml and build a smaller xml file for testing.
def build_test_xml():
    crkeng_file_path = find_latest_xml_file(shared_res_dir / "dictionaries")

    print(f"Building test dictionary files using {crkeng_file_path.name}")

    crkeng_root = ET.parse(str(crkeng_file_path)).getroot()

    # relevant entries in crkeng.xml file we want to determine
    relevant_xml_ls: Set[str] = set()

    xml_ls: Set[str] = set()
    crkeng_entries = crkeng_root.findall(".//e")
    for element in crkeng_entries:
        xml_l = extract_l_str(element)
        xml_ls.add(xml_l)

    test_words = get_test_words()

    print("Analyzing xml l elements and test words")
    word_to_analyses = morphodict.analysis.relaxed_analyzer().bulk_lookup(
        xml_ls | test_words
    )
    print("Analysis done")

    test_word_lemmas: Set[str] = set()

    for test_word in test_words:
        for analysis in word_to_analyses[test_word]:
            lemma = fst_analysis_parser.extract_lemma(analysis)
            if lemma is None:
                logger.warning(
                    "Skipping test word: %s. "
                    "Could not extract lemma from its analysis: %s",
                    test_word,
                    analysis,
                )
                continue
            test_word_lemmas.add(lemma)

    for xml_l in tqdm(xml_ls, desc="screening relevant entries in crkeng.xml"):
        if xml_l in test_words:
            relevant_xml_ls.add(xml_l)
            continue
        for xml_l_analysis in word_to_analyses[xml_l]:
            xml_lemma = partition_analysis(xml_l_analysis)[1]
            for test_word_lemma in test_word_lemmas:
                if test_word_lemma == xml_lemma:
                    relevant_xml_ls.add(xml_l)
                    break

    relevant_crkeng_entries = []
    for element in crkeng_entries:
        xml_l = extract_l_str(element)
        if xml_l in relevant_xml_ls:
            relevant_crkeng_entries.append(element)

    crkeng_xml_utils.write_xml_from_elements(
        list(crkeng_root.findall(".//source")) + relevant_crkeng_entries,
        shared_res_dir / "test_dictionaries" / "crkeng.xml",
    )
[ "def XML_EC_PL(Name, InputsFile, OutputFile, emin,emax):\n\n\t#On commence par afficher ce qu'on fait\r\n\tprint \" Build xml file \"\r\n\r\tprint InputsFile\n\t#ouverture du fichier dans lequel on place le source model\n\ttry:\n\t\tfresult = open(OutputFile, 'w')\n\texcept:\n\t\tprint \"Coucou\"\r\n \t#ecriture...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update the config file
def update(self):
    self.save_config_file()
[ "def conf_update(self):\n pass", "def update_config():\n config.update_config(config.usr_config_file, config.def_config_file)", "def config_edit(self):\n Tools.file_edit(self.config_file_path)", "def updateconfig(self):\n\n # Initialize the yaml data\n ydata = {\"metadata\": sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fill column2 with most likely values based on column1.
def fillgaps(column1, column2, train, test):
    ddict = {}
    d1 = test[[column1, column2]].dropna().values
    d2 = train[[column1, column2]].dropna().values
    c1 = np.array(d1[:, 0].tolist() + d2[:, 0].tolist())
    c2 = np.array(d1[:, 1].tolist() + d2[:, 1].tolist())
    for ic1 in np.unique(c1):
        ddict[ic1] = (c2[c1 == ic1].mean(), c2[c1 == ic1].std())
    full_data = [train, test]
    for dataset in full_data:
        for missing in np.where(np.isnan(dataset[column2]))[0]:
            m, s = ddict[dataset[column1][missing]]
            if s <= 0:
                dataset[column2][missing] = m
            else:
                dataset[column2][missing] = np.random.normal(loc=m, scale=s, size=1)
    return (train, test)
[ "def _fill_col1_val_where_col2_notna(col1, col2, val):\n fill_ser = col1.copy()\n fill_ser[col2.notna()] = val\n return col1.fillna(fill_ser)", "def merge(col1, col2):\n\n new = copy.deepcopy(col1)\n\n if \"isSym\" in col1:\n for x, n in col2[\"has\"].items():\n update.add(new, x,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns true if player has 3 of spades in their hand.
def has_3_spades(self):
    if Card('3', 'spades') in self.hand:
        return True
    return False
[ "def three_of_a_kind(hand):\r\n s = [n for n,h in hand]\r\n s.sort()\r\n status = 0\r\n for i in xrange(len(s)):\r\n if s.count(s[i]) >= 3:\r\n status = 1\r\n break\r\n return bool(status)", "def is_three_of_a_kind(han...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return all components that match the given type and filter
def queryComponent(type=None, filter=None, all=0):
    pass
[ "def find(self, type=None, filter=None):\n if isinstance(type, (tuple, list)):\n types = type\n else:\n types = [type] if type else self._types.keys()\n\n for type in types:\n for addon in self._addons[type].itervalues():\n if filter and not filte...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
checkKey is used to check for authentication
def checkKey(self):
    # TO DO for checking API authentication
    if self.apikey is None:
        return False
    else:
        return True
[ "def check_auth_publickey(self, username, key):\n return AUTH_FAILED", "def api_key_check():\n req_path = request.path\n method_type = request.method\n app.logger.info(\">>> path = {}, method = {}\".format(req_path, method_type))\n\n if not app_props.api_key_check:\n app.logger.debug('>>...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
make the cosmos and DES MEDS files
def make_all_cosmos_des(run, cosmos_config, des_config, catfile, tileid):
    flist = files.get_cosmos_flist(tileid)
    cosmos_meds = files.get_meds_file(run, tileid, 'cosmos', 'i')

    print('making cosmos MEDS:', cosmos_meds)
    maker = CosmosMEDSMaker(
        config_path=cosmos_config,
        catname=catfile,
        flistname=flist,
    )
    maker.write(cosmos_meds)

    for band in ['u', 'g', 'r', 'i', 'z']:
        band_flist = files.get_des_flist(band)
        band_meds = files.get_meds_file(run, tileid, 'des', band)

        print('making DES MEDS:', band_meds)
        maker = CosmosMEDSMaker(
            config_path=des_config,
            catname=cosmos_meds,
            flistname=band_flist,
        )
        maker.write(band_meds)
[ "def writeNMD(filename, modes, atoms, zeros=False):\n\n if not isinstance(modes, (NMA, ModeSet, Mode, Vector)):\n raise TypeError('modes must be NMA, ModeSet, Mode, or Vector, '\n 'not {0}'.format(type(modes)))\n if modes.numAtoms() != atoms.numAtoms():\n raise Exception('...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
write the cutouts for the specified type
def _write_psf_cutouts_hst(self):
    print('writing psf cutouts')

    obj_data = self.obj_data
    psf_data = self.psf_data

    nfile = self.image_info.size
    nobj = obj_data.size

    cutout_hdu = self.fits['psf']

    for iobj in range(nobj):
        if (iobj+1) % 100 == 0:
            print('    %d/%d' % (iobj+1, obj_data.size))

        # HST psf is same for every cutout, in fact ncut should always
        # be 1
        try:
            psf_im = self.psf_data.get_psf(iobj)
        except AttributeError:
            psf_im = None

        ncut = obj_data['ncutout'][iobj]

        for icut in range(ncut):
            if psf_im is None:
                row = obj_data['orig_row'][iobj, icut]
                col = obj_data['orig_col'][iobj, icut]
                file_id = obj_data['file_id'][iobj, icut]

                p = self.psf_data[file_id]
                psf_im = p.get_rec(row, col)

            expected_psf_shape = (
                obj_data['psf_row_size'][iobj, icut],
                obj_data['psf_col_size'][iobj, icut],
            )

            file_id = obj_data['file_id'][iobj, icut]

            row = obj_data['orig_row'][iobj, icut]
            col = obj_data['orig_col'][iobj, icut]
            start_row = obj_data['psf_start_row'][iobj, icut]

            if psf_im.shape != expected_psf_shape:
                raise ValueError("psf size mismatch, expected %s "
                                 "got %s" % (expected_psf_shape, psf_im.shape))

            cutout_hdu.write(psf_im, start=start_row)
[ "def _write_cutout(self,\n iobj,\n icut,\n cutout_hdu,\n im_data,\n cutout_type):\n\n if cutout_type=='psf':\n start_row = self.obj_data['psf_start_row'][iobj,icut]\n else:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
set the box sizes and start row for each psf image
def _set_psf_layout_hst(self):
    print('setting psf layout for HST')

    obj_data = self.obj_data

    total_psf_pixels = 0
    psf_start_row = 0

    for iobj in range(obj_data.size):
        if (iobj+1) % 100 == 0:
            print('    %d/%d' % (iobj+1, obj_data.size))

        # note assuming same psf for all "epochs"
        psf_im = self.psf_data.get_psf(iobj)

        psf_shape = psf_im.shape
        psf_npix = psf_im.size

        cen = (np.array(psf_shape)-1.0)/2.0

        # we will expand the psfs
        for icut in range(obj_data['ncutout'][iobj]):
            obj_data['psf_row_size'][iobj, icut] = psf_shape[0]
            obj_data['psf_col_size'][iobj, icut] = psf_shape[1]
            obj_data['psf_cutout_row'][iobj, icut] = cen[0]
            obj_data['psf_cutout_col'][iobj, icut] = cen[1]
            obj_data['psf_start_row'][iobj, icut] = psf_start_row

            psf_start_row += psf_npix
            total_psf_pixels += psf_npix

    self.total_psf_pixels = total_psf_pixels
[ "def _set_psf_layout(self):\n\n obj_data=self.obj_data\n\n producer = self.producer\n\n cat = producer.getCatalog()\n stamps = producer.getStamps(cat[0])\n\n sdata = stamps[0]\n psfobj=sdata['stamp'].getPsf()\n psfim = psfobj.computeKernelImage(sdata['image_pos']).ar...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
set the box sizes and start row for each psf image
def _set_psf_layout_psfex(self):
    print('setting psf layout for PSFEx')

    obj_data = self.obj_data
    psf_data = self.psf_data

    total_psf_pixels = 0
    #psf_npix = psf_size*psf_size

    psf_start_row = 0
    for iobj in range(obj_data.size):
        for icut in range(obj_data['ncutout'][iobj]):

            row = obj_data['orig_row'][iobj, icut]
            col = obj_data['orig_col'][iobj, icut]
            file_id = obj_data['file_id'][iobj, icut]

            p = psf_data[file_id]

            pim = p.get_rec(row, col)
            cen = p.get_center(row, col)

            psf_shape = pim.shape
            psf_npix = pim.size

            obj_data['psf_row_size'][iobj, icut] = psf_shape[0]
            obj_data['psf_col_size'][iobj, icut] = psf_shape[1]
            obj_data['psf_cutout_row'][iobj, icut] = cen[0]
            obj_data['psf_cutout_col'][iobj, icut] = cen[1]
            obj_data['psf_start_row'][iobj, icut] = psf_start_row

            psf_start_row += psf_npix
            total_psf_pixels += psf_npix

    self.total_psf_pixels = total_psf_pixels
[ "def _set_psf_layout(self):\n\n obj_data=self.obj_data\n\n producer = self.producer\n\n cat = producer.getCatalog()\n stamps = producer.getStamps(cat[0])\n\n sdata = stamps[0]\n psfobj=sdata['stamp'].getPsf()\n psfim = psfobj.computeKernelImage(sdata['image_pos']).ar...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
read the cosmos catalog
def _read_catalog(self, catname):
    print('loading catalog:', catname)
    with fitsio.FITS(catname, lower=True) as fits:
        #cat = fits[1][100000:110000]
        if 'object_data' in fits:
            print('reading from MEDS object data')
            ext = 'object_data'
        else:
            ext = 1
        cat = fits[ext][:]

        # one cut here based on if we matched to the galsim cat
        w, = np.where(
            #(cat['mu_class'] < 3)
            #&
            #(cat['mask']==0)
            #&
            (cat['gscosmos_index'] >= 0)
        )
        print('initial cuts %d/%d %g%%' % (w.size, cat.size, w.size/cat.size*100))
        cat = cat[w]

    return cat
[ "def read_catalog(catalog):\n with open(catalog, \"r\") as f:\n header = f.readline()\n if header.startswith('#EventID | Time | Latitude | Longitude | Depth/km'):\n catalog = _read_iris(f)\n elif header.startswith('time, latitude, longitude, depth, depthUnits, magnitude'):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add fields from the cat; some will not be in the odata, but some will. When copy is True, we will copy over the ones that are in both, in some cases.
def _add_cat_fields(self, odata, copy=True):
    # these are required fields from get_meds_output_dtype
    # that we have put into the input catalog
    always_copy = [
        'id',
        'ra',
        'dec',
    ]

    cat = self.cat_orig
    add_dt = []
    for d in cat.dtype.descr:
        n = d[0]
        if n not in odata.dtype.names:
            add_dt.append(d)

    obj_data = eu.numpy_util.add_fields(
        odata,
        add_dt,
    )

    if copy:
        for n in always_copy:
            obj_data[n] = cat[n]

        for d in add_dt:
            n = d[0]
            if n in always_copy:
                continue

            # don't clobber things that should be left at
            # their default values
            if n not in odata.dtype.names:
                obj_data[n] = cat[n]

    return obj_data
[ "def copyObjAttr(obj1,obj2,filterAttr=[],debug=False):\n\t\n\t\n\t#Going through all attributes blank object\n\tfor item in vars(obj1):\n\t\tif item not in filterAttr:\n\t\t\tsetattr(obj2, str(item), vars(obj1)[str(item)])\n\t\t\n\treturn obj2", "def copyAttributes(self, other, add_nxpars=False):\n import ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
make a new struct with ncutout-sized arrays based on the actual maximum ncutout
def _make_resized_data(self, odata):
    nmax = odata['file_id'].shape[1]
    new_nmax = odata['ncutout'].max()
    if new_nmax < 2:
        new_nmax = 2
    temp_obj_data = odata

    nobj = temp_obj_data.size

    new_data = meds.util.get_meds_output_struct(
        nobj,
        new_nmax,
        extra_fields=self._get_fields(new_nmax),
    )
    new_data = self._add_cat_fields(new_data, copy=False)

    for name in new_data.dtype.names:
        if name in temp_obj_data.dtype.names:
            shape = new_data[name].shape
            lshape = len(shape)
            if lshape > 1 and shape[1] == new_nmax:
                new_data[name][:, :] = temp_obj_data[name][:, 0:new_nmax]
            else:
                new_data[name][:] = temp_obj_data[name][:]

    del temp_obj_data

    return new_data
[ "def maxout_var(self, rv):\r\n #self.cpt += 0.00002\r\n exp_len = int(len(self.cpt)/self.card[rv])\r\n new_cpt = np.zeros(exp_len)\r\n\r\n rv_card = self.card[rv]\r\n rv_stride = self.stride[rv]\r\n\r\n k=0\r\n p = np.prod([self.card[r] for r in self.scope if self.st...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get box sizes that are either 2**N or 3*2**N, within the limits set by the user
def _get_box_sizes(self, image_info, cat):
    file_id = 0
    impath = image_info['image_path'][file_id].strip()
    ext = image_info['image_ext'][file_id]
    wcs_data = fitsio.read_header(impath, ext=ext)
    wcs = eu.wcsutil.WCS(wcs_data)

    jacob = wcs.get_jacobian(100, 100)
    dudcol, dudrow, dvdcol, dvdrow = jacob

    det = dvdrow*dudcol - dvdcol*dudrow
    pixel_scale = np.sqrt(abs(det))
    print('found pixel scale:', pixel_scale)
    box_size = cat['box_size_arcsec']/pixel_scale

    # clip to range
    box_size.clip(
        min=self['min_box_size'],
        max=self['max_box_size'],
        out=box_size,
    )
    box_size = box_size.astype('i4')

    w, = np.where(((box_size % 2) != 0))
    if w.size > 0:
        box_size[w] += 1

    return box_size
[ "def create_compute_box_size(self):\n def compute_best_size_for(dim):\n size = ((self.element_space[dim]-1)//self.box_space[dim]) + 1\n size += 2 * self.ghost_space[dim]\n while size % Level.BOX_ALIGNMENTS[dim]:\n size += 1\n return size\n\n r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get the image info structure. Set default scale to 1.0. The other fields are 0 for numbers, or blank for strings.
def get_image_info_struct(nimage, path_len,
                          image_id_len=None,
                          wcs_len=None,
                          ext_len=None,
                          extra_dtype=None):
    dt = get_image_info_dtype(
        path_len,
        image_id_len=image_id_len,
        wcs_len=wcs_len,
        ext_len=ext_len,
        extra_dtype=extra_dtype,
    )

    data = np.zeros(nimage, dtype=dt)

    data['scale'] = 1.0

    return data
[ "def _get_meta_dict(self, img) -> dict:\n return {\"format\": img.format, \"mode\": img.mode, \"width\": img.width, \"height\": img.height}", "def image_info(img):\n\tprint(img.format)\n\tprint(img.size)\n\tprint(img.mode)", "def image_data_info(page):\n xObject = page['/Resources']['/XObject'].getObj...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get the image_info dtype for the specified path string length and wcs string length
def get_image_info_dtype(path_len,
                         image_id_len=None,
                         wcs_len=None,
                         ext_len=None,
                         extra_dtype=None):

    path_fmt = 'U%d' % path_len

    if image_id_len is None:
        image_id_descr = 'i8'
    else:
        image_id_descr = 'U%d' % image_id_len

    if ext_len is not None:
        ext_descr = 'U%d' % ext_len
    else:
        ext_descr = 'i2'

    dt = []
    for ctype in IMAGE_INFO_TYPES:
        path_name = '%s_path' % ctype
        ext_name = '%s_ext' % ctype

        dt += [
            (path_name, path_fmt),
            (ext_name, ext_descr),
        ]

    dt += [
        ('image_id', image_id_descr),
        ('image_flags', 'i8'),
        ('magzp', 'f4'),
        ('scale', 'f4'),
        ('position_offset', 'f8'),
    ]
    if wcs_len is not None:
        wcs_fmt = 'U%d' % wcs_len
        dt += [
            ('wcs', wcs_fmt),
        ]

    if extra_dtype is not None:
        dt += extra_dtype

    return dt
[ "def get_image_info_struct(nimage, path_len,\n image_id_len=None,\n wcs_len=None,\n ext_len=None,\n extra_dtype=None):\n dt = get_image_info_dtype(\n path_len,\n image_id_len=image_id_len,\n w...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Move files out of subdirectories in the current working directory.
def move_file():
    # print("\n".join(os.listdir(filepath)))
    # folders = [os.path.join(filepath, fld) for fld in os.listdir(filepath)]
    # print(filepath + ":\n    " + "\n    ".join(folders))
    folders = filter(os.path.isdir, os.listdir(u"."))

    # print("Sub-folders: ", u"\n".join(folders))
    for folder in folders:
        files = [os.path.join(folder, fn) for fn in os.listdir(folder)]
        files = filter(os.path.isfile, files)

        for fn in files:
            _, filename = os.path.split(fn)
            shutil.move(fn, filename)

        assert 0 == len(os.listdir(folder))
[ "def _move_files(self):\n self._move_directory(self._origin, self._destination)\n for directory in self._filesystem.listdir(self._filesystem.join(self._layout_tests_root, PLATFORM_DIRECTORY)):\n self._move_directory(self._filesystem.join(PLATFORM_DIRECTORY, directory, self._origin),\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find duplications in submitted homework.
def find_duplication(homework):
    re_id = re.compile(r'(?P<stuid>[0-9]{10,11})')
    dup_check = dict()

    with open(homework, 'r') as data:
        lines = data.readlines()
        for ln in lines:
            dt = ln.split()
            csum, right = dt[0], dt[1]
            if csum not in dup_check:
                dup_check[csum] = list()
            m = re_id.search(right)
            if m is not None:
                stu_id = m.group('stuid')
                dup_check[csum].append(stu_id)

    # filter passes a single (key, value) tuple per item,
    # so the predicate must unpack it itself
    dup_check = filter(lambda kv: len(kv[1]) > 1, dup_check.items())
    dup_check = [(key, sorted(val)) for key, val in dup_check]
    return dup_check
[ "def _remove_dupes(recs, input, bad_movies, hist_list=[], feedback_list=[]):\n all_rated = input + bad_movies + hist_list + feedback_list\n nonlocal dupes\n dupes = [x for x in recs if x[0] in input]\n return [x for x in recs if x[0] not in all_rated]", "def find_duplic...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Display the duplication check results.
def display_dup(dup_result):
    lines = [k + ": " + ", ".join(v) for k, v in dup_result]
    return lines
[ "def show_duplicates(l,name):\n\tdups = [x for x,y in collections.Counter(l).items() if y > 1]\n\tif len(dups) > 0:\n\t\tprint str(name),': duplicate combination(s) found for the following:'\n\t\tfor d in dups:\n\t\t\tprint_red('\\t',str(d))\n\t\treturn False\n\treturn True", "def printOverviewDuplicates(self):\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a response model to pass to the presenter
def _create_response_model(self, data):
    pass
[ "def get_response_model_ctor(self):\n return self._response_model_ctor", "def create_json_from_model(self):\n json = {\n \"enableAutoReply\": self.enable_auto_reply,\n \"responseSubject\": self.response_subject,\n \"responseBodyPlainText\": self.response_body_plain_text,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
takes in a string of columns and places alternating checkers in those columns, starting with 'X'. For example, call b.setBoard('012345') to see 'X's and 'O's alternate on the bottom row, or b.setBoard('000000') to see them alternate in the left column. moveString must be a string of integers.
def setBoard(self, moveString):
    nextCh = 'X'   # start by playing 'X'
    for colString in moveString:
        col = int(colString)
        if 0 <= col < self.__width:  # valid columns are 0 .. width-1
            self.addMove(col, nextCh)
        if nextCh == 'X':
            nextCh = 'O'
        else:
            nextCh = 'X'
[ "def set_board(self, move_string):\r\n next_checker = 'X' # we starten door een 'X' te spelen\r\n for col_char in move_string:\r\n col = int(col_char)\r\n if 0 <= col <= self.width:\r\n self.add_move(col, next_checker)\r\n if next_checker == 'X':\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks if AutoML can be loaded from a folder
def _check_can_load(self):
    if self.results_path is not None:
        # Dir exists and can be loaded
        if os.path.exists(self.results_path) and os.path.exists(
            os.path.join(self.results_path, "params.json")
        ):
            self.load(self.results_path)
            self._results_path = self.results_path
[ "def can_load_directory(cls, directory):\n return directory.file(\"__plugin__.py\").exists()", "def can_load(cls, filename):\n return False", "def load_files(self):\n\n # Load YAML files\n try:\n self.kprop = safe_load(open(\"resources/default.yml\", 'r'))\n sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Append error message to errors.md file.
def _update_errors_report(self, model_name, error_msg):
    errors_filename = os.path.join(self._get_results_path(), "errors.md")
    with open(errors_filename, "a") as fout:
        self.verbose_print(f"There was an error during {model_name} training.")
        self.verbose_print(f"Please check {errors_filename} for details.")
        fout.write(f"## Error for {model_name}\n\n")
        fout.write(error_msg)
        link = "https://github.com/mljar/mljar-supervised/issues/new"
        fout.write(
            f"\n\nPlease set a GitHub issue with above error message at: {link}"
        )
        fout.write("\n\n")
[ "def add_error(self, reference_id, error):\n\n with open('runReport.txt', 'a') as report:\n try:\n report.write(\"\\nError: \" + self.domain + \" \" + reference_id + \": \" + error)\n except Exception:\n report.write(\"\\nError: \" + self.domain + \" \" + r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the current model_time_limit
def _get_model_time_limit(self):
    self._validate_model_time_limit()
    return deepcopy(self.model_time_limit)
[ "def time_limit(self) -> float:\n return self._time_limit", "def time_limit(self) -> int:\n return self._time_limit", "def time_limit(self):\n all_time_limit_updates = self.updates.exclude(\n time_limit_delta=timedelta())\n return self.time_limit_as_of_update(\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the current algorithms. If "auto", they are determined based on the mode.
def _get_algorithms(self):
    self._validate_algorithms()
    if self.algorithms == "auto":
        if self._get_mode() == "Explain":
            return [
                "Baseline",
                "Linear",
                "Decision Tree",
                "Random Forest",
                "Xgboost",
                "Neural Network",
            ]
        if self._get_mode() == "Perform":
            return [
                "Linear",
                "Random Forest",
                "LightGBM",
                "Xgboost",
                "CatBoost",
                "Neural Network",
            ]
        if self._get_mode() == "Compete":
            return [
                "Decision Tree",
                "Linear",
                "Random Forest",
                "Extra Trees",
                "LightGBM",
                "Xgboost",
                "CatBoost",
                "Neural Network",
                "Nearest Neighbors",
            ]
        if self._get_mode() == "Optuna":
            return [
                "Random Forest",
                "Extra Trees",
                "LightGBM",
                "Xgboost",
                "CatBoost",
                "Neural Network",
            ]
    else:
        return deepcopy(self.algorithms)
[ "def algorithms():\n algorith_paradigms = ['Divide-and-conquer', 'Backtrackig', 'Greedy-Algorithms', 'Dynamic-programming']\n return algorith_paradigms", "def algorithms(self):\n return [algorithm for algorithm in self.algorithm_handles]", "def __get_algorithms():\n return hashlib.algorithms...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the current train_ensemble
def _get_train_ensemble(self):
    self._validate_train_ensemble()
    return deepcopy(self.train_ensemble)
[ "def ensemble(self):\n return self._ensemble", "def train(self):\n return self._train", "def train_environment(self):\n return self._train_environment", "def getTrainSet(self):\r\n return self.fTrainData", "def training_set(self):\n return self._training_set", "def getTr...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the current stack_models
def _get_stack_models(self):
    self._validate_stack_models()
    if self.stack_models == "auto":
        val = self._get_validation_strategy()
        if val.get("validation_type", "") == "custom":
            return False
        return True if self.mode in ["Compete", "Optuna"] else False
    else:
        return deepcopy(self.stack_models)
[ "def get_models(self):\n return self.ensemble.get_models()", "def get_models(self):\n self.load()\n return self._models", "def models(self):\n return self.config.models()", "def get_models(self):\n\n base = self.get_base()\n return getattr(base, self.resource).json[\"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the current validation_strategy
def _get_validation_strategy(self):
    strat = {}
    self._validate_validation_strategy()
    if self.validation_strategy == "auto":
        if self._get_mode() == "Explain":
            strat = {
                "validation_type": "split",
                "train_ratio": 0.75,
                "shuffle": True,
                "stratify": True,
            }
        elif self._get_mode() == "Perform":
            strat = {
                "validation_type": "kfold",
                "k_folds": 5,
                "shuffle": True,
                "stratify": True,
            }
        elif self._get_mode() in ["Compete", "Optuna"]:
            strat = {
                "validation_type": "kfold",
                "k_folds": 10,
                "shuffle": True,
                "stratify": True,
            }
        if self._get_ml_task() == REGRESSION:
            if "stratify" in strat:
                # it's better to always check
                # before delete (trust me)
                del strat["stratify"]
        return strat
    else:
        strat = deepcopy(self.validation_strategy)
        if self._get_ml_task() == REGRESSION:
            if "stratify" in strat:
                del strat["stratify"]
        return strat
[ "def getValidation(self):\n return self.validation_config", "def get_validation_rule(self):\n return self.validation_rule", "def get_strategy(self):\n return self.strategy", "def validator(self):\n return self._validator", "def paramValidationPref(self):\n # If the level o...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }