Dataset Viewer

Columns: blob_id (string, length 40), language (string, 1 class), repo_name (string, length 5-133), path (string, length 2-333), src_encoding (string, 30 classes), length_bytes (int64, 18-5.47M), score (float64, 2.52-5.81), int_score (int64, 3-5), detected_licenses (list, length 0-67), license_type (string, 2 classes), text (string, length 12-5.47M), download_success (bool, 1 class).

blob_id | language | repo_name | path | src_encoding | length_bytes | score | int_score | detected_licenses | license_type | text | download_success |
---|---|---|---|---|---|---|---|---|---|---|---|
f76b3b840b3db0b3d8c980dc620327745383a006
|
Python
|
17BTEC005/virat-kohli
|
/rohit sharma.py
|
UTF-8
| 135 | 3.03125 | 3 |
[] |
no_license
|
# multiply two numbers
a = 10
b = 20
c = a * b
print("multiplication of 10 and 20", a, "*", b, "=", c)
| true |
8b1781f3d1abb887e332ddcd453bef5c9b05fa8d
|
Python
|
ravitejavemuri/ML-Algorithms
|
/K-means/k-means.py
|
UTF-8
| 2,282 | 3.546875 | 4 |
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 18 13:13:09 2019
Pseudocode:
1. Get a dataset.
2. Arbitrarily choose K centroids at random.
3. Assign each data point to the closest centroid/cluster by distance.
4. Compute the mean of the data points in each cluster, excluding the centroids.
5. The mean becomes the new centroid; repeat from step 3 until the centroids stop changing.
@author: Ravi
"""
from copy import deepcopy
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
plt.rcParams['figure.figsize'] = (16, 9)
plt.style.use('ggplot')
# Importing the data sets
data = pd.read_csv('data.csv')
#print(data.shape)
data.head()
#Plotting the values
d1= data['Dset1'].values
d2= data['Dset2'].values
X=np.array(list(zip(d1,d2)))
print('X is', X)
plt.scatter(d1, d2, c='blue', s=7)
#Distance
def dist(a, b, ax=1):
return np.linalg.norm(a-b, axis=ax)
#Picking centroids at random
k=4
C_x = np.random.randint(0, np.max(X)-20, size=k)
C_y = np.random.randint(0, np.max(X)-20, size=k)
C= np.array(list(zip(C_x, C_y)), dtype=np.float32)
print(C)
# plotting with centroids
plt.scatter(d1,d2, c ='#050505', s=7)
plt.scatter(C_x, C_y, marker='*', s=200, c='g')
#Storing the value of centroids when it updates
C_old = np.zeros(C.shape)
#print(C_old)
clusters = np.zeros(len(X))
#distance between the new centroid and old centroid
Cdist = dist(C,C_old, None)
while Cdist != 0 :
for i in range(len(X)):
print( 'x i is',X[i])
distances = dist(X[i], C)
print(distances)
cluster = np.argmin(distances)
clusters[i] = cluster
#storing the old centroid
C_old = deepcopy(C)
#finding the new centroids by taking the average value
for i in range(k):
points = [X[j] for j in range(len(X)) if clusters[j] == i]
#print(points)
C[i] = np.mean(points, axis=0)
Cdist = dist(C, C_old, None)
colors = ['r','g','b','y','c','m']
fig, ax = plt.subplots()
for i in range(k):
points = np.array([X[j] for j in range(len(X))if clusters[j] == i])
ax.scatter(points[:, 0], points[:,1], s=7, c=colors[i])
ax.scatter(C[:,0], C[:, 1], marker='*', s=200, c='#050505')
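# Editor's sketch (not part of the original script): cross-check the manual loop
# with scikit-learn, assuming it is installed alongside numpy/pandas.
# from sklearn.cluster import KMeans
# km = KMeans(n_clusters=k, n_init=10).fit(X)
# print(km.cluster_centers_)  # should land close to the centroids in C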
| true |
cb74f99d17f6f3e2d592fe812390f6036acfd879
|
Python
|
Elcoss/Python-Curso-em-Video
|
/Mundo1/desafio6.py
|
UTF-8
| 159 | 3.59375 | 4 |
[] |
no_license
|
n1 = int(input('enter your number: '))
n2 = n1 * 2
n3 = n1 * 3
n4 = n1 ** (1 / 2)
print(f'the double of your number is {n2}, the triple is {n3}, and its square root is {n4}')
| true |
e1c8441b35d68c6c440ce5d1359a7d254a953005
|
Python
|
ursho/Project-Euler
|
/tests/testFibonacciGenerator.py
|
UTF-8
| 1,217 | 3.625 | 4 |
[] |
no_license
|
import unittest
from problems.FibonacciGenerator import FibonacciGenerator
class TestFibonacciGenerator(unittest.TestCase):
def test_Fibonacci(self):
self.assertEqual(0, fibonacci(1))
self.assertEqual(1, fibonacci(2))
self.assertEqual(1, fibonacci(3))
self.assertEqual(2, fibonacci(4))
self.assertEqual(3, fibonacci(5))
self.assertEqual(5, fibonacci(6))
def test_nextFibonacci(self):
fg = FibonacciGenerator()
self.assertEqual(fibonacci(1), fg.next())
self.assertEqual(fibonacci(2), fg.next())
self.assertEqual(fibonacci(3), fg.next())
self.assertEqual(fibonacci(4), fg.next())
self.assertEqual(fibonacci(5), fg.next())
self.assertEqual(fibonacci(6), fg.next())
def test_fibonacciOverflow(self):
with self.assertRaises(OverflowError):
list(FibonacciGenerator())
def fibonacci(n):
if n <= 0:
print("Incorrect input")
# First Fibonacci number is 0
elif n == 1:
return 0
# Second Fibonacci number is 1
elif n == 2:
return 1
else:
return fibonacci(n - 1) + fibonacci(n - 2)
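# Editor's sketch (not part of the original file): the naive recursion above is
# exponential in n; a memoized version returns the same values in linear time.
from functools import lru_cache
@lru_cache(maxsize=None)
def fibonacci_memo(n):
    # same 1-based indexing as fibonacci() above: fibonacci_memo(1) == 0
    if n <= 2:
        return n - 1
    return fibonacci_memo(n - 1) + fibonacci_memo(n - 2)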
if __name__ == '__main__':
unittest.main()
| true |
3707c418d6dce0abd30f17853a48ef57190a93fd
|
Python
|
j1fig/euler
|
/16/main.py
|
UTF-8
| 230 | 2.609375 | 3 |
[] |
no_license
|
import sys
import cProfile
def brute(arg):
    # sum the decimal digits of 2**arg
    return reduce(lambda x, y: x + int(y), str(2**arg), 0)
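# Worked example (editor's note): brute(15) -> 2**15 = 32768 -> 3+2+7+6+8 = 26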
if __name__ == "__main__":
arg = int(sys.argv[1])
def main():
print brute(arg)
cProfile.run('main()')
| true |
cc6979eb902a306740989885480d0063a98bc1fd
|
Python
|
SUREYAPRAGAASH09/ArrayQuestions
|
/25.2ndSmallestNumber/2ndSmallestNumber.py
|
UTF-8
| 261 | 3.109375 | 3 |
[] |
no_license
|
import find_min
def secondsmallestNumber(array):
    smallest = find_min.findMin(array)
    array.remove(smallest)  # drop a single occurrence of the minimum
    second = find_min.findMin(array)
    return second
array = [3,1,6,9,3]
print(secondsmallestNumber(array))
| true |
dddccee9cd8d45f0702060530c95388c1656c218
|
Python
|
Catxiaobai/project
|
/lxd_Safety(out)/graphTraversal-submit2/mymodules/sclexer.py
|
UTF-8
| 2,844 | 2.671875 | 3 |
[] |
no_license
|
# A lexer for a simple C language
import lex
#from ply import *
reserved = (
# 'AUTO', 'BREAK', 'CASE', 'CHAR', 'CONST', 'CONTINUE', 'DEFAULT', 'DO', 'DOUBLE',
# 'ELSE', 'ENUM', 'EXTERN', 'FLOAT', 'FOR', 'GOTO', 'IF', 'INT', 'LONG', 'REGISTER',
# 'RETURN', 'SHORT', 'SIGNED', 'SIZEOF', 'STATIC', 'STRUCT', 'SWITCH', 'TYPEDEF',
# 'UNION', 'UNSIGNED', 'VOID', 'VOLATILE', 'WHILE',
# 'READ','PRINT','WRITE',
'TRUE','FALSE',
)
tokens = reserved + (
# Literals (identifier, integer constant, float constant, string constant, char const)
'ID', 'TYPEID', 'ICONST', 'FCONST', 'SCONST', 'CCONST',
# Operators (+,-,*,/,%,|,&,~,^,<<,>>, ||, &&, !, <, <=, >, >=, ==, !=)
'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'MOD',
'OR', 'AND', 'NOT', 'XOR', 'LSHIFT', 'RSHIFT',
'LOR', 'LAND', 'LNOT',
'LT', 'LE', 'GT', 'GE', 'EQ', 'NE',
# Assignment (=)
'EQUALS',
# Delimiters ( ) [ ] { } , . ; :
'LPAREN', 'RPAREN',
'LBRACKET', 'RBRACKET',
'LBRACE', 'RBRACE',
'COMMA', 'PERIOD', 'SEMI', 'COLON',
)
# Completely ignored characters
t_ignore = ' \t\x0c'
# Newlines
def t_NEWLINE(t):
r'\n+'
t.lexer.lineno += t.value.count("\n")
reserved_map = { }
for r in reserved:
reserved_map[r.lower()] = r
def t_ID(t):
r'[A-Za-z_][\w_]*'
t.type = reserved_map.get(t.value,"ID")
return t
# Integer literal
t_ICONST = r'\d+([uU]|[lL]|[uU][lL]|[lL][uU])?'
# Floating literal
t_FCONST = r'((\d+)(\.\d+)(e(\+|-)?(\d+))? | (\d+)e(\+|-)?(\d+))([lL]|[fF])?'
# String literal
t_SCONST = r'\"([^\\\n]|(\\.))*?\"'
# Character constant 'c' or L'c'
t_CCONST = r'(L)?\'([^\\\n]|(\\.))*?\''
# # String literal
# t_STRING = r'\"([^\\\n]|(\\.))*?\"'
# #t_STRING = r'\".*?\"'
# Operators
t_PLUS = r'\+'
t_MINUS = r'-'
t_TIMES = r'\*'
t_DIVIDE = r'/'
t_MOD = r'%'
t_OR = r'\|'
t_AND = r'&'
t_NOT = r'~'
t_XOR = r'\^'
t_LSHIFT = r'<<'
t_RSHIFT = r'>>'
t_LOR = r'\|\|'
t_LAND = r'&&'
t_LNOT = r'!'
t_LT = r'<'
t_GT = r'>'
t_LE = r'<='
t_GE = r'>='
t_EQ = r'=='
t_NE = r'!='
# Assignment operators
t_EQUALS = r'='
# Delimiters
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_LBRACKET = r'\['
t_RBRACKET = r'\]'
t_LBRACE = r'\{'
t_RBRACE = r'\}'
t_COMMA = r','
t_PERIOD = r'\.'
t_SEMI = r';'
t_COLON = r':'
# # Comments
# def t_comment(t):
# r'/\*(.|\n)*?\*/'
# t.lexer.lineno += t.value.count('\n')
def t_error(t):
print "Illegal character", t.value[0]
t.lexer.skip(1)
lex.lex()
| true |
cad792f0c8f6a47486fa4d6fe971ec48089dbe00
|
Python
|
syurskyi/Python_Topics
|
/115_testing/_exercises/_templates/temp/Github/_Level_1/Python_Unittest_Suite-master/Python_Unittest_Patch_Methods.py
|
UTF-8
| 2,885 | 3.46875 | 3 |
[] |
no_license
|
# Python Unittest
# unittest.mock - mock object library
# unittest.mock is a library for testing in Python.
# It allows you to replace parts of your system under test with mock objects and make assertions about how they have been used.
# unittest.mock provides a core Mock class removing the need to create a host of stubs throughout your test suite.
# After performing an action, you can make assertions about which methods / attributes were used and arguments they were called with.
# You can also specify return values and set needed attributes in the normal way.
#
# Additionally, mock provides a patch() decorator that handles patching module and class level attributes within the scope of a test, along with sentinel
# for creating unique objects.
#
# Mock is very easy to use and is designed for use with unittest. Mock is based on the 'action -> assertion' pattern instead of 'record -> replay' used by
# many mocking frameworks.
#
#
# patch methods: start and stop.
# All the patchers have start() and stop() methods.
# These make it simpler to do patching in setUp methods or where you want to do multiple patches without nesting decorators or with statements.
# To use them call patch(), patch.object() or patch.dict() as normal and keep a reference to the returned patcher object.
# You can then call start() to put the patch in place and stop() to undo it.
# If you are using patch() to create a mock for you then it will be returned by the call to patcher.start.
#
from unittest import TestCase
from unittest.mock import patch

patcher = patch('package.module.ClassName')
from package import module
original = module.ClassName
new_mock = patcher.start()
assert module.ClassName is not original
assert module.ClassName is new_mock
patcher.stop()
assert module.ClassName is original
assert module.ClassName is not new_mock
#
# A typical use case for this might be for doing multiple patches in the setUp method of a TestCase:
#
class MyTest(TestCase):
    def setUp(self):
        self.patcher1 = patch('package.module.Class1')
        self.patcher2 = patch('package.module.Class2')
        self.MockClass1 = self.patcher1.start()
        self.MockClass2 = self.patcher2.start()
    def tearDown(self):
        self.patcher1.stop()
        self.patcher2.stop()
    def test_something(self):
        assert package.module.Class1 is self.MockClass1
        assert package.module.Class2 is self.MockClass2
MyTest('test_something').run()
#
# Caution:
# If you use this technique you must ensure that the patching is 'undone' by calling stop.
# This can be fiddlier than you might think, because if an exception is raised in the setUp then tearDown is not called.
# unittest.TestCase.addCleanup() makes this easier:
#
class MyTest(TestCase):
    def setUp(self):
        patcher = patch('package.module.Class')
        self.MockClass = patcher.start()
        self.addCleanup(patcher.stop)
    def test_something(self):
        assert package.module.Class is self.MockClass
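# Editor's demo (not part of the original notes): the same start/stop pattern
# applied to a real stdlib class, so the pattern can actually be run.
import collections
demo_patcher = patch('collections.OrderedDict')
mock_od = demo_patcher.start()
assert collections.OrderedDict is mock_od
demo_patcher.stop()
assert collections.OrderedDict is not mock_od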
| true |
7a6db244d6501882789016473d740863701e660a
|
Python
|
jcmarsh/drseus
|
/scripts/socket_file_server.py
|
UTF-8
| 2,401 | 2.84375 | 3 |
[] |
no_license
|
#!/usr/bin/env python3
from socket import AF_INET, SOCK_STREAM, socket
from threading import Thread
from os import remove
def receive_server():
with socket(AF_INET, SOCK_STREAM) as sock:
sock.bind(('', 60124))
sock.listen(5)
while True:
connection, address = sock.accept()
file_to_receive = connection.recv(4096).decode('utf-8', 'replace')
while '\n' not in file_to_receive:
file_to_receive += connection.recv(4096).decode('utf-8',
'replace')
file_to_receive = file_to_receive.split('\n')[0]
connection.close()
connection, address = sock.accept()
with open(file_to_receive, 'wb') as file_to_receive:
data = connection.recv(4096)
while data:
file_to_receive.write(data)
data = connection.recv(4096)
connection.close()
def send_server():
with socket(AF_INET, SOCK_STREAM) as sock:
sock.bind(('', 60123))
sock.listen(5)
while True:
connection, address = sock.accept()
file_to_send = connection.recv(4096).decode('utf-8', 'replace')
while '\n' not in file_to_send:
file_to_send += connection.recv(4096).decode('utf-8', 'replace')
file_to_send = file_to_send.split('\n')[0]
if ' ' in file_to_send:
args = file_to_send.split(' ')
file_to_send = args[0]
delete = args[1] == '-r'
else:
delete = False
try:
with open(file_to_send, 'rb') as data:
connection.sendall(data.read())
except:
print('socket_file_server.py: could not open file:',
file_to_send)
else:
try:
if delete:
remove(file_to_send)
print('socket_file_server.py: deleted file:',
file_to_send)
except:
print('socket_file_server.py: could not delete file:',
file_to_send)
finally:
connection.close()
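# Editor's sketch (not part of the original script): a minimal client for
# send_server above, following the script's own one-line protocol -- send the
# file name terminated by '\n', then read bytes until the server closes.
def fetch_file(host, name, out_path):
    with socket(AF_INET, SOCK_STREAM) as sock:
        sock.connect((host, 60123))
        sock.sendall((name + '\n').encode('utf-8'))
        with open(out_path, 'wb') as out:
            while True:
                chunk = sock.recv(4096)
                if not chunk:
                    break
                out.write(chunk)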
Thread(target=receive_server).start()
Thread(target=send_server).start()
| true |
e8669d2f92a66e62d55904b67217aba188e06c20
|
Python
|
kevinqqnj/sudo-dynamic-solve
|
/sudo_recur.py
|
UTF-8
| 18,986 | 3 | 3 |
[
"Apache-2.0"
] |
permissive
|
# coding:utf-8
# python3
# original: u"杨仕航"
# modified: @kevinqqnj
import logging
import numpy as np
from queue import Queue, LifoQueue
import time
import copy
# DEBUG INFO WARNING ERROR CRITICAL
logging.basicConfig(level=logging.WARN,
format='%(asctime)s %(levelname)s %(message)s')
# format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)-7s %(message)s')
logger = logging.getLogger(__name__)
class Record:
    point = None  # the cell being guessed
    point_index = 0  # index of the value used from the candidate list
    value = None  # board snapshot recorded for backtracking
class Sudo:
def __init__(self, _data):
        # initialize data (a 2-D object array)
        self.value = np.array([[0] * 9] * 9, dtype=object)  # board values, both solved and unsolved cells
        self.new_points = Queue()  # FIFO: coordinates of newly solved cells
        self.record_queue = LifoQueue()  # LIFO: backtracking stack
        self.guess_times = 0  # number of guesses made
        self.time_cost = '0'  # time spent solving
        self.time_start = time.time()
        self.guess_record = []  # log of guesses, for replay
        # base (top-left) points of the nine 3x3 blocks
        self.base_points = [[0, 0], [0, 3], [0, 6], [3, 0], [3, 3], [3, 6], [6, 0], [6, 3], [6, 6]]
        # load the puzzle
        self.puzzle = np.array(_data).reshape(9, -1)
        for r in range(0, 9):
            for c in range(0, 9):
                if self.puzzle[r, c]:  # if not zero
                    # numpy default is int32, convert to int
                    self.value[r, c] = int(self.puzzle[r, c])
                    # queue newly confirmed values so they get traversed
                    self.new_points.put((r, c))
                    # logger.debug(f'init: answer={self.value[r, c]} at {(r, c)}')
                else:  # if zero, the candidates are 1-9
                    self.value[r, c] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
        # self.guess_record.append({'value':eval(f'{self.value.tolist()}'),'desc':'loaded data'})
    # eliminate a confirmed digit from its row, column, and 3x3 block
def _cut_num(self, point):
r, c = point
val = self.value[r, c]
remove_r = remove_c = remove_b = False
value_snap = eval(f'{self.value.tolist()}')
point_snap = value_snap[r][c]
        # row
for i, item in enumerate(self.value[r]):
if isinstance(item, list):
if item.count(val):
item.remove(val)
remove_r = True
                    # after removal, check whether a single candidate remains
                    if len(item) == 1:
                        self.new_points.put((r, i))  # queue the coordinate as solved
                        logger.debug(f'only one in row: answer={self.value[r, i]} at {(r, i)}')
                        self.value[r, i] = item[0]
        # if remove_r: self.guess_record.append({'value':self.value.tolist(),'desc':'excluded digits already in this row'})
        # column
for i, item in enumerate(self.value[:, c]):
if isinstance(item, list):
if item.count(val):
item.remove(val)
remove_c = True
                    # after removal, check whether a single candidate remains
                    if len(item) == 1:
                        self.new_points.put((i, c))
                        logger.debug(f'only one in col: answer={self.value[i, c]} at {(i, c)}')
                        self.value[i, c] = item[0]
        # if remove_c: self.guess_record.append({'value':self.value.tolist(),'desc':'excluded digits already in this column'})
        # the containing 3x3 block
        b_r, b_c = map(lambda x: x // 3 * 3, point)  # block base point
for m_r, row in enumerate(self.value[b_r:b_r + 3, b_c:b_c + 3]):
for m_c, item in enumerate(row):
if isinstance(item, list):
if item.count(val):
item.remove(val)
remove_b = True
                        # after removal, check whether a single candidate remains
if len(item) == 1:
r = b_r + m_r
c = b_c + m_c
self.new_points.put((r, c))
logger.debug(f'only one in block: answer={self.value[r, c]} at {(r, c)}')
self.value[r, c] = item[0]
if remove_b or remove_c or remove_r:
self.guess_record.append({
'value': value_snap,
                'desc': f'excluded from same row/column/block: {point_snap}',
'highlight': point})
    # cases where, within a row, column, or 3x3 block, a candidate appears in only one cell's list
    def _check_one_possible(self):
        # a value that is possible in only one cell of this row
        for r in range(0, 9):
            # take only the cells in this row that are still candidate lists
values = list(filter(lambda x: isinstance(x, list), self.value[r]))
for c, item in enumerate(self.value[r]):
if isinstance(item, list):
for value in item:
if sum(map(lambda x: x.count(value), values)) == 1:
self.value[r, c] = value
self.new_points.put((r, c))
logger.debug(f'list val is only one in row: answer={self.value[r, c]} at {(r, c)}')
return True
        # a value that is possible in only one cell of this column
for c in range(0, 9):
values = list(filter(lambda x: isinstance(x, list), self.value[:, c]))
for r, item in enumerate(self.value[:, c]):
if isinstance(item, list):
for value in item:
if sum(map(lambda x: x.count(value), values)) == 1:
self.value[r, c] = value
self.new_points.put((r, c))
logger.debug(f'list val is only one in col: answer={self.value[r, c]} at {(r, c)}')
return True
        # a value that is possible in only one cell of this block
for r, c in self.base_points:
            # reshape: flatten the 3x3 block into a 1-D array
values = list(filter(lambda x: isinstance(x, list), self.value[r:r + 3, c:c + 3].reshape(1, -1)[0]))
for m_r, row in enumerate(self.value[r:r + 3, c:c + 3]):
for m_c, item in enumerate(row):
if isinstance(item, list):
for value in item:
if sum(map(lambda x: x.count(value), values)) == 1:
self.value[r + m_r, c + m_c] = value
self.new_points.put((r + m_r, c + m_c))
logger.debug(f'list val is only one in block: answer={self.value[r + m_r, c +m_c]} at '
f'{(r + m_r, c +m_c)}')
return True
    # digits of a block confined to one row or column (implicit exclusion across that row/column)
def _check_implicit(self):
for b_r, b_c in self.base_points:
block = self.value[b_r:b_r + 3, b_c:b_c + 3]
            # find how the digits 1-9 are distributed in this block
_data = block.reshape(1, -1)[0]
for i in range(1, 10):
result = map(lambda x: 0 if not isinstance(x[1], list) else x[0] + 1 if x[1].count(i) else 0,
enumerate(_data))
result = list(filter(lambda x: x > 0, result))
r_count = len(result)
if r_count in [2, 3]:
                    # only 2 or 3 occurrences can possibly share one row or column
rows = list(map(lambda x: (x - 1) // 3, result))
cols = list(map(lambda x: (x - 1) % 3, result))
if len(set(rows)) == 1:
                        # same row: remove this digit from the rest of that row
result = list(map(lambda x: b_c + (x - 1) % 3, result))
row = b_r + rows[0]
for col in range(0, 9):
if col not in result:
item = self.value[row, col]
if isinstance(item, list):
if item.count(i):
item.remove(i)
                                        # after removal, check whether a single candidate remains
if len(item) == 1:
self.new_points.put((row, col))
logger.debug(
f'block compare row: answer={self.value[row, col]} at {(row, col)}')
                                            self.guess_record.append({
                                                'value': eval(f'{self.value.tolist()}'),
                                                'desc': f'block implicit exclusion, row: {i}',
                                                'highlight': (row, col)})
                                            self.value[row, col] = item[0]
return True
elif len(set(cols)) == 1:
                        # same column: remove this digit from the rest of that column
result = list(map(lambda x: b_r + (x - 1) // 3, result))
col = b_c + cols[0]
for row in range(0, 9):
if row not in result:
item = self.value[row, col]
if isinstance(item, list):
if item.count(i):
item.remove(i)
                                        # after removal, check whether a single candidate remains
if len(item) == 1:
self.new_points.put((row, col))
logger.debug(
f'block compare col: answer={self.value[row, col]} at {(row, col)}')
                                            self.guess_record.append({
                                                'value': eval(f'{self.value.tolist()}'),
                                                'desc': f'block implicit exclusion, col: {i}',
                                                'highlight': (row, col)})
                                            self.value[row, col] = item[0]
return True
    # solve by exclusion
def sudo_exclude(self):
implicit_exist = True
new_point_exist = True
while implicit_exist:
while new_point_exist:
                # eliminate digits
                while not self.new_points.empty():
                    point = self.new_points.get()  # FIFO
                    self._cut_num(point)
                # check for candidate lists reduced to a single value; any new answer
                # joins the new_points queue and is cut immediately
                new_point_exist = self._check_one_possible()
            # check the same-row / same-column implicit exclusion
implicit_exist = self._check_implicit()
new_point_exist = True
    # count how many cells hold a confirmed digit
def get_num_count(self):
return sum(map(lambda x: 1 if isinstance(x, int) else 0, self.value.reshape(1, -1)[0]))
    # score cells to find the best coordinate to guess at
def get_best_point(self):
best_score = 0
best_point = (0, 0)
for r, row in enumerate(self.value):
for c, item in enumerate(row):
point_score = self._get_point_score((r, c))
if best_score < point_score:
best_score = point_score
best_point = (r, c)
return best_point
    # compute the score of a given coordinate
    def _get_point_score(self, point):
        # scoring: (10 - number of candidates) + confirmed digits in the same row + confirmed digits in the same column
r, c = point
item = self.value[r, c]
if isinstance(item, list):
score = 10 - len(item)
score += sum(map(lambda x: 1 if isinstance(x, int) else 0, self.value[r]))
score += sum(map(lambda x: 1 if isinstance(x, int) else 0, self.value[:, c]))
return score
else:
return 0
    # check the board for errors
    def verify_value(self):
        # rows
r = 0
for row in self.value:
nums = []
lists = []
for item in row:
(lists if isinstance(item, list) else nums).append(item)
if len(set(nums)) != len(nums):
# logger.error(f'verify failed. dup in row {r}')
logger.debug(f'verify failed. dup in row {r}')
                self.guess_record.append({
                    'value': eval(f'{self.value.tolist()}'),
                    'desc': f'verification failed in row: {r+1}'
                })
                return False  # digits must not repeat
            if len(list(filter(lambda x: len(x) == 0, lists))):
                return False  # a candidate list must not be empty
r += 1
        # columns
for c in range(0, 9):
nums = []
lists = []
col = self.value[:, c]
for item in col:
(lists if isinstance(item, list) else nums).append(item)
if len(set(nums)) != len(nums):
logger.debug(f'verify failed. dup in col {c}')
                self.guess_record.append({
                    'value': eval(f'{self.value.tolist()}'),
                    'desc': f'verification failed in column: {c+1}'
                })
                return False  # digits must not repeat
            if len(list(filter(lambda x: len(x) == 0, lists))):
                return False  # a candidate list must not be empty
        # 3x3 blocks
for b_r, b_c in self.base_points:
nums = []
lists = []
block = self.value[b_r:b_r + 3, b_c:b_c + 3].reshape(1, -1)[0]
for item in block:
(lists if isinstance(item, list) else nums).append(item)
if len(set(nums)) != len(nums):
logger.debug(f'verify failed. dup in block {b_r, b_c}')
                self.guess_record.append({
                    'value': eval(f'{self.value.tolist()}'),
                    'desc': f'verification failed in block: {b_r+1, b_c+1}'
                })
                return False  # digits must not repeat
            if len(list(filter(lambda x: len(x) == 0, lists))):
                return False  # a candidate list must not be empty
return True
def add_to_queue(self, point, index):
record = Record()
record.point = point
record.point_index = index
        # record.value = self.value.copy()  # numpy's copy() is not deep enough here
record.value = copy.deepcopy(self.value)
self.record_queue.put(record)
items = self.value[point]
self.value[point] = items[index]
self.new_points.put(point)
return items
def sudo_solve_iter(self):
# 排除法解题
self.sudo_exclude()
# logger.debug(f'excluded, current result:\n{self.value}')
if self.verify_value():
if self.get_num_count() == 81:
# solve success
self.time_cost = f'{time.time() - self.time_start:.3f}'
                self.guess_record.append({
                    'value': self.value.tolist(),
                    'desc': 'congratulations, solved!'
                })
return
else:
logger.info(f'current no. of fixed answers: {self.get_num_count()}')
point = self.get_best_point()
index = 0
items = self.add_to_queue(point, index)
logger.info(f'add to LIFO queue and guessing {items[index]}/{items}: '
f'{[x.point for x in self.record_queue.queue]}')
self.guess_times += 1
                self.guess_record.append({
                    'value': self.value.tolist(),
                    'desc': f'guess #{self.guess_times}: choosing {items[index]} from {items}',
                    'highlight': point,
                    'highlight_type': 'assume',
                    'type': 'assume',
                })
return self.sudo_solve_iter()
while True:
if self.record_queue.empty():
# raise Exception('Sudo is wrong, no answer!')
self.time_cost = f'{time.time() - self.time_start:.3f}'
logger.error(f'Guessed {self.guess_times} times. Sudo is wrong, no answer!')
exit()
# check value ERROR, need to try next index or rollback
record = self.record_queue.get()
point = record.point
index = record.point_index + 1
items = record.value[point]
self.value = record.value
logger.info(f'Recall! Pop previous point, {items} @{point}')
            # check whether the index is out of range
            # if not out of range, backtrack once more from this point with the next candidate
if index < len(items):
items = self.add_to_queue(point, index)
logger.info(f'guessing next index: answer={items[index]}/{items} @{point}')
self.guess_times += 1
                self.guess_record.append({
                    'value': self.value.tolist(),
                    'desc': f'backtrack, guess #{self.guess_times}: choosing {items[index]} from {items}',
                    'highlight': point,
                    'highlight_type': 'assume',
                    'type': 'assume',
                })
return self.sudo_solve_iter()
if __name__ == '__main__':
    # sudoku puzzles: http://cn.sudokupuzzle.org/
    # data[0]: reputedly the hardest sudoku
data = [[8, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 3, 6, 0, 0, 0, 0, 0,
0, 7, 0, 0, 9, 0, 2, 0, 0,
0, 5, 0, 0, 0, 7, 0, 0, 0,
0, 0, 0, 0, 4, 5, 7, 0, 0,
0, 0, 0, 1, 0, 0, 0, 3, 0,
0, 0, 1, 0, 0, 0, 0, 6, 8,
0, 0, 8, 5, 0, 0, 0, 1, 0,
0, 9, 0, 0, 0, 0, 4, 0, 0],
[0, 0, 0, 0, 5, 0, 2, 0, 0,
0, 9, 0, 0, 0, 0, 0, 4, 0,
0, 0, 0, 0, 1, 0, 0, 0, 0,
0, 0, 0, 4, 0, 6, 0, 8, 0,
0, 0, 7, 0, 0, 0, 1, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 8, 0, 9, 4,
2, 0, 1, 0, 7, 0, 0, 0, 0,
0, 0, 5, 0, 0, 0, 0, 0, 0]]
# try:
t1 = time.time()
puzzle = data[0]
sudo = Sudo(puzzle)
sudo.sudo_solve_iter()
logger.warning(f'Done! guessed {sudo.guess_times} times, in {sudo.time_cost}sec')
logger.warning(f'Puzzle:\n{sudo.puzzle}\nAnswer:\n{sudo.value}')
# except:
# logger.error(f'ERROR: {sudo.value}', exc_info=True)
| true |
c93277bff968a3ad0d5f66d03d54812ead49a4bf
|
Python
|
ajleeson/Self-Driving-Car-Simulation
|
/Algorithm/track.py
|
UTF-8
| 1,811 | 3.46875 | 3 |
[] |
no_license
|
class Track():
"""creates the tracks for all of the cars"""
# makes sure pixel resolution is high
def __init__(self, rows, cols, width, height, timeStep):
self.rows = rows # number of horizontal lanes
self.cols = cols # number of vertical lanes
        self.width = width # pixels wide
self.height = height # pixels high
#######################################################
# returns the number of horizontal lanes on the track
def getRows(self):
return self.rows
# returns the number of vertical lanes on the track
def getCols(self):
return self.cols
# returns the width of the track in pixels
def getWidth(self):
return self.width
# returns the height of the track in pixels
def getHeight(self):
return self.height
# returns the number of pixels between each row
def getRowSpacing(self):
rowSpacing = (self.height-self.rows)/(self.rows+1)
return rowSpacing
# returns the number of pixels between each column
def getColSpacing(self):
colSpacing = (self.width-self.cols)/(self.cols+1)
return colSpacing
# returns a list of tuples, with the x and y coordinate of each intersection contained in the tuple
def getIntersections(self):
intersections = []
for i in range(self.rows):
for j in range(self.cols):
            # account for the width of each lane
# determine the coordinate of each intersection
x_intersect = (j+1)*self.getColSpacing() + i
y_intersect = (i+1)*self.getRowSpacing() + j
intersection = [(x_intersect, y_intersect)]
intersections += intersection
return intersections
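# Editor's sketch (hypothetical sizes, not part of the original file):
# track = Track(rows=3, cols=4, width=800, height=600, timeStep=0.1)
# print(track.getIntersections())  # list of (x, y) tuples, one per crossing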
| true |
5a89d53a45a842faa0f9d05d78b2e45f98841e81
|
Python
|
rizwan2000rm/interview-prep
|
/Python/DS/tuple.py
|
UTF-8
| 381 | 4.4375 | 4 |
[
"MIT"
] |
permissive
|
# Tuples are immutable
print("============ tuples ============")
print()
tuples = (12345, 54321, 'hello!')
print(tuples)
u = tuples, (1, 2, 3, 4, 5)
print(u)
# The statement tuples = 12345, 54321, 'hello!' is an example of tuple packing:
# the values 12345, 54321 and 'hello!'
# are packed together in a tuple. The reverse operation is also possible:
x, y, z = tuples
print(x, y, z)
| true |
83b08e56b1c76fbe0d232cfd74b5e55a6ba091d2
|
Python
|
CashFu/selenium3Fu
|
/seleium3/selenium_lfj/find_element.py
|
UTF-8
| 836 | 2.59375 | 3 |
[] |
no_license
|
#coding=utf-8
from util.read_ini import ReadIni
class FindElement():
def __init__(self,driver):
self.driver = driver
def get_element(self,key):
read_ini = ReadIni()
data_ini = read_ini.get_value(key)
by = data_ini.split('>')[0]
value = data_ini.split('>')[1]
try:
if by =='id':
return self.driver.find_element_by_id(value)
elif by=='name':
return self.driver.find_element_by_name(value)
elif by=='className':
return self.driver.find_element_by_class_name(value)
else:
return self.driver.find_element_by_xpath(value)
except:
self.driver.save_screenshot(r"G:\unittets_lfj\seleium3\Image\%s.png"%value)
return None
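# Editor's sketch (hypothetical driver and ini key, not part of the original
# file): typical usage, assuming the ini file maps a key such as login_button
# to a locator string like "id>loginBtn".
# from selenium import webdriver
# driver = webdriver.Chrome()
# element = FindElement(driver).get_element('login_button')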
| true |
3291ec6e6d7d563367a61be37d23a743077a9ad7
|
Python
|
poweihuang17/practice_leetcode_and_interview
|
/Leetcode/Greedy/757_Set_Intersection_Size_At_Least_Two.py
|
UTF-8
| 1,200 | 3.296875 | 3 |
[] |
no_license
|
class Solution(object):
def intersectionSizeTwo(self, intervals):
"""
:type intervals: List[List[int]]
:rtype: int
"""
intervals.sort()
filtered_intervals=[]
#print intervals
for interval in intervals:
while filtered_intervals and filtered_intervals[-1][1]>=interval[1]:
filtered_intervals.pop()
filtered_intervals.append(interval)
result=0
p1,p2=float('-inf'),float('-inf')
#print filtered_intervals
for interval in filtered_intervals:
condition1=interval[0]<=p1<=interval[1]
condition2=interval[0]<=p2<=interval[1]
if condition1 and condition2:
continue
elif condition1:
p2=interval[1]
result+=1
elif condition2:
p1=interval[1]
result+=1
else:
p2=interval[1]
p1=p2-1
result+=2
return result
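# Editor's note: an interval that fully contains a later one is popped above, so
# the filtered intervals have strictly increasing right endpoints; p1 and p2
# track the two largest chosen points, and each interval then needs 0, 1, or 2
# fresh points depending on how many of them it already covers.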
s=Solution()
print s.intersectionSizeTwo([[1, 3], [1, 4], [2, 5], [3, 5]])
print s.intersectionSizeTwo([[1, 2], [2, 3], [2, 4], [4, 5]])
print s.intersectionSizeTwo([[1,24],[10,16],[14,25],[0,18],[16,17]])
| true |
57c24a8a95e689732d67e4281cc5dbcb69a729d3
|
Python
|
git208/AutoFrameRegressionTestF10
|
/common/test_cases_select.py
|
UTF-8
| 3,230 | 2.703125 | 3 |
[] |
no_license
|
import json
import os
from common.yaml_RW import YamlRW
from config.logConfig import LogCustom,logging
from common.parse_excel import ParseExcel
def testCaseSelect(file,file_type='excel',
testcase_matching=None,
sheet_names=None,
isFuzzy=False,
isAll=False):
"""
:param file: 用例所在的路径
:param file_type: 用例所属文件类型,可以是yaml,excel
:param testcase_matching:list 用例匹配关键字列表,isFuzzy为True时,根据列表中关键字模糊匹配用例,isFuzzy为False时,该参数为用例列表
:param isFuzzy: 是否根据关键字列表进行模糊匹配
:param isAll: 为True时执行所有用例
"""
print('开始创建用例执行列表')
ym = YamlRW('../source/testCaseDriver.yaml')
if isAll == False:
if isFuzzy == False:
ym.wightYaml(testcase_matching)
else:
temp_list = []
if file_type.lower() == 'yaml':
for _ in os.listdir('../testCase/yamls'):
__: str
for __ in testcase_matching:
if __ in _:
temp_list.append(_)
elif file_type.lower() == 'excel':
excel = ParseExcel(file)
excel.get_excel_new()
if sheet_names != None:
if type(sheet_names) == list:
for sheet_name in sheet_names:
                            for _ in excel.excel_data[sheet_name][1].keys():
temp_list.append((sheet_name,_))
else:
                        for _ in excel.excel_data[sheet_names][1].keys():
temp_list.append((sheet_names, _))
else:
for sheet_name in testcase_matching.keys():
                        for __ in excel.excel_data[sheet_name][1].keys():
for _ in testcase_matching[sheet_name]:
if __ in _:
temp_list.append((sheet_name, _))
else:
                LogCustom().logger().error('file type is neither yaml nor excel; failed to build the execution list')
ym.wightYaml(temp_list)
else:
if file_type.lower() == 'yaml':
ym.wightYaml(os.listdir('../testCase/yamls'))
elif file_type.lower() == 'excel':
temp_list = []
excel = ParseExcel(file)
excel.get_excel_new()
i = 0
A = excel.excel_data.keys()
for _ in A:
i += 1
j = 0
B = excel.excel_data[_][1].keys()
for __ in B:
j += 1
temp_list.append([_,__])
                    print(f'{len(A)} interfaces in total, currently at interface {i}; it contains {len(B)} cases, selected up to case {j}, data {(_, __)}')
ym.wightYaml(temp_list)
# if __name__ == '__main__':
# testCaseSelect(FILE,isAll=True)
# with open('../source/testCaseDriver.json', mode='r', encoding='utf-8') as a:
# print(a.read())
| true |
6b916309205853e112e1ce746da8af660b2ea869
|
Python
|
francosbenitez/unsam
|
/04-listas-y-listas/01-debugger/debugger.py
|
UTF-8
| 1,073 | 4.28125 | 4 |
[] |
no_license
|
"""
Ejercicio 4.1: Debugger
Ingresá y corré el siguiente código en tu IDE:
def invertir_lista(lista):
'''Recibe una lista L y la develve invertida.'''
invertida = []
i=len(lista)
while i > 0: # tomo el último elemento
i=i-1
invertida.append (lista.pop(i)) #
return invertida
l = [1, 2, 3, 4, 5]
m = invertir_lista(l)
print(f'Entrada {l}, Salida: {m}')
Deberías observar que la función modifica el valor de la lista de entrada. Eso no debería ocurrir: una función nunca debería modificar los parámetros salvo que sea lo esperado. Usá el debugger y el explorador de variables para determinar cuál es el primer paso clave en el que se modifica el valor de esta variable.
"""
def invertir_lista(lista):
'''Recibe una lista L y la develve invertida.'''
invertida = []
i=len(lista)
while i > 0: # tomo el último elemento
i=i-1
invertida.append(lista[i]) # la función pop quita
return invertida
l = [1, 2, 3, 4, 5]
m = invertir_lista(l)
print(f'Entrada {l}, Salida: {m}')
| true |
1493757adaaa0918e76b211c85051a989bd94c95
|
Python
|
pdekeulenaer/sennai
|
/simulation.py
|
UTF-8
| 1,035 | 3.03125 | 3 |
[] |
no_license
|
import game, population
import random
# config parameters
# Population
n_cars = 10
start = (50,50)
# Brain
layers = 10
neurons = 20
# evolution
mutation_rate = 0.10
parents_to_keep = 0.33
# generate the brains
# brains = []
# for i in range(0, n_cars):
# seed = random.random()
# brains += [population.NeuronBrain(1,1,layers,neurons, seed)]
# print seed
brains = [population.NeuronBrain(5,2,layers,neurons, random.random()) for i in range(0,n_cars)]
cars = [population.CarSpecimen(start[0],start[1], brain=brains[i], name="Car {0}".format(i)) for i in range(0,n_cars)]
# # # nparents to keep
# for b in brains:
# print b.dimension()
# parents = cars[1:int(n_cars * parents_to_keep)]
# parent_brains = [x.brain for x in parents]
# mutation_func = lambda l: population.mutate(l, mutation_rate, 0.5)
# nbrains = population.breed(parent_brains, n_cars, mutation_func, True)
# print nbrains
# print [x in nbrains for x in parent_brains]
# create the application
# print cars
app = game.App(players=cars)
app.on_execute()
| true |
59acb29b1e14e36b1c69230bfc320e122295e66f
|
Python
|
jeremyperthuis/UVSQ_BioInformatique
|
/td2/pgm8.py
|
UTF-8
| 241 | 3.609375 | 4 |
[] |
no_license
|
sq1 = raw_input("enter a DNA sequence: ")
i = 0
n = len(sq1) - 1
x = 0
while i < n:
    if sq1[i] == sq1[n]:
        x = x + 1
    i = i + 1
    n = n - 1  # walk inward from both ends
if x == (len(sq1) - 1) / 2:
    print "this sequence is a palindrome"
else:
    print "this sequence is not a palindrome"
| true |
1b276b69af3d8b7c304ffbfee9d891bb2a5fc6c7
|
Python
|
wadimiusz/hseling-repo-diachrony-webvectors
|
/hseling_lib_diachrony_webvectors/hseling_lib_diachrony_webvectors/algos/global_anchors.py
|
UTF-8
| 2,582 | 3.140625 | 3 |
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
import gensim
import numpy as np
import copy
from tqdm.auto import tqdm
from utils import log, intersection_align_gensim
from gensim.matutils import unitvec
class GlobalAnchors(object):
def __init__(self, w2v1, w2v2, assume_vocabs_are_identical=False):
if not assume_vocabs_are_identical:
w2v1, w2v2 = intersection_align_gensim(copy.copy(w2v1),
copy.copy(w2v2))
self.w2v1 = w2v1
self.w2v2 = w2v2
def __repr__(self):
return "GlobalAnchors"
def get_global_anchors(self, word: str, w2v: gensim.models.KeyedVectors):
"""
        This takes in a word and a KeyedVectors model and returns the unit-normalized
        vector of cosine similarities between this word and each word in the vocab.
        :param word:
        :param w2v:
        :return: np.array of similarities shaped (len(w2v.vocab),)
"""
word_vector = w2v.get_vector(word)
similarities = gensim.models.KeyedVectors.cosine_similarities(word_vector, w2v.vectors)
return unitvec(similarities)
def get_score(self, word: str):
w2v1_anchors = self.get_global_anchors(word, self.w2v1)
w2v2_anchors = self.get_global_anchors(word, self.w2v2)
score = np.dot(w2v1_anchors, w2v2_anchors)
return score
def get_changes(self, top_n_changed_words: int):
"""
This method uses approach described in
Yin, Zi, Vin Sachidananda, and Balaji Prabhakar.
"The global anchor method for quantifying linguistic shifts and
domain adaptation." Advances in Neural Information Processing Systems. 2018.
It can be described as follows. To evaluate how much the meaning of a given word differs in
two given corpora, we take cosine distance from the given word to all words
in the vocabulary; those values make up a vector with as many components as there are words
in the vocab. We do it for both corpora and then compute the cosine distance
between those two vectors
:param top_n_changed_words: we will output n words that differ the most in the given corpora
:return: list of pairs (word, score), where score indicates how much a word has changed
"""
log('Doing global anchors')
result = list()
for word in tqdm(self.w2v1.wv.vocab.keys()):
score = self.get_score(word)
result.append((word, score))
result = sorted(result, key=lambda x: x[1])
result = result[:top_n_changed_words]
log('\nDone')
return result
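# Editor's sketch (hypothetical file names, not part of the original module):
# typical usage with two aligned gensim models trained on different time slices.
# m1 = gensim.models.KeyedVectors.load('slice1.kv')
# m2 = gensim.models.KeyedVectors.load('slice2.kv')
# shifts = GlobalAnchors(m1, m2).get_changes(top_n_changed_words=20)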
| true |
7bde88600f52f45f9e8b1f99707aa6a01e719b72
|
Python
|
PAVANANUTHALAPATI/python-
|
/range.py
|
UTF-8
| 88 | 3.296875 | 3 |
[] |
no_license
|
pav=int(raw_input())
if pav in range (1,10):
print("yes")
else:
print("no")
| true |
97b0a68ee463f34a5ef2d2d429dad41b49121f51
|
Python
|
GPUOpen-Drivers/llpc
|
/script/gc-amdvlk-docker-images.py
|
UTF-8
| 4,235 | 2.8125 | 3 |
[
"MIT",
"Apache-2.0",
"NCSA"
] |
permissive
|
#! /usr/bin/env python3
"""Script to garbage collect old amdvlk docker images created by the public CI on GitHub.
Requires python 3.8 or later.
"""
import argparse
import json
import logging
import subprocess
import sys
from collections import defaultdict
from typing import Any, Dict, List, Optional, Tuple
def _run_cmd(cmd: List[str]) -> Tuple[bool, str]:
"""
Runs a shell command capturing its output.
Args:
cmd: List of strings to invoke as a subprocess
Returns:
Tuple: success, stdout. The first value is True on success.
"""
logging.info('Running command: %s', ' '.join(cmd))
result = subprocess.run(cmd, capture_output=True, check=False, text=True)
if result.returncode != 0:
logging.info('%s', result.stderr)
return False, ''
return True, result.stdout
def query_images(artifact_repository_url: str) -> Optional[List[Dict[str, Any]]]:
"""
Returns a list of JSON objects representing docker images found under
|artifact_repository_url|, or None on error.
Sample JSON object:
{
"createTime": "2022-07-11T20:20:23.577823Z",
"package": "us-docker.pkg.dev/stadia-open-source/amdvlk-public-ci/amdvlk_release_gcc_assertions",
"tags": "",
"updateTime": "2022-07-11T20:20:23.577823Z",
"version": "sha256:e101b6336fa78014e4008df59667dd84616dc8d1b60c2240f3246ab9a1ed6b20"
}
"""
ok, text = _run_cmd(['gcloud', 'artifacts', 'docker', 'images', 'list',
artifact_repository_url, '--format=json', '--quiet'])
if not ok:
return None
return list(json.loads(text))
def find_images_to_gc(images: List[Dict[str, Any]], num_last_to_keep) -> List[Dict[str, Any]]:
"""
Returns a subset of |images| that should be garbage collected. Preserves tagged
images and also the most recent |num_last_to_keep| for each package.
"""
package_to_images = defaultdict(list)
for image in images:
package_to_images[image['package']].append(image)
to_gc = []
for _, images in package_to_images.items():
# Because the time format is ISO 8601, the lexicographic order is also chronological.
images.sort(key=lambda x: x['createTime'])
for image in images[:-num_last_to_keep]:
if not image['tags']:
to_gc.append(image)
return to_gc
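# Editor's example: with num_last_to_keep=1, only the older untagged image of
# each package is returned, e.g.
# find_images_to_gc([
#     {'package': 'p', 'createTime': '2022-01-01T00:00:00Z', 'tags': '', 'version': 'sha256:a'},
#     {'package': 'p', 'createTime': '2022-02-01T00:00:00Z', 'tags': '', 'version': 'sha256:b'},
# ], 1)  # -> [the 2022-01-01 image]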
def delete_images(images: List[Dict[str, Any]], dry_run: bool) -> None:
"""
Deletes all |images| from the repository. When |dry_run| is True, synthesizes the delete
commands and logs but does not execute them.
"""
for image in images:
image_path = image['package'] + '@' + image['version']
cmd = ['gcloud', 'artifacts', 'docker', 'images', 'delete',
image_path, '--quiet']
if dry_run:
logging.info('Dry run: %s', ' '.join(cmd))
continue
ok, _ = _run_cmd(cmd)
if not ok:
logging.warning('Failed to delete image:\n%s', image)
def main() -> int:
logging.basicConfig(
format='%(levelname)s %(asctime)s %(filename)s:%(lineno)d %(message)s',
level=logging.INFO)
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
'--dry-run',
action='store_true',
default=False,
help='Do not delete or modify any docker images (default: %(default)s).')
    parser.add_argument(
        '--keep-last',
        type=int,
        default=8,
        help='The number of the most recent images to keep for each package (default: %(default)s).')
parser.add_argument(
'--repository-url',
default='us-docker.pkg.dev/stadia-open-source/amdvlk-public-ci',
help='The repository with docker images to garbage collect (default: %(default)s).'
)
args = parser.parse_args()
all_images = query_images(args.repository_url)
if all_images is None:
logging.error('Failed to list docker images under \'%s\'', args.repository_url)
return 1
to_gc = find_images_to_gc(all_images, args.keep_last)
delete_images(to_gc, args.dry_run)
return 0
if __name__ == '__main__':
sys.exit(main())
| true |
d710863243bb183e1be2960d5b8fc0b1602a7756
|
Python
|
Dhirajpatel121/IPL-Predictive-Analytics
|
/IPL/MIvsCSK.py
|
UTF-8
| 7,233 | 2.8125 | 3 |
[] |
no_license
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import warnings; warnings.simplefilter('ignore')
get_ipython().run_line_magic('matplotlib', 'inline')
# In[2]:
deliveries = pd.read_csv('C:/Users/SONY/Desktop/IPL/deliveries.csv')
deliveries
# In[3]:
matches = pd.read_csv('C:/Users/SONY/Desktop/IPL/matches.csv')
matches
# In[4]:
#### Records of MI vs CSK (matches dataset)
micsk =matches[np.logical_or(np.logical_and(matches['team1']=='Mumbai Indians',matches['team2']=='Chennai Super Kings'),np.logical_and(matches['team2']=='Mumbai Indians',matches['team1']=='Chennai Super Kings'))]
micsk
# In[5]:
# Head-to-head MI vs CSK across all seasons
sns.set(style='dark')
fig=plt.gcf()
fig.set_size_inches(8,5.2)
sns.countplot(micsk['winner'],order=micsk['winner'].value_counts().index)
plt.text(-0.1,5,str(micsk['winner'].value_counts()['Mumbai Indians']),size=29,color='black')
plt.text(0.9,3,str(micsk['winner'].value_counts()['Chennai Super Kings']),size=29,color='black')
plt.xlabel('Winner',fontsize=15)
plt.ylabel('Count',fontsize=15)
plt.yticks(fontsize=0)
plt.title('MI vs CSK - head to head')
plt.show()
# In[6]:
# H2H over the previous 2 seasons
df_season_record =micsk[micsk['season'] >=2018]
df_season_record_df = df_season_record[['season','winner','id']]
df_season_record_df
# In[7]:
##### Head-to-head MI vs CSK, last 2 seasons
sns.set(style='dark')
fig=plt.gcf()
fig.set_size_inches(10,8)
sns.countplot(df_season_record_df['winner'],order=df_season_record_df['winner'].value_counts().index)
plt.text(-0.1,2.5,str(df_season_record_df['winner'].value_counts()['Mumbai Indians']),size=29,color='black')
plt.text(0.95,0.5,str(df_season_record_df['winner'].value_counts()['Chennai Super Kings']),size=29,color='black')
plt.xlabel('Winner',fontsize=15)
plt.ylabel('Count',fontsize=15)
plt.yticks(fontsize=0)
plt.title('MI vs CSK - head to head last 2 season')
plt.show()
# # Looking at the previous 2 seasons' record (recent form) and the overall head-to-head record across seasons, MI have dominated CSK.
# #
# # Hence, according to recent form and this analysis, MI will win today's match.
# In[8]:
#For deliveries dataset
cskmi=deliveries[np.logical_or(np.logical_and(deliveries['batting_team'] == 'Mumbai Indians',deliveries['bowling_team']== 'Chennai Super Kings'),np.logical_and(deliveries['bowling_team']=='Mumbai Indians',deliveries['batting_team']=='Chennai Super Kings'))]
cskmi
# In[9]:
# Previous 2 seasons' records from the deliveries dataset (filtered with the help of match_id)
cskmi_2season = cskmi[cskmi['match_id'] >= 7894]
cskmi_2season
# # De kock
# In[10]:
## Records of Q de Kock against CSK in the first 10 balls (played only 1 season with MI & 4 matches against CSK)
def dekock(matchid):
df_dekock = cskmi_2season[cskmi_2season.batsman == 'Q de Kock']
x = df_dekock[df_dekock['match_id'] == matchid]
y = x[['match_id','batsman_runs']].head(10)
z = y[y.batsman_runs >= 4 ]
print(z['batsman_runs'].sum())
# In[11]:
# 1st match against csk in 2019;total boundary runs=4
dekock(11151)
# In[12]:
# 2nd match against csk in 2019;total boundary runs=10
dekock(11335)
# In[13]:
# 3rd match against csk in 2019;total boundary runs=8
dekock(11412)
# ## Looking at the last 3 matches of QdK against CSK, in two of those matches he scored fewer than 10 boundary runs in his first 10 balls.
# ## Hence, the prediction is that today QdK will score fewer than 10 boundary runs in his first 10 balls.
# # Dot balls ratio
# In[15]:
# dot balls
def dotballs(bowler_name):
df_bowler = cskmi_2season[cskmi_2season.bowler == bowler_name]
total_dot = df_bowler[df_bowler['total_runs'] == 0]
dots_bowler = total_dot['total_runs'].count()
total_balls=df_bowler['ball'].count() - df_bowler[df_bowler.wide_runs >= 1].count() - df_bowler[df_bowler.noball_runs >= 1].count()
total_balls_df = total_balls['ball']
print((dots_bowler/total_balls_df).round(3)*100)
# In[16]:
# Chahar dot balls ratio
print("Rahul Chahar dot balls in % :")
dotballs('RD Chahar')
# In[17]:
# bumrah dot balls ratio
print("Jasprit Bumrah dot balls in % :")
dotballs('JJ Bumrah')
# In[18]:
# hardik dotball ratio
print("Hardik pandya dot balls in % :")
dotballs('HH Pandya')
# In[19]:
# krunal dot ball ratio
print("Krunal Pandya dot balls in % :")
dotballs('KH Pandya')
# In[20]:
## For boult dot ball ratio
csk = deliveries[deliveries.batting_team == 'Chennai Super Kings']
boult_against_csk = csk[csk.bowler == 'TA Boult']
dotballs = boult_against_csk[boult_against_csk['total_runs'] == 0].count()
final_dotballs = dotballs['total_runs']
total_balls = boult_against_csk['ball'].count() - boult_against_csk[boult_against_csk.wide_runs >= 1].count() - boult_against_csk[boult_against_csk.noball_runs >= 1].count()
dfx = (final_dotballs/total_balls)*100
print("Boult dot balls ratio against csk in % =",dfx['ball'].round())
# ## The two bowlers with the highest dot-ball ratios have been Rahul Chahar and Krunal Pandya.
# ## However, in the current season Chahar has bowled more dot balls and has a better economy than Krunal.
# ## Hence, according to current form and this analysis, Rahul Chahar will have the highest dot-ball ratio among the Mumbai bowlers.
# # BLS
# In[21]:
## BLS
def BLS(bowlername):
record = cskmi_2season[cskmi_2season.bowler == bowlername]
record_wickets = record['dismissal_kind'].count()
avg_wickets = record_wickets/cskmi_2season['match_id'].nunique()
total_dot = record[record['total_runs'] == 0]
avg_dots_bowler = total_dot['total_runs'].count()/cskmi_2season['match_id'].nunique()
total_balls= record['ball'].count() - record[record.wide_runs >= 1].count() - record[record.noball_runs >= 1].count()
total_balls_df = total_balls['ball'] /cskmi_2season['match_id'].nunique()
total_boundaries = record[record.batsman_runs >= 4]
total_boundaries_final =total_boundaries['batsman_runs'].count()
total_boundaries_runs = total_boundaries['batsman_runs'].sum()
final = (avg_wickets + avg_dots_bowler - (total_boundaries_runs/total_boundaries_final))/total_balls_df
print('BLS score =' ,final.round(3))
# In[22]:
print("1. Bumrah")
BLS('JJ Bumrah')
print("2. Rahul chahar")
BLS('RD Chahar')
print("3. Krunal pandya")
BLS('KH Pandya')
print("4. Tahir")
BLS('Imran Tahir')
print("5. Deepak chahar")
BLS('DL Chahar')
print("6. SN Thakur")
BLS('SN Thakur')
print("7. HH Pandya")
BLS('HH Pandya')
print("RA Jadeja")
BLS('RA Jadeja')
# ## The BLS score has been highest for Deepak Chahar.
# ## Hence, according to this analysis, Deepak Chahar will have the highest BLS score.
# In[27]:
## Looking for last 3 matches 4 & 6 in same over
def match(matchid):
df = cskmi_2season[cskmi_2season.match_id == matchid]
dfx = df[df.batsman_runs >= 4]
dataframe = pd.DataFrame(dfx.groupby(['match_id','over','ball','inning']).sum()['batsman_runs'])
print(dataframe)
# In[24]:
match(11335)
# In[25]:
match(11412)
# In[26]:
match(11415)
# ## Looking at the last 3 matches and the recent season's (2020) record, on average 5-6 overs per match at Sharjah have seen both a 4 and a 6 scored in the same over.
# ## Hence, according to this analysis, 5-6 overs is the answer.
| true |
3541096c6c8edd5bcc12e74e32dadbffe14fcc02
|
Python
|
helsinkithinkcompany/wide
|
/FennicaTrends/serious-spin-master/data/python/countRelativeWeights.py
|
UTF-8
| 1,828 | 2.890625 | 3 |
[
"MIT"
] |
permissive
|
import json, sys
from math import pow
# FILE HANDLING #
def writeJsonToFile(json_data, file_path):
try:
with open(file_path, 'w') as outfile:
json.dump(json_data, outfile)
return True
except Exception as e:
print(e)
print('Failed to dump json to file ' + file_path)
return False
def getJsonFromFile(file_path):
try:
with open(file_path) as infile:
json_data = json.load(infile)
return json_data
except Exception as e:
print(e)
print('Failed to get json from file ' + file_path)
return False
if len(sys.argv) < 2:
print("Usage: %s fennica-all.json"%sys.argv[0])
sys.exit()
fennica_all = getJsonFromFile(sys.argv[1])
PATH_TO_FENNICA_ALL_JSON_FILE = './fennica-graph.json'
# DATA HANDLING #
def countMagicValue(this, mean, max):
if int(this) - int(mean) == 0:
return 50
elif int(this) < int(mean):
diff = 1 + (int(mean) - int(this)) / mean
return int(50 - 50 * (1 - 1 / diff))
elif int(this) > int(mean):
diff = 1 + (int(this) - int(mean))/ (max - mean)
return int(50 + 50 * (1 - 1 / diff))
else:
return 50
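# Worked example (editor's note): with mean=4.0 and max=10.0, a count of 7 gives
# diff = 1 + (7-4)/(10-4) = 1.5 and a score of int(50 + 50*(1 - 1/1.5)) = 66,
# while a below-mean count of 2 gives diff = 1 + (4-2)/4.0 = 1.5 and a score of 33.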
def getMeanAndMaxOfYear(json_data, year):
sum = 0
count = 0
max = 0
for word in json_data[year]:
count = count + 1
sum = sum + json_data[year][word]
if max < json_data[year][word]:
max = json_data[year][word]
return float(sum)/float(count), float(max)
def changeWordWeightsToRelativeOfMeanByYear(json_data, year):
mean, max = getMeanAndMaxOfYear(json_data, year)
for word in json_data[year]:
json_data[year][word] = countMagicValue(float(json_data[year][word]), mean, max)
def changeWordWeightsToRelative(json_data):
for year in json_data:
changeWordWeightsToRelativeOfMeanByYear(json_data, year)
return json_data
fennica_all_relative = changeWordWeightsToRelative(fennica_all)
writeJsonToFile(fennica_all_relative, 'fennica-graph.json')
| true |
66f5a6d7bef3707c974e3210da2db94f6e393a4a
|
Python
|
schuCS50/CS33a
|
/finalproject/games/cards/models.py
|
UTF-8
| 4,162 | 2.703125 | 3 |
[] |
no_license
|
from django.contrib.auth.models import AbstractUser
from django.db import models
# Extended user class
class User(AbstractUser):
def __str__(self):
return f"User {self.id}: {self.username}"
# Two Player Game extendable
class TwoPlayerGame(models.Model):
player1 = models.ForeignKey(User,
on_delete=models.CASCADE,
related_name="p1_games")
player2 = models.ForeignKey(User,
on_delete=models.CASCADE,
related_name="p2_games")
createdTimestamp = models.DateTimeField(auto_now_add=True)
updatedTimestamp = models.DateTimeField(auto_now=True)
winner = models.ForeignKey(User,
on_delete=models.CASCADE,
related_name="won_games",
blank=True,
null=True)
class Meta:
abstract = True
#TicTacToe Game
class TicTacToe(TwoPlayerGame):
cell1 = models.ForeignKey(User,
on_delete=models.CASCADE,
related_name="cell1",
blank=True,
null=True)
cell2 = models.ForeignKey(User,
on_delete=models.CASCADE,
related_name="cell2",
blank=True,
null=True)
cell3 = models.ForeignKey(User,
on_delete=models.CASCADE,
related_name="cell3",
blank=True,
null=True)
cell4 = models.ForeignKey(User,
on_delete=models.CASCADE,
related_name="cell4",
blank=True,
null=True)
cell5 = models.ForeignKey(User,
on_delete=models.CASCADE,
related_name="cell5",
blank=True,
null=True)
cell6 = models.ForeignKey(User,
on_delete=models.CASCADE,
related_name="cell6",
blank=True,
null=True)
cell7 = models.ForeignKey(User,
on_delete=models.CASCADE,
related_name="cell7",
blank=True,
null=True)
cell8 = models.ForeignKey(User,
on_delete=models.CASCADE,
related_name="cell8",
blank=True,
null=True)
cell9 = models.ForeignKey(User,
on_delete=models.CASCADE,
related_name="cell9",
blank=True,
null=True)
# Function to return formatted output
def serialize(self):
return {
"player1": self.player1.username,
"player2": self.player2.username,
"createdTimestamp": self.createdTimestamp.strftime(
"%b %d %Y, %I:%M %p"),
"updatedTimestamp": self.updatedTimestamp.strftime(
"%b %d %Y, %I:%M %p"),
"winner": self.winner.username if self.winner else None,
"cells": [self.cell1.username if self.cell1 else None,
self.cell2.username if self.cell2 else None,
self.cell3.username if self.cell3 else None,
self.cell4.username if self.cell4 else None,
self.cell5.username if self.cell5 else None,
self.cell6.username if self.cell6 else None,
self.cell7.username if self.cell7 else None,
self.cell8.username if self.cell8 else None,
self.cell9.username if self.cell9 else None]
}
| true |
20ed11ef0f52d20c8f5abfc8c2e88cd5aa19a6d4
|
Python
|
alaalial/relancer-artifact
|
/relancer-exp/original_notebooks/pavansubhasht_ibm-hr-analytics-attrition-dataset/imbalanceddata-predictivemodelling-by-ibm-dataset.py
|
UTF-8
| 19,857 | 3.390625 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env python
# coding: utf-8
# # IBM HR Employee Attrition & Performance.
# ## [Please star/upvote in case you find it helpful.]
# In[ ]:
from IPython.display import Image
Image("../../../input/pavansubhasht_ibm-hr-analytics-attrition-dataset/imagesibm/image-logo.png")
# ## CONTENTS ::->
# [ **1 ) Exploratory Data Analysis**](#content1)
# [ **2) Correlation b/w Features**](#content2)
# [** 3) Feature Selection**](#content3)
# [** 4) Preparing Dataset**](#content4)
# [ **5) Modelling**](#content5)
#
# Note that this notebook uses traditional ML algorithms. I have another notebook in which I have used an ANN on the same dataset. To check it out please follow the below link-->
#
# https://www.kaggle.com/rajmehra03/an-introduction-to-ann-keras-with-ibm-hr-dataset/
# [ **6) Conclusions**](#content6)
# <a id="content1"></a>
# ## 1 ) Exploratory Data Analysis
# ## 1.1 ) Importing Various Modules
# In[ ]:
# Ignore the warnings
import warnings
warnings.filterwarnings('always')
warnings.filterwarnings('ignore')
# data visualisation and manipulation
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import style
import seaborn as sns
import missingno as msno
#configure
# sets matplotlib to inline and displays graphs below the corresponding cell.
style.use('fivethirtyeight')
sns.set(style='whitegrid',color_codes=True)
#import the necessary modelling algos.
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.naive_bayes import GaussianNB
#model selection
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.metrics import accuracy_score,precision_score,recall_score,confusion_matrix,roc_curve,roc_auc_score
from sklearn.model_selection import GridSearchCV
from imblearn.over_sampling import SMOTE
#preprocess.
from sklearn.preprocessing import MinMaxScaler,StandardScaler,Imputer,LabelEncoder,OneHotEncoder
# ## 1.2 ) Reading the data from a CSV file
# In[ ]:
df=pd.read_csv(r"../../../input/pavansubhasht_ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv")
# In[ ]:
df.head()
# In[ ]:
df.shape
# In[ ]:
df.columns
# ## 1.3 ) Missing Values Treatment
# In[ ]:
df.info() # no null or 'Nan' values.
# In[ ]:
df.isnull().sum()
# In[ ]:
msno.matrix(df) # just to visualize. one final time.
# ## 1.4 ) The Features and the 'Target'
# In[ ]:
df.columns
# In[ ]:
df.head()
# In all we have 34 features, both categorical and numerical. The target variable is the
# 'Attrition' of the employee, which can be either a Yes or a No. This is what we have to predict.
# **Hence this is a Binary Classification problem. **
# ## 1.5 ) Univariate Analysis
# In this section I have done the univariate analysis i.e. I have analysed the range or distribution of the values that various features take. To better analyze the results I have plotted various graphs and visualizations wherever necessary. Univariate analysis helps us identify the outliers in the data.
# In[ ]:
df.describe()
# Let us first analyze the various numeric features. To do this we can actually plot a boxplot showing all the numeric features. Also the distplot or a histogram is a reasonable choice in such cases.
# In[ ]:
sns.factorplot(data=df,kind='box',size=10,aspect=3)
# Note that all the features have pretty different scales and so plotting a boxplot is not a good idea. Instead what we can do is plot histograms of various continuously distributed features.
#
# We can also plot a kdeplot showing the distribution of the feature. Below I have plotted a kdeplot for the 'Age' feature.
# Similarly we plot for other numeric features also. We can also use a distplot from the seaborn library, which combines a histogram with a kernel density estimate.
# In[ ]:
sns.kdeplot(df['Age'],shade=True,color='#ff4125')
# In[ ]:
sns.distplot(df['Age'])
# Similarly we can do this for all the numerical features. Below I have plotted the subplots for the other features.
# In[ ]:
warnings.filterwarnings('always')
warnings.filterwarnings('ignore')
fig,ax = plt.subplots(5,2, figsize=(9,9))
sns.distplot(df['TotalWorkingYears'], ax = ax[0,0])
sns.distplot(df['MonthlyIncome'], ax = ax[0,1])
sns.distplot(df['YearsAtCompany'], ax = ax[1,0])
sns.distplot(df['DistanceFromHome'], ax = ax[1,1])
sns.distplot(df['YearsInCurrentRole'], ax = ax[2,0])
sns.distplot(df['YearsWithCurrManager'], ax = ax[2,1])
sns.distplot(df['YearsSinceLastPromotion'], ax = ax[3,0])
sns.distplot(df['PercentSalaryHike'], ax = ax[3,1])
sns.distplot(df['YearsSinceLastPromotion'], ax = ax[4,0])
sns.distplot(df['TrainingTimesLastYear'], ax = ax[4,1])
plt.tight_layout()
print()
# Let us now analyze the various categorical features. Note that in these cases the best way is to use a count plot to show the relative count of observations of different categories.
# In[ ]:
cat_df=df.select_dtypes(include='object')
# In[ ]:
cat_df.columns
# In[ ]:
def plot_cat(attr,labels=None):
if(attr=='JobRole'):
sns.factorplot(data=df,kind='count',size=5,aspect=3,x=attr)
return
sns.factorplot(data=df,kind='count',size=5,aspect=1.5,x=attr)
# I have made a function that accepts a string: the name of the column or attribute which we want to analyze. The function then plots the countplot for that feature, which makes it easier to visualize.
# In[ ]:
plot_cat('Attrition')
# **Note that the number of observations belonging to the 'No' category is way greater than that belonging to 'Yes' category. Hence we have skewed classes and this is a typical example of the 'Imbalanced Classification Problem'. To handle such types of problems we need to use the over-sampling or under-sampling techniques. I shall come back to this point later.**
# **Let us now similarly analyze the other categorical features.**
# In[ ]:
plot_cat('BusinessTravel')
# The above plot clearly shows that most of the people belong to the 'Travel_Rarely' class. This indicates that most of the people did not have a job which asked them for frequent travelling.
# In[ ]:
plot_cat('OverTime')
# In[ ]:
plot_cat('Department')
# In[ ]:
plot_cat('EducationField')
# In[ ]:
plot_cat('Gender')
# Note that males are present in higher number.
# In[ ]:
plot_cat('JobRole')
# ** Similarly we can continue for other categorical features. **
# **Note that the same function can also be used to better analyze the numeric discrete features like 'Education','JobSatisfaction' etc...
# In[ ]:
# just uncomment the following cell.
# In[ ]:
# num_disc=['Education','EnvironmentSatisfaction','JobInvolvement','JobSatisfaction','WorkLifeBalance','RelationshipSatisfaction','PerformanceRating']
# for i in num_disc:
# plot_cat(i)
# similarly we can interpret these graphs.
# <a id="content2"></a>
# ## 2 ) Correlation b/w Features
#
# In[ ]:
# correlation matrix.
cor_mat= df.corr()
mask = np.array(cor_mat)
mask[np.tril_indices_from(mask)] = False
fig=plt.gcf()
fig.set_size_inches(30,12)
# draw the heatmap (upper triangle masked); the inferences below refer to it, and this call was missing from the cell
sns.heatmap(data=cor_mat, mask=mask, square=True, annot=True, cbar=True)
# ###### SOME INFERENCES FROM THE ABOVE HEATMAP
#
# 1. Self relation ie of a feature to itself is equal to 1 as expected.
#
# 2. JobLevel is highly related to Age as expected as aged employees will generally tend to occupy higher positions in the company.
#
# 3. MonthlyIncome is very strongly related to JobLevel, as expected, since senior employees will definitely earn more.
#
# 4. PerformanceRating is highly related to PercentSalaryHike which is quite obvious.
#
# 5. Also note that TotalWorkingYears is highly related to JobLevel which is expected as senior employees must have worked for a larger span of time.
#
# 6. YearsWithCurrManager is highly related to YearsAtCompany.
#
# 7. YearsAtCompany is related to YearsInCurrentRole.
#
#
# **Note that we can drop some highly correlated features as they add redundancy to the model, but since the correlation is very low in general, let us keep all the features for now. In case of highly correlated features we can use something like Principal Component Analysis (PCA) to reduce our feature space.**
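# A hedged, illustrative sketch of how PCA could be applied if we did want to shrink the feature space (commented out since we keep all features here; it assumes the numeric features are scaled first):
# In[ ]:
# from sklearn.decomposition import PCA
# num_scaled = StandardScaler().fit_transform(df.select_dtypes(include=[np.number]))
# pca = PCA(n_components=0.95)  # keep enough components for 95% of the variance
# reduced = pca.fit_transform(num_scaled)
# print(reduced.shape, pca.explained_variance_ratio_.sum())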
# In[ ]:
df.columns
# <a id="content3"></a>
# ## 3 ) Feature Selection
#
# ## 3.1 ) Plotting the Features against the 'Target' variable.
# #### 3.1.1 ) Age
# Note that Age is a continuous quantity and therefore we can plot it against the Attrition using a boxplot.
# In[ ]:
sns.factorplot(data=df,y='Age',x='Attrition',size=5,aspect=1,kind='box')
# Note that the median as well as the maximum age of the people with 'No' attrition is higher than that of the 'Yes' category. This shows that people of higher age have a lesser tendency to leave the organisation, which makes sense as they may have settled in the organisation.
# #### 3.1.2 ) Department
# Note that both Attrition (the target) and Department are categorical. In such cases a cross-tabulation is the most reasonable way to analyze the trends; it clearly shows the number of observations for each class, which makes it easier to analyze the results.
# In[ ]:
df.Department.value_counts()
# In[ ]:
sns.factorplot(data=df,kind='count',x='Attrition',col='Department')
# In[ ]:
pd.crosstab(columns=[df.Attrition],index=[df.Department],margins=True,normalize='index') # set normalize=index to view rowwise %.
# Note that most of the observations correspond to 'No', as we saw previously. About 81% of the people in HR don't want to leave the organisation and only 19% want to leave. Similar conclusions can be drawn for other departments too from the above cross-tabulation.
# #### 3.1.3 ) Gender
# In[ ]:
pd.crosstab(columns=[df.Attrition],index=[df.Gender],margins=True,normalize='index') # set normalize=index to view rowwise %.
# About 85% of females want to stay in the organisation while only 15% want to leave. All in all, about 84% of employees want to stay in the organisation, with only about 16% wanting to leave the organisation or the company.
# #### 3.1.4 ) Job Level
# In[ ]:
pd.crosstab(columns=[df.Attrition],index=[df.JobLevel],margins=True,normalize='index') # set normalize=index to view rowwise %.
# People in Joblevel 4 have a very high percent for a 'No' and a low percent for a 'Yes'. Similar inferences can be made for other job levels.
# #### 3.1.5 ) Monthly Income
# In[ ]:
sns.factorplot(data=df,kind='bar',x='Attrition',y='MonthlyIncome')
# Note that the average income for 'No' class is quite higher and it is obvious as those earning well will certainly not be willing to exit the organisation. Similarly those employees who are probably not earning well will certainly want to change the company.
# #### 3.1.6 ) Job Satisfaction
# In[ ]:
sns.factorplot(data=df,kind='count',x='Attrition',col='JobSatisfaction')
# In[ ]:
pd.crosstab(columns=[df.Attrition],index=[df.JobSatisfaction],margins=True,normalize='index') # set normalize=index to view rowwise %.
# This shows an interesting trend. Note that for higher values of job satisfaction (i.e. the more a person is satisfied with his job) a lesser percent of them say a 'Yes', which is quite obvious as highly contented workers will obviously not like to leave the organisation.
# #### 3.1.7 ) Environment Satisfaction
# In[ ]:
pd.crosstab(columns=[df.Attrition],index=[df.EnvironmentSatisfaction],margins=True,normalize='index') # set normalize=index to view rowwise %.
# Again we can notice that the relative percent of 'No' is higher among people with a higher grade of environment satisfaction, which is expected.
# #### 3.1.8 ) Job Involvement
# In[ ]:
pd.crosstab(columns=[df.Attrition],index=[df.JobInvolvement],margins=True,normalize='index') # set normalize=index to view rowwise %.
# #### 3.1.9 ) Work Life Balance
# In[ ]:
pd.crosstab(columns=[df.Attrition],index=[df.WorkLifeBalance],margins=True,normalize='index') # set normalize=index to view rowwise %.
# Again we notice a similar trend as people with better work life balance dont want to leave the organisation.
# #### 3.1.10 ) RelationshipSatisfaction
# In[ ]:
pd.crosstab(columns=[df.Attrition],index=[df.RelationshipSatisfaction],margins=True,normalize='index') # set normalize=index to view rowwise %.
# ###### Notice that I have plotted just some of the important features against our 'Target' variable, i.e. Attrition in our case. Similarly we can plot other features against the 'Target' variable and analyze the trends, i.e. how the feature affects the 'Target' variable.
# ## 3.2 ) Feature Selection
# Feature selection is one of the main steps of the preprocessing phase, as the features which we choose directly affect the model's performance. While some of the features seem to be less useful in the given context, others seem equally useful. The better the features we use, the better our model will perform. **After all, Garbage in, Garbage out ;)**.
#
# We can also use the Recursive Feature Elimination technique (a wrapper method) to choose the desired number of most important features.
# The Recursive Feature Elimination (or RFE) works by recursively removing attributes and building a model on those attributes that remain.
#
# It uses the model accuracy to identify which attributes (and/or combination of attributes) contribute the most to predicting the target attribute.
#
# We can use it directly from the scikit-learn library by importing the RFE class. But note that since it tries different combinations or subsets of features, it is quite computationally expensive, and I shall ignore it here.
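# A commented sketch of how RFE could be used here (left unexecuted since it is expensive; the estimator and feature count are illustrative, and X/Y stand for the prepared feature matrix and target built in section 4):
# In[ ]:
# from sklearn.feature_selection import RFE
# from sklearn.linear_model import LogisticRegression
# rfe = RFE(estimator=LogisticRegression(max_iter=1000), n_features_to_select=15)
# rfe.fit(X, Y)
# print(df.drop('Attrition', axis=1).columns[rfe.support_])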
# In[ ]:
df.drop(['BusinessTravel','DailyRate','EmployeeCount','EmployeeNumber','HourlyRate','MonthlyRate','NumCompaniesWorked','Over18','StandardHours', 'StockOptionLevel','TrainingTimesLastYear'],axis=1,inplace=True)
#
# <a id="content4"></a>
# ## 4 ) Preparing Dataset
#
# ## 4.1 ) Feature Encoding
# I have used the Label Encoder from the scikit library to encode all the categorical features.
# In[ ]:
def transform(feature):
le=LabelEncoder()
df[feature]=le.fit_transform(df[feature])
print(le.classes_)
# In[ ]:
cat_df=df.select_dtypes(include='object')
cat_df.columns
# In[ ]:
for col in cat_df.columns:
transform(col)
# In[ ]:
df.head() # just to verify.
# ## 4.2 ) Feature Scaling.
# The scikit library provides various types of scalers including MinMax Scaler and the StandardScaler. Below I have used the StandardScaler to scale the data.
# In[ ]:
scaler=StandardScaler()
scaled_df=scaler.fit_transform(df.drop('Attrition',axis=1))
X=scaled_df
Y=df['Attrition'].values # .as_matrix() was deprecated and later removed from pandas
# ## 4.3 ) Splitting the data into training and validation sets
# In[ ]:
x_train,x_test,y_train,y_test=train_test_split(X,Y,test_size=0.25,random_state=42)
# <a id="content5"></a>
# ## 5 ) Modelling
#
# ## 5.1 ) Handling the Imbalanced dataset
# Note that we have an imbalanced dataset, with the majority of observations being of one type ('No') in our case. In this dataset, for example, we have about 84% of observations having 'No' and only 16% having 'Yes', and hence this is an imbalanced dataset.
#
# To deal with such an imbalanced dataset we have to take certain measures, otherwise the performance of our model can be significantly affected. In this section I have discussed two approaches to handle such datasets.
# ## 5.1.1 ) Oversampling the Minority or Undersampling the Majority Class
#
#
# In an imbalanced dataset the main problem is that the data is highly skewed, i.e. the number of observations of one class is greater than that of the other. Therefore what we do in this approach is to either increase the number of observations corresponding to the minority class (oversampling) or decrease the number of observations for the majority class (undersampling).
#
# Note that in our case the number of observations is already pretty low and so oversampling will be more appropriate.
#
# Below I have used an oversampling technique known as SMOTE (Synthetic Minority Oversampling Technique), which randomly creates some 'synthetic' instances of the minority class so that the observations of both classes get balanced out.
#
# One more thing to take care of is to apply SMOTE only to the training data (i.e. inside the cross-validation folds, never before splitting); this prevents information leaking into the validation data and an over-optimistic evaluation, just as in the case of feature selection.
# In[ ]:
oversampler=SMOTE(random_state=42)
x_train_smote, y_train_smote = oversampler.fit_resample(x_train,y_train) # fit_sample() in older imblearn versions
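# A small sketch of keeping SMOTE strictly inside the cross-validation folds via imblearn's Pipeline (the classifier and scoring choice here are illustrative):
# In[ ]:
from imblearn.pipeline import Pipeline as ImbPipeline
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestClassifier
pipe = ImbPipeline([('smote', SMOTE(random_state=42)), ('clf', RandomForestClassifier())])
print(cross_val_score(pipe, x_train, y_train, cv=5, scoring='recall').mean())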
# ## 5.1.2 ) Using the Right Evaluation Metric
# Another important point while dealing with the imbalanced classes is the choice of right evaluation metrics.
#
# Note that accuracy is not a good choice here. Since the data is skewed, even an algorithm that always predicts the majority class will achieve a very high accuracy.
# For e.g. if we have 20 observations of one type and 980 of another, a classifier predicting the majority class at all times will also attain an accuracy of 98%, yet it doesn't convey any useful information.
#
# Hence in these types of cases we may use other metrics such as -->
#
#
# **'Precision'**-- (true positives)/(true positives+false positives)
#
# **'Recall'**-- (true positives)/(true positives+false negatives)
#
# **'F1 Score'**-- The harmonic mean of 'precision' and 'recall'
#
# **'AUC ROC'**-- The ROC curve is a plot of 'sensitivity' (Recall, the true positive rate) against '1-specificity' (the false positive rate)
#
# **'Confusion Matrix'**-- Plot the entire confusion matrix
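# A quick toy computation of these metrics with scikit-learn (the labels are illustrative; f1_score is imported here as it was not among the imports above):
# In[ ]:
from sklearn.metrics import f1_score
y_true_demo = [0, 0, 0, 0, 1, 1]  # toy ground truth
y_pred_demo = [0, 0, 0, 1, 1, 0]  # toy predictions
print(precision_score(y_true_demo, y_pred_demo))   # TP/(TP+FP) = 1/2
print(recall_score(y_true_demo, y_pred_demo))      # TP/(TP+FN) = 1/2
print(f1_score(y_true_demo, y_pred_demo))          # harmonic mean of the two = 0.5
print(confusion_matrix(y_true_demo, y_pred_demo))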
# ## 5.2 ) Building A Model & Making Predictions
# In this section I have used different models from the scikit library and trained them on the previously oversampled data and then used them for the prediction purposes.
# In[ ]:
def compare(model):
clf=model
clf.fit(x_train_smote,y_train_smote)
pred=clf.predict(x_test)
    # Calculating various metrics (note: scikit-learn's signature is (y_true, y_pred))
    acc.append(accuracy_score(y_test,pred))
    prec.append(precision_score(y_test,pred))
    rec.append(recall_score(y_test,pred))
    auroc.append(roc_auc_score(y_test,pred))
# In[ ]:
acc=[]
prec=[]
rec=[]
auroc=[]
models=[SVC(kernel='rbf'),RandomForestClassifier(),GradientBoostingClassifier()]
model_names=['rbfSVM','RandomForestClassifier','GradientBoostingClassifier']
for model in range(len(models)):
compare(models[model])
d={'Modelling Algo':model_names,'Accuracy':acc,'Precision':prec,'Recall':rec,'Area Under ROC Curve':auroc}
met_df=pd.DataFrame(d)
met_df
# ## 5.3 ) Comparing Different Models
# In[ ]:
def comp_models(met_df,metric):
sns.factorplot(data=met_df,x=metric,y='Modelling Algo',size=5,aspect=1.5,kind='bar')
sns.factorplot(data=met_df,y=metric,x='Modelling Algo',size=7,aspect=2,kind='point')
# In[ ]:
comp_models(met_df,'Accuracy')
# In[ ]:
comp_models(met_df,'Precision')
# In[ ]:
comp_models(met_df,'Recall')
# In[ ]:
comp_models(met_df,'Area Under ROC Curve')
# The above data frame and the visualizations summarize the results after training different models on the given dataset.
# <a id="content6"></a>
# ## 6) Conclusions
#
# ###### Hence we have completed the analysis of the data and also made predictions using the various ML models.
# In[ ]:
# In[ ]:
Image("../../../input/pavansubhasht_ibm-hr-analytics-attrition-dataset/imagesibm/image-hr.jpg")
# In[ ]:
# # THE END.
# ## [Please star/upvote if u found it helpful.]##
# In[ ]:
| true |
1b75c4e46d9e350474eef9c2c62b0a8be7811c3f
|
Python
|
CapAsdour/code-n-stitch
|
/Password_strength_Checker/output.py
|
UTF-8
| 384 | 3.5 | 4 |
[
"MIT"
] |
permissive
|
import re
v = input("Enter the password to check: ")
if len(v) >= 8:
    # strong: at least one digit, one lowercase, one uppercase and one special character
    # (raw strings avoid the invalid-escape warning; re.match already returns a truthy
    #  match object, so wrapping it in bool(...) == True was unnecessary)
    if re.match(r'((?=.*\d)(?=.*[a-z])(?=.*[A-Z])(?=.*[!@#$%^&*]).{8,30})', v):
        print("Good going, password is strong.")
    elif re.match(r'((\d*)([a-z]*)([A-Z]*)([!@#$%^&*]*).{8,30})', v):
        print("Try something stronger!!")
else:
    print("You have entered an invalid password (it must be at least 8 characters).")
| true |
8c42b2cb1e91f49b57b8b67eda41cea9289907e8
|
Python
|
wmm1996528/movie
|
/maoyan.py
|
UTF-8
| 3,578 | 2.734375 | 3 |
[] |
no_license
|
import requests
from pyquery import PyQuery as pq
import re
import time
import pymongo
from movie_douban import HotMovie
class mongdbs():
def __init__(self):
self.host = '127.0.0.1'
self.port = 27017
self.dbName = 'maoyan'
self.conn = pymongo.MongoClient(self.host, self.port)
self.db = self.conn[self.dbName]
class maoyan(object):
def __init__(self,keyword):
self.url='http://maoyan.com/query?&kw='+keyword
self.baseurl = 'http://maoyan.com/cinemas?movieId='
self.headers={
'Host':'maoyan.com',
'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 UBrowser/6.2.3964.2 Safari/537.36'
}
res = requests.get(self.url, headers=self.headers)
html = pq(res.text)
self.movieId = re.findall('movieid:(\d+)',html('div.movie-item').eq(0).find('a').attr('data-val'))[0]
self.mongo = mongdbs()
self.session = requests.Session()
self.movieName = keyword
def page(self):
page = 0
cinemas_list = []
while True:
cinemasHtml = requests.get(self.baseurl + str(self.movieId) + '&offset=' + str(page), headers=self.headers)
print(cinemasHtml.url)
            if '抱歉,没有找到相关结果' in cinemasHtml.text:  # the site's "sorry, no results found" text marks the last page
break
cinemasHtml = pq(cinemasHtml.text)
cinemas = cinemasHtml('div.cinema-cell')
for i in range(len(cinemas)):
name = cinemas.eq(i).find('div.cinema-info a').text()
addr = cinemas.eq(i).find('div.cinema-info p').text()
url = cinemas.eq(i).find('div.buy-btn a').attr('href')
data = {
'name':name,
'addr':addr,
'url':'http://maoyan.com'+url
}
print('this is ',data)
self.mongo.db[self.movieName+'cinemas'].insert(data)
cinemas_list.append(data)
time.sleep(4)
page += 12
for i in cinemas_list:
print(i['name'])
url = i['url']
time.sleep(2)
self.search_price(url,i['name'])
def search_price(self,url,name):
req = self.session.get(url,headers=self.headers)
html = pq(req.text)
divs = html('div.show-list')
lists = []
for i in range(len(divs)):
            if self.movieName in divs.eq(i).find('div.movie-info h3').text():  # was hardcoded to '黑豹' ("Black Panther")
for j in range(len(divs.eq(i).find('table.plist tbody tr'))):
begin = divs.eq(i).find('div.plist-container table.plist tbody tr').eq(j).find(
'td span.begin-time').text()
end = divs.eq(i).find('div.plist-container table.plist tbody tr').eq(j).find(
'td span.end-time').text()
price = divs.eq(i).find('div.plist-container table.plist tbody tr').eq(j).find(
'td span.sell-price span.stonefont').html().encode()
appearance = {
'begin': begin+'开场',
'end': end,
'price': price
}
lists.append(appearance)
print(lists)
self.mongo.db[self.movieName].insert({
'name':name,
'changci':lists
})
hotmovie = HotMovie()
print(hotmovie)
for i in hotmovie:
    print('Crawling: %s' % i)
s = maoyan(i)
s.page()
| true |
72bd060c16cf2e8034334c0643533764f52687d6
|
Python
|
vvilq27/Python_port_OV7675
|
/port/busCheck.py
|
UTF-8
| 1,977 | 2.625 | 3 |
[] |
no_license
|
import time
import serial
from serial import Serial
import os
import re
import numpy as np
from matplotlib import pyplot as plt
a = list()
t = []
pic = [ ['00' for i in range(320)] for j in range(240)]
s = serial.Serial('COM8', 2000000)
# sync with the serial stream: discard bytes until a full line terminator arrives
while b"\r\n" not in s.readline():
    pass
c = 0
while True:
l = s.readline()
y = l.decode("UTF-8").rstrip('\r\n').split(',')
c += 1
if c > 239:
break
a.append(l.decode("UTF-8").rstrip('\r\n').split(','))
# for row in a:
# print(row)
for row in a:
    pic[int(row[-1])] = row  # the last field of each line is the row index
s.close()
# DATA gathered, make PIC
for row in pic:
# pop frame number if frame not "zero"
if len(set(row)) != 1:
row.pop()
# else:
tmp = []
for i in row:
for j in re.findall('..', i):
tmp.append(int(j, 16))
if len(tmp) > 319:
break
    # fill missing data cells with 0 (was the string '00', which mixed str and int values)
    r = 320 - len(tmp)
    for i in range(r):
        tmp.append(0)
if len(tmp) > 320:
# print(tmp)
# print(len(tmp))
for i in range(len(tmp) - 320):
tmp.pop()
t.append(tmp)
# print(len(t))
# for row in t:
# print(len(row))
# tab = [[0 for i in range(320)] for j in range(240)]
# print(len(pic))
# print(len(pic[0]))
# for i in range(240):
# for j in range(320):
# try:
# tab[i][j] = int(t[i][j])
# except (IndexError, ValueError) as e:
# print('ero {}, {}'.format(i,j))
plt.imshow(np.array(t, dtype='uint8'), interpolation='nearest', cmap='gray')
plt.show()
# img = Image.frombytes(mode='L', size =tuple([50,50]), data= np.array(tab))
# img.save('test.png')
# file = open("data.txt", "w")
# for line in pic:
# lineInt = ''
# for i in line:
# lineInt += '{},'.format(str(i))
# # print(lineInt)
# file.write(lineInt)
# print(pic[0])
# for row in pic:
# for i in row:
# if i == '':
# print('hey')
# continue
# i = bytes(int(i))
# # print(pic[0])
# k = s.readline().decode("UTF-8").rstrip('\r\n').split(',')[-1]
# if int(k) == 100:
# print(k)
# mode L is for 8bit gray scale
| true |
0c466cb695f150472a3e3494053fe474d629708b
|
Python
|
bluecube/heating
|
/db_net/registers.py
|
UTF-8
| 6,678 | 2.796875 | 3 |
[] |
no_license
|
import numpy
import re
import itertools
import struct
import db_net
def group(lst, n):
"""group([0,3,4,10,2,3], 2) => iterator
Group an iterable into an n-tuples iterable. Incomplete tuples
are discarded e.g.
>>> list(group(range(10), 3))
[(0, 1, 2), (3, 4, 5), (6, 7, 8)]
from http://code.activestate.com/recipes/303060-group-a-list-into-sequential-n-tuples/#c5
"""
return zip(*[itertools.islice(lst, i, None, n) for i in range(n)])
class Type:
TYPES = {
'I': (0x00, struct.Struct('<H'), numpy.int16),
'L': (0x01, struct.Struct('<I'), numpy.int32),
'F': (0x02, struct.Struct('<f'), numpy.float32)
}
MATRIX_MASK = 0x20
TYPE_MASK = 0x03
@classmethod
def _tuple_by_code(cls, code):
for letter, (c, unpacker, dtype) in cls.TYPES.items():
if code & cls.TYPE_MASK == c & cls.TYPE_MASK:
return letter, unpacker, dtype
raise Exception('Invalid type')
def __init__(self, type_code, matrix = False):
if isinstance(type_code, str):
self.code, self.unpacker, self.dtype = self.TYPES[type_code]
else:
letter, self.unpacker, self.dtype = self._tuple_by_code(type_code)
self.code = type_code
self.matrix = matrix
if matrix:
self.code |= self.MATRIX_MASK
if matrix:
self.size = matrix[0] * matrix[1] * self.unpacker.size
else:
self.size = self.unpacker.size
@classmethod
def from_string(cls, string):
matched = re.match(r'^(M?)([A-Z])(?:\[(\d+),(\d+)\])?$', string)
if not matched:
raise Exception("Invalid DBNet type string")
matrix, type_code, rows, columns = matched.groups()
if matrix == 'M':
try:
rows = int(rows)
columns = int(columns)
except (TypeError, ValueError):
raise Exception("Invalid or missing matrix dimensions")
matrix = (rows, columns)
else:
matrix = False
return cls(type_code, matrix)
def __str__(self):
letter, unpacker, dtype = self._tuple_by_code(self.code)
if self.matrix:
return 'M{}[{},{}]'.format(letter, self.matrix[0], self.matrix[1])
else:
return letter
class ReadRequest:
PACKER = struct.Struct('<BBBBHHHH')
def __init__(self):
self.wid = None
self.type = None
self.i0 = None
self.j0 = None
self.rows = None
self.cols = None
self.msg_id = 0x4D
@classmethod
def from_bytes(cls, data):
if data[0] != 0x01:
raise Exception('Read request has invalid start byte')
if (data[1] & Type.MATRIX_MASK) != Type.MATRIX_MASK:
raise Exception('Only matrix reads are supported now')
self = cls()
unused, code, wid_lo, wid_hi, self.i0, self.j0, self.rows, self.cols = \
cls.PACKER.unpack(data)
self.wid = wid_hi << 8 | wid_lo
self.type = Type(code, (self.rows, self.cols))
return self
def __bytes__(self):
wid_lo = self.wid & 0xff
wid_hi = (self.wid >> 8) & 0xff
return self.PACKER.pack(
0x1,
self.type.code,
wid_lo, wid_hi,
self.i0, self.j0, self.rows, self.cols)
def details_to_string(self):
return 'WID = {} ({}), {}x{} items from {}, {}'.format(
self.wid, self.type, self.rows, self.cols, self.i0, self.j0)
def __str__(self):
return 'Read request: ' + self.details_to_string()
class ReadResponse:
def __init__(self):
self.value = None
@classmethod
def from_bytes(cls, data, request):
if data is None:
raise Exception('Error processing the request (data is None).')
if data[0] != 0x81:
raise Exception('Read response has invalid start byte')
self = cls()
self.request = request
if len(data) != request.type.unpacker.size * request.rows * request.cols + 1:
raise Exception('Invalid length of reply')
flat_values = [request.type.unpacker.unpack(bytes(x))[0]
for x in group(data[1:], request.type.unpacker.size)]
self.value = list(group(flat_values, request.cols))
return self
def __str__(self):
return 'Read response: {}\n{}'.format(
self.request.details_to_string(), str(self.value))
class Register:
def __init__(self, connection, wid, data_type, auto_update = False):
self._connection = connection
self._wid = wid
self._type = Type.from_string(data_type)
self._auto_update = auto_update
if not self._type.matrix:
raise Exception('Only matrix registers are supported now.')
shorter_dim = min(*self._type.matrix)
longer_dim = max(*self._type.matrix)
# Maximal number of matrix items per transfer
max_items = (db_net.Packet.PAYLOAD_SIZE_LIMIT - 1) // self._type.unpacker.size
rows_per_batch = max_items // shorter_dim
if rows_per_batch == 0:
raise Exception('The matrix is too big, sorry')
batches = ((x, min(rows_per_batch, longer_dim - x))
for x in range(0, longer_dim, rows_per_batch))
self._batches = []
if self._type.matrix[0] > self._type.matrix[1]:
for start, length in batches:
self._batches.append((start, 0, length, self._type.matrix[1]))
else:
for start, length in batches:
self._batches.append((0, start, self._type.matrix[0], length))
def auto_update(self, val):
self._auto_update = val
def update(self):
self._value = numpy.empty(self._type.matrix, dtype = self._type.dtype)
for i0, j0, rows, cols in self._batches:
rq = ReadRequest()
rq.wid = self._wid
rq.type = self._type
rq.i0 = i0
rq.j0 = j0
rq.rows = rows
rq.cols = cols
resp_msg_id, resp_bytes = self._connection.transfer(rq.msg_id, bytes(rq))
if resp_bytes is None:
raise Exception("Reply: error!")
resp = ReadResponse.from_bytes(resp_bytes, rq)
for i, row in zip(range(i0, i0 + rows), resp.value):
for j, x in zip(range(j0, j0 + cols), row):
self._value[i, j] = x
@property
def value(self):
if self._auto_update:
self.update()
return self._value
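# A hypothetical usage sketch (the connection object and WID are assumptions;
# any object exposing transfer(msg_id, payload) -> (msg_id, bytes) would do):
#
# reg = Register(connection, wid=0x0100, data_type='MF[4,4]')
# reg.update()
# print(reg.value)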
| true |
4857fe4164e37c4dd73b20c5d7278b92bac48458
|
Python
|
SchoofsEbert/ASTinPython
|
/AST.py
|
UTF-8
| 586 | 2.921875 | 3 |
[] |
no_license
|
import ast
import astor
class AST:
def __init__(self, filename, transformer):
self.filename = filename
with open(filename, "r") as source:
self.AST = ast.parse(source.read())
self.transformer = transformer
def transform(self):
self.transformer.visit(self.AST)
def print(self):
print(astor.dump_tree(self.AST))
def compile(self, filename):
with open(filename, "w") as output:
output.write(astor.to_source(self.AST))
def execute(self, scope):
exec(astor.to_source(self.AST), scope)
| true |
d1a665d626b2aa17707165e080d4fe699022d763
|
Python
|
shlampley/learning
|
/learn python/fizz_buzz3.py
|
UTF-8
| 1,604 | 3.9375 | 4 |
[] |
no_license
|
number = 0
variables = ""
def fizz_buzz(num, var):
    fizzarray = []
    # done TODO: Create an empty array outside the loop to store data
    # use the parameters directly (previously they were overwritten with the
    # global `number` and `variables`, which made the arguments pointless)
while num < 100:
# Reset var to prevent issues of adding buzz to buzz or buzz to fizzbuz
var = ""
#print(num)
num += 1
# if num % 3 == 0 and num % 5 == 0:
# var = "fizz_buzz"
# continue
# DONE: Depending on which conditions are met set var to either the number of fizz or buzz or both
if num % 3 == 0:
var = "fizz"
if num % 5 == 0:
var += "buzz"
        # an else statement here would cause overwriting of saved fizz data, so an if statement must be used.
if var == "":
var = num
# doneTODO: add var to the end of the array
fizzarray.append(var)
# look up storing as list in an array
return fizzarray
def print_array(arr):
for item in arr:
print(item)
def out_put(fizzarray):
# for idx, x in enumerate(fizzarray):
# #print(idx, x)
for ind, x in enumerate(fizzarray):
if (ind + 1) % 2 == 0:
print("\t" + str(x))
else:
print(x)
#results.append(fizzarray)
#print(fizzarray)
# TODO: if the index is odd no indent
# TODO: if the index is even indent
# while i < fizzarray.len():
# # instead of x you would use:
# fizzarray[i]
# i = i + 1
fizzarray = fizz_buzz(number, variables)
# print_array(fizzarray)
out_put(fizzarray)
| true |
736d96574422b7b00e7b8756628dcf418e473476
|
Python
|
inhyuck222/python-ch2.4
|
/for.py
|
UTF-8
| 990 | 4.375 | 4 |
[] |
no_license
|
# Loops
a = ['cat', 'cow', 'tiger']
for animal in a:
print(animal, end=' ')
else:
print('')
# A for loop over composite data types
l = [('루피', 10), ('상디', 20), ('조로', 30)]
for data in l:
    print('name: %s, age: %d' % data)
for name, age in l:
    print('name: {0}, age: {1}'.format(name, age))
l = [{'name': '루피', 'age': 30}, {'name': '루', 'age': 31}, {'name': '피', 'age': 32}]
for data in l:
    print('name: %(name)s, age: %(age)d' % data)
# Sum of 1 to 10
s = 0
for i in range(1, 11):
s += i
else:
print(s)
# break
for i in range(10):
if i > 5:
break
print(i, end=' ')
else:
print('')
print('')
print('----------------')
# continue
for i in range(10):
if i < 5:
continue
print(i, end=' ')
else:
print()
# Multiplication table (gugudan)
for x in range(1, 10):
for y in range(1, 10):
print(str(y) + ' * ' + str(x) + ' = ' + str(x*y).rjust(2), end='\t')
else:
print('')
| true |
f66dd9735cfdcffa7c2070e219d517ff72496676
|
Python
|
queenie0708/Appium-Python
|
/alipay.py
|
UTF-8
| 1,075 | 2.59375 | 3 |
[] |
no_license
|
from appium import webdriver
import threading
from appium.webdriver.common.touch_action import TouchAction
from time import sleep
desired_caps = {}
desired_caps['platformName'] = 'Android'
desired_caps['platformVersion'] = '9'
desired_caps['deviceName'] = 'PDP'
desired_caps['appPackage'] = 'com.eg.android.AlipayGphone'
desired_caps['appActivity'] = '.AlipayLogin'
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
sleep(40)
print('time to wake up')
driver.find_element_by_android_uiautomator('text(\"Ant Forest")').click()
driver.implicitly_wait(5)
sleep(8)
def swipeDown(driver, n):
    '''Swipe the screen downward'''
for i in range(n):
driver.swipe(300, 1000, 300, 100)
swipeDown(driver,7)
sleep(5)
print('are more friends there?')
driver.find_element_by_android_uiautomator('text(\"View more friends")').click()
driver.implicitly_wait(5)
friends = driver.find_elements_by_class_name('android.view.View')  # plural: collect all matching views
for friend in friends:
friend.click()
sleep(5)
    driver.find_element_by_android_uiautomator('text(\"")').click()  # NOTE: the target text is left blank in the original source
driver.quit()
| true |
99636db6bcf420043a9a2c894ebfd7f9fbbb8042
|
Python
|
YOODS/rovi_utils
|
/mesh_aid/samples/degraded.py
|
UTF-8
| 1,987 | 3.265625 | 3 |
[] |
no_license
|
import open3d as o3d
import numpy as np
def degraded_copy_point_cloud(cloud, normal_radius, n_newpc):
"""
与えられた点群データからPoisson表面を再構成し、頂点データからランダムに点を取り出して
新たな点群を作成する.
Parameters
----------
cloud : open3d.geometry.PointCloud
入力点群
normal_radius : float
法線ベクトル計算時の近傍半径(Poisson表面構成では必要なので).既に法線ベクトルが
計算されていれば無視される.
n_newpc : int
新しい点群データに含まれる点の数
"""
if np.asarray(cloud.normals).shape[0] == 0:
cloud.estimate_normals(o3d.geometry.KDTreeSearchParamHybrid(radius=normal_radius, max_nn=30))
    # Create the Poisson surface
mesh, densities = o3d.geometry.TriangleMesh.create_from_point_cloud_poisson(cloud, depth=9)
    # Remove faces where the density value is low
mesh.remove_vertices_by_mask(densities < np.quantile(densities, 0.1))
    # Randomly pick the requested number of points from the mesh vertices
n_vertic = np.asarray(mesh.vertices).shape[0]
if n_vertic > n_newpc:
indices = np.random.choice(np.arange(n_vertic), size=n_newpc, replace=False)
points = np.asarray(mesh.vertices)[indices]
else:
print("Warning: Mesh vertices is {} (< {}).".format(n_vertic, n_newpc))
points = np.asarray(mesh.vertices)
    # Build the new point cloud
newcl = o3d.geometry.PointCloud()
newcl.points = o3d.utility.Vector3dVector(points)
return newcl
if __name__ == '__main__':
cloud = o3d.io.read_point_cloud("surface.ply")
print("Original Point Cloud")
print(cloud)
print(np.asarray(cloud.normals).shape)
degcl = degraded_copy_point_cloud(cloud, 0.2, 10000)
degcl.paint_uniform_color([1, 0.706, 0])
o3d.visualization.draw_geometries([cloud, degcl])
| true |
94946ebf1eada337cbf53b95fa700d32e8c8d9a6
|
Python
|
HesterXu/Home
|
/Public/api/api_03_天气接口.py
|
UTF-8
| 437 | 2.53125 | 3 |
[] |
no_license
|
# -*- coding: utf-8 -*-
# @Time : 2018/12/3/16:15
# @Author : Hester Xu
# Email : xuruizhu@yeah.net
# @File : api_03_天气接口.py
# @Software : PyCharm
import requests
weather_url_1 = 'http://t.weather.sojson.com/api/weather/city/101030100'
weather_res_1 = requests.get(weather_url_1)
print(weather_res_1)
print(weather_res_1.text)
# weather_res_2 = requests.get(weather_url_2)
# print(weather_res_2)
# print(weather_res_2.text)
| true |
e3e53969c31c9d312829564901ad8861c6e11f72
|
Python
|
yhshu/OpenKE-Embedding-Service
|
/freebase_embedding_server.py
|
UTF-8
| 6,993 | 2.578125 | 3 |
[] |
no_license
|
import numpy as np
import datetime
from flask import Flask, request, jsonify, json
class FreebaseEmbeddingServer:
dir_path: str
entity_to_id: dict # entity mid -> entity id
relation_to_id: dict
entity_vec: np.memmap
relation_vec: np.memmap
dim: int # embedding dimension for each entity or relation
id_adj_list: dict # adjacency list
id_inverse_adj_list: dict # inverse adjacency list
def __init__(self, freebase_embedding_dir_path):
start_time = datetime.datetime.now()
print("[INFO] Loading OpenKE TransE for Freebase...")
# file paths
self.dir_path = freebase_embedding_dir_path.rstrip("/").rstrip("\\")
entity_emb_filepath = self.dir_path + "/embeddings/dimension_50/transe/entity2vec.bin"
relation_emb_filepath = self.dir_path + "/embeddings/dimension_50/transe/relation2vec.bin"
entity_to_id_filepath = self.dir_path + "/knowledge_graphs/entity2id.txt"
relation_to_id_filepath = self.dir_path + "/knowledge_graphs/relation2id.txt"
triple_to_id_filepath = self.dir_path + "/knowledge_graphs/triple2id.txt"
# initialize variables
self.entity_to_id = dict()
self.relation_to_id = dict()
self.id_adj_list = dict()
self.id_inverse_adj_list = dict()
self.entity_vec = np.memmap(entity_emb_filepath, dtype='float32', mode='r')
self.relation_vec = np.memmap(relation_emb_filepath, dtype='float32', mode='r')
self.dim = 50
# build self.entity_to_id
entity_to_id_file = open(entity_to_id_filepath)
for line in entity_to_id_file.readlines():
            line = line.rstrip("\n")  # rstrip returns a new string; the result must be assigned
if "\t" in line:
line_split = line.split("\t")
elif " " in line:
line_split = line.split(" ")
else:
continue
self.entity_to_id[line_split[0]] = line_split[1]
entity_to_id_file.close()
# build self.relation_to_id
relation_to_id_file = open(relation_to_id_filepath)
for line in relation_to_id_file.readlines():
            line = line.rstrip("\n")
if "\t" in line:
line_split = line.split("\t")
elif " " in line:
line_split = line.split(" ")
else:
continue
self.relation_to_id[line_split[0]] = line_split[1]
relation_to_id_file.close()
# build adj_list and inverse_adj_list
triple_to_id_file = open(triple_to_id_filepath)
for line in triple_to_id_file.readlines():
            line = line.rstrip("\n")
if "\t" in line:
line_split = line.split("\t")
elif " " in line:
line_split = line.split(" ")
else:
continue
subject_id = int(line_split[0])
object_id = int(line_split[1])
predicate_id = int(line_split[2])
# for adj list
if not (subject_id in self.id_adj_list.keys()):
self.id_adj_list[subject_id] = []
self.id_adj_list[subject_id].append((subject_id, object_id, predicate_id))
# for inverse adj list
if not (object_id in self.id_inverse_adj_list.keys()):
self.id_inverse_adj_list[object_id] = []
self.id_inverse_adj_list[object_id].append((subject_id, object_id, predicate_id))
triple_to_id_file.close()
print("[INFO] OpenKE TransE for Freebase has been loaded")
print("[INFO] time consumed: " + str(datetime.datetime.now() - start_time))
def get_entity_id_by_mid(self, mid: str) -> int:
return self.entity_to_id[mid]
def get_relation_id_by_relation(self, relation: str) -> int:
return self.relation_to_id[relation]
def get_entity_embedding_by_mid(self, mid: str):
return self.get_entity_embedding_by_eid(int(self.entity_to_id[mid]))
def get_entity_embedding_by_eid(self, idx: int):
return self.entity_vec[self.dim * idx:self.dim * (idx + 1)]
def get_relation_embedding_by_relation(self, relation: str):
return self.get_relation_embedding_by_rid(int(self.relation_to_id[relation]))
def get_relation_embedding_by_rid(self, idx: int):
return self.relation_vec[self.dim * idx:self.dim * (idx + 1)]
def get_adj_list(self, mid: str):
idx = int(self.entity_to_id[mid])
if idx in self.id_adj_list:
return self.id_adj_list[idx]
return None
def get_inverse_adj_list(self, mid: str):
idx = int(self.entity_to_id[mid])
if idx in self.id_inverse_adj_list:
return self.id_inverse_adj_list[idx]
return None
app = Flask(__name__)
service = FreebaseEmbeddingServer("/home2/yhshu/yhshu/workspace/Freebase")
@app.route('/entity_embedding_by_mid/', methods=['POST'])
def entity_embedding_by_mid_service():
params = json.loads(request.data.decode("utf-8"))
res = service.get_entity_embedding_by_mid(params['mid']).tolist()
return jsonify({'entity_embedding': res})
@app.route('/entity_embedding_by_eid/', methods=['POST'])
def entity_embedding_by_eid_service():
params = json.loads(request.data.decode("utf-8"))
res = service.get_entity_embedding_by_eid(params['eid']).tolist()
return jsonify({'entity_embedding': res})
@app.route('/relation_embedding_by_relation/', methods=['POST'])
def relation_embedding_by_relation_service():
params = json.loads(request.data.decode("utf-8"))
res = service.get_relation_embedding_by_relation(params['relation']).tolist()
return jsonify({'relation_embedding': res})
@app.route('/relation_embedding_by_rid/', methods=['POST'])
def relation_embedding_by_rid_service():
params = json.loads(request.data.decode("utf-8"))
res = service.get_relation_embedding_by_rid(params['rid']).tolist()
return jsonify({'relation_embedding': res})
@app.route('/adj_list/', methods=['POST'])
def adj_list_service():
params = json.loads(request.data.decode("utf-8"))
res = service.get_adj_list(params['mid'])
return jsonify({'adj_list': res})
@app.route('/inverse_adj_list/', methods=['POST'])
def inverse_adj_list_service():
params = json.loads(request.data.decode("utf-8"))
res = service.get_inverse_adj_list(params['mid'])
return jsonify({'inverse_adj_list': res})
@app.route('/entity_id_by_mid/', methods=['POST'])
def entity_id_by_mid_service():
params = json.loads(request.data.decode("utf-8"))
res = service.get_entity_id_by_mid(params['mid'])
return jsonify({'entity_id': res})
@app.route('/relation_id_by_relation/', methods=['POST'])
def relation_id_by_relation_service():
params = json.loads(request.data.decode("utf-8"))
res = service.get_relation_id_by_relation(params['relation'])
return jsonify({'relation_id': res})
if __name__ == "__main__":
app.run(host='0.0.0.0', debug=False, port=8898) # '0.0.0.0' is necessary for visibility
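# A hedged client-side sketch (assuming the server above is running locally;
# the mid used here is illustrative):
#
# import json, requests
# r = requests.post('http://localhost:8898/entity_embedding_by_mid/',
#                   data=json.dumps({'mid': '/m/02mjmr'}))
# print(r.json()['entity_embedding'])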
| true |
50dcf5643d28de1a3059967341b2a596ed7b40fa
|
Python
|
rohanaggarwal7997/Studies
|
/Python new/inheritance.py
|
UTF-8
| 318 | 3.578125 | 4 |
[] |
no_license
|
class Parent:
def printlastname(self):
print('Aggarwal')
class Child(Parent): #inherited Parent class
def print_name(self):
print('Rohan')
    def printlastname(self): # overriding the parent method
print('Aggar')
bucky=Child()
bucky.print_name()
bucky.printlastname()
| true |
0a60ed50abd1dcb8af906bf377beeed159d4e47f
|
Python
|
thautwarm/gkdtex
|
/gkdtex/wrap.py
|
UTF-8
| 859 | 2.5625 | 3 |
[
"MIT"
] |
permissive
|
import warnings
warnings.filterwarnings('ignore', category=SyntaxWarning, message='"is" with a literal')
from gkdtex.parse import *
from gkdtex.lex import *
_parse = mk_parser()
def parse(text: str, filename: str = "unknown"):
tokens = lexer(filename, text)
status, res_or_err = _parse(None, Tokens(tokens))
if status:
return res_or_err
msgs = []
lineno = None
colno = None
filename = None
offset = 0
msg = ""
for each in res_or_err:
i, msg = each
token = tokens[i]
lineno = token.lineno + 1
colno = token.colno
offset = token.offset
filename = token.filename
break
e = SyntaxError(msg)
e.lineno = lineno
e.colno = colno
e.filename = filename
e.text = text[offset - colno:text.find('\n', offset)]
e.offset = colno
raise e
| true |
13c7664efff8eb0ab25d6dd0f8e73e276b631438
|
Python
|
andreiqv/rotate_network
|
/make_data_dump.py
|
UTF-8
| 3,182 | 2.78125 | 3 |
[] |
no_license
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import os.path
import sys
from PIL import Image, ImageDraw
import _pickle as pickle
import gzip
import random
import numpy as np
np.set_printoptions(precision=4, suppress=True)
def load_data(in_dir, shape=(540,540,1)):
img_size = shape[0], shape[1]
data = dict()
data['filenames'] = []
data['images'] = []
data['labels'] = []
files = os.listdir(in_dir)
random.shuffle(files)
for file_name in files:
file_path = in_dir + '/' + file_name
img = Image.open(file_path)
if shape[2]==3:
img = img.resize(img_size, Image.ANTIALIAS)
elif shape[2]==1:
img_gray = img.convert('L')
img = img_gray.resize(img_size, Image.ANTIALIAS)
else:
raise Exception('Bad shape[2]')
arr = np.array(img, dtype=np.float32) / 256
name = ''.join(file_name.split('.')[:-1])
angle = name.split('_')[-1]
        label = np.array([float(angle) / 360.0], dtype=np.float64)
        if type(label[0]) != np.float64:
            print(label[0])
            print(type(label[0]))
            print('type(label) != float')
            raise Exception('label type is not float')
        print('{0}: {1:.3f}, {2}' .format(angle, label[0], file_name))
        data['images'].append(arr)
        data['labels'].append(label)
data['filenames'].append(file_name)
return data
#return train, valid, test
def split_data(data, ratio=(6,1,3)):
len_data = len(data['images'])
assert len_data == len(data['labels'])
len_train = len_data * ratio[0] // sum(ratio)
len_valid = len_data * ratio[1] // sum(ratio)
len_test = len_data * ratio[2] // sum(ratio)
print(len_train, len_valid, len_test)
data_train = dict()
data_valid = dict()
data_test = dict()
data_train['images'] = data['images'][ : len_train]
data_train['labels'] = data['labels'][ : len_train]
data_train['filenames'] = data['filenames'][ : len_train]
data_valid['images'] = data['images'][len_train : len_train + len_valid]
data_valid['labels'] = data['labels'][len_train : len_train + len_valid]
data_valid['filenames'] = data['filenames'][len_train : len_train + len_valid]
data_test['images'] = data['images'][len_train + len_valid : ]
data_test['labels'] = data['labels'][len_train + len_valid : ]
data_test['filenames'] = data['filenames'][len_train + len_valid : ]
data_train['size'] = len(data_train['images'])
data_valid['size'] = len(data_valid['images'])
data_test['size'] = len(data_test['images'])
splited_data = {'train': data_train, 'valid': data_valid, 'test': data_test}
return splited_data
def get_data(in_dir, shape, ratio):
data1 = load_data(in_dir, shape=shape)
print(len(data1['images']))
print(len(data1['labels']))
data = split_data(data1, ratio=ratio)
print('train', data['train']['size'])
print('valid', data['valid']['size'])
print('test', data['test']['size'])
return data
if __name__ == '__main__':
    in_dir = sys.argv[1]  # assumption: the image directory is passed on the command line (the original call omitted the required in_dir argument)
    data = get_data(in_dir, shape=(540,540,1), ratio=(6,1,3))
# add_pickle
dump = pickle.dumps(data)
print('dump.pickle')
GZIP = True
if GZIP:
with gzip.open('dump.gz', 'wb') as f:
f.write(dump)
print('gzip dump was written')
else:
with open('dump.pickle', 'wb') as f:
pickle.dump(dump, f, protocol=4)
print('dump was written')
| true |
9594b96038df1f0d2c02de4bbf9ca543ed97ab5c
|
Python
|
MiguelAbadia/TIC-Abad-a
|
/Programms Python/ejercicio8.py
|
UTF-8
| 198 | 3.40625 | 3 |
[] |
no_license
|
def ejercicio8():
    # translated from Spanish and ported from Python 2 (input() now returns a string)
    n = int(input("Give me a positive integer: "))
    if n > 0:
        print("The powers are", n, n*n, n*n*n, n*n*n*n)
    else:
        print("That is negative")
ejercicio8()
| true |
ad3b213de470c3a58659c325ac83fb9671b5ebf8
|
Python
|
segimanzanares/acs-djangoapi
|
/src/shows/serializers.py
|
UTF-8
| 1,764 | 2.546875 | 3 |
[] |
no_license
|
from rest_framework import serializers
from shows.models import Show, Episode
from django.utils import timezone
import os
class EpisodeSerializer(serializers.ModelSerializer):
class Meta:
model = Episode
fields = ('id', 'show', 'title', 'description', 'cover')
def create(self, validated_data):
"""
Create and return a new `Episode` instance, given the validated data.
"""
instance = Episode.objects.create(**validated_data)
# Save file into the model directory
instance.cover.save(os.path.basename(instance.cover.name), instance.cover, save=True)
return instance
def update(self, instance, validated_data):
"""
Update and return an existing `Show` instance, given the validated data.
"""
cover = validated_data.get('cover', None)
instance.title = validated_data.get('title', instance.title)
instance.description = validated_data.get('description', instance.description)
if cover:
instance.cover = cover
instance.save()
return instance
class ShowSerializer(serializers.ModelSerializer):
episodes = EpisodeSerializer(many=True, read_only=True)
class Meta:
model = Show
fields = ('id', 'title', 'episodes')
def create(self, validated_data):
"""
Create and return a new `Show` instance, given the validated data.
"""
return Show.objects.create(**validated_data)
def update(self, instance, validated_data):
"""
Update and return an existing `Show` instance, given the validated data.
"""
instance.title = validated_data.get('title', instance.title)
instance.save()
return instance
| true |
2792d988960038d69fa8cf4df7c84be7733a9751
|
Python
|
TangleSpace/swole
|
/swole/core/application.py
|
UTF-8
| 3,849 | 2.78125 | 3 |
[] |
no_license
|
import os
import enum
from typing import Dict
from fastapi import FastAPI
from starlette.responses import FileResponse
import uvicorn
from swole.core.page import Page, HOME_ROUTE
from swole.core.utils import route_to_filename
from swole.widgets import Widget
SWOLE_CACHE = "~/.cache/swole" #: Default directory to use for caching.
class Application():
""" Class representing an application. Application are used to serve
declared pages.
Attributes:
fapi (`fastapi.FastAPI`): FastAPI app.
"""
def __init__(self):
self.files = None
self.fapi = FastAPI()
def assign_orphan_widgets(self):
""" Method finding orphan widgets if any, and assigning it to the Home
page. If the home page does not exist, create it. This allow a very
simple and easy way to use the library.
"""
if HOME_ROUTE not in Page._dict:
# No home page : create one
Page()
home = Page._dict[HOME_ROUTE]
assigned_widgets = set().union(*[page.widgets for page in Page._dict.values()])
for w in Widget._declared:
if w not in assigned_widgets: # Orphan !
home.add(w)
def write(self, folder=SWOLE_CACHE):
""" Method to write the HTML of the application to files, in order to
later serve it.
Arguments:
folder (`str`, optional): Folder where to save HTML files. Defaults
to :const:`~swole.core.application.SWOLE_CACHE`.
"""
os.makedirs(folder, exist_ok=True)
self.files = {} # Route -> HTML file
self.callbacks = {} # Callback ID -> (Page, Ajax)
for route, page in Page._dict.items():
# Write HTML of the page
html_str = page.html().render()
path = os.path.join(folder, "{}.html".format(route_to_filename(route)))
with open(path, 'w') as f:
f.write(html_str)
self.files[route] = path
# Save also callbacks (along with their page)
for aj in page.ajax():
self.callbacks[aj.id] = (page, aj)
def define_routes(self):
""" Method defining the routes in the FastAPI app, to display the right
HTML file.
"""
# Define the pages' routes
for route, html_file in self.files.items():
@self.fapi.get(route)
def index():
return FileResponse(html_file)
# Define the callback route
if len(self.callbacks) != 0:
# Define a dynamic enum to ensure only specific callback ID are valid
cbe = enum.IntEnum('CallbackEnum', {str(c_id): c_id for c_id in self.callbacks.keys()})
@self.fapi.post("/callback/{callback_id}")
def callback(callback_id: cbe, inputs: Dict[str, str]):
page, ajax = self.callbacks[callback_id]
return ajax(page, inputs)
def serve(self, folder=SWOLE_CACHE, host='0.0.0.0', port=8000, log_level='info'):
""" Method to fire up the FastAPI server !
Arguments:
folder (`str`, optional): Folder where to save HTML files. Defaults
to :const:`~swole.core.application.SWOLE_CACHE`.
host (`str`, optional): Run FastAPI on this host. Defaults to
`0.0.0.0`.
port (`int`, optional): Run FastAPI on this port. Defaults to
`8000`.
log_level (`str`, optional): Log level to use for FastAPI. Can be
[`critical`, `error`, `warning`, `info`, `debug`, `trace`].
Defaults to `info`.
"""
self.assign_orphan_widgets()
self.write(folder=folder)
self.define_routes()
uvicorn.run(self.fapi, host=host, port=port, log_level=log_level)
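# A minimal usage sketch (hedged: it assumes pages and widgets have been declared
# elsewhere through the Page and Widget APIs imported above):
#
# app = Application()
# app.serve(port=8000)  # writes the HTML to the cache folder and starts FastAPI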
| true |
6adaa62ac1986dcd4d811aeec82cad429178a601
|
Python
|
chen19901225/SimplePyCode
|
/SimpleCode/PY_CookBook/chapter11/1_http_simple_get.py
|
UTF-8
| 189 | 2.6875 | 3 |
[] |
no_license
|
# ported to Python 3 (the original used Python 2's urllib and print statement)
from urllib.parse import urlencode
from urllib.request import urlopen
url='http://www.baidu.com'
params=dict(name1='value1',name2='value2')
querystring=urlencode(params)
u=urlopen(url+'?'+querystring)
resp=u.read()
print(resp)
| true |
02087c6ead589bf24ddcbcd6f0309fa0df9bf0cd
|
Python
|
andoniabedul/cmd-cryptocurrency-watcher
|
/services/Ticket.py
|
UTF-8
| 2,456 | 2.703125 | 3 |
[
"MIT"
] |
permissive
|
import json
#from helpers.format_response import exchanges as format_response
from helpers.messages import messages as messages
from api.call import exchanges as api_call
class Ticket:
def __init__(self, base_pair, pair):
self.base_pair = base_pair
self.pair = pair
self.exchanges = ['coinbase', 'bitfinex', 'poloniex', 'gemini']
def valid_pair(self, exchange):
with open('./constants/pairs_by_exchange.json') as pairs:
pairs_exchange = json.load(pairs)[exchange]
if self.base_pair in pairs_exchange:
if self.pair in pairs_exchange[self.base_pair]:
return True
return False
def get_url(self, exchange):
with open('./constants/urls.json') as urls:
tickets_url = json.load(urls)
url = tickets_url['tickets'][exchange]
formated_pairs = self.format_pairs(self.base_pair, self.pair, exchange)
return url.format(formated_pairs)
def get_pairs(self):
return '{}{}'.format(self.base_pair, self.pair)
    def format_pairs(self, base_pair, pair, exchange):
        # use == for string comparison ("is" checks identity, not equality)
        if exchange == 'bitfinex':
            return '{}{}'.format(base_pair, pair)
        if exchange == 'poloniex':
            return '{}_{}'.format(base_pair, pair)
        if exchange == 'coinbase':
            return '{}-{}'.format(base_pair.upper(), pair.upper())
        if exchange == 'gemini':
            return '{}{}'.format(base_pair, pair)
@staticmethod
def get(ticket):
response_list = []
for exchange in ticket.exchanges:
pairs = ticket.format_pairs(ticket.base_pair, ticket.pair, exchange)
if ticket.valid_pair(exchange):
response = ticket.get_data(exchange, pairs)
if response['success']:
formated_response = messages['responses']['success'](response['data'], exchange)
response_list.append(formated_response)
else:
formated_response = messages['responses']['error'](response['error'], exchange)
response_list.append(formated_response)
else:
formated_response = messages['responses']['invalid_pair'](ticket.get_pairs(), exchange)
response_list.append(formated_response)
return response_list
def get_data(self, exchange, pairs):
url = self.get_url(exchange)
response = api_call[exchange](url, pairs)
        # assumes api_call returns an object exposing an `error` attribute on failure;
        # if it returns a dict, `'error' not in response` would be the right check
        if not hasattr(response, 'error'):
return {
'success': True,
'data': response
}
else:
return {
'error': response['error']
}
| true |
e3c5999afa33a13d1a3271b55e648f365694c35e
|
Python
|
luckydimdim/grokking
|
/in_place_reversal_of_a_linked_list/reverse_every_k_element_sub_list/main.py
|
UTF-8
| 3,388 | 4.125 | 4 |
[] |
no_license
|
from __future__ import print_function
class Node:
def __init__(self, value, next=None):
self.value = value
self.next = next
def print_list(self):
temp = self
while temp is not None:
print(temp.value, end=" ")
temp = temp.next
print()
def reverse_every_k_elements2(head, k):
'''
Given the head of a LinkedList and a number ‘k’,
reverse every ‘k’ sized sub-list starting from the head.
If, in the end, you are left with a sub-list with
less than ‘k’ elements, reverse it too.
'''
if k <= 1 or head is None:
return head
curr, prev, new_head = head, None, False
while True:
last_node_of_previous_part = prev
last_node_of_sub_list = curr
counter = 0
while counter < k and curr is not None:
next = curr.next
curr.next = prev
prev = curr
curr = next
counter += 1
# connect with the previous part
if last_node_of_previous_part is not None:
last_node_of_previous_part.next = prev
if new_head == False:
new_head = True
head = prev
# connect with the next part
last_node_of_sub_list.next = curr
if curr is None:
break
prev = last_node_of_sub_list
return head
def reverse_every_k_elements3(head, k):
'''
Given the head of a LinkedList and a number ‘k’,
reverse every ‘k’ sized sub-list starting from the head.
If, in the end, you are left with a sub-list with
less than ‘k’ elements, reverse it too.
'''
if k <= 1 or head is None:
return head
counter, prev, curr = 0, None, head
is_new_head = False
while True:
link = prev
tail = curr
counter = 0
while counter < k and curr is not None:
counter += 1
next = curr.next
curr.next = prev
prev = curr
curr = next
if is_new_head == False:
head = prev
is_new_head = True
if link is not None:
link.next = prev
tail.next = curr
if curr is None:
break
prev = tail
return head
def reverse_every_k_elements(head, k):
'''
Given the head of a LinkedList and a number ‘k’,
reverse every ‘k’ sized sub-list starting from the head.
If, in the end, you are left with a sub-list with
less than ‘k’ elements, reverse it too.
'''
if head is None or k <= 0:
return head
prev, curr = None, head
is_new_head = False
while True:
tail_of_first_part = prev
tail_of_second_part = curr
counter = 0
while counter < k and curr is not None:
next = curr.next
curr.next = prev
prev = curr
curr = next
counter += 1
if is_new_head == False:
is_new_head = True
head = prev
if tail_of_first_part is not None:
tail_of_first_part.next = prev
tail_of_second_part.next = curr
if curr is None:
break
prev = tail_of_second_part
return head
def main():
head = Node(1)
head.next = Node(2)
head.next.next = Node(3)
head.next.next.next = Node(4)
head.next.next.next.next = Node(5)
head.next.next.next.next.next = Node(6)
head.next.next.next.next.next.next = Node(7)
head.next.next.next.next.next.next.next = Node(8)
print("Nodes of original LinkedList are: ", end='')
head.print_list()
result = reverse_every_k_elements(head, 3)
print("Nodes of reversed LinkedList are: ", end='')
result.print_list()
main()
| true |
252753f358e2106a377fe0abd8311512c351cc0d
|
Python
|
znorm/Euler
|
/006.py
|
UTF-8
| 221 | 3.796875 | 4 |
[] |
no_license
|
sumofsquares = 0
squareofsum = 0
for x in range(1,101):
sumofsquares = sumofsquares + (x**2)
squareofsum = squareofsum + x
squareofsum = squareofsum ** 2
print( squareofsum - sumofsquares)
#25164150
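# The same answer via closed forms (square of the sum minus sum of the squares):
# (n(n+1)/2)^2 - n(n+1)(2n+1)/6
n = 100
print((n*(n+1)//2)**2 - n*(n+1)*(2*n+1)//6) # 25164150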
| true |
28ea5a719de789bf35197e427659db6fbe96093a
|
Python
|
AndersonHJB/PyCharm_Coder
|
/Coder_Old/pycharm_daima/爬虫大师班/插件合集/参数转换/headers_app.py
|
UTF-8
| 468 | 3.203125 | 3 |
[] |
no_license
|
import re
import sys
def headers_to_dict(data):
    global headers
    # iterate over header lines (iterating over the raw string would walk it
    # character by character, which is what the original code effectively did)
    for value in data.splitlines(keepends=True):
        if ': ' not in value:
            continue  # skip blank or malformed lines
        try:
            hea_data = re.findall(r'(.*?): (.*?)\n', value)[0]
            headers.setdefault(hea_data[0], hea_data[1])
        except IndexError:
            hea_data = value.split(': ', 1)
            headers.setdefault(hea_data[0], hea_data[1])
headers = {}
print('Paste the copied request headers, then send EOF (Ctrl-D / Ctrl-Z):')
res = sys.stdin.read()  # input() cannot capture multi-line headers
headers_to_dict(res)
print(headers)
| true |
9d03c8b7f3b90341d68c9b4d8e4a99f9863befb9
|
Python
|
Eric-cv/QF_Python
|
/day2/input_demo.py
|
UTF-8
| 638 | 4.125 | 4 |
[] |
no_license
|
# Input: input()
#name = input()
#print(name)
#name = input('Enter your name: ') # blocking call
#print(name)
'''
Exercise:
Game: Fishing Master (捕鱼达人)
Enter the player's username
Enter the password
Top up: 500
'''
print('''
*********************
    Fishing Master
*********************
''')
username = input("Enter the player's username:\n")
password = input('Enter the password:\n')
print('%s, please top up to enter the game\n' % username)
coins = input('Top-up amount:\n') # everything typed at input() is a string
print(type(coins))
coins = int(coins)
print('%s topped up successfully! Current game coins: %d' % (username, coins))
| true |
d41df6088fd195dc8263760aeef440c05b77b30a
|
Python
|
AngieGD/MCTS_Juego_uno
|
/TicTacToe/juegoAlternativa.py
|
UTF-8
| 3,936 | 3.5625 | 4 |
[] |
no_license
|
import numpy as np
import pandas as pd
from os import system
class Table():
def __init__(self):
self.table = [[' ',' ',' '],
[' ',' ',' '],
[' ',' ',' ']]
def getMoves(self):
moves = []
for x in range(3):
for y in range(3):
if self.table[x][y] == ' ':
moves.append((x,y))
return moves
def insertarMarca(self,pos,marca):
        # method to insert a mark
self.table[pos[0]][pos[1]] = marca
    # function that displays the board
def mostrarTablero(self):
salida = ""
for fila in self.table:
salida+= repr(fila)+"\n"
print(salida)
class SuperTable():
tablero = np.zeros((3,3)).astype('object')
def __init__(self):
self.crearTablero()
def crearTablero(self):
for row in range(self.tablero.shape[0]):
for column in range(self.tablero.shape[1]):
self.tablero[row,column] = Table()
def mostrarTablero(self):
salida = ""
matriz = ""
for fila in self.tablero:
for i in range(3):
for tablero in fila:
salida += repr(tablero.table[i])+" "
matriz+=salida+"\n"
salida = ""
matriz+="\n"
print(matriz)
def numeroMovimientos(self):
count = 0
for fila in self.tablero:
for table in fila:
count+=len(table.getMoves())
return count
# method to get the (row, column) position for the chosen board
def obtenerTablero(opcion, tablero):
i = 0
for row in range(tablero.tablero.shape[0]):
for column in range(tablero.tablero.shape[1]):
if i==opcion-1:
return (row,column)
i+=1
return None
# method to validate a move
def validarJugada(pos, tablero):
if pos in tablero.getMoves():
return True
return False
def seleccionarMovimiento(pos, tablero, jugador):
    print("\nBoard: ", pos)
    #tablero.tablero[pos].mostrarTablero()
    print(tablero.tablero[pos].mostrarTablero())
    print('\nAvailable moves: ', tablero.tablero[pos].getMoves())
    coordenada = input('Select the move you want to make (x y): ')
    posicion = coordenada.split(' ')
    posicion = (int(posicion[0]), int(posicion[1])) # receives the position
    # validate the chosen cell (the original passed `pos`, the board coordinate, by mistake)
    while not validarJugada(posicion, tablero.tablero[pos]):
        print('Please enter a valid move')
        print('\nAvailable moves: ', tablero.tablero[pos].getMoves())
        coordenada = input('Select the move you want to make (x y): ')
        posicion = coordenada.split(' ')
        posicion = (int(posicion[0]), int(posicion[1])) # receives the position
    tablero.tablero[pos].insertarMarca(posicion, jugador['marca'])
    return tablero, posicion
# implementation pending
def turnoJugador(jugador):
    pass
# implementation pending
def turnoMaquina(maquina):
    pass
if __name__ =='__main__':
    system('clear')
    # first, create a board
    tablero = SuperTable()
    tablero.mostrarTablero()
    print('Number of available moves: ', tablero.numeroMovimientos())
    jugadores = [{'nombre':'Eduardo','movimientos':[], 'marca':'X','tipo':'normal'},{'nombre':'bot','movimientos':[],'marca':'O','tipo':'IA'}]
    # pick a random starting turn
    jugador = jugadores[np.random.randint(len(jugadores))]
    opcion = int(input('Select a board (1:9): '))
    # validate the option
    while (opcion-1 < 0) or (opcion-1 > 8):
        print('Please select a valid board: ')
        opcion = int(input('Select a board (1:9): '))
    # board position
    pos = obtenerTablero(opcion, tablero)
    tablero, pos = seleccionarMovimiento(pos, tablero, jugador)
    tablero.mostrarTablero()
| true |
52de409311d4836172986571fec8825b316f644d
|
Python
|
mithem/serverly
|
/test_utils.py
|
UTF-8
| 6,154 | 3 | 3 |
[
"MIT"
] |
permissive
|
import os  # used by test_guess_response_headers to remove a temporary file
import pytest
import serverly.utils
from serverly.utils import *
def test_parse_role_hierarchy():
e1 = {
"normal": "normal",
"admin": "normal"
}
e2 = {
"normal": "normal",
"admin": "normal",
"staff": "admin",
"root": "staff",
"god": "staff",
}
e3 = {
"normal": "normal",
"admin": "normal",
"staff": "normal",
"root": {"admin", "staff"},
"god": "root"
}
r1 = parse_role_hierarchy(e1)
r2 = parse_role_hierarchy(e2)
r3 = parse_role_hierarchy(e3)
assert r1 == {"normal": {"normal"}, "admin": {"normal"}}
assert r2 == {"normal": {"normal"}, "admin": {"normal"}, "staff": {
"admin", "normal"}, "root": {"admin", "normal", "staff"}, "god": {"admin", "normal", "staff"}}
assert r3 == {"normal": {"normal"}, "admin": {"normal"},
"staff": {"normal"}, "root": {"admin", "normal", "staff"}, "god": {"admin", "normal", "staff", "root"}}
def test_ranstr():
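    # 10000 random strings: each must be 20 characters long and never repeat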
s = []
for _ in range(10000):
r = ranstr()
assert len(r) == 20
assert not r in s
s.append(r)
def test_guess_response_headers():
c1 = "<html lang='en_US'><h1>Hello World!</h1></html>"
h1 = {"content-type": "text/html"}
assert guess_response_headers(c1) == h1
c2 = "Hello there!"
h2 = {"content-type": "text/plain"}
assert guess_response_headers(c2) == h2
c3 = {"hello": True}
h3 = {"content-type": "application/json"}
assert guess_response_headers(c3) == h3
c4 = {"id": 1, "password": "totallyhashed",
"salt": "totallyrandom", "username": "oh yeah!"}
h4 = {"content-type": "application/json"}
assert guess_response_headers(c4) == h4
c5 = open("test_utils.py", "r")
h5 = {"content-type": "text/x-python"}
assert guess_response_headers(c5) == h5
c5.close()
c6 = open("temporary.notanactualfilenamesomimetypescantguessit", "w+")
h6 = {"content-type": "text/plain"}
assert guess_response_headers(c6) == h6
c6.close()
os.remove("temporary.notanactualfilenamesomimetypescantguessit")
c7 = bytes("hello world", "utf-8")
h7 = {"content-type": "application/octet-stream"}
assert guess_response_headers(c7) == h7
def test_get_server_address():
valid = ("localhost", 8080)
assert get_server_address(("localhost", 8080)) == valid
assert get_server_address("localhost,8080") == valid
assert get_server_address("localhost, 8080") == valid
assert get_server_address("localhost;8080") == valid
assert get_server_address("localhost; 8080") == valid
assert get_server_address("localhost:8080") == valid
assert get_server_address("localhost::8080") == valid
assert get_server_address("localhost|8080") == valid
assert get_server_address("localhost||8080") == valid
def test_get_server_address_2():
valid = ("localhost", 20000)
typy_errory = [True, {"hostname": "localhost", "port": 20000}, 42]
value_errory = [(True, "localhost"), ("whats", "up"), (42, 3.1415926535)]
assert get_server_address((20000, "localhost")) == valid
for i in typy_errory:
with pytest.raises(TypeError):
get_server_address(i)
for i in value_errory:
with pytest.raises(ValueError):
get_server_address(i)
def test_get_server_address_3():
valid = ("localhost", 8080)
with pytest.raises(Exception):
with pytest.warns(UserWarning):
serverly.Server._get_server_address((8080, "local#ost"))
def test_check_relative_path():
falsy_values = ["hello", "whatsupp", ""]
typy_errors = [bytes("hello there", "utf-8"),
open("test_utils.py", "r"), True, 23.7]
goodish_ones = ["/hello", "/hello-world", "/whatss/up"]
for i in falsy_values:
with pytest.raises(ValueError):
check_relative_path(i)
for i in typy_errors:
with pytest.raises(TypeError):
check_relative_path(i)
for i in goodish_ones:
assert check_relative_path(i)
def test_check_relative_file_path():
with pytest.raises(FileNotFoundError):
check_relative_file_path(
"definetelynotafile.definetelynotafile.definetelynotafile!")
bad_ones = [True, open("test_utils.py", "r"), 42]
for i in bad_ones:
with pytest.raises(TypeError):
check_relative_file_path(i)
assert check_relative_file_path("test_utils.py") == "test_utils.py"
def test_get_http_method_type():
false_ones = ["GETT", "PooST", "puUT", "DEL", "del",
"head", "CONNECT", "options", "TRACE", "patch"]
good_ones = {"GET": "get", "PoSt": "post",
"Put": "put", "DelEtE": "delete"}
for i in false_ones:
with pytest.raises(ValueError):
get_http_method_type(i)
for k, v in good_ones.items():
assert get_http_method_type(k) == v
def test_parse_scope_list():
assert parse_scope_list("hello;world;whatsup") == [
"hello", "world", "whatsup"]
assert parse_scope_list("19;") == ['19']
assert parse_scope_list("42;1829;sajki;") == ["42", "1829", "sajki"]
assert parse_scope_list("") == []
def test_get_scope_list():
assert get_scope_list("admin") == "admin"
assert get_scope_list(["admin", "financial"]) == "admin;financial"
assert get_scope_list("") == ""
def test_get_chunked_response():
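    # bandwidth caps the chunk size; without it the body comes back as a single chunk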
r = serverly.objects.Response(body="Hello world")
assert get_chunked_response(r) == ["Hello world"]
r.bandwidth = 4
assert get_chunked_response(r) == ["Hell", "o wo", "rld"]
def test_lowercase_dict():
d = {"Hello World": True, "WhatssUpp": "Yoo"}
assert lowercase_dict(d) == {"hello world": True, "whatssupp": "Yoo"}
assert lowercase_dict(d, True) == {"hello world": True, "whatssupp": "yoo"}
def test_get_bytes():
assert get_bytes("hello world") == bytes("hello world", "utf-8")
assert get_bytes(
"hello world", "application/octet-stream") == b"hello world"
assert get_bytes(
{"helele": 42}, "application/octet-stream") == {"helele": 42}
assert get_bytes(True) == True
| true |
e3c75609405865a1b44c1a4c295c56e6027268a9
|
Python
|
coy0725/leetcode
|
/python/405_Convert_a_Number_to_Hexadecimal.py
|
UTF-8
| 848 | 3.03125 | 3 |
[
"MIT"
] |
permissive
|
class Solution(object):
def toHex(self, num):
"""
:type num: int
:rtype: str
"""
if num == 0:
return '0'
# letter map
mp = '0123456789abcdef'
ans = ''
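        # 8 nibbles cover a full 32-bit word, so negative inputs come out in two's-complement form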
for _ in range(8):
# get last 4 digits
# num & 1111b
n = num & 15
# hex letter for current 1111
c = mp[n]
ans = c + ans
# num = num / 16
num = num >> 4
#strip leading zeroes
return ans.lstrip('0')
# def toHex(self, num):
# def tohex(val, nbits):
# return hex((val + (1 << nbits)) % (1 << nbits))
# return tohex(num, 32)[2:]
# def toHex(self, num, h=''):
# return (not num or h[7:]) and h or self.toHex(num / 16, '0123456789abcdef'[num % 16] + h)
| true |
0c1c2070bd92dca3273bc5db8f336d924f16755a
|
Python
|
lollyxsrinand/ChristmasGame
|
/main.py
|
UTF-8
| 3,190 | 3.4375 | 3 |
[] |
no_license
|
import math
from random import randint
import pygame as pg
from pygame import mixer as mx
""" INITIALISING PYGAME """
pg.init()
""" CREAITNG SCREEN """
screen = pg.display.set_mode((800, 600))
""" BACKGROUND MUSIC """
# mx.music.load('lofi_background.wav')
# mx.music.set_volume(0.8)
# mx.music.play(-1)
background_music = mx.Sound("lofi_background.wav")
background_music.set_volume(0.8)
background_music.play()
""" TITLE """
pg.display.set_caption("ChristmasGame")
""" CREATING BACKGROUND IMAGE """
background = pg.image.load('bg.jpg')
""" CREATING PLAYER """
playerImg = pg.image.load("player.png")
playerX = 52
playerY = 5
playerX_change = 0
playerY_change = 0
""" CREATING CANDY """
candy = pg.image.load("candy.png")
candyX = randint(0,750)
candyY = randint(0,550)
score = 0
font = pg.font.Font("freesansbold.ttf",32)
over_text = pg.font.Font("freesansbold.ttf",64)
time_left = pg.font.Font("freesansbold.ttf",32)
""" SHOWING SCORE """
def show_score():
score_text = font.render(f"Score : {score}",True, (255,255,255))
screen.blit(score_text,(10,10))
""" SHOWING DEATH SCREEN """
def show_death(score):
    won = score >= 15
    # if not done:win_music()
    text = "You Won!" if won else "You lose"
    # render with the 64 pt font defined above instead of shadowing it with the 32 pt one
    death_text = over_text.render(text, True, (255,255,255))
    screen.blit(death_text, (300,300))
""" UPDATING THE PLAYER POSITION """
def player(x, y):
screen.blit(playerImg,(x, y))
ticks = pg.time.get_ticks()
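""" START TIME IN MS, USED FOR THE 20-SECOND ROUND TIMER """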
player_alive = True
""" GAME LOOP """
running = True
while running:
screen.fill((0, 0, 0)) #FILLING BACKGROUND WITH BLACK, CHANGING THIS SPOILS EVERYTHING
screen.blit(background,((0,0)))
screen.blit(candy, (candyX,candyY))
""" COLLISION """
if math.sqrt((candyX-playerX)**2+(candyY-playerY)**2)<=40:
candyX=randint(0,750)
candyY=randint(0,550)
score += 1
# print(score)
""" O(n^2) stuff """
for event in pg.event.get():
if player_alive or event.type == pg.QUIT:
if event.type == pg.KEYDOWN:
if event.key == pg.K_SPACE:
playerY_change=-0.4
if event.key == pg.K_LEFT:
playerX_change = -0.5
if event.key == pg.K_RIGHT:
playerX_change = 0.5
if event.type == pg.KEYUP:
if event.key == pg.K_LEFT or event.key == pg.K_RIGHT:
playerX_change = 0
""" QUITTING GAME ON EXIT BUTTON """
if event.type == pg.QUIT:running = False
""" FAKE GRAVITY """
playerY_change+=0.0009
playerY+=playerY_change
if playerY>=540:playerY=540
if playerY<=5:playerY=5
""" MOVING LEFT OR RIGHT """
playerX+=playerX_change
if playerX<=0:
playerX=0
elif playerX>=736:
playerX=736
show_score()
""" CHANGING POSITION OF PLYER"""
player(playerX, playerY)
seconds = (pg.time.get_ticks()-ticks)/1000
if seconds>20:
player_alive = False
show_death(score)
pg.display.update()
| true |
57d5f1d8de021f5a9aee01fb1af5d80bb2bf811d
|
Python
|
ddannenb/sentence-transformers
|
/examples/training_quora_duplicate_questions/application_Information_Retrieval.py
|
UTF-8
| 3,060 | 3.234375 | 3 |
[
"Apache-2.0"
] |
permissive
|
"""
This is an interactive demonstration for information retrieval. We will encode a large corpus with 500k+ questions.
This is done once and the result is stored on disc.
Then, we can enter new questions. The new question is encoded and we perform a brute force cosine similarity search
and retrieve the top 5 questions in the corpus with the highest cosine similarity.
For larger datasets, it can make sense to use a vector index server like https://github.com/spotify/annoy or https://github.com/facebookresearch/faiss
"""
from sentence_transformers import SentenceTransformer, util
import os
from zipfile import ZipFile
import pickle
import time
model_name = 'distilbert-base-nli-stsb-quora-ranking'
embedding_cache_path = 'quora-embeddings-{}.pkl'.format(model_name.replace('/', '_'))
max_corpus_size = 100000
model = SentenceTransformer(model_name)
#Check if embedding cache path exists
if not os.path.exists(embedding_cache_path):
# Check if the dataset exists. If not, download and extract
dataset_path = 'quora-IR-dataset'
if not os.path.exists(dataset_path):
print("Dataset not found. Download")
zip_save_path = 'quora-IR-dataset.zip'
util.http_get(url='https://public.ukp.informatik.tu-darmstadt.de/reimers/sentence-transformers/datasets/quora-IR-dataset.zip', path=zip_save_path)
        with ZipFile(zip_save_path, 'r') as zip_file:
            zip_file.extractall(dataset_path)
corpus_sentences = []
with open(os.path.join(dataset_path, 'graph/sentences.tsv'), encoding='utf8') as fIn:
next(fIn) #Skip header
for line in fIn:
qid, sentence = line.strip().split('\t')
corpus_sentences.append(sentence)
if len(corpus_sentences) >= max_corpus_size:
break
print("Encode the corpus. This might take a while")
corpus_embeddings = model.encode(corpus_sentences, show_progress_bar=True)
print("Store file on disc")
with open(embedding_cache_path, "wb") as fOut:
pickle.dump({'sentences': corpus_sentences, 'embeddings': corpus_embeddings}, fOut)
else:
print("Load pre-computed embeddings from disc")
with open(embedding_cache_path, "rb") as fIn:
cache_data = pickle.load(fIn)
corpus_sentences = cache_data['sentences'][0:max_corpus_size]
corpus_embeddings = cache_data['embeddings'][0:max_corpus_size]
###############################
print("Corpus loaded with {} sentences / embeddings".format(len(corpus_sentences)))
while True:
inp_question = input("Please enter a question: ")
start_time = time.time()
question_embedding = model.encode(inp_question)
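    # brute-force cosine-similarity search over the corpus embeddings; returns one ranked hit list per query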
hits = util.information_retrieval(question_embedding, corpus_embeddings)
end_time = time.time()
hits = hits[0] #Get the hits for the first query
print("Input question:", inp_question)
print("Results (after {:.3f} seconds):".format(end_time-start_time))
for hit in hits[0:5]:
print("\t{:.3f}\t{}".format(hit['score'], corpus_sentences[hit['corpus_id']]))
print("\n\n========\n")
| true |
f07a84f01826b5b7d196bcedeaf3f7cfc1802d30
|
Python
|
WolfireGames/overgrowth
|
/Libraries/freetype-2.12.1/builds/meson/extract_freetype_version.py
|
UTF-8
| 2,997 | 3 | 3 |
[
"FTL",
"GPL-1.0-or-later",
"BSD-3-Clause",
"GPL-2.0-only",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain",
"GPL-3.0-only",
"LicenseRef-scancode-unknown",
"Zlib",
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
#!/usr/bin/env python3
#
# Copyright (C) 2020-2022 by
# David Turner, Robert Wilhelm, and Werner Lemberg.
#
# This file is part of the FreeType project, and may only be used, modified,
# and distributed under the terms of the FreeType project license,
# LICENSE.TXT. By continuing to use, modify, or distribute this file you
# indicate that you have read the license and understand and accept it
# fully.
"""Extract the FreeType version numbers from `<freetype/freetype.h>`.
This script parses the header to extract the version number defined there.
By default, the full dotted version number is printed, but `--major`,
`--minor` or `--patch` can be used to only print one of these values
instead.
"""
from __future__ import print_function
import argparse
import os
import re
import sys
# Expected input:
#
# ...
# #define FREETYPE_MAJOR 2
# #define FREETYPE_MINOR 10
# #define FREETYPE_PATCH 2
# ...
RE_MAJOR = re.compile(r"^ \#define \s+ FREETYPE_MAJOR \s+ (.*) $", re.X)
RE_MINOR = re.compile(r"^ \#define \s+ FREETYPE_MINOR \s+ (.*) $", re.X)
RE_PATCH = re.compile(r"^ \#define \s+ FREETYPE_PATCH \s+ (.*) $", re.X)
def parse_freetype_header(header):
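    # scan the header line by line, capturing each version component exactly once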
major = None
minor = None
patch = None
for line in header.splitlines():
line = line.rstrip()
m = RE_MAJOR.match(line)
if m:
assert major == None, "FREETYPE_MAJOR appears more than once!"
major = m.group(1)
continue
m = RE_MINOR.match(line)
if m:
assert minor == None, "FREETYPE_MINOR appears more than once!"
minor = m.group(1)
continue
m = RE_PATCH.match(line)
if m:
assert patch == None, "FREETYPE_PATCH appears more than once!"
patch = m.group(1)
continue
assert (
major and minor and patch
), "This header is missing one of FREETYPE_MAJOR, FREETYPE_MINOR or FREETYPE_PATCH!"
return (major, minor, patch)
def main():
parser = argparse.ArgumentParser(description=__doc__)
group = parser.add_mutually_exclusive_group()
group.add_argument(
"--major",
action="store_true",
help="Only print the major version number.",
)
group.add_argument(
"--minor",
action="store_true",
help="Only print the minor version number.",
)
group.add_argument(
"--patch",
action="store_true",
help="Only print the patch version number.",
)
parser.add_argument(
"input",
metavar="FREETYPE_H",
help="The input freetype.h header to parse.",
)
args = parser.parse_args()
with open(args.input) as f:
header = f.read()
version = parse_freetype_header(header)
if args.major:
print(version[0])
elif args.minor:
print(version[1])
elif args.patch:
print(version[2])
else:
print("%s.%s.%s" % version)
return 0
if __name__ == "__main__":
sys.exit(main())
| true |
b5043ccef979bc25c293b102dbcd993d3c1b5ef5
|
Python
|
Maxpa1n/modules_pytorch
|
/models/MAML-wave/learnmodel.py
|
UTF-8
| 1,275 | 2.796875 | 3 |
[] |
no_license
|
import torch
import torch.nn as nn
from torch.nn import functional as F
class Compute(nn.Module):
def __init__(self, hid_dim):
super(Compute, self).__init__()
self.input_layer = nn.Linear(1, hid_dim)
# self.hid_layer = nn.Linear(hid_dim, hid_dim)
self.output_layer = nn.Linear(hid_dim, 1)
self.relu = nn.ReLU()
def forward(self, X):
hid = self.relu(self.input_layer(X))
# hid = self.relu(self.hid_layer(hid))
output = self.output_layer(hid)
return output
class Learner(nn.Module):
def __init__(self, hid_dim):
super().__init__()
self.com = Compute(hid_dim)
# self.com_temp = Compute(hid_dim)
def forward(self, x, com=None):
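        # with fast weights [w1, b1, w2, b2] given, run a functional forward pass;
        # otherwise fall back to the module's own layers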
if com is not None:
x = F.linear(x, com[0], com[1])
# x = F.linear(x, com[2], com[3])
y = F.linear(x, com[2], com[3])
return y
else:
y = self.com(x)
return y
if __name__ == '__main__':
x = torch.randn(25, 1)
com = Compute(64)
lea = Learner(64)
para_dic = dict()
for key, val in lea.named_parameters():
para_dic[key] = val
print(key, val.grad)
    # build the explicit fast-weight list [w1, b1, w2, b2] that Learner.forward expects;
    # passing the Compute module itself would fail, since modules are not indexable
    fast_weights = list(com.parameters())
    y = lea(x, fast_weights)
print('output shape {}'.format(y.shape))
| true |
8e779cf6391028a37be8cb20c5b01c587ab0362c
|
Python
|
MohammadAsif206/BankAPI-Pzero
|
/ProjectZero/services/account_service.py
|
UTF-8
| 1,366 | 2.765625 | 3 |
[] |
no_license
|
from abc import ABC, abstractmethod
from entities.account import Account
class AccountService(ABC):
# General CRUD functionality
@abstractmethod
def create_account_by_customer_id(self, account: Account, customer_id: int):
pass
@abstractmethod
def retrieve_all_accounts_by_cid(self, customer_id: int) -> list[Account]:
pass
@abstractmethod
    def retrieve_account_by_cid_and_balance_range(self, customer_id: int, lower_limit: float, upper_limit: float) -> list[Account]:
pass
@abstractmethod
    def retrieve_account_by_cid_and_aid(self, customer_id: int, account_number: int) -> Account:
pass
@abstractmethod
def update_account_by_cid_and_aid(self, account: Account, customer_id: int, account_number: int) -> Account:
pass
@abstractmethod
def delete_account_by_cid_and_aid(self, customer_id: int, account_number: int) -> bool:
pass
@abstractmethod
def do_trans_on_account_by_cid_and_aid(self, customer_id: int, account_number: int,
withdraw: float, deposit: float) -> Account:
pass
@abstractmethod
def transfer_fund_between_account_of_a_client_by_aids(self, s_account_number: int, r_account_number: int,
                                                          amount: float) -> list[Account]:
pass
| true |
8def9170ec61069e564024dd50482b1f999e365d
|
Python
|
rosechellejoy/cmsc128-ay2015-16-assign001-py
|
/oraa_pa.py
|
UTF-8
| 7,813 | 3.484375 | 3 |
[] |
no_license
|
"""
Rosechelle Joy C. Oraa
2013-11066
CMSC 128 AB-3L
"""
import sys
"""
numToWords() accepts an input number and outputs its equivalent in words
temp_num : input by the user
"""
def numToWords(temp_num):
if len(temp_num)>7: #if number inputed is greater than 7 digits: invalid
print 'Invalid: input can only have at most 7 digits'
return
str_num = '' #word equivalent of temp_num
length = len(temp_num) #input length
pos =0 #current position
j=0
str_form = ''
for i in range(6, -1, -1): #places input value in an array of 7 elements(for each digit)
if i>length-1: #if current length is less than current max number of digits
str_num=str_num+'0'
else:
while j!=length: #while input is not fully transferred to str_num
str_num=str_num+temp_num[j]
j=j+1
x=str_num #holds input in its 7 digit representation
while pos < 7 :
if pos == 4 and (x[pos-1]!='0' or x[pos-2]!='0' or x[pos-3]!='0') and length>3:
str_form = str_form+'thousand ' #if at 4th pos and 3 previous digits are not == 0
if x[pos]=='0': #if number is 0
if pos == 6 and length == 1:
str_form = str_form + 'zero'
elif pos != 2 and pos != 5: #ones digit
if x[pos]=='1':
str_form = str_form+'one '
elif x[pos]=='2':
str_form=str_form+'two '
elif x[pos]=='3':
str_form=str_form+'three '
elif x[pos] =='4':
str_form=str_form+'four '
elif x[pos]=='5':
str_form=str_form+'five '
elif x[pos]=='6':
str_form=str_form+'six '
elif x[pos]=='7':
str_form=str_form+'seven '
elif x[pos]=='8':
str_form=str_form+'eight '
elif x[pos]=='9':
str_form=str_form+'nine '
if pos == 0:
str_form = str_form+'million '
elif pos == 1 or pos == 4:
str_form = str_form+'hundred '
else: #tens digit
if pos == 2 or pos == 5:
if x[pos]== '1':
pos=pos+1
if x[pos]== '0':
str_form = str_form+'ten '
elif x[pos]== '1':
str_form = str_form+'eleven '
elif x[pos]== '2':
str_form = str_form+'twelve '
elif x[pos]== '3':
str_form = str_form+'thirteen '
elif x[pos]== '4':
str_form = str_form+'fourteen '
elif x[pos]== '5':
str_form = str_form+'fifteen '
elif x[pos]== '6':
str_form = str_form+'sixteen '
elif x[pos]== '7':
str_form = str_form+'seventeen '
elif x[pos]== '8':
str_form = str_form+'eighteen '
elif x[pos]== '9':
str_form = str_form+'nineteen '
elif x[pos]== '2':
str_form = str_form+'twenty '
elif x[pos]== '3':
str_form = str_form+'thirty '
elif x[pos]== '4':
str_form = str_form+'forty '
elif x[pos]== '5':
str_form = str_form+'fifty '
elif x[pos]== '6':
str_form = str_form+'sixty '
elif x[pos]== '7':
str_form = str_form+'seventy '
elif x[pos]== '8':
str_form = str_form+'eighty '
elif x[pos]== '9':
str_form = str_form+'ninety '
if pos == 2 or pos == 5:
pos= pos+1
if x[pos]=='1': #single digit after tens
str_form = str_form+'one '
elif x[pos]=='2':
str_form=str_form+'two '
elif x[pos]=='3':
str_form=str_form+'three '
elif x[pos] =='4':
str_form=str_form+'four '
elif x[pos]=='5':
str_form=str_form+'five '
elif x[pos]=='6':
str_form=str_form+'six '
elif x[pos]=='7':
str_form=str_form+'seven '
elif x[pos]=='8':
str_form=str_form+'eight '
elif x[pos]=='9':
str_form=str_form+'nine '
pos = pos+1 #increment pos
print str_form #print word representation
return
"""
wordsToNum() accepts word(s) then prints its numerical equivalent
word - string input
"""
def wordsToNum(word):
word = word.split() #word- list of words from word
gen_num = 0 #total value
temp = 0 #current integer
mill_num=0 #total million value of word
hund_thou = 0 #total hundred thousand value of word
hund = 0 #total hundred value of word
wLen = len(word)# number of words in word
flag=0 # is equal to 1 if there should be no more thousands
i=0
while i < wLen: #iterates through each word in word(list)
if word[i] == 'one':
temp+=1
elif word[i] == 'two':
temp+=2
elif word[i] == 'three':
temp+=3
elif word[i] == 'four':
temp+=4
elif word[i] == 'five':
temp+=5
elif word[i] == 'six':
temp+=6
elif word[i] == 'seven':
temp+=7
elif word[i] == 'eight':
temp+=8
elif word[i] == 'nine':
temp+=9
elif word[i] == 'ten':
temp += 10
elif word[i] == 'eleven':
temp += 11
elif word[i] == 'twelve':
temp += 12
elif word[i] == 'thirteen':
temp += 13
elif word[i] == 'fourteen':
temp += 14
elif word[i] == 'fifteen':
temp += 15
elif word[i] == 'sixteen':
temp += 16
elif word[i] == 'seventeen':
temp += 17
elif word[i] == 'eighteen':
temp += 18
elif word[i] == 'nineteen':
temp += 19
elif word[i] == 'twenty':
temp += 20
elif word[i] == 'thirty':
temp += 30
elif word[i] == 'forty':
temp += 40
elif word[i] == 'fifty':
temp += 50
elif word[i] == 'sixty':
temp += 60
elif word[i] == 'seventy':
temp += 70
elif word[i] == 'eighty':
temp += 80
elif word[i] == 'ninety':
temp += 90
elif word[i] == 'million': #multiply previous number(temp) to 1000000
mill_num= temp*1000000 #place in mill_num
temp=0
elif word[i] == 'hundred': #multiply value in temp to 100
temp= temp*100
elif word[i] == 'thousand': #multiply hund to 1000 then place in hund_thou
hund_thou = hund*1000
hund=0
temp=0
		hund = temp
i+=1 #increment i then next iteration
gen_num= mill_num+hund_thou+hund #gen_num= accumulated value of millions, hundred thousands, and hundreds
print gen_num #print total number
return
"""
wordsToCurrency() accepts two inputs then generates string in number form with given currency
word - number in words inputed by the user
cur- currency given by the user
"""
def wordsToCurrency(word, cur):
if cur=='USD' or cur=='JPY' or cur == 'PHP': #checks if currency given is valid
sys.stdout.write(cur) #print currency
wordsToNum(word) #print word in its numerical value
else:
print 'Invalid!'
return
"""
numberDelimitered() accepts three inputs then prints the number with a delimiter in the position given by the user
temp - number
delimiter - delimiter given by the user
jumps - # of jumps from the right
"""
def numberDelimitered(temp, delimiter, jumps):
temp= str(temp) #typecast temp to a string
rev='' #will hold temp in reverse
i=0
if len(temp) > 7:
print 'Invalid!: exceeded max no. of digits'
return
for i in range(0, len(temp)): #reverse number input
rev=temp[i]+rev
temp=''
for i in range(0, len(rev)): #iterates through all digits in rev
if jumps== i: #if i == jumps
temp= delimiter+temp #concatenate delimiter with temp
temp= rev[i]+temp #concatenate rev[i] with temp
print temp
return
"""
prints menu and lets the user choose feature to be used
"""
print 'MENU'
print '[1] Number to Word'
print '[2] Word to Number'
print '[3] Word to Currency'
print '[4] Number Delimitered '
ch = input('choice: ')
if(ch==1): #number to words
temp_num = raw_input('Enter number: ')
numToWords(temp_num);
elif(ch==2): #word to number
word= raw_input("Enter input: ")
wordsToNum(word);
elif(ch==3): #number to currency
word= raw_input("Enter number in words: ")
cur= raw_input("Enter currency: ")
wordsToCurrency(word, cur)
elif(ch==4): #number delimitered
temp = raw_input('Enter number: ')
delimiter = raw_input('Enter delimiter: ')
jumps = input('Enter # of jumps: ')
numberDelimitered(temp, delimiter, jumps);
else:
print 'Invalid!'
| true |
499c8883c63e328da19afada8b240fd244c777d8
|
Python
|
tushargupta14/compositional_semantics
|
/create_fastText_dict2.py
|
UTF-8
| 2,091 | 2.78125 | 3 |
[] |
no_license
|
import json
from collections import defaultdict
def create_dictionaries(path_to_files):
print "Creating Dictionaries"
count = 0
source_fastText_dict = defaultdict(list)
with open(path_to_files+"source_fastText_output.txt","r") as source_file:
for line in source_file :
vector = []
vector = [value for value in line.rstrip().split(" ")]
source_word = vector[0]
source_fastText_dict[source_word] = vector[1:]
count+=1
print count,len(vector)
derived_fastText_dict = defaultdict(list)
count =0
with open(path_to_files+"derived_fastText_output.txt","rb+") as source_file:
for line in source_file :
vector = []
vector = [value for value in line.rstrip().split(" ")]
derived_word = vector[0]
derived_fastText_dict[derived_word] = vector[1:]
count+=1
print count, len(vector)
affix_fastText_dict = defaultdict(list)
count = 0
with open(path_to_files+"affix_fastText_output.txt","rb+") as source_file:
for line in source_file :
vector = []
vector = [value for value in line.rstrip().split(" ")]
affix_word = vector[0]
affix_fastText_dict[affix_word] = vector[1:]
count+=1
print count, len(vector)
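    # note: only the source and derived dictionaries are dumped below; the affix dictionary stays in memory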
with open(path_to_files+"source_fastText_350dim.json","wb+") as f:
json.dump(source_fastText_dict,f)
with open(path_to_files+"derived_fastText_350dim.json","wb+") as f:
json.dump(derived_fastText_dict,f)
if __name__ == "__main__" :
create_dictionaries("/home/du3/13CS30045/affix_final/lazaridou/create_vectors/data_files/")
| true |
a819b7c9b02372e48784b4ddced181e7c151cb7b
|
Python
|
jack-mcivor/afl-predictor
|
/afl/models.py
|
UTF-8
| 4,931 | 3.0625 | 3 |
[] |
no_license
|
from collections import defaultdict
from math import exp, log
import pandas as pd # optional
class Elo:
"""Base class to generate elo ratings
Includes the ability for some improvements over the original methodology:
* k decay: use a higher update speed early in the season
* crunch/carryover: shift every team's ratings closer to the mean between seasons
* interstate and home_advantage
* optimised initial ratings
Hyperparameters can be fit with a grid search, eg. sklearn.model_selection.GridSearchCV
Initial ratings can be fit with a logistic regression (equivalent to a static elo) eg. sklearn.linear_model.LogisticRegression
By default assumes a logistic distribution of ratings
"""
def __init__(self, k=30, home_advantage=20, interstate_advantage=5, width=400/log(10), carryover=0.75, k_decay=0.95,
initial_ratings=None, mean_rating=1500, target='home_win_draw_loss'):
self.k = k
self.home_advantage = home_advantage
self.interstate_advantage = interstate_advantage
self.width = width
self.carryover = carryover
self.k_decay = k_decay
self.mean_rating = mean_rating
self.initial_ratings = initial_ratings or {}
self.target = target # home_win_draw_loss, home_points_ratio, home_squashed_margin
def iterate_fixtures(self, fixtures, as_dataframe=True):
"""
Parameters
----------
fixtures : list of dict or pd.DataFrame
Must be ordered. Each record (row) must have (columns): home_team, away_team, round_number, is_interstate, <self.target>
Prefer a list of records as it's much faster
We use the python stdlib math.exp which seems faster in single computation than numpy's version and therefore speeds up parameter fitting
Profile code with lprun:
%load_ext line_profiler
elo = Elo()
%lprun -f elo.iterate_fixtures elo.iterate_fixtures(fxtrain, as_dataframe=True)
"""
# new teams are given self.initial_ratings
self.current_ratings_ = defaultdict(lambda: self.mean_rating, self.initial_ratings)
if isinstance(fixtures, pd.DataFrame):
# A list of records is faster and less prone to errors on update than a DataFrame
fixtures = fixtures.reset_index().to_dict('records')
for fx in fixtures:
home_team = fx['home_team']
away_team = fx['away_team']
home_actual_result = fx[self.target]
round_number = fx['round_number']
is_interstate = fx['is_interstate']
# home_expected_result = self.predict_result(home_team, away_team, is_interstate, round_number)
# -------
home_rating_pre = self.current_ratings_[home_team]
away_rating_pre = self.current_ratings_[away_team]
if round_number == 1:
# Crunch the start of the season
# Warning: this will make an in-place change the current ratings for the end of season
# TODO: don't crunch the first round of training
home_rating_pre = self.carryover*home_rating_pre + (1-self.carryover)*self.mean_rating
away_rating_pre = self.carryover*away_rating_pre + (1-self.carryover)*self.mean_rating
ratings_diff = home_rating_pre - away_rating_pre + self.home_advantage + self.interstate_advantage*is_interstate
home_expected_result = 1.0 / (1 + exp(-ratings_diff/self.width))
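            # logistic expected score; with width = 400/ln(10) this equals the classic 1 / (1 + 10**(-diff/400))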
# self.update_ratings(home_actual_result, home_expected_result, round_number)
# ------
change_in_home_elo = self.k*self.k_decay**round_number*(home_actual_result - home_expected_result)
home_rating_post = home_rating_pre + change_in_home_elo
away_rating_post = away_rating_pre - change_in_home_elo
# update ratings
self.current_ratings_[home_team] = home_rating_post
self.current_ratings_[away_team] = away_rating_post
fx['home_rating_pre'] = home_rating_pre
fx['away_rating_pre'] = away_rating_pre
fx['home_expected_result'] = home_expected_result # proba
# fx['binary_expected_home_result'] = int(expected_home_result > 0.5) # prob
if as_dataframe:
# return pd.DataFrame(fixtures, columns=['matchid', 'home_expected_result']).set_index('matchid')
return pd.DataFrame(fixtures).set_index('matchid')
return fixtures
def fit(self, X):
# the only thing we really need to store is the *latest* rating (the system is memoryless)
# self.teams_ = ['myteam']
# self.current_ratings_ = {'myteam': 1500}
return X
    def predict_proba(self, fixtures):
        # run the fixtures through the rating loop and return each expected home result (a probability)
        return [fx['home_expected_result'] for fx in self.iterate_fixtures(fixtures, as_dataframe=False)]
    def predict(self, fixtures):
        return [int(p > 0.5) for p in self.predict_proba(fixtures)]
| true |
48f064838cbf993b4e54813f86f3344080368bf9
|
Python
|
noahwang07/python_start
|
/fresher_class.py
|
UTF-8
| 419 | 3.734375 | 4 |
[] |
no_license
|
class Human(object):
def __init__(self, name):
self.name = name
def walk(self):
print (self.name + " is walking")
def get_name(self):
return (self. name)
def set_name(self, name):
if len(name) <= 10:
self.name = name
human_a = Human("alan")
print (human_a.name)
human_a.set_name('bob')
print(human_a.name)
xy = Human('noah')
print (xy.name)
xy.walk()
xy.set_name('nova')
xy.walk()
| true |