python.su forum
import win32gui

def getPixel(alt, data):
    try:
        x, y = data
    except TypeError:
        print("Error: wrong x, y data in getPixel")
        return None
    hwnd = alt.hwnd
    # win32gui.InvalidateRect(hwnd, None, True)
    # win32gui.UpdateWindow(hwnd)
    hdc = win32gui.GetDC(hwnd)
    if hdc == 0:
        print("Error: can't get HDC")
        return None
    color = None
    try:
        # GetPixel takes the device context as its first argument
        color = win32gui.GetPixel(hdc, int(x), int(y))
    except Exception as e:
        print(f"Error, can't get color: {e}")
    win32gui.ReleaseDC(hwnd, hdc)
    if color is not None:
        color = rgba(color)  # rgba() is not shown in the post; see the sketch below
    return color
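The rgba helper itself isn't shown in the post. win32gui.GetPixel returns a COLORREF packed as 0x00BBGGRR, so a minimal sketch of such a converter might look like this (the name and the returned tuple shape are assumptions):

def rgba(colorref):
    # COLORREF packs the channels as 0x00BBGGRR: red in the low byte
    r = colorref & 0xFF
    g = (colorref >> 8) & 0xFF
    b = (colorref >> 16) & 0xFF
    return (r, g, b)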
import win32gui
import win32ui
import win32con
from PIL import Image

# hwnd, width and height are assumed to be defined earlier in the poster's code
hdc_window = win32gui.GetDC(hwnd)
hdc_src = win32ui.CreateDCFromHandle(hdc_window)  # one wrapper object, reused below
hdc_mem = hdc_src.CreateCompatibleDC()
bmp = win32ui.CreateBitmap()
bmp.CreateCompatibleBitmap(hdc_src, width, height)
hdc_mem.SelectObject(bmp)
hdc_mem.BitBlt((0, 0), (width, height), hdc_src, (0, 0), win32con.SRCCOPY)

bmp_info = bmp.GetInfo()
bmp_str = bmp.GetBitmapBits(True)
img = Image.frombuffer('RGB',
                       (bmp_info['bmWidth'], bmp_info['bmHeight']),
                       bmp_str, 'raw', 'BGRX', 0, 1)

# release the GDI resources
hdc_mem.DeleteDC()
hdc_src.DeleteDC()
win32gui.ReleaseDC(hwnd, hdc_window)
win32gui.DeleteObject(bmp.GetHandle())
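If BitBlt yields a black image for a DWM-composited window, a common alternative is PrintWindow with flag 2 (PW_RENDERFULLCONTENT, Windows 8.1+). A minimal sketch reusing the objects above, in place of the BitBlt call:

import ctypes
# ask the window to render itself into the memory DC;
# flag 2 is PW_RENDERFULLCONTENT (Windows 8.1+)
ok = ctypes.windll.user32.PrintWindow(hwnd, hdc_mem.GetSafeHdc(), 2)
if not ok:
    print("PrintWindow failed")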
import ctypes
from ctypes import wintypes

DWMWA_USE_IMMERSIVE_DARK_MODE = 20
DWMWA_FORCE_ICONIC_REPRESENTATION = 7
DWMWA_CLOAKED = 14

user32 = ctypes.windll.user32
dwmapi = ctypes.windll.dwmapi

def disable_dcomp(hwnd):
    value = wintypes.BOOL(False)
    dwmapi.DwmSetWindowAttribute(hwnd, DWMWA_USE_IMMERSIVE_DARK_MODE,
                                 ctypes.byref(value), ctypes.sizeof(value))
    value = ctypes.c_int(0)  # DirectComposition
    dwmapi.DwmSetWindowAttribute(hwnd, DWMWA_CLOAKED,
                                 ctypes.byref(value), ctypes.sizeof(value))

def disableGDI(alt, data):
    hwnd = alt.hwnd
    value = ctypes.c_int(0)
    # disable DirectComposition
    dwmapi.DwmSetWindowAttribute(hwnd, DWMWA_CLOAKED,
                                 ctypes.byref(value), ctypes.sizeof(value))
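For context, a hypothetical usage of disable_dcomp: locate the target window by title and apply the attribute tweaks (the FindWindow call and the window title are assumptions, not from the post):

import win32gui

# hypothetical: find a window by its title, then apply the DWM tweaks
hwnd = win32gui.FindWindow(None, "Untitled - Notepad")
if hwnd:
    disable_dcomp(hwnd)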
# From https://www.kaggle.com/code/freacle/part-1
# The original article https://habr.com/ru/articles/869118/
# contains many code fragments with explanations.
import numpy as np

Parameter = None

def ParameterObj():
    class Parameter:
        layers = []
        calling = dict()

        def __init__(self, info):
            Parameter.layers.append(info[0])
            Parameter.calling[info[0]] = info[1:]
    return Parameter

class Module:
    def __init__(self):
        self._constructor_Parameter = ParameterObj()
        global Parameter
        Parameter = self._constructor_Parameter

    def forward(self):
        pass

    def __call__(self, x):
        return self.forward(x)

    def parameters(self):
        return self

class Linear:
    def __init__(self, input_channels: int, output_channels: int, bias=True):
        self.input_channels = input_channels
        self.output_channels = output_channels
        self.bias = bias
        self.backward_list = []
        if bias:
            Parameter([self,
                       np.random.uniform(-0.5, 0.5, size=(self.input_channels, self.output_channels)),
                       np.random.uniform(-0.5, 0.5, size=self.output_channels)])
        else:
            Parameter([self,
                       np.random.uniform(-0.5, 0.5, size=(self.input_channels, self.output_channels)),
                       np.zeros(self.output_channels)])

    def __call__(self, x):
        self.x = np.array(x, copy=True)
        result = x @ Parameter.calling[self][0] + Parameter.calling[self][1]
        return result

    def backward(self, input_matrix):
        # the original used self.weight, which does not exist;
        # the weights live in Parameter.calling[self][0]
        x_gradient = input_matrix @ Parameter.calling[self][0].T
        self.weight_gradient = self.x.T @ input_matrix
        self.bias_gradient = input_matrix.mean(axis=0)
        return x_gradient

class Flatten:
    def __call__(self, x):
        return x.reshape(1, -1)

class ReLU:
    def __call__(self, x):
        self.x = x
        return np.maximum(0, x)

    def backward(self, input_matrix):
        return (self.x > 0) * input_matrix

class Softmax:
    def __call__(self, z):
        return np.exp(z) / np.sum(np.exp(z), axis=1).reshape(-1, 1)

class CrossEntropyLoss:
    def __init__(self):
        self.predicted = None
        self.true = None

    def __call__(self, logits, true):
        predicted = np.exp(logits) / np.sum(np.exp(logits), axis=1).reshape(-1, 1)  # softmax
        self.predicted = np.array(predicted, copy=True)  # keep copies of the inputs for the backward pass
        self.true = np.array(true, copy=True)
        number_of_classes = predicted.shape[1]  # number of classes, 2 in our case
        # compute the loss value directly from the formula
        self.loss = -1 * np.sum(true * np.log(predicted + 1e-5), axis=1)
        return self

    def backward(self):
        loss = self.predicted - self.true
        # iterate over the layers in reverse order, since everything was saved in Parameter.layers
        for index, layer in enumerate(Parameter.layers[::-1]):
            if type(layer).__name__ == 'Linear':
                # normalizing by loss.shape[0] is needed because the update size depends on the batch size
                changes_w = (layer.x.T @ loss) / loss.shape[0]
                if layer.bias:
                    changes_b = np.sum(loss, axis=0) / loss.shape[0]  # sum per output, not over all elements
                else:
                    changes_b = 0
                layer.backward_list = [changes_w, changes_b]
                # propagate the gradient to the next layers
                loss = loss @ Parameter.calling[layer][0].T
            elif type(layer).__name__ == 'ReLU':
                # note: only Linear layers register themselves in Parameter.layers,
                # so this branch never fires for SimpleNet below and the ReLU derivative is skipped
                loss = layer.backward(loss)

class SGD:
    def __init__(self, model, learning_rate):
        self.model = model
        self.lr = learning_rate

    def step(self):
        for index, layer in enumerate(self.model._constructor_Parameter.layers[::-1]):
            if type(layer).__name__ == 'Linear':
                weight, bias = self.model._constructor_Parameter.calling[layer]
                weight_gradient, bias_gradient = layer.backward_list
                # the original referenced a global lr here; use self.lr instead
                self.model._constructor_Parameter.calling[layer] = [
                    weight - self.lr * weight_gradient,
                    bias - self.lr * bias_gradient,
                ]

class SimpleNet(Module):
    def __init__(self):
        super().__init__()
        self.linear1 = Linear(input_channels=25, output_channels=10, bias=True)
        self.linear2 = Linear(input_channels=10, output_channels=2, bias=True)
        self.flatten = Flatten()
        self.relu = ReLU()
        self.softmax = Softmax()

    def forward(self, x):
        x_1 = self.flatten(x)
        x_2 = self.linear1(x_1)
        x_3 = self.relu(x_2)
        x_4 = self.linear2(x_3)
        return x_4

input_x = np.array([[ 0.99197708, -0.77980023, -0.8391331 , -0.41970686,  0.72636492],
                    [ 0.85901409, -0.22374584, -1.95850625, -0.81685145,  0.96359871],
                    [-0.42707937, -0.50053309,  0.34049477,  0.62106931, -0.76039365],
                    [ 0.34206742,  2.15131285,  0.80851759,  0.28673013,  0.84706839],
                    [-1.70231094,  0.36473216,  0.33631525, -0.92515589, -2.57602677]])
target_x = np.array([[1, 0]])

loss_fn = CrossEntropyLoss()
model = SimpleNet()
optim = SGD(model.parameters(), learning_rate=0.01)
for i in range(100):
    output = model(input_x)
    loss = loss_fn(output, target_x)
    loss.backward()
    optim.step()
    if (i % 20) == 0:
        print(loss.loss, i)
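Not part of the article, but a quick finite-difference probe on a single weight of linear2 is an easy sanity check for the analytic gradient. A sketch using the objects trained above (linear2 is chosen because its gradient does not pass through the skipped ReLU):

# finite-difference gradient check for one weight of linear2 (sketch, not from the article)
eps = 1e-6
layer = model.linear2
W = model._constructor_Parameter.calling[layer][0]  # weight matrix of linear2

W[0, 0] += eps
loss_plus = loss_fn(model(input_x), target_x).loss.sum()
W[0, 0] -= 2 * eps
loss_minus = loss_fn(model(input_x), target_x).loss.sum()
W[0, 0] += eps  # restore the original weight

numeric = (loss_plus - loss_minus) / (2 * eps)
loss_fn(model(input_x), target_x).backward()
analytic = layer.backward_list[0][0, 0]
print(f"numeric {numeric:.6f} vs analytic {analytic:.6f}")  # should agree closely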
import numpy as np

class Parameter:
    layers = []
    calling = {}

    def __init__(self, info):
        self.layers.append(info[0])
        self.calling[info[0]] = info[1:]

class Module:
    # class attribute, so subclasses can use self.Parameter
    # without having to call Module.__init__
    Parameter = Parameter

    def forward(self):
        pass

    def __call__(self, x):
        return self.forward(x)

    def parameters(self):
        return self

class Linear(Module):
    def __init__(self, input_channels: int, output_channels: int, bias=True):
        self.input_channels = input_channels
        self.output_channels = output_channels
        self.bias = bias
        self.backward_list = []
        if bias:
            self.Parameter([self,
                            np.random.uniform(-0.5, 0.5, size=(input_channels, output_channels)),
                            np.random.uniform(-0.5, 0.5, size=output_channels)])
        else:
            self.Parameter([self,
                            np.random.uniform(-0.5, 0.5, size=(input_channels, output_channels)),
                            np.zeros(output_channels)])

    def __call__(self, x):
        self.x = np.array(x, copy=True)
        return x @ self.Parameter.calling[self][0] + self.Parameter.calling[self][1]

    def backward(self, input_matrix):
        x_gradient = input_matrix @ self.Parameter.calling[self][0].T
        # weight gradient is x.T @ grad, with shape (in, out)
        self.backward_list = [self.x.T @ input_matrix, input_matrix.mean(axis=0)]
        return x_gradient

class Flatten(Module):
    def __call__(self, x):
        return x.reshape(x.shape[0], -1)  # keeps the batch axis

class ReLU(Module):
    def __call__(self, x):
        self.x = x
        return np.maximum(0, x)

    def backward(self, input_matrix):
        return (self.x > 0) * input_matrix

class Softmax(Module):
    def __call__(self, z):
        exp_z = np.exp(z)
        return exp_z / exp_z.sum(axis=1, keepdims=True)

class CrossEntropyLoss(Module):
    def __call__(self, logits, true):
        self.predicted = Softmax()(logits)  # Softmax is a module-level class, not an attribute
        self.true = np.array(true, copy=True)
        self.loss = -np.sum(true * np.log(self.predicted + 1e-5), axis=1)
        return self

    def backward(self):
        loss = self.predicted - self.true
        # as in part 1, only Linear layers are registered in Parameter.layers,
        # so the ReLU branch below never fires with SimpleNet as written
        for layer in self.Parameter.layers[::-1]:
            if isinstance(layer, Linear):
                weight_gradient = (layer.x.T @ loss) / loss.shape[0]
                bias_gradient = np.sum(loss, axis=0) / loss.shape[0] if layer.bias else 0
                # assign the whole list: backward_list starts out empty,
                # so indexing into it would raise IndexError
                layer.backward_list = [weight_gradient, bias_gradient]
                loss = loss @ layer.Parameter.calling[layer][0].T
            elif isinstance(layer, ReLU):
                loss = layer.backward(loss)

class SGD:
    def __init__(self, model, learning_rate):
        self.model = model
        self.lr = learning_rate

    def step(self):
        for layer in self.model.Parameter.layers[::-1]:
            if isinstance(layer, Linear):
                weight, bias = layer.Parameter.calling[layer]
                weight_gradient, bias_gradient = layer.backward_list
                layer.Parameter.calling[layer] = [weight - self.lr * weight_gradient,
                                                  bias - self.lr * bias_gradient]

class SimpleNet(Module):
    def __init__(self):
        super().__init__()
        self.linear1 = Linear(input_channels=25, output_channels=10, bias=True)
        self.linear2 = Linear(input_channels=10, output_channels=2, bias=True)
        self.flatten = Flatten()
        self.relu = ReLU()
        self.softmax = Softmax()

    def forward(self, x):
        x_1 = self.flatten(x)
        x_2 = self.linear1(x_1)
        x_3 = self.relu(x_2)
        x_4 = self.linear2(x_3)
        return x_4

input_x = np.array([[ 0.99197708, -0.77980023, -0.8391331 , -0.41970686,  0.72636492],
                    [ 0.85901409, -0.22374584, -1.95850625, -0.81685145,  0.96359871],
                    [-0.42707937, -0.50053309,  0.34049477,  0.62106931, -0.76039365],
                    [ 0.34206742,  2.15131285,  0.80851759,  0.28673013,  0.84706839],
                    [-1.70231094,  0.36473216,  0.33631525, -0.92515589, -2.57602677]])
input_x = input_x.reshape(1, 5, 5)  # add a batch axis: Flatten here keeps the batch dimension
target_x = np.array([[1, 0]])

loss_fn = CrossEntropyLoss()
model = SimpleNet()
optim = SGD(model.parameters(), learning_rate=0.01)
for i in range(100):
    output = model(input_x)
    loss = loss_fn(output, target_x)
    loss.backward()
    optim.step()
    if (i % 20) == 0:
        print(loss.loss, i)
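As a quick check after training (not in the original post), the logits can be passed through Softmax to see the predicted class probabilities:

# inspect the predicted probabilities after training (sketch)
probs = Softmax()(model(input_x))
print(probs)  # should drift toward target_x = [[1, 0]] as the loss drops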
import psutil

def check_python_program(program_name):
    # walk over all running processes and compare names
    for proc in psutil.process_iter():
        try:
            if proc.name() == program_name:
                return True
        except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
            pass
    return False

if check_python_program("msedge.exe"):
    print("The program is running.")
else:
    print("The program is not running.")
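A slightly cheaper variant of the same check prefetches the process name in a single pass, which psutil supports via process_iter(['name']) (a sketch, equivalent in behavior):

def check_python_program_fast(program_name):
    # process_iter(['name']) prefetches the name into proc.info,
    # avoiding one call per process
    return any(proc.info['name'] == program_name
               for proc in psutil.process_iter(['name']))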
import networkx as nx

# n_lines and n_nods are assumed to be defined earlier in the poster's code
G = nx.Graph()
pos = {}
n_nd = 1
for i_l in range(n_lines):
    for i_n in range(n_nods[i_l]):
        G.add_node(n_nd, label=str(n_nd))  # was add_node(1, ...): every node got the same id 1
        pos[n_nd] = (i_l, i_n)             # accumulate positions instead of overwriting the dict
        G.add_edge(i_l, i_n)               # connects nodes i_l and i_n, creating them if absent
        n_nd += 1
# the keyword is with_labels, not with_label; pos can be passed as
# nx.draw(G, pos, with_labels=True) once every node has a position
nx.draw(G, with_labels=True)
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from tensorflow import keras
from tensorflow.keras import layers

# Create synthetic data
data_size = 10000
np.random.seed(42)

# Generate the inputs
data = {
    'ti': np.random.randint(0, 24, data_size),    # input 1
    'te': np.random.uniform(-30, 50, data_size),  # input 2
    'en': np.random.randint(0, 41, data_size),    # input 3
    'tem': np.random.uniform(40, 45, data_size),  # input 4
    'energy_consumption': None                    # output, filled in below
}

# Build the DataFrame
df = pd.DataFrame(data)

# Define the dependency: synthetic energy-consumption values
# from a simple linear equation with some added noise
df['energy_consumption'] = (
    50
    + 20 * df['ti']
    + 15 * df['en']
    - 0.5 * df['te']  # the original had df['te '] with a stray space, a KeyError
    + 5 * (df['tem'] - 40)
    + np.random.normal(0, 10, data_size)  # random noise
)

# Split into features and target
X1 = df[['ti', 'te', 'en', 'tem']]
y = df['energy_consumption']

# Train/test split
X_train, X_test, y_train, y_test = train_test_split(X1, y, test_size=0.2, random_state=42)

# Scale the features
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)

# Build the network
model = keras.Sequential([
    layers.Dense(64, activation='relu', input_shape=(X_train_scaled.shape[1],)),
    layers.Dense(32, activation='relu'),
    layers.Dense(1)  # single output: we predict one value
])

# Compile and train the model
model.compile(optimizer='adam', loss='mean_squared_error')
model.fit(X_train_scaled, y_train, epochs=100, batch_size=32, validation_split=0.2)

# Evaluate the model
loss = model.evaluate(X_test_scaled, y_test)
print(f"Test loss: {loss:.2f}")

# Predict on the test data
y_pred = model.predict(X_test_scaled)
print("First 5 predicted values:")
print(y_pred[:5].flatten())

# Example: using the model for a forecast
h = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1]
t = [10, 20, 30, 40, 30, 30, 20, 20, 20, 10]
e = [40, 40, 40, 40, 20, 20, 20, 20, 20, 40]
tg = [45, 45, 45, 45, 45, 45, 45, 45, 45, 45]
for i in range(10):
    sample_data = np.array([[h[i], t[i], e[i], tg[i]]])
    sample_data_scaled = scaler.transform(sample_data)
    predicted_consumption = model.predict(sample_data_scaled)
    print(f"Forecast: ti={h[i]}, te={t[i]}, en={e[i]}, tem={tg[i]}: "
          f"{predicted_consumption[0][0]:.2f}")
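One nuance in the forecast loop: scaler was fitted on a DataFrame, so sklearn warns about missing feature names when transform later receives a bare ndarray. Passing a one-row DataFrame with the training columns avoids that (a sketch for the first sample):

# build the sample with the original column names so the scaler
# sees the same feature names it was fitted on (sketch)
sample_df = pd.DataFrame([[h[0], t[0], e[0], tg[0]]], columns=X1.columns)
predicted = model.predict(scaler.transform(sample_df))
print(f"{predicted[0][0]:.2f}")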
import sqlite3
from aiogram import Bot, Dispatcher, types
from aiogram.utils import executor
from aiogram.dispatcher.filters.state import State, StatesGroup
from aiogram.dispatcher import FSMContext
from aiogram.contrib.fsm_storage.memory import MemoryStorage

API_TOKEN = 'xxxxx'  # replace with your bot token

bot = Bot(token=API_TOKEN)
dp = Dispatcher(bot, storage=MemoryStorage())

# Create the database (if it does not exist yet)
db_name = 'mydatabase.db'
conn = sqlite3.connect(db_name)
cursor = conn.cursor()
cursor.execute('''
    CREATE TABLE IF NOT EXISTS data (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        name TEXT,
        description TEXT
    )
''')
conn.commit()
conn.close()

# /start command handler
@dp.message_handler(commands=['start'])
async def start_handler(message: types.Message):
    await message.answer("Hi! I am a bot for searching the database.\n\n"
                         "Enter a search query:")

# Search query handler
@dp.message_handler()
async def search_handler(message: types.Message):
    search_term = message.text
    results = search_data(db_name, search_term)
    if results:
        await message.answer("Search results:")
        for row in results:
            await message.answer(f"ID: {row[0]}, Name: {row[1]}, Description: {row[2]}")
    else:
        await message.answer("Nothing found.")

# Database lookup
def search_data(db_name, search_term):
    conn = sqlite3.connect(db_name)
    cursor = conn.cursor()
    cursor.execute("SELECT * FROM data WHERE name LIKE ? OR description LIKE ?",
                   ('%' + search_term + '%', '%' + search_term + '%'))
    results = cursor.fetchall()
    conn.close()
    return results

if __name__ == "__main__":
    executor.start_polling(dp, skip_updates=True)
from aiogram.utils import executor
ImportError: cannot import name 'executor' from 'aiogram.utils'
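The executor module exists only in aiogram 2.x; version 3 removed it. Either pin aiogram 2 (pip install "aiogram<3"), or port the startup code to the 3.x polling API, roughly like this (a sketch; the handlers also need 3.x-style decorators such as @dp.message(...)):

# aiogram 3.x startup sketch: no executor, polling runs under asyncio
import asyncio
from aiogram import Bot, Dispatcher

bot = Bot(token=API_TOKEN)
dp = Dispatcher()  # the 3.x Dispatcher takes no bot argument

async def main():
    await dp.start_polling(bot)

if __name__ == "__main__":
    asyncio.run(main())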
from tkinter import *

click = 0  # start at 0 so the first key press shows 1

def turtlemouv(event):
    # count key presses and show the total in the label
    global click
    click += 1
    t.config(text=click)

w = Tk()
w.geometry('200x130')
w.bind("<Key>", turtlemouv)
t = Label(w, text="0", font="Arial 35")
t.pack()
w.mainloop()