python.su forum
class NewSqlModel(QSqlQueryModel):
    def __init__(self, parent=None):
        super(NewSqlModel, self).__init__()

    def flags(self, index):
        """Predefine the flags so the model can be edited later"""
        f1 = QSqlQueryModel.flags(self, index)
        if index.column() == 0:
            f1 |= Qt.ItemIsEditable
        return f1

    def setData(self, index, value, role=Qt.EditRole):
        """Method for updating the data"""
        self.data = QSqlQueryModel.data(self, index, role)
        if index.isValid():
            self.data(index.row(), value)
            self.dataChanged.emit(index, index, role)
            return True
        return False

    # The function with the error
    def removeRows(self, position, index, parent=QModelIndex()):
        """Delete the selected row"""
        self.beginRemoveRows(parent, position, position + index - 1)
        self.query = QSqlQuery('DELETE FROM calculation WHERE id = %d' % position)
        self.endRemoveRows()
        return True
def connect_column_calc(self):
    """Connect the model to the database and run the query against it"""
    self.calculation_model = NewSqlModel()
    self.calculation_model.setQuery('SELECT id, number_invoice, inv_date, company_name, '
                                    '(all_sum / 100.0) AS all_sum, signatory FROM calculation')

def del_pushbutton(self):
    """Deletes the selected row from the database. Triggered by a button"""
    if self.inv_tabView.selectionModel().hasSelection():
        for index in self.inv_tabView.selectedIndexes() or []:
            print('Deleting row %d...' % index.row())
            self.start_menu.model().removeRows(index.row(), index.row())
    else:
        QMessageBox.warning(None, 'Deletion error',
                            'There are no rows to delete!\n'
                            'Select a row and try deleting again.')
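A minimal sketch of how removeRows could key off the record's actual id instead of the row position (assumptions on my part: id stays the first column of the SELECT above, and refreshing the model by re-running its last query is acceptable here):

    # Sketch only: delete by the id stored in column 0 of the selected row,
    # then re-run the SELECT so the model's cached rows match the table again.
    def removeRows(self, position, count=1, parent=QModelIndex()):
        record_id = self.data(self.index(position, 0))   # id column from the SELECT above
        query = QSqlQuery()
        query.prepare('DELETE FROM calculation WHERE id = :id')
        query.bindValue(':id', record_id)
        if not query.exec_():
            return False
        self.setQuery(self.query().lastQuery())          # model reset refreshes attached views
        return True

With something like this, del_pushbutton would pass a count of 1 as the second argument rather than index.row().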
lst = [['n1', 1, 2, 3], ['n2', 11, 22, 33], ['n1', 1, 222, 333], ['n2', 11, 333, 5]]
res = {}
for line in lst:
    name = ':'.join(map(str, line[0:2]))
    res.setdefault(name, 0)
    res[name] += line[2] if line[3] < 10 else 0
print(res)
import pandas as pd

df = pd.DataFrame([['n1', 1, 2, 3], ['n2', 11, 22, 33], ['n1', 1, 222, 333], ['n2', 11, 333, 5]],
                  columns=['nm1', 'nm2', 'v1', 'v2'])
df['name'] = df.iloc[:, :2].apply(lambda r: ':'.join(r.values.astype(str)), axis=1)
df_filtered = df.loc[:, df['v2'] < 10]  # IndexingError: Unalignable boolean Series provided as indexer (index of the boolean Series and of the indexed object do not match).
df_filtered = df[df['v2'] < 10]  # works without an error
names = df_filtered.groupby('name')['v1'].sum()
df['v11'] = df['v1']
df.loc[df['v2'] > 10, 'v11'] = 0   # same effect as df['v11'][df['v2'] > 10] = 0, without chained assignment
names = df.groupby('name')['v11'].sum()
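For comparison, a hedged one-expression version of the same aggregation (assuming the df, the column names, and the v2 > 10 rule used above):

# Zero out v1 wherever v2 exceeds 10, then sum per name (same result as above)
names = (df.assign(v11=df['v1'].where(df['v2'] <= 10, 0))
           .groupby('name')['v11']
           .sum())
print(names)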
Exception in thread "main" java.lang.NoClassDefFoundError: org/deeplearning4j/nn/weights/IWeightInit
at org.deeplearning4j.nn.modelimport.keras.layers.core.KerasDense.<init>(KerasDense.java:96)
at org.deeplearning4j.nn.modelimport.keras.utils.KerasLayerUtils.getKerasLayerFromConfig(KerasLayerUtils.java:220)
at org.deeplearning4j.nn.modelimport.keras.KerasModel.prepareLayers(KerasModel.java:218)
at org.deeplearning4j.nn.modelimport.keras.KerasSequentialModel.<init>(KerasSequentialModel.java:110)
at org.deeplearning4j.nn.modelimport.keras.KerasSequentialModel.<init>(KerasSequentialModel.java:57)
at org.deeplearning4j.nn.modelimport.keras.utils.KerasModelBuilder.buildSequential(KerasModelBuilder.java:322)
at org.deeplearning4j.nn.modelimport.keras.KerasModelImport.importKerasSequentialModelAndWeights(KerasModelImport.java:223)
at NeuralNetwork.main(NeuralNetwork.java:21)
Caused by: java.lang.ClassNotFoundException: org.deeplearning4j.nn.weights.IWeightInit
at java.net.URLClassLoader.findClass(URLClassLoader.java:382)
at java.lang.ClassLoader.loadClass(ClassLoader.java:424)
at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:349)
at java.lang.ClassLoader.loadClass(ClassLoader.java:357)
... 8 more
model_fully_connected = Sequential()
model_fully_connected.add(keras.layers.Dense(17, activation='tanh', input_shape=(x_train.shape[1],),
                                             W_regularizer=l2(l2_lambda)))
model_fully_connected.add(keras.layers.Dense(17, activation='tanh', W_regularizer=l2(l2_lambda)))
model_fully_connected.add(keras.layers.LeakyReLU(alpha=0.1))
model_fully_connected.add(keras.layers.Dense(17, activation='tanh', W_regularizer=l2(l2_lambda)))
model_fully_connected.add(keras.layers.LeakyReLU(alpha=0.1))
model_fully_connected.add(keras.layers.Dense(17, activation='tanh', W_regularizer=l2(l2_lambda)))
model_fully_connected.add(keras.layers.Dense(1))
model_fully_connected.compile(optimizer='adam', loss='mse', metrics=["mae", "mse"])
history = model_fully_connected.fit(x_train, y_train, epochs=10, batch_size=1, verbose=2,
                                    validation_data=(x_test, y_test))

# Save the trained neural network
model_fully_connected.save("trained _neural_network.H5", True, True)
MultiLayerNetwork modelMultiLayer = null;
KerasModelImport kerasModelImport = new KerasModelImport();
try {
    modelMultiLayer = kerasModelImport.importKerasSequentialModelAndWeights("E:\\Java\\neuralwork\\trained _neural_network.H5");
} catch (IOException e) {
    e.printStackTrace();
} catch (InvalidKerasConfigurationException e) {
    e.printStackTrace();
} catch (UnsupportedKerasConfigurationException e) {
    e.printStackTrace();
}
System.out.println(modelMultiLayer.conf());
<dependency>
    <groupId>org.deeplearning4j</groupId>
    <artifactId>deeplearning4j-core</artifactId>
    <version>1.0.0-beta2</version>
</dependency>
<dependency>
    <groupId>org.nd4j</groupId>
    <artifactId>nd4j-native-platform</artifactId>
    <version>1.0.0-beta2</version>
</dependency>
<dependency>
    <groupId>com.google.cloud.dataflow</groupId>
    <artifactId>google-cloud-dataflow-java-sdk-all</artifactId>
    <version>2.2.0</version>
</dependency>
<dependency>
    <groupId>org.deeplearning4j</groupId>
    <artifactId>deeplearning4j-modelimport</artifactId>
    <version>1.0.0-beta7</version>
</dependency>
import numpy
import pandas as pd
import math
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from sklearn.metrics import accuracy_score

# convert an array of values into a dataset matrix
def create_dataset(dataset, look_back=0):
    dataX, dataY = [], []
    for i in range(len(dataset)-look_back):
        a = dataset[i:(i+look_back), 0]
        dataX.append(a)
        dataY.append(dataset[i + look_back, 0])
    return numpy.array(dataX), numpy.array(dataY)

# fix random seed for reproducibility
numpy.random.seed(7)
# load the dataset
file = 'test1.xlsx'
xl = pd.ExcelFile(file)
dataframe = xl.parse('Sheet1')
dataset = dataframe.values
dataset = dataset.astype('float32')
# normalize the dataset
scaler = MinMaxScaler(feature_range=(1, 3))
dataset = scaler.fit_transform(dataset)
# split into train and test sets
train_size = int(len(dataset) * 0.80)
test_size = len(dataset) - train_size
train, test = dataset[0:train_size, :], dataset[train_size:len(dataset), :]
# reshape into X=t and Y=t+1
look_back = 1
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
# reshape input to be [samples, time steps, features]
trainX = numpy.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
testX = numpy.reshape(testX, (testX.shape[0], 1, testX.shape[1]))
# create and fit the LSTM network
model = Sequential()
model.add(LSTM(4, input_shape=(1, look_back)))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy'])
model.fit(trainX, trainY, epochs=2, batch_size=1, verbose=2)
# make predictions
trainPredict = model.predict(trainX)
testPredict = model.predict(testX)
# invert predictions
trainPredict = scaler.inverse_transform(trainPredict)
trainY = scaler.inverse_transform([trainY])
testPredict = scaler.inverse_transform(testPredict)
testY = scaler.inverse_transform([testY])
# calculate root mean squared error
trainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:, 0]))
print('Test Score: %.2f RMSE' % (trainScore))
testScore = math.sqrt(mean_squared_error(testY[0], testPredict[:, 0]))
print('Test Score: %.2f RMSE' % (testScore))
#
print(len(dataset))
print(len(testX) + (len(trainX)))
print(len(testY.T) + (len(trainY.T)))
Epoch 1/2
 - 57s - loss: 1.1863 - accuracy: 1.0000
Epoch 2/2
 - 57s - loss: 0.7782 - accuracy: 1.0000
Test Score: 0.87 RMSE
Test Score: 0.84 RMSE
1503
1501
1501
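A note on the last three numbers (my reading of the code above, not from the original post): create_dataset yields len(split) - look_back samples per split, so train and test together hold 2 * look_back fewer samples than the whole dataset. A tiny check under the same variables:

# look_back = 1: each split loses one trailing row when the (X, Y) pairs are built,
# so 1503 rows become 1201 + 300 = 1501 samples
expected = (len(train) - look_back) + (len(test) - look_back)
print(expected, len(trainX) + len(testX))   # both print 1501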
import pyttsx3
import speech_recognition as sr
import pyaudio

r = sr.Recognizer()
with sr.Microphone(device_index=5) as source:
    print("say something")
    audio = r.listen(source)
quere = r.recognize_google(audio, language="ru-RU")
print(quere.lower())

Exception has occurred: UnknownValueError
  File "C:\Users\1\xd\Untitled-1.py", line 8, in <module>
    quere = r.recognize_google(audio, language="ru-RU")
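UnknownValueError is what recognize_google raises when the captured audio cannot be transcribed. A minimal sketch of one way to handle it (my addition, not from the original post):

import speech_recognition as sr

r = sr.Recognizer()
with sr.Microphone(device_index=5) as source:
    r.adjust_for_ambient_noise(source)   # lowers the chance of capturing only noise
    print("say something")
    audio = r.listen(source)

try:
    quere = r.recognize_google(audio, language="ru-RU")
    print(quere.lower())
except sr.UnknownValueError:
    print("Speech was not recognized, please try again")
except sr.RequestError as e:
    print("Google Speech Recognition request failed:", e)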
# -*- coding: utf-8 -*-
import pafy

url = "https://www.youtube.com/watch?v=eACohWVwTOc"
video = pafy.new(url)
streams = video.streams
for i in streams:
    print(i)

# get the best resolution regardless of format
best = video.getbest()
print(best.resolution, best.extension)

# download the video
best.download()