Upload backup

Branch: master
王兵 committed 5 months ago
commit b2798b24cc

.gitignore
@@ -0,0 +1,21 @@
target/
pom.xml.tag
pom.xml.releaseBackup
pom.xml.versionsBackup
pom.xml.next
release.properties
/.idea
*.iml
/.settings
/bin
/gen
/build
/gradle
/classes
.classpath
.project
*.gradle
gradlew
local.properties
node_modules/
data/

README.md
@@ -0,0 +1 @@
## Deep Learning

pom.xml
@@ -0,0 +1,91 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <groupId>xyz.wbsite</groupId>
    <artifactId>starter-dl4j</artifactId>
    <version>0.0.1-SNAPSHOT</version>
    <packaging>jar</packaging>
    <properties>
        <java.version>17</java.version>
        <!-- JDK 17 or above is required -->
        <langchain4j.version>1.0.0-beta2</langchain4j.version>
    </properties>
    <repositories>
        <!-- Point the central repository at the Aliyun aggregate mirror to speed up downloads -->
        <repository>
            <id>central</id>
            <name>Central Repository</name>
            <layout>default</layout>
            <url>https://maven.aliyun.com/repository/public</url>
        </repository>
    </repositories>
    <pluginRepositories>
        <!-- Point the plugin repository at the Aliyun aggregate mirror to fix plugin download
             failures on older Maven versions and to speed up downloads -->
        <pluginRepository>
            <id>central</id>
            <name>Central Repository</name>
            <url>https://maven.aliyun.com/repository/public</url>
            <layout>default</layout>
        </pluginRepository>
    </pluginRepositories>
    <build>
        <plugins>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-compiler-plugin</artifactId>
                <configuration>
                    <source>17</source>
                    <target>17</target>
                </configuration>
            </plugin>
        </plugins>
    </build>
    <dependencies>
        <!-- Hutool utility library: common APIs that avoid reinventing the wheel -->
        <dependency>
            <groupId>cn.hutool</groupId>
            <artifactId>hutool-all</artifactId>
            <version>5.8.24</version>
        </dependency>
        <!-- Deeplearning4J core library, includes the Word2Vec implementation -->
        <dependency>
            <groupId>org.deeplearning4j</groupId>
            <artifactId>deeplearning4j-core</artifactId>
            <version>1.0.0-M2.1</version>
        </dependency>
        <!-- ND4J numerical computing library, required by Deeplearning4J -->
        <dependency>
            <groupId>org.nd4j</groupId>
            <artifactId>nd4j-native</artifactId>
            <version>1.0.0-M2.1</version>
        </dependency>
        <!-- Platform-specific native binaries; the version must match nd4j-native -->
        <dependency>
            <groupId>org.nd4j</groupId>
            <artifactId>nd4j-native-platform</artifactId>
            <version>1.0.0-M2.1</version>
        </dependency>
        <!-- Arbiter UI for visualizing hyperparameter tuning -->
        <dependency>
            <groupId>org.deeplearning4j</groupId>
            <artifactId>arbiter-ui</artifactId>
            <version>1.0.0-beta7</version>
        </dependency>
        <!-- Logging framework -->
        <dependency>
            <groupId>ch.qos.logback</groupId>
            <artifactId>logback-classic</artifactId>
            <version>1.2.11</version>
        </dependency>
        <dependency>
            <groupId>org.slf4j</groupId>
            <artifactId>slf4j-api</artifactId>
            <version>1.7.32</version>
        </dependency>
    </dependencies>
</project>
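Because nd4j-native ships platform-specific binaries, a quick way to confirm the dependencies resolved correctly is to print the backend ND4J selects at startup. A minimal sketch (the class name BackendCheck is illustrative, not part of this commit):

package xyz.wbsite.ai;

import org.nd4j.linalg.factory.Nd4j;

public class BackendCheck {
    public static void main(String[] args) {
        // Prints the backend ND4J loaded (CPU by default);
        // initialization fails fast if no native binary matches the platform
        System.out.println("ND4J backend: " + Nd4j.getBackend().getClass().getSimpleName());
    }
}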

src/main/java/xyz/wbsite/ai/Dl4j_Example.java
@@ -0,0 +1,15 @@
package xyz.wbsite.ai;

import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;

public class Dl4j_Example {
    public static void main(String[] args) {
        // https://blog.csdn.net/m290345792/article/details/147009026
        // Smoke test: build a 2x2 matrix [[1,2],[3,4]] and sum its elements (10)
        INDArray matrix = Nd4j.create(new float[]{1, 2, 3, 4}, new int[]{2, 2});
        System.out.println("Matrix sum: " + matrix.sum());
    }
}
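For a slightly broader check of the ND4J API used above, a few more basic operations on the same matrix; a minimal sketch, assuming the 1.0.0-M2.1 API (the class name Dl4j_MatrixOps is illustrative; all calls are standard INDArray/Nd4j methods):

package xyz.wbsite.ai;

import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;

public class Dl4j_MatrixOps {
    public static void main(String[] args) {
        INDArray m = Nd4j.create(new float[]{1, 2, 3, 4}, new int[]{2, 2});
        // Matrix product m * m = [[7,10],[15,22]]
        System.out.println("m * m = " + m.mmul(m));
        // Transpose swaps rows and columns: [[1,3],[2,4]]
        System.out.println("m^T = " + m.transpose());
        // Summing along dimension 0 collapses rows into column sums: [4, 6]
        System.out.println("column sums = " + m.sum(0));
    }
}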

src/main/java/xyz/wbsite/ai/Dl4j_LeNetMNIST.java
@@ -0,0 +1,158 @@
package xyz.wbsite.ai;
import org.apache.commons.io.FilenameUtils;
import org.deeplearning4j.datasets.iterator.impl.MnistDataSetIterator;
import org.deeplearning4j.nn.conf.MultiLayerConfiguration;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.inputs.InputType;
import org.deeplearning4j.nn.conf.layers.ConvolutionLayer;
import org.deeplearning4j.nn.conf.layers.DenseLayer;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.deeplearning4j.nn.conf.layers.PoolingType;
import org.deeplearning4j.nn.conf.layers.SubsamplingLayer;
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.deeplearning4j.nn.weights.WeightInit;
import org.deeplearning4j.optimize.api.InvocationType;
import org.deeplearning4j.optimize.listeners.EvaluativeListener;
import org.deeplearning4j.optimize.listeners.ScoreIterationListener;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.dataset.api.iterator.DataSetIterator;
import org.nd4j.linalg.learning.config.Adam;
import org.nd4j.linalg.lossfunctions.LossFunctions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
/*******************************************************************************
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 * See the NOTICE file distributed with this work for additional
 * information regarding copyright ownership.
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/
public class Dl4j_LeNetMNIST {

    private static final Logger log = LoggerFactory.getLogger(Dl4j_LeNetMNIST.class);

    public static void main(String[] args) throws Exception {
        int nChannels = 1;   // Number of input channels
        int outputNum = 10;  // Number of possible outcomes (digits 0-9)
        int batchSize = 64;  // Batch size for the training and test iterators
        int nEpochs = 1;     // Number of training epochs
        int seed = 123;      // Random seed for reproducibility

        /*
         * Create an iterator using the batch size for one iteration
         */
        log.info("Load data....");
        DataSetIterator mnistTrain = new MnistDataSetIterator(batchSize, true, 12345);
        DataSetIterator mnistTest = new MnistDataSetIterator(batchSize, false, 12345);

        /*
         * Construct the neural network
         */
        log.info("Build model....");
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                .seed(seed)
                .l2(0.0005)
                .weightInit(WeightInit.XAVIER)
                .updater(new Adam(1e-3))
                .list()
                .layer(new ConvolutionLayer.Builder(5, 5)
                        // nIn and nOut specify depth: nIn here is nChannels and nOut is the number of filters to be applied
                        .nIn(nChannels)
                        .stride(1, 1)
                        .nOut(20)
                        .activation(Activation.IDENTITY)
                        .build())
                .layer(new SubsamplingLayer.Builder(PoolingType.MAX)
                        .kernelSize(2, 2)
                        .stride(2, 2)
                        .build())
                .layer(new ConvolutionLayer.Builder(5, 5)
                        // Note that nIn need not be specified in later layers
                        .stride(1, 1)
                        .nOut(50)
                        .activation(Activation.IDENTITY)
                        .build())
                .layer(new SubsamplingLayer.Builder(PoolingType.MAX)
                        .kernelSize(2, 2)
                        .stride(2, 2)
                        .build())
                .layer(new DenseLayer.Builder().activation(Activation.RELU)
                        .nOut(500)
                        .build())
                .layer(new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
                        .nOut(outputNum)
                        .activation(Activation.SOFTMAX)
                        .build())
                .setInputType(InputType.convolutionalFlat(28, 28, 1)) // See note below
                .build();
        /*
         * Regarding the .setInputType(InputType.convolutionalFlat(28,28,1)) line: This does a few things.
         * (a) It adds preprocessors, which handle things like the transition between the
         *     convolutional/subsampling layers and the dense layer
         * (b) Does some additional configuration validation
         * (c) Where necessary, sets the nIn (number of input neurons, or input depth in the case of CNNs)
         *     values for each layer based on the size of the previous layer (but it won't override values
         *     manually set by the user)
         *
         * InputTypes can be used with other layer types too (RNNs, MLPs etc), not just CNNs.
         * For normal images (when using ImageRecordReader) use InputType.convolutional(height,width,depth).
         * MNIST record reader is a special case, that outputs 28x28 pixel grayscale (nChannels=1) images, in a
         * "flattened" row vector format (i.e., 1x784 vectors), hence the "convolutionalFlat" input type used here.
         */
        MultiLayerNetwork model = new MultiLayerNetwork(conf);
        model.init();

        log.info("Train model...");
        // Print the score every 10 iterations and evaluate on the test set at the end of every epoch
        model.setListeners(new ScoreIterationListener(10), new EvaluativeListener(mnistTest, 1, InvocationType.EPOCH_END));
        model.fit(mnistTrain, nEpochs);

        String path = FilenameUtils.concat(System.getProperty("java.io.tmpdir"), "lenetmnist.zip");
        log.info("Saving model to tmp folder: " + path);
        model.save(new File(path), true);

        log.info("****************Example finished********************");
    }
}
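Once the run above has written lenetmnist.zip, the network can be restored and evaluated without retraining. A minimal sketch, assuming the default save path used by the example (the class name Dl4j_LoadLeNet is illustrative; MultiLayerNetwork.load and evaluate are standard DL4J/ND4J APIs at 1.0.0-M2.1):

package xyz.wbsite.ai;

import org.deeplearning4j.datasets.iterator.impl.MnistDataSetIterator;
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.nd4j.evaluation.classification.Evaluation;
import org.nd4j.linalg.dataset.api.iterator.DataSetIterator;

import java.io.File;

public class Dl4j_LoadLeNet {
    public static void main(String[] args) throws Exception {
        // Same location Dl4j_LeNetMNIST saved the model to
        File modelFile = new File(System.getProperty("java.io.tmpdir"), "lenetmnist.zip");
        // Second argument: also restore the updater state (only needed to resume training)
        MultiLayerNetwork model = MultiLayerNetwork.load(modelFile, false);
        // Evaluate accuracy/precision/recall on the MNIST test set
        DataSetIterator mnistTest = new MnistDataSetIterator(64, false, 12345);
        Evaluation eval = model.evaluate(mnistTest);
        System.out.println(eval.stats());
    }
}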