Skip to content

Commit 97fc44a

Browse files
committed
fix conflict
2 parents 0b00dad + 1f1cc22 commit 97fc44a

File tree

785 files changed

+48644
-9686
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

785 files changed

+48644
-9686
lines changed

CMakeLists.txt

+1-17
Original file line numberDiff line numberDiff line change
@@ -59,19 +59,14 @@ option(WITH_DISTRIBUTE "Compile with distributed support" OFF)
5959
option(WITH_BRPC_RDMA "Use brpc rdma as the rpc protocal" OFF)
6060
option(ON_INFER "Turn on inference optimization." OFF)
6161
################################ Internal Configurations #######################################
62-
option(WITH_ANAKIN "Compile with Anakin library" OFF)
6362
option(WITH_AMD_GPU "Compile PaddlePaddle with AMD GPU" OFF)
6463
option(WITH_NGRAPH "Compile PaddlePaddle with nGraph support." OFF)
6564
option(WITH_PROFILER "Compile PaddlePaddle with GPU profiler and gperftools" OFF)
66-
option(WITH_JEMALLOC "Compile PaddlePaddle with jemalloc" OFF)
6765
option(WITH_COVERAGE "Compile PaddlePaddle with code coverage" OFF)
6866
option(COVERALLS_UPLOAD "Package code coverage data to coveralls" OFF)
6967
option(WITH_PSLIB "Compile with pslib support" OFF)
7068
option(WITH_CONTRIB "Compile the third-party contributation" OFF)
7169
option(REPLACE_ENFORCE_GLOG "Replace PADDLE_ENFORCE with glog/CHECK for better debug." OFF)
72-
# TODO(Superjomn) Remove WITH_ANAKIN option if not needed latter.
73-
option(ANAKIN_BUILD_FAT_BIN "Build anakin cuda fat-bin lib for all device plantform, ignored when WITH_ANAKIN=OFF" OFF)
74-
option(ANAKIN_BUILD_CROSS_PLANTFORM "Build anakin lib for any nvidia device plantform. ignored when WITH_ANAKIN=OFF" ON)
7570
option(WITH_GRPC "Use grpc as the default rpc framework" ${WITH_DISTRIBUTE})
7671
option(WITH_INFERENCE_API_TEST "Test fluid inference C++ high-level api interface" OFF)
7772
option(WITH_HIGH_LEVEL_API_TEST "Test fluid python high-level api interface" OFF)
@@ -185,6 +180,7 @@ if(WITH_BRPC_RDMA)
185180
endif()
186181
endif()
187182

183+
include(anakin_subgraph)
188184

189185
include(external/threadpool)
190186
include(flags) # set paddle compile flags
@@ -207,24 +203,12 @@ if(WITH_DGC)
207203
add_definitions(-DPADDLE_WITH_DGC)
208204
endif()
209205

210-
if(WITH_MKL OR WITH_MKLML)
211-
include(external/anakin)
212-
elseif()
213-
set(WITH_ANAKIN OFF CACHE STRING "Anakin is used in MKL only now." FORCE)
214-
endif()
215-
216206
if (WITH_PROFILER)
217207
find_package(Gperftools REQUIRED)
218208
include_directories(${GPERFTOOLS_INCLUDE_DIR})
219209
add_definitions(-DWITH_GPERFTOOLS)
220210
endif()
221211

222-
if (WITH_JEMALLOC)
223-
find_package(JeMalloc REQUIRED)
224-
include_directories(${JEMALLOC_INCLUDE_DIR})
225-
add_definitions(-DPADDLE_WITH_JEMALLOC)
226-
endif()
227-
228212
include(generic) # simplify cmake module
229213
include(package) # set paddle packages
230214
include(ccache) # set ccache for compilation

Dockerfile

+1-2
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,6 @@
11
# A image for building paddle binaries
22
# Use cuda devel base image for both cpu and gpu environment
33
# When you modify it, please be aware of cudnn-runtime version
4-
# and libcudnn.so.x in paddle/scripts/docker/build.sh
54
FROM nvidia/cuda:8.0-cudnn7-devel-ubuntu16.04
65
MAINTAINER PaddlePaddle Authors <paddle-dev@baidu.com>
76

@@ -76,7 +75,7 @@ RUN curl -s -q https://glide.sh/get | sh
7675
# 2. Manually add ~IPluginFactory() in IPluginFactory class of NvInfer.h, otherwise, it couldn't work in paddle.
7776
# See https://github.com/PaddlePaddle/Paddle/issues/10129 for details.
7877

79-
RUN wget -q https://paddlepaddledeps.cdn.bcebos.com/TensorRT-4.0.1.6-ubuntu14.04.x86_64-gnu.cuda.8.0.cudnn7.0.tar.gz --no-check-certificate && \
78+
RUN wget -q https://paddlepaddledeps.bj.bcebos.com/TensorRT-4.0.1.6-ubuntu14.04.x86_64-gnu.cuda.8.0.cudnn7.0.tar.gz --no-check-certificate && \
8079
tar -zxf TensorRT-4.0.1.6-ubuntu14.04.x86_64-gnu.cuda.8.0.cudnn7.0.tar.gz -C /usr/local && \
8180
cp -rf /usr/local/TensorRT/include /usr && \
8281
cp -rf /usr/local/TensorRT/lib /usr

README.md

+9-6
Original file line numberDiff line numberDiff line change
@@ -18,20 +18,21 @@ learning to many products at Baidu.
1818
Our vision is to enable deep learning for everyone via PaddlePaddle.
1919
Please refer to our [release announcement](https://github.com/PaddlePaddle/Paddle/releases) to track the latest feature of PaddlePaddle.
2020

21-
### Latest PaddlePaddle Release: [Fluid 1.4.1](https://github.com/PaddlePaddle/Paddle/tree/release/1.4)
21+
### Latest PaddlePaddle Release: [Fluid 1.5.0](https://github.com/PaddlePaddle/Paddle/tree/release/1.5)
2222
### Install Latest Stable Release:
2323
```
2424
# Linux CPU
2525
pip install paddlepaddle
2626
# Linux GPU cuda9cudnn7
2727
pip install paddlepaddle-gpu
28+
# Linux GPU cuda10cudnn7
29+
pip install paddlepaddle-gpu==1.5.0.post107
2830
# Linux GPU cuda8cudnn7
29-
pip install paddlepaddle-gpu==1.4.1.post87
30-
# Linux GPU cuda8cudnn5
31-
pip install paddlepaddle-gpu==1.4.1.post85
31+
pip install paddlepaddle-gpu==1.5.0.post87
3232
3333
# For installation on other platform, refer to http://paddlepaddle.org/
3434
```
35+
Now our developers could acquire Tesla V100 online computing resources for free. If you create a program by AI Studio, you would obtain 12 hours to train models online per day. If you could insist on that for five consecutive days, then you would own extra 48 hours. [Click here to start](http://ai.baidu.com/support/news?action=detail&id=981).
3536

3637
## Features
3738

@@ -98,9 +99,11 @@ We provide [English](http://www.paddlepaddle.org/documentation/docs/en/1.4/begin
9899

99100
We appreciate your contributions!
100101

101-
## Ask Questions
102+
## Communication
102103

103-
You are welcome to submit questions and bug reports as [Github Issues](https://github.com/PaddlePaddle/Paddle/issues).
104+
- [Github Issues](https://github.com/PaddlePaddle/Paddle/issues): bug reports, feature requests, install issues, usage issues, etc.
105+
- QQ discussion group: 432676488 (PaddlePaddle).
106+
- [Forums](http://ai.baidu.com/forum/topic/list/168?pageNo=1): discuss implementations, research, etc.
104107

105108
## Copyright and License
106109
PaddlePaddle is provided under the [Apache-2.0 license](LICENSE).

README_cn.md

+9-6
Original file line numberDiff line numberDiff line change
@@ -16,20 +16,21 @@ PaddlePaddle (PArallel Distributed Deep LEarning) 是一个简单易用、高效
1616

1717
跟进PaddlePaddle最新特性请参考我们的[版本说明](https://github.com/PaddlePaddle/Paddle/releases)
1818

19-
### PaddlePaddle最新版本: [Fluid 1.4.1](https://github.com/PaddlePaddle/Paddle/tree/release/1.4)
19+
### PaddlePaddle最新版本: [Fluid 1.5.0](https://github.com/PaddlePaddle/Paddle/tree/release/1.5)
2020
### 安装最新稳定版本:
2121
```
2222
# Linux CPU
2323
pip install paddlepaddle
2424
# Linux GPU cuda9cudnn7
2525
pip install paddlepaddle-gpu
26+
# Linux GPU cuda10cudnn7
27+
pip install paddlepaddle-gpu==1.5.0.post107
2628
# Linux GPU cuda8cudnn7
27-
pip install paddlepaddle-gpu==1.4.1.post87
28-
# Linux GPU cuda8cudnn5
29-
pip install paddlepaddle-gpu==1.4.1.post85
29+
pip install paddlepaddle-gpu==1.5.0.post87
3030
3131
# 其他平台上的安装指引请参考 http://paddlepaddle.org/
3232
```
33+
PaddlePaddle用户可领取**免费Tesla V100在线算力资源**,训练模型更高效。**每日登陆即送12小时****连续五天运行再加送48小时**[前往使用免费算力](https://ai.baidu.com/support/news?action=detail&id=981)
3334

3435
## 特性
3536

@@ -80,9 +81,11 @@ pip install paddlepaddle-gpu==1.4.1.post85
8081

8182
欢迎您的贡献!
8283

83-
## 答疑
84+
## 交流与反馈
8485

85-
欢迎您将问题和bug报告以[Github Issues](https://github.com/PaddlePaddle/Paddle/issues)的形式提交
86+
- 欢迎您通过[Github Issues](https://github.com/PaddlePaddle/Paddle/issues)来提交问题、报告与建议
87+
- QQ群: 432676488 (PaddlePaddle)
88+
- [论坛](http://ai.baidu.com/forum/topic/list/168): 欢迎大家在PaddlePaddle论坛分享在使用PaddlePaddle中遇到的问题和经验, 营造良好的论坛氛围
8689

8790
## 版权和许可证
8891
PaddlePaddle由[Apache-2.0 license](LICENSE)提供

benchmark/fluid/README.md

+1-1
Original file line numberDiff line numberDiff line change
@@ -59,7 +59,7 @@ python -c 'from recordio_converter import *; prepare_mnist("data", 1)'
5959
## Run Distributed Benchmark on Kubernetes Cluster
6060

6161
You may need to build a Docker image before submitting a cluster job onto Kubernetes, or you will
62-
have to start all those processes mannually on each node, which is not recommended.
62+
have to start all those processes manually on each node, which is not recommended.
6363

6464
To build the Docker image, you need to choose a paddle "whl" package to run with, you may either
6565
download it from

cmake/FindJeMalloc.cmake

-28
This file was deleted.

cmake/configure.cmake

+6-13
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,7 @@ endif(NOT WITH_PROFILER)
3030

3131
if(WITH_AVX AND AVX_FOUND)
3232
set(SIMD_FLAG ${AVX_FLAG})
33+
add_definitions(-DPADDLE_WITH_AVX)
3334
elseif(SSE3_FOUND)
3435
set(SIMD_FLAG ${SSE3_FLAG})
3536
endif()
@@ -98,23 +99,15 @@ if(WITH_GPU)
9899
endif()
99100
include_directories(${TENSORRT_INCLUDE_DIR})
100101
endif()
101-
if(WITH_ANAKIN)
102+
if(ANAKIN_FOUND)
102103
if(${CUDA_VERSION_MAJOR} VERSION_LESS 8)
103-
message(WARNING "Anakin needs CUDA >= 8.0 to compile. Force WITH_ANAKIN=OFF")
104-
set(WITH_ANAKIN OFF CACHE STRING "Anakin is valid only when CUDA >= 8.0." FORCE)
104+
message(WARNING "Anakin needs CUDA >= 8.0 to compile. Force ANAKIN_FOUND = OFF")
105+
set(ANAKIN_FOUND OFF CACHE STRING "Anakin is valid only when CUDA >= 8.0." FORCE)
105106
endif()
106107
if(${CUDNN_MAJOR_VERSION} VERSION_LESS 7)
107-
message(WARNING "Anakin needs CUDNN >= 7.0 to compile. Force WITH_ANAKIN=OFF")
108-
set(WITH_ANAKIN OFF CACHE STRING "Anakin is valid only when CUDNN >= 7.0." FORCE)
108+
message(WARNING "Anakin needs CUDNN >= 7.0 to compile. Force ANAKIN_FOUND = OFF")
109+
set(ANAKIN_FOUND OFF CACHE STRING "Anakin is valid only when CUDNN >= 7.0." FORCE)
109110
endif()
110-
add_definitions(-DWITH_ANAKIN)
111-
endif()
112-
if(WITH_ANAKIN)
113-
# NOTICE(minqiyang): the end slash is important because $CUDNN_INCLUDE_DIR
114-
# is a softlink to real cudnn.h directory
115-
set(ENV{CUDNN_INCLUDE_DIR} "${CUDNN_INCLUDE_DIR}/")
116-
get_filename_component(CUDNN_LIBRARY_DIR ${CUDNN_LIBRARY} DIRECTORY)
117-
set(ENV{CUDNN_LIBRARY} ${CUDNN_LIBRARY_DIR})
118111
endif()
119112
elseif(WITH_AMD_GPU)
120113
add_definitions(-DPADDLE_WITH_HIP)

cmake/cuda.cmake

+1-5
Original file line numberDiff line numberDiff line change
@@ -141,31 +141,27 @@ endfunction()
141141
message(STATUS "CUDA detected: " ${CUDA_VERSION})
142142
if (${CUDA_VERSION} LESS 7.0)
143143
set(paddle_known_gpu_archs ${paddle_known_gpu_archs})
144-
add_definitions("-DPADDLE_CUDA_BINVER=\"60\"")
145144
elseif (${CUDA_VERSION} LESS 8.0) # CUDA 7.x
146145
set(paddle_known_gpu_archs ${paddle_known_gpu_archs7})
147146
list(APPEND CUDA_NVCC_FLAGS "-D_MWAITXINTRIN_H_INCLUDED")
148147
list(APPEND CUDA_NVCC_FLAGS "-D__STRICT_ANSI__")
149-
add_definitions("-DPADDLE_CUDA_BINVER=\"70\"")
150148
elseif (${CUDA_VERSION} LESS 9.0) # CUDA 8.x
151149
set(paddle_known_gpu_archs ${paddle_known_gpu_archs8})
152150
list(APPEND CUDA_NVCC_FLAGS "-D_MWAITXINTRIN_H_INCLUDED")
153151
list(APPEND CUDA_NVCC_FLAGS "-D__STRICT_ANSI__")
154152
# CUDA 8 may complain that sm_20 is no longer supported. Suppress the
155153
# warning for now.
156154
list(APPEND CUDA_NVCC_FLAGS "-Wno-deprecated-gpu-targets")
157-
add_definitions("-DPADDLE_CUDA_BINVER=\"80\"")
158155
elseif (${CUDA_VERSION} LESS 10.0) # CUDA 9.x
159156
set(paddle_known_gpu_archs ${paddle_known_gpu_archs9})
160157
list(APPEND CUDA_NVCC_FLAGS "-D_MWAITXINTRIN_H_INCLUDED")
161158
list(APPEND CUDA_NVCC_FLAGS "-D__STRICT_ANSI__")
162-
add_definitions("-DPADDLE_CUDA_BINVER=\"90\"")
163159
elseif (${CUDA_VERSION} LESS 11.0) # CUDA 10.x
164160
set(paddle_known_gpu_archs ${paddle_known_gpu_archs10})
165161
list(APPEND CUDA_NVCC_FLAGS "-D_MWAITXINTRIN_H_INCLUDED")
166162
list(APPEND CUDA_NVCC_FLAGS "-D__STRICT_ANSI__")
167-
add_definitions("-DPADDLE_CUDA_BINVER=\"100\"")
168163
endif()
164+
add_definitions("-DPADDLE_CUDA_BINVER=\"${CUDA_VERSION_MAJOR}${CUDA_VERSION_MINOR}\"")
169165

170166
include_directories(${CUDA_INCLUDE_DIRS})
171167
if(NOT WITH_DSO)

cmake/cudnn.cmake

+1-1
Original file line numberDiff line numberDiff line change
@@ -96,7 +96,7 @@ if(CUDNN_FOUND)
9696
endif()
9797

9898
message(STATUS "Current cuDNN header is ${CUDNN_INCLUDE_DIR}/cudnn.h. "
99-
"Current cuDNN version is v${CUDNN_MAJOR_VERSION}. ")
99+
"Current cuDNN version is v${CUDNN_MAJOR_VERSION}.${CUDNN_MINOR_VERSION}. ")
100100

101101
endif()
102102
endif()

cmake/external/anakin.cmake

-76
This file was deleted.

cmake/external/dgc.cmake

-2
Original file line numberDiff line numberDiff line change
@@ -38,5 +38,3 @@ ADD_LIBRARY(dgc STATIC IMPORTED GLOBAL)
3838
SET_PROPERTY(TARGET dgc PROPERTY IMPORTED_LOCATION ${DGC_LIBRARIES})
3939
ADD_DEPENDENCIES(dgc extern_dgc)
4040

41-
LIST(APPEND external_project_dependencies dgc)
42-

cmake/external/eigen.cmake

+9-2
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,13 @@ if(NOT WITH_FAST_MATH)
1212
add_definitions(-DEIGEN_FAST_MATH=0)
1313
endif()
1414

15+
if(WIN32)
16+
set(EIGEN_GIT_REPOSITORY https://github.com/wopeizl/eigen-git-mirror)
17+
set(EIGEN_GIT_TAG support_cuda9_win)
18+
else()
19+
set(EIGEN_GIT_REPOSITORY https://github.com/eigenteam/eigen-git-mirror)
20+
set(EIGEN_GIT_TAG 917060c364181f33a735dc023818d5a54f60e54c)
21+
endif()
1522
if(WITH_AMD_GPU)
1623
ExternalProject_Add(
1724
extern_eigen3
@@ -29,10 +36,10 @@ else()
2936
ExternalProject_Add(
3037
extern_eigen3
3138
${EXTERNAL_PROJECT_LOG_ARGS}
32-
GIT_REPOSITORY "https://github.com/eigenteam/eigen-git-mirror"
39+
GIT_REPOSITORY "${EIGEN_GIT_REPOSITORY}"
3340
# eigen on cuda9.1 missing header of math_funtions.hpp
3441
# https://stackoverflow.com/questions/43113508/math-functions-hpp-not-found-when-using-cuda-with-eigen
35-
GIT_TAG 917060c364181f33a735dc023818d5a54f60e54c
42+
GIT_TAG ${EIGEN_GIT_TAG}
3643
PREFIX ${EIGEN_SOURCE_DIR}
3744
DOWNLOAD_NAME "eigen"
3845
UPDATE_COMMAND ""

cmake/external/gflags.cmake

+1-8
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,7 @@ SET(GFLAGS_SOURCES_DIR ${THIRD_PARTY_PATH}/gflags)
1818
SET(GFLAGS_INSTALL_DIR ${THIRD_PARTY_PATH}/install/gflags)
1919
SET(GFLAGS_INCLUDE_DIR "${GFLAGS_INSTALL_DIR}/include" CACHE PATH "gflags include directory." FORCE)
2020
IF(WIN32)
21-
set(GFLAGS_LIBRARIES "${GFLAGS_INSTALL_DIR}/lib/libgflags.lib" CACHE FILEPATH "GFLAGS_LIBRARIES" FORCE)
21+
set(GFLAGS_LIBRARIES "${GFLAGS_INSTALL_DIR}/lib/gflags_static.lib" CACHE FILEPATH "GFLAGS_LIBRARIES" FORCE)
2222
ELSE(WIN32)
2323
set(GFLAGS_LIBRARIES "${GFLAGS_INSTALL_DIR}/lib/libgflags.a" CACHE FILEPATH "GFLAGS_LIBRARIES" FORCE)
2424
ENDIF(WIN32)
@@ -50,13 +50,6 @@ ExternalProject_Add(
5050
-DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON
5151
-DCMAKE_BUILD_TYPE:STRING=${THIRD_PARTY_BUILD_TYPE}
5252
)
53-
IF(WIN32)
54-
IF(NOT EXISTS "${GFLAGS_INSTALL_DIR}/lib/libgflags.lib")
55-
add_custom_command(TARGET extern_gflags POST_BUILD
56-
COMMAND cmake -E copy ${GFLAGS_INSTALL_DIR}/lib/gflags_static.lib ${GFLAGS_INSTALL_DIR}/lib/libgflags.lib
57-
)
58-
ENDIF()
59-
ENDIF(WIN32)
6053
ADD_LIBRARY(gflags STATIC IMPORTED GLOBAL)
6154
SET_PROPERTY(TARGET gflags PROPERTY IMPORTED_LOCATION ${GFLAGS_LIBRARIES})
6255
ADD_DEPENDENCIES(gflags extern_gflags)

0 commit comments

Comments
 (0)