Java: use LIMIT to read MySQL in a loop until all rows are read, and display how many rows were read

The table holds a lot of data. I want to read about 5,000 rows at a time and page through it in a loop until everything has been read. Below is my test code; I now need the pagination code, which can be written on top of the test code I posted. Please be as detailed as possible, thanks.
package webtest;

import java.sql.*;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStreamWriter;

public class test5 {

    static final String JDBC_DRIVER = "com.mysql.jdbc.Driver";

    static final String DB_URL = "jdbc:mysql://localhost:3306/gumysql";

    static final String USER = "root";
    static final String PASS = "123456";

    public static final String FILE_NAME = "again1.txt"; // name of the file to create
    public static final String fn = "F:/NEWtest/";       // directory the file goes in

    public static void creatFile(String fn, String fileName) {
        File folder = new File(fn);
        // create the directory if it does not exist yet
        if (!folder.exists() && !folder.isDirectory()) {
            System.out.println("Directory does not exist, creating it: " + fn);
            folder.mkdirs();
        } else {
            System.out.println("Directory exists: " + fn);
        }

        // create the file if it does not exist yet
        File file = new File(fn + fileName);
        if (!file.exists()) {
            System.out.println("File does not exist, creating it: " + fn + fileName);
            try {
                file.createNewFile();
            } catch (IOException e) {
                e.printStackTrace();
            }
        } else {
            System.out.println("File already exists: " + fn + fileName);
        }
    }

    // append one line of text to the given file
    public static void wf(String file, String content) {
        BufferedWriter out = null;
        try {
            out = new BufferedWriter(new OutputStreamWriter(
                    new FileOutputStream(file, true)));
            out.write(content + "\r\n");
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            try {
                if (out != null) { // guard against an NPE when the stream never opened
                    out.close();
                }
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    }

    public static void main(String[] args) {
        Connection conn = null;
        Statement stmt = null;
        try {
            creatFile(fn, FILE_NAME);
            Class.forName(JDBC_DRIVER);

            System.out.println("Connecting to the database...");
            conn = DriverManager.getConnection(DB_URL, USER, PASS);

            System.out.println("Creating the Statement object...");
            stmt = conn.createStatement();
            String sql = "SELECT id, name, url, country FROM websites";
            ResultSet rs = stmt.executeQuery(sql);

            String outFile = fn + FILE_NAME; // avoid shadowing the static fn with a second path literal
            wf(outFile, "ID, site name, site URL, country");
            while (rs.next()) {
                int id = rs.getInt("id");
                String name = rs.getString("name");
                String url = rs.getString("url");
                String country = rs.getString("country");

                System.out.print("ID: " + id);
                System.out.print(", site name: " + name);
                System.out.print(", site URL: " + url);
                System.out.print(", country: " + country);
                System.out.print("\n");

                wf(outFile, id + "," + name + "," + url + "," + country);
            }

            rs.close();
            stmt.close();
            conn.close();
        } catch (SQLException se) {
            se.printStackTrace();
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            try {
                if (stmt != null) stmt.close();
            } catch (SQLException se2) {
            }
            try {
                if (conn != null) conn.close();
            } catch (SQLException se) {
                se.printStackTrace();
            }
        }
        System.out.println("Goodbye!");
    }
}

4 answers

You can first query the total number of rows in the table, sumNum:
SELECT COUNT(*) FROM websites;
System.out.print("Fetched " + sumNum + " rows in total");
Then divide that total by 5000, and use the remainder to round up; this gives the number of pages to loop over, pageCount:
int pageCount = sumNum / 5000;
if (sumNum % 5000 != 0) {
    pageCount++; // one extra page for the leftover rows
}
Then loop (the condition must be i < pageCount, not i <= pageCount, since the partial last page is already counted in):
for (int i = 0; i < pageCount; i++) {
    // run the query for this page
    SELECT id, name, url, country FROM websites LIMIT i*5000, 5000;
    // walk the ResultSet and process the rows
}
Since you are querying page by page you will be opening a lot of statements and connections; remember to close them.
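
Putting that answer together, a complete runnable sketch (it reuses the question's own DB_URL, user, password and websites table; one Connection is reused and each page's Statement/ResultSet is closed by try-with-resources; the LIMIT values are plain integers computed in Java, so building the SQL string per page is safe here):

package webtest;

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class PagedReadTest {

    static final String DB_URL = "jdbc:mysql://localhost:3306/gumysql";
    static final String USER = "root";
    static final String PASS = "123456";
    static final int PAGE_SIZE = 5000; // rows fetched per page

    public static void main(String[] args) throws Exception {
        Class.forName("com.mysql.jdbc.Driver");
        try (Connection conn = DriverManager.getConnection(DB_URL, USER, PASS)) {

            // step 1: total row count, so we know how many pages there are
            int total = 0;
            try (Statement st = conn.createStatement();
                 ResultSet rs = st.executeQuery("SELECT COUNT(*) FROM websites")) {
                if (rs.next()) {
                    total = rs.getInt(1);
                }
            }
            System.out.println("Fetched " + total + " rows in total");

            // step 2: number of pages, rounded up so a partial last page is included
            int pageCount = (total + PAGE_SIZE - 1) / PAGE_SIZE;

            // step 3: read page by page, building the LIMIT clause per page,
            // and keep a running count of the rows actually read
            int readCount = 0;
            for (int page = 0; page < pageCount; page++) {
                String sql = "SELECT id, name, url, country FROM websites"
                        + " LIMIT " + (page * PAGE_SIZE) + ", " + PAGE_SIZE;
                try (Statement st = conn.createStatement();
                     ResultSet rs = st.executeQuery(sql)) {
                    while (rs.next()) {
                        int id = rs.getInt("id");           // handle one row here:
                        String name = rs.getString("name"); // print it, write it to a file, etc.
                        readCount++;
                    }
                }
                System.out.println("Read " + readCount + " / " + total + " rows so far");
            }
            System.out.println("Done: read " + readCount + " rows");
        }
    }
}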

qq_36291682
grace.liming: what he means here is splicing the parameter in dynamically. You can concatenate the paging clause onto SELECT id, name, url, country FROM websites, though it is better to use placeholders with a PreparedStatement: SELECT id, name, url, country FROM websites limit ?*5000,5000; and then call setString(1, i) each time.
almost 2 years ago · Reply
ggx1abc
gu123xin: inside the for loop, there is no way to substitute a value into the SQL statement, is there?
almost 2 years ago · Reply
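
As written, that placeholder suggestion will not actually run: MySQL does not accept an arithmetic expression such as ?*5000 on a LIMIT placeholder, and the LIMIT arguments must be bound as integers, not strings. A corrected sketch, computing the offset in Java and binding both values with setInt (pageCount is assumed to come from the COUNT(*) step above):

String sql = "SELECT id, name, url, country FROM websites LIMIT ?, ?";
PreparedStatement ps = conn.prepareStatement(sql);
for (int i = 0; i < pageCount; i++) {
    ps.setInt(1, i * 5000); // offset, computed in Java instead of inside the SQL text
    ps.setInt(2, 5000);     // page size
    ResultSet rs = ps.executeQuery();
    // ... read rs here, then close it
    rs.close();
}
ps.close();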
package test;

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Types;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;

public class DBUtil {

    public static void closeResult(ResultSet rs) {
        try {
            if (rs != null) {
                rs.close();
            }
        } catch (Exception e) {
        }
    }

    public static void closeStmt(PreparedStatement stmt) {
        try {
            if (stmt != null) {
                stmt.close();
            }
        } catch (Exception e) {
        }
    }

    public static void closeConn(Connection con) {
        try {
            if (con != null) {
                con.close();
            }
        } catch (Exception e) {
        }
    }

    public static Connection openConn() { // open the connection here; write this yourself
        return null;
    }

    public static int getTotalCount(Connection con, String sql, Object... params) {
        PreparedStatement stmt = null;
        ResultSet rs = null;
        try {
            stmt = con.prepareStatement(sql);
            int idx = 1;
            if (params != null) {
                for (Object param : params) {
                    if (param == null) {
                        stmt.setObject(idx++, param, Types.VARCHAR);
                    } else {
                        stmt.setObject(idx++, param);
                    }
                }
            }
            rs = stmt.executeQuery();
            if (rs.next()) {
                return rs.getInt(1);
            }
        } catch (SQLException e) {
            e.printStackTrace();
        } finally {
            closeResult(rs);
            closeStmt(stmt);
        }
        return 0;
    }

    public static <T> List<T> queryList(Connection con, String sql, ResultMapping<T> mapper, Object... params) {
        PreparedStatement stmt = null;
        ResultSet rs = null;
        List<T> list = new ArrayList<T>();
        try {
            stmt = con.prepareStatement(sql);
            int idx = 1;
            if (params != null) {
                for (Object param : params) {
                    if (param == null) {
                        stmt.setObject(idx++, param, Types.VARCHAR);
                    } else {
                        stmt.setObject(idx++, param);
                    }
                }
            }
            rs = stmt.executeQuery();
            while (rs.next()) {
                list.add(mapper.convert(rs));
            }
        } catch (SQLException e) {
            e.printStackTrace();
        } finally {
            closeResult(rs);
            closeStmt(stmt);
        }
        return list;
    }

    public interface ResultMapping<T> {
        T convert(ResultSet rs) throws SQLException; // throws, so implementations can call rs.getXxx directly
    }

    public static void main(String[] args) {
        String sql = "select * from t";
        String totalSql = "select count(1) from (" + sql + ") as tmp";
        String pageSql = "select * from (" + sql + ") as tmp limit ?, ?";
        System.out.println(sql);
        System.out.println(totalSql);
        System.out.println(pageSql);
        int pageSize = 100;
        Connection con = openConn();
        int total = getTotalCount(con, totalSql);
        // round up, otherwise a partial last page (or a table smaller than one page) is skipped
        int fetchCount = (int) Math.ceil(total * 1.0 / pageSize);
        final AtomicInteger readCount = new AtomicInteger();
        ResultMapping<Map> mapper = new ResultMapping<Map>() { // assume each row is mapped to a Map

            @Override
            public Map convert(ResultSet rs) throws SQLException {
                // read whatever you need out of rs here; return the mapped row if you care about it
                readCount.incrementAndGet();
                return null;
            }
        };
        for (int i = 0; i < fetchCount; i++) {
            // pageSql has exactly two placeholders: offset and page size
            queryList(con, pageSql, mapper, i * pageSize, pageSize);
        }
        System.out.println("Read " + readCount.get() + " rows in total");

        closeConn(con);
    }
}
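
For the question's websites table, a concrete mapper could look like the following (a sketch only: it drops into main() in place of the mapper above and additionally needs import java.util.HashMap):

ResultMapping<Map> mapper = new ResultMapping<Map>() {
    @Override
    public Map convert(ResultSet rs) throws SQLException {
        Map<String, Object> row = new HashMap<String, Object>();
        row.put("id", rs.getInt("id"));
        row.put("name", rs.getString("name"));
        row.put("url", rs.getString("url"));
        row.put("country", rs.getString("country"));
        readCount.incrementAndGet(); // keep the running total of rows read
        return row;
    }
};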


A plain ResultSet like this does not support scrolling, so once you have one, all you can do is call its next() method and read the rows one at a time.
My suggestion: create a new List yourself, add each row to it as you call next(), and afterwards use list.subList(fromIndex, toIndex) to do the paging.
The two arguments are an index range, fromIndex inclusive and toIndex exclusive (left-closed, right-open).
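
A sketch of that in-memory approach (note it requires the whole result set to fit in memory, which is exactly what LIMIT paging avoids; the row format here is just an illustration):

List<String> all = new ArrayList<String>();
while (rs.next()) {
    // collect every row first
    all.add(rs.getInt("id") + "," + rs.getString("name") + "," + rs.getString("url"));
}
int pageSize = 5000;
for (int from = 0; from < all.size(); from += pageSize) {
    int to = Math.min(from + pageSize, all.size()); // subList covers [from, to)
    List<String> page = all.subList(from, to);
    System.out.println("Processing rows " + from + " to " + (to - 1) + ", read so far: " + to);
    // ... process page
}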
