Cool 2024-05-09 19:04:11 +08:00
parent 1044ae97ad
commit 13c67150e6
14 changed files with 316 additions and 29 deletions

View File

@@ -1,11 +1,12 @@
package org.jeecg.duan.java.controller;
package org.jeecg.modules.duan.java.controller;
import io.swagger.annotations.Api;
import lombok.extern.slf4j.Slf4j;
import net.sf.json.JSONArray;
import org.jeecg.common.api.vo.Result;
import org.jeecg.duan.java.service.MusicService;
import org.jeecg.modules.duan.java.service.MusicService;
import org.jeecg.modules.duan.java.service.MusicService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RequestMapping;

View File

@@ -1,4 +1,4 @@
package org.jeecg.duan.java.entity;
package org.jeecg.modules.duan.java.entity;
import com.baomidou.mybatisplus.annotation.IdType;
import com.baomidou.mybatisplus.annotation.TableId;
@@ -26,4 +26,25 @@ public class Ranking {
@ApiModelProperty(value = "comment count")
private Integer comment;
public Ranking(String name, String singer, Integer comment) {
this.name = name;
this.singer = singer;
this.comment = comment;
}
public Integer getComment() {
return this.comment;
}
public String getName() {
return this.name;
}
public String getSinger() {
return this.singer;
}
public Ranking() {
}
}

View File

@@ -1,8 +1,8 @@
package org.jeecg.duan.java.mapper;
package org.jeecg.modules.duan.java.mapper;
import com.baomidou.mybatisplus.core.mapper.BaseMapper;
import org.jeecg.duan.java.entity.Ranking;
import org.jeecg.modules.duan.java.entity.Ranking;
public interface MusicMapper extends BaseMapper<Ranking> {
}
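Since MusicMapper extends MyBatis-Plus's BaseMapper<Ranking>, it inherits generic CRUD methods with no XML mapping. As a hedged sketch (the service class below is hypothetical, not part of this commit), loading every row — for instance the rankings that Analysis.getRanking currently leaves as null — could look like this:

import java.util.List;
import org.jeecg.modules.duan.java.entity.Ranking;
import org.jeecg.modules.duan.java.mapper.MusicMapper;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

// Hypothetical example class, not part of this commit: BaseMapper already
// supplies generic CRUD, so selecting all rankings needs no hand-written SQL.
@Service
public class RankingQueryExample {
    @Autowired
    private MusicMapper musicMapper;

    public List<Ranking> loadAll() {
        // a null wrapper means no WHERE clause: select every row
        return musicMapper.selectList(null);
    }
}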

View File

@@ -1,4 +1,4 @@
package org.jeecg.duan.java.service;
package org.jeecg.modules.duan.java.service;
import net.sf.json.JSONArray;
import org.jeecg.common.api.vo.Result;

View File

@@ -1,4 +1,4 @@
package org.jeecg.duan.java.service.impl;
package org.jeecg.modules.duan.java.service.impl;
import com.baomidou.mybatisplus.core.conditions.update.UpdateWrapper;
import com.baomidou.mybatisplus.extension.service.impl.ServiceImpl;
@@ -6,9 +6,11 @@ import net.sf.json.JSONArray;
import net.sf.json.JSONObject;
import org.jeecg.common.api.vo.Result;
import org.jeecg.duan.java.entity.Ranking;
import org.jeecg.duan.java.mapper.MusicMapper;
import org.jeecg.duan.java.service.MusicService;
import org.jeecg.modules.duan.java.entity.Ranking;
import org.jeecg.modules.duan.java.mapper.MusicMapper;
import org.jeecg.modules.duan.java.service.MusicService;
import org.jeecg.modules.duan.java.mapper.MusicMapper;
import org.jeecg.modules.duan.java.service.MusicService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

View File

@@ -1,35 +1,37 @@
package org.jeecg.duan.scala
package org.jeecg.modules.duan.scala
import com.alibaba.fastjson.{JSONArray, JSONObject}
import com.baomidou.mybatisplus.core.conditions.update.UpdateWrapper
import org.jeecg.duan.java.entity.Ranking
import org.jeecg.duan.java.mapper.MusicMapper
import org.jeecg.modules.drag.config.common.Result
import org.springframework.beans.factory.annotation.Autowired
import org.jeecg.modules.duan.java.entity.Ranking
import org.jeecg.modules.duan.java.mapper.MusicMapper
import java.lang
import java.util.{Collections, Comparator}
import scala.jdk.CollectionConverters.{IterableHasAsJava, IterableHasAsScala, ListHasAsScala}
/**
* This object provides ranking-analysis functionality.
*/
object Analysis {
@Autowired
private val musicMapper: MusicMapper = null
private var musicMapper: MusicMapper = _ // Define the field for MusicMapper
/**
* Retrieves the ranking list and sorts it by comment count
*
* @return Result containing a JSON array of the rankings sorted by comment count
*/
def getRanking: Result[JSONArray] = {
val comparatorList: ComparatorList = new ComparatorList
val rankings: List[Ranking] = null // fetch the rankings from the database or another source
val result: JSONArray = new JSONArray
System.out.println(rankings) // log the rankings for debugging
Collections sort(rankings, comparatorList) // sort the rankings
for (ranking <- rankings) {
// Sort the rankings using Scala's sorting methods
val sortedRankings = rankings.sortBy(_.getComment)
for (ranking <- sortedRankings) {
System.out.println("\n\n\n\n\n" + ranking) // print each ranking for debugging
val `object`: JSONObject = new JSONObject
`object`.put("comment", ranking.getComment) // 将评论信息添加到 JSON 对象中
@@ -37,23 +39,25 @@ object Analysis {
`object`.put("songName", ranking.getName) // 将歌曲名称添加到 JSON 对象中
result.add(`object`) // JSON 对象添加到结果数组中
}
val updateWrapper: UpdateWrapper[Ranking] = new UpdateWrapper[Ranking]
updateWrapper.setSql("comment=comment- FLOOR( RAND()*100 )") // randomly decrease the comment counts
musicMapper.update(null, updateWrapper) // update the rankings' comment counts in the database
return Result.OK(result) // return a success result with the sorted rankings
Result.OK(result) // return a success result with the sorted rankings
}
// Define a method to set the MusicMapper field
def setMusicMapper(musicMapper: MusicMapper): Unit = {
this.musicMapper = musicMapper
}
// Comparator implementation ordering rankings by comment count (note: it never returns 0, which violates the Comparator contract for equal values)
private class ComparatorList extends Comparator[Ranking] {
override def compare(o1: Ranking, o2: Ranking): Int = {
if (o1.getComment > o2.getComment) {
1
}
else {
-(1)
}
if (o1.getComment > o2.getComment) 1
else -1
}
}
}
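Replacing the @Autowired val with a var plus setMusicMapper is presumably needed because Spring cannot inject fields of a Scala object (it is not a Spring-managed bean). A minimal wiring sketch, assuming a Spring context; the configuration class below is hypothetical, not part of this commit:

import javax.annotation.PostConstruct;
import org.jeecg.modules.duan.java.mapper.MusicMapper;
import org.jeecg.modules.duan.scala.Analysis;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Configuration;

// Hypothetical wiring class: pushes the Spring-managed mapper into the
// Scala object through the new setter once the context is ready.
@Configuration
public class AnalysisWiring {
    @Autowired
    private MusicMapper musicMapper;

    @PostConstruct
    public void init() {
        // Scala objects emit static forwarders, so this call reaches
        // Analysis$.MODULE$.setMusicMapper(...)
        Analysis.setMusicMapper(musicMapper);
    }
}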

View File

@@ -1,4 +1,4 @@
package org.jeecg.duan
package org.jeecg.modules.duan.scala
import org.apache.log4j.{Level, Logger}
import org.apache.spark.{SparkConf, SparkContext}

View File

@@ -0,0 +1,50 @@
package org.jeecg.sy.java;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import java.io.IOException;
public class HDFSUtils {
private static FileSystem fileSystem;
// initialize the HDFS file system
public static void initFileSystem(String hdfsUri) throws IOException {
Configuration conf = new Configuration();
conf.set("fs.defaultFS", hdfsUri);
fileSystem = FileSystem.get(conf);
}
// write a string to an HDFS file
public static void writeToHDFS(String path, String data) throws IOException {
Path filePath = new Path(path);
FSDataOutputStream outputStream = fileSystem.create(filePath, true);
outputStream.writeUTF(data);
outputStream.close();
}
// read a string back from an HDFS file
public static String readFromHDFS(String path) throws IOException {
Path filePath = new Path(path);
FSDataInputStream inputStream = fileSystem.open(filePath);
String data = inputStream.readUTF();
inputStream.close();
return data;
}
// delete an HDFS file
public static boolean deleteFromHDFS(String path) throws IOException {
Path filePath = new Path(path);
return fileSystem.delete(filePath, true);
}
// close the file-system connection
public static void closeFileSystem() throws IOException {
if (fileSystem != null) {
fileSystem.close();
}
}
}
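One caveat: writeUTF/readUTF frame the string in modified UTF-8 with a two-byte length prefix (capping it at 65,535 bytes), so files produced by writeToHDFS are not plain text. A hedged sketch of a plain-text variant, assuming the same fileSystem field; the helper below is hypothetical, not part of this commit:

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;

// Hypothetical extra helper for HDFSUtils: writes raw UTF-8 bytes with no
// length prefix, so `hdfs dfs -cat` shows the content exactly as written.
public static void writeTextToHDFS(String path, String data) throws IOException {
    try (FSDataOutputStream out = fileSystem.create(new Path(path), true)) {
        out.write(data.getBytes(StandardCharsets.UTF_8));
    }
}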

View File

@@ -0,0 +1,30 @@
package org.jeecg.sy.java;
import java.io.IOException;
public class MainClass {
public static void main(String[] args) {
try {
// initialize the HDFS connection
HDFSUtils.initFileSystem("hdfs://localhost:9000");
// write data to HDFS
HDFSUtils.writeToHDFS("/user/hadoop/testfile.txt", "Hello HDFS!");
// read the data back from HDFS
String data = HDFSUtils.readFromHDFS("/user/hadoop/testfile.txt");
System.out.println("Read from HDFS: " + data);
// delete the HDFS file
HDFSUtils.deleteFromHDFS("/user/hadoop/testfile.txt");
// close the HDFS connection
HDFSUtils.closeFileSystem();
} catch (IOException e) {
e.printStackTrace();
}
}
}

View File

@@ -0,0 +1,56 @@
package org.jeecg.sy.java;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.functions;
public class MusicSpark {
public static void main(String[] args) {
SparkSession spark = SparkSession.builder()
.appName("Music Shot Analysis")
.master("local[2]")
.getOrCreate();
// load the CSV data
Dataset<Row> data = spark.read()
.format("csv")
.option("header", "true")
.load("path_to_your_csv_file");
// shot success rate by shot type
Dataset<Row> shotTypeSuccess = data
.groupBy("shot_type")
.agg(
functions.avg(data.col("result").cast("integer")).alias("success_rate")
);
shotTypeSuccess.show();
// points scored per quarter
Dataset<Row> scoringByQuarter = data
.groupBy("qtr")
.agg(
functions.sum(data.col("lebron_team_score")).alias("total_points")
);
scoringByQuarter.show();
// score distribution by shot distance
Dataset<Row> scoreDistribution = data
.withColumn("distance_range", functions.when(data.col("distance_ft").$less(10), "0-10")
.otherwise(functions.when(data.col("distance_ft").$less(20), "10-20")
.otherwise("20+")))
.groupBy("distance_range")
.agg(
functions.count("shot_type").alias("shots_made"),
functions.sum(data.col("result").cast("integer")).alias("shots_scored")
);
scoreDistribution.show();
// stop the Spark session
spark.stop();
}
}
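The same shot-type aggregation can also be expressed in Spark SQL. A brief sketch, assuming the data and spark variables defined above:

// Hedged sketch: register the DataFrame as a view and run the
// success-rate query in SQL instead of the DataFrame API.
data.createOrReplaceTempView("shots");
spark.sql("SELECT shot_type, AVG(CAST(result AS INT)) AS success_rate "
        + "FROM shots GROUP BY shot_type").show();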

View File

@@ -0,0 +1,62 @@
package org.jeecg.sy.java;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.ml.classification.LogisticRegression;
import org.apache.spark.ml.feature.VectorAssembler;
import org.apache.spark.ml.Pipeline;
import org.apache.spark.ml.PipelineModel;
import org.apache.spark.ml.PipelineStage;
public class ShotPrediction {
public static void main(String[] args) {
SparkSession spark = SparkSession.builder()
.appName("Music Shot Prediction")
.master("local[2]")
.getOrCreate();
// load the data
Dataset<Row> data = spark.read()
.format("csv")
.option("header", "true")
.option("inferSchema", "true")
.load("path_to_your_csv_file");
// preprocessing: cast the raw columns to a numeric label and features
data = data.withColumn("label", data.col("result").cast("integer"))
.withColumn("shot_type_num", data.col("shot_type").cast("integer"))
.withColumn("distance_ft_num", data.col("distance_ft").cast("integer"))
.withColumn("lead_num", data.col("lead").cast("integer"));
// assemble the features into a single vector column
VectorAssembler assembler = new VectorAssembler()
.setInputCols(new String[]{"shot_type_num", "distance_ft_num", "lead_num"})
.setOutputCol("features");
// create the logistic regression model
LogisticRegression lr = new LogisticRegression();
// build the pipeline
Pipeline pipeline = new Pipeline()
.setStages(new PipelineStage[]{assembler, lr});
// split into training and test sets
Dataset<Row>[] splits = data.randomSplit(new double[]{0.7, 0.3}, 42);
Dataset<Row> trainingData = splits[0];
Dataset<Row> testData = splits[1];
// train the model
PipelineModel model = pipeline.fit(trainingData);
// evaluate the model on the test set
Dataset<Row> predictions = model.transform(testData);
predictions.select("features", "label", "prediction").show();
// stop the Spark session
spark.stop();
}
}
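The pipeline prints predictions but never scores them. A hedged sketch of computing AUC with Spark ML's BinaryClassificationEvaluator, reusing the predictions Dataset from above:

import org.apache.spark.ml.evaluation.BinaryClassificationEvaluator;

// Hedged sketch: area under the ROC curve on the held-out test set,
// using the label and rawPrediction columns the pipeline produced.
BinaryClassificationEvaluator evaluator = new BinaryClassificationEvaluator()
        .setLabelCol("label")
        .setRawPredictionCol("rawPrediction")
        .setMetricName("areaUnderROC");
double auc = evaluator.evaluate(predictions);
System.out.println("AUC on test data: " + auc);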

View File

@@ -0,0 +1,19 @@
package org.jeecg.sy.java;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.sql.SparkSession;
public class SparkConn {
public static void main(String[] args) {
// initialize Spark
SparkSession session = SparkUtils.initSpark("Spark Java Application");
// process data with the SparkUtils helpers
JavaRDD<Integer> doubledNumbers = SparkUtils.doubleNumbers();
SparkUtils.printRDD(doubledNumbers);
// stop Spark
SparkUtils.stopSpark();
}
}

View File

@@ -0,0 +1,42 @@
package org.jeecg.sy.java;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.SparkConf;
import java.util.Arrays;
import java.util.List;
public class SparkUtils {
private static SparkSession sparkSession = null;
// initialize the SparkSession (created once, then reused)
public static SparkSession initSpark(String appName) {
if (sparkSession == null) {
SparkConf conf = new SparkConf().setAppName(appName).setMaster("local[2]");
sparkSession = SparkSession.builder().config(conf).getOrCreate();
}
return sparkSession;
}
// stop the SparkSession
public static void stopSpark() {
if (sparkSession != null) {
sparkSession.stop();
sparkSession = null;
}
}
// example: create an integer RDD and double each element
public static JavaRDD<Integer> doubleNumbers() {
JavaSparkContext sc = new JavaSparkContext(sparkSession.sparkContext());
JavaRDD<Integer> numbersRDD = sc.parallelize(Arrays.asList(1, 2, 3, 4, 5));
return numbersRDD.map(number -> number * 2);
}
// example: print the contents of an RDD
public static void printRDD(JavaRDD<?> rdd) {
List<?> result = rdd.collect();
result.forEach(System.out::println);
}
}