PySpark Data Processing and Modeling


Import the required packages

from pyspark.sql import SparkSession
from pyspark.sql.functions import udf, when, count, countDistinct
from pyspark.sql.types import IntegerType,StringType
from pyspark.ml.feature import OneHotEncoderEstimator, StringIndexer, VectorAssembler
from pyspark.ml.classification import RandomForestClassifier, GBTClassifier
from pyspark.ml.evaluation import BinaryClassificationEvaluator
spark = SparkSession.builder \
    .config("spark.some.config.option", "some-value") \
    .config('spark.debug.maxToStringFields', '50') \
    .appName("Python Spark SQL Hive integration example") \
    .enableHiveSupport() \
    .getOrCreate()
sc = spark.sparkContext

1. Read in the data

Read the feature data (X) from the database

data = spark.sql('''select * from db_so_default_tenant.entity_clueinfo
                    where custom_username not like '%测试%' ''')
# The time-window filter below raised an error and has not been fixed yet; a possible fix is sketched after this block
#                           and FROM_UNIXTIME(custom_create_time,'%Y-%m-%d') between date_format(date_sub(current_date,365), '%Y-%m-01') 
#                           and date_format(date_sub(current_date, 15), '%Y-%m-%d')
## Let's have a look at the data type
data.printSchema()
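
The commented-out date filter most likely failed because Spark SQL's from_unixtime and date_format expect Java-style patterns such as 'yyyy-MM-dd', not MySQL-style '%Y-%m-%d'. A minimal sketch of a corrected filter, assuming the intent is to keep leads created between roughly one year and 15 days ago, could look like this:

# Sketch of a corrected time-window filter (not the author's verified query)
data = spark.sql('''select * from db_so_default_tenant.entity_clueinfo
                    where custom_username not like '%测试%'
                      and from_unixtime(custom_create_time, 'yyyy-MM-dd')
                          between date_format(date_sub(current_date(), 365), 'yyyy-MM-01')
                          and date_format(date_sub(current_date(), 15), 'yyyy-MM-dd') ''')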

# Keep only a subset of columns:

keep_var_lst = ['custom_clue_id', 'custom_create_time', 'custom_post_time', 'custom_username', 'custom_sex',
                'custom_mobile', 'custom_mobile_area', 'custom_approach_id', 'custom_channel_id',
                'custom_product_id', 'custom_pattern_id', 'custom_media_id', 'custom_ctype_id',
                'custom_activity_id', 'custom_detail', 'custom_province_id', 'custom_city_id',
                'custom_district_id', 'custom_utm_source', 'custom_utm_content', 'custom_utm_medium',
                'custom_utm_campaign', 'custom_resource', 'custom_detail', 'custom_dealer_id',
                'custom_area_id', 'custom_two_area_id']
data = data.select(keep_var_lst)

Read the target (Y) from the database

# Read the data and inspect its structure

lead_feedback = spark.sql("select * from db_so_default_tenant.entity_clueinfosync")
lead_feedback.printSchema()

# Keep only a subset of columns

keep_var_lst2 = ['custom_clue_id', 'custom_verify_status', 'custom_sync_time']
lead_feedback = lead_feedback.select(keep_var_lst2)
# print((lead_feedback.count(), len(lead_feedback.columns)))
## (1577626, 3)

Join the two tables to get a base table containing both X and Y

# Append the lead-flag fields to the data table
df = data.join(lead_feedback, on=['custom_clue_id'], how='left')
# print((df.count(), len(df.columns)))
## (1466832, 29)
# (1560986, 29)

2. Data preparation

Define the target (label)

# Use when() from pyspark.sql.functions to derive the label

df = df.withColumn('label', when(df['custom_verify_status'] == 2, 1).otherwise(0))
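
A quick class-balance check is worth running here, since the ROC/PR numbers later are easier to interpret when the label distribution is known; this line is an addition, not part of the original flow:

# Illustrative check of the label distribution
df.groupBy('label').count().show()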

Processing the date fields

# Convert the Unix timestamps to dates
# Register a temporary view for SQL queries
df.createOrReplaceTempView("temp")
# newDF = spark.sql("select *, to_date('create_time', 'dim_month_id'), to_date('create_time', 'dim_day_id')  from df_sql ")
newDF = spark.sql("""select *,
                     from_unixtime(custom_create_time, 'yyyy-MM') as dim_month_id,
                     from_unixtime(custom_create_time, 'yyyy-MM-dd') as dim_day_id,
                     from_unixtime(custom_create_time, 'yyyy-MM-dd HH:mm:ss') as create_time_new,
                     from_unixtime(custom_post_time, 'yyyy-MM-dd HH:mm:ss') as post_time_new
                     from temp """)
# Extract the relevant date parts
# Register a temporary view for SQL queries
newDF.createOrReplaceTempView("temp")
# newDF = spark.sql("select *, to_date('create_time', 'dim_month_id'), to_date('create_time', 'dim_day_id')  from df_sql ")
newDF = spark.sql("""select *,
    month(create_time_new) as create_monthofyear,
    FLOOR((day(create_time_new)-1)/7)+1 as create_weekofmonth,
    dayofweek(create_time_new) as create_dayofweek,
    weekofyear(create_time_new) as create_weekofyear,
    hour(create_time_new) as create_hourofday,
    floor(hour(create_time_new)/2) as create_hourofday2,
    case when hour(create_time_new) between 8  and 11 then 'a.8-11'
         when hour(create_time_new) = 12              then 'b.12'
         when hour(create_time_new) between 13 and 17 then 'c.13-17'
         when hour(create_time_new) between 18 and 19 then 'd.18-19'
         when hour(create_time_new) between 20 and 23 then 'e.20-23'
         when hour(create_time_new) = 0               then 'f.0'
         when hour(create_time_new) between 1  and 2  then 'g.1-2'
         when hour(create_time_new) = 3               then 'h.3'
         when hour(create_time_new) between 4 and 5   then 'j.4-5'
         when hour(create_time_new) between 6 and 7   then 'k.6-7'
    end as create_hour_flag,
    month(post_time_new) as post_monthofyear,
    FLOOR((day(post_time_new)-1)/7)+1 as post_weekofmonth,
    dayofweek(post_time_new) as post_dayofweek,
    weekofyear(post_time_new) as post_weekofyear,
    hour(post_time_new) as post_hourofday,
    floor(hour(post_time_new)/2) as post_hourofday2,
    case when hour(post_time_new) between 9  and 11 then 'a.9-11'
         when hour(post_time_new) = 12              then 'b.12'
         when hour(post_time_new) between 13 and 19 then 'c.13-19'
         when hour(post_time_new) = 20              then 'd.20'
         when hour(post_time_new) between 21 and 23 then 'e.21-23'
         when hour(post_time_new) between 0  and 2  then 'f.0-2'
         when hour(post_time_new) = 3               then 'g.3'
         when hour(post_time_new) between 4  and 8  then 'h.4-8'
    end as post_hour_flag
    from temp""")

1. Create simple flags: return 0 if the value is null/empty, otherwise 1

df2 = newDF
# Create a simple flag: 0 if the value is null/empty, otherwise 1
def func_var_flag(var):
    if var is None or var == 0 or var == '' or var == '0':
        return 0
    else:
        return 1

func_var_flag_udf = udf(func_var_flag, IntegerType())
unknown_flag=['custom_username','custom_mobile_area','custom_approach_id','custom_channel_id','custom_product_id','custom_pattern_id','custom_media_id','custom_ctype_id','custom_activity_id','custom_utm_source', 'custom_utm_content','custom_utm_medium','custom_utm_campaign','custom_province_id', 'custom_city_id','custom_district_id','custom_dealer_id','custom_area_id','custom_two_area_id','custom_resource','custom_detail']
for column in unknown_flag:
    df2 = df2.withColumn(column + '_flag', func_var_flag_udf(df2[column]))

# df2.limit(2).toPandas()
# df2.groupBy('mobile_area', 'mobile_area_flag').count().sort("count",ascending=False).show(4)

2. Create simple flags: return 'Unk' if the value is null/empty, otherwise return the value itself

Numeric columns are not handled here; a possible treatment is sketched after this block.

# Create a simple flag: return 'Unk' for null values, otherwise the value itself.
# This handles string columns; numeric columns still need separate treatment.
def func_var_grp_flag(var):
    if var is None or var == '':
        return 'Unk'
    else:
        return var

func_var_grp_udf = udf(func_var_grp_flag, StringType())
unknown_grp_flag=['custom_sex','custom_utm_medium']
for column in unknown_grp_flag:
    df2 = df2.withColumn(column + '_grp', func_var_grp_udf(df2[column]))

# df2.limit(2).toPandas()
# df2.dtypes
len(df2.columns)
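
For the numeric columns left untreated above, one possible approach (a sketch, not from the original) is to add a missing-value flag and fill the nulls with a constant such as 0; 'some_numeric_col' is a hypothetical name:

# Sketch only: flag and fill nulls in numeric columns ('some_numeric_col' is hypothetical)
numeric_na_cols = ['some_numeric_col']
for column in numeric_na_cols:
    df2 = df2.withColumn(column + '_na_flag', when(df2[column].isNull(), 1).otherwise(0))
    df2 = df2.na.fill(0, subset=[column])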

3. CASE WHEN on string columns, using SQL

# Register a temporary view for SQL queries
df2.createOrReplaceTempView("temp")
# newDF = spark.sql("select *, to_date('create_time', 'dim_month_id'), to_date('create_time', 'dim_day_id')  from df_sql ")
df3 = spark.sql("""select *,
    CHAR_LENGTH(trim(custom_username)) as name_len,
    case when CHAR_LENGTH(custom_username) = 1 then 'len=1'
         when custom_username in ('400用户','询价客户','客户','团购用户','微聊客户','网友','报价用户','匿名用户','汽车之家用户','车主','佚名','爱卡用户','询价用户','17汽车来电客户','团购客户','匿名','意向客户') then custom_username
         when custom_username like '%先生%' or custom_username like '%女士%' then 'x Mr/Mrs'
         when SUBSTR(trim(custom_mobile),1,1) = "1" and CHAR_LENGTH(trim(custom_username)) = 11 then 'phone_num'
         when substr(custom_username,1,1) in ('0','1','2','3','4','5','6','7','8','9') then 'numbers'
         when CHAR_LENGTH(custom_username) > 3 then 'len>3'
         else 'Normal'
    end as name_flag2,
    CHAR_LENGTH(trim(custom_mobile)) as mobile_len,
    case when SUBSTR(trim(custom_mobile),1,1) = 0 then 'fixed-line telephone'
         when SUBSTR(trim(custom_mobile),1,1) = 1 and CHAR_LENGTH(trim(custom_mobile)) = 11 then 'mobile phone'
         else 'No-valid'
    end as tel_flag,
    case when SUBSTR(trim(custom_mobile),1,1) = 1 and CHAR_LENGTH(custom_mobile) = 11 then SUBSTR(trim(custom_mobile),1,2) end as tel_head2,
    case when SUBSTR(trim(custom_mobile),1,1) = 1 and CHAR_LENGTH(custom_mobile) = 11 then SUBSTR(trim(custom_mobile),1,3) end as tel_head3,
    case when CHAR_LENGTH(custom_mobile) <> 11 then 'Not-Phone'
         when SUBSTR(trim(custom_mobile),1,3) in ('186','138','139','135','136','137','159','158','150','151','187','182','189','152','188','176','185','180','183','133','181','177','131','130','132','156','134','153','155','173','157','199','178','175','166','184','198','147','191','170','171') then 'valid'
         else 'No-Valid'
    end as tel_head3_grp,
    case when custom_mobile_area is null or custom_mobile_area = "" then 'Unk'
         when custom_mobile_area in ('海口市','大连市','昆明市','吉林市','江门市','西宁市','珠海市','呼和浩特市','张家口市') then 'level1'
         when custom_mobile_area in ('金华市','赣州市','湖州市','徐州市','盐城市') then 'level2'
         when custom_mobile_area in ('沈阳市','成都市') then 'level3'
         when custom_mobile_area in ('杭州市','南京市','宜春市','吉安市') then 'level4'
         else 'Others'
    end as mobile_area_grp,
    case when custom_channel_id in ('73','72','10070','62','10063','61','10012','10061','65','60','10072','76','10062','10071','63','10073','36','77') then custom_channel_id
         else 'Others'
    end as channel_grp,
    case when custom_media_id in ('4f15069347ea4') then 'level1'
         when custom_media_id in ('4f15069348034') then 'level2'
         when custom_media_id in ('5c7397fa8c5f3') then 'level3'
         when custom_media_id in ('5aa8e618a1915','58107fdf18a64') then 'level4'
         when custom_media_id in ('588176b5dc052','4f150a09d9a7d','541994c0e4126','54068f14cde9b','5a308c5df0537','54052681387a5','54068f14cde9h','5c6d2672f1f95','57d2a59bc8dbb','4f15053feac73','5c233d3561514','4f150693481c2','4f15069348647','4f150a09db456','4f150a09d608c') then 'level5'
         when custom_media_id in ('0') then 'Unk'
         else 'Others'
    end as media_grp,
    case when custom_detail is null or custom_detail = "" then NULL
         when custom_detail like '%询价%'   then 'Inquire'
         when custom_detail like '%经销商%' then 'Retail'
         when custom_detail like '%试驾%'   then 'Trial run'
         when custom_detail like '2.0L %' or custom_detail like '2.5L %' then 'car_type'
         when custom_detail like '%通话%'   then 'comment6'
         when custom_detail like '%失败%'   then 'comment2'
         when custom_detail like '%成功%'   then 'comment1'
         when custom_detail like '%无效%'   then 'comment3'
         when custom_detail like '%黑名单%' then 'comment4'
         when custom_detail like '%姓名%'   then 'comment5'
    end as comment_type,
    case when custom_province_id in ('150000','460000','630000','530000','620000','520000','650000','24') then 'level1'
         when custom_province_id in ('440000','610000','31','220000','640000') then 'level2'
         when custom_province_id in ('130000','430000','370000','25','410000','210000','340000') then 'level3'
         when custom_province_id in ('420000','350000','230000') then 'level4'
         when custom_province_id in ('320000','450000','510000','360000','140000','330000','2') then 'level5'
    end as custom_province_grp,
    case when custom_area_id in ('215','499') then 'South'
         when custom_area_id in ('497')       then 'North'
         when custom_area_id in ('500')       then 'East2'
         when custom_area_id in ('20004')     then 'East1'
         when custom_area_id in ('221','501') then 'North-East'
         when custom_area_id in ('502')       then 'West'
    end as area_grp
    from temp """)
# df3.dtypes
len(df3.columns)
#  84

Drop some unneeded columns

# Drop some unneeded columns
drop_list1 = ['custom_create_time','custom_post_time', 'create_time_new','post_time_new','custom_verify_status', 'custom_sync_time','custom_username','custom_mobile','custom_mobile_area','custom_media_id','custom_utm_source','custom_utm_content','custom_utm_medium','custom_utm_campaign','custom_detail']
df4 = df3.select([column for column in df3.columns if column not in drop_list1])
len(df4.columns)

Drop the ID fields

# List of variables to drop - only independent variables should be left in final dataset
drop_attrs = [ "custom_clue_id", "dim_month_id","dim_day_id"]
df4 = df4.select([column for column in df4.columns if column not in drop_attrs])
# df4.select('resource_flag').distinct().show()
# df4.dtypes
len(df4.columns)

Find columns that contain only a single value and drop them

# This takes a long time to run
# Check for columns with only one distinct value
one_value_flag=[]
for column in df4.columns:
    if df4.select(column).distinct().count() == 1:
        one_value_flag.append(column)
one_value_flag
df4=df4.drop(*one_value_flag)
len(df4.columns)
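
The loop above launches one Spark job per column, which is why it is slow. A faster alternative (a sketch, not the original approach) computes all the distinct counts in a single pass with countDistinct, which is already imported; note that countDistinct ignores nulls, whereas distinct().count() includes them:

# Sketch: count distinct values of every column in one job instead of looping
distinct_counts = df4.agg(*[countDistinct(c).alias(c) for c in df4.columns]).collect()[0].asDict()
one_value_flag = [c for c, n in distinct_counts.items() if n == 1]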

Convert numeric columns to strings

# Convert numeric columns to strings: some columns were read in as numeric types
df5=df4
int_to_string_list=['custom_approach_id','custom_channel_id','custom_product_id','custom_pattern_id','custom_ctype_id','custom_activity_id','custom_province_id','custom_city_id','custom_district_id','custom_dealer_id','custom_area_id','custom_two_area_id']
for col in int_to_string_list:
    df5 = df5.withColumn(col, df5[col].cast(StringType()))

# Test on a single column
# df5 = df4.withColumn('approach_id', df4['approach_id'].cast(StringType()))
# df5.dtypes
numeric_cols = [x[0] for x in df5.dtypes if (x[1] != 'string')& (x[0] != 'label') ]
numeric_cols
# String columns (the dependent variable here is 'label', which is numeric and excluded above)
string_cols = [x[0] for x in df5.dtypes if (x[1] == 'string') ]
string_cols

Fill missing values in string columns

# One-hot encoding will fail if the string columns contain null values
for col in string_cols:
    df5 = df5.na.fill('EMPTY', subset=[col])
    df5 = df5.na.replace('', 'EMPTY', subset=[col])

Check whether each categorical column has more than 25 levels

This makes the later pipeline setup easier: columns with 25 or more levels are only StringIndexer-transformed, while columns with fewer than 25 levels are also one-hot encoded.

If any column has more than 25 categories, add that column to the drop list or convert it to a continuous variable if possible.

# This takes a long time to run
# Check if there are categorical vars with 25+ levels
string_more_than32=[]
string_more_than25=[]
string_less_than25 = []
for column in string_cols:
    n_levels = df5.select(column).distinct().count()
    if n_levels >= 32:
        string_more_than32.append(column)
    if n_levels >= 25:
        string_more_than25.append(column)
    else:
        string_less_than25.append(column)

# df_long_factors = df5.select([when(countDistinct(column) >= 25, 'T').otherwise('F').alias(column) for column in string_cols])
# df5.select('custom_sex').distinct().count()
string_more_than32
# df5.select('custom_district_id').distinct().count() # 166

Drop categorical variables with 32 or more levels

# Drop the columns listed in string_more_than32
df5 = df5.drop(*string_more_than32)
len(df5.columns)
string_more_than25
string_25_than32 = list(set(string_more_than25).difference(set(string_more_than32)))
string_25_than32
string_less_than25
string_cols = [x[0] for x in df5.dtypes if (x[1] == 'string') ]
string_cols

3. Pipeline processing

# 1. Encode the categorical data
main_stages = []
for col in string_cols:
    indexer = StringIndexer(inputCol=col, outputCol=col + 'Index', handleInvalid="keep")
    main_stages += [indexer]

# ?StringIndexer
# 2. OneHotEncoder for string_less_than25
for col in string_less_than25:
    encoder = OneHotEncoderEstimator(inputCols=[col + 'Index'], outputCols=[col + 'Vec'])
    main_stages += [encoder]
# 1. Numeric variables
assemblerInputs = numeric_cols
# 2. Add indexed versions of the high-cardinality string columns (25-32 levels)
assemblerInputs = assemblerInputs + [col + 'Index' for col in string_25_than32]
# 3. Add the one-hot encoded vectors for the low-cardinality string columns
assemblerInputs = assemblerInputs + [col + 'Vec' for col in string_less_than25]
# len(df5.columns)  # 65, including the label
# len(assemblerInputs) # 64
# 4. Assemble the steps: pass all of them to the VectorAssembler
# VectorAssembler is a feature transformer that merges multiple columns into a single vector column,
# so all feature columns are represented as one list-like 'features' column.
assembler = VectorAssembler(inputCols=assemblerInputs, outputCol='features')
main_stages += [assembler]
# 5. Create a Pipeline. Now that all the steps are ready, push the data through the pipeline
# This step takes quite a while
from pyspark.ml import Pipeline
pipeline = Pipeline(stages = main_stages)
pipelineModel = pipeline.fit(df5)
df6 = pipelineModel.transform(df5)
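
Persisting the fitted pipeline is also worth considering, so that the exact same indexing and encoding can be reapplied to new data at scoring time; a minimal sketch, with an illustrative path:

# Sketch: save the fitted preprocessing pipeline (the path is a hypothetical example)
pipelineModel.write().overwrite().save('Model test/pipelineModel')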

4. Build the models

Split the dataset

# Create a new DataFrame containing only the label and features columns
# dfi = data_features.select(['label', 'features'])
dfi = df6.select(['label', 'features'])
# Split the dataset into training and test sets
train, test = dfi.randomSplit([0.7,0.3], 100)
# train, test, validation = dfi.randomSplit([0.6, 0.2, 0.2], seed=2020)
# The counts below take a very long time to compute
# print("Training Dataset Count: " + str(train.count()))
# print("Test Dataset Count: " + str(test.count()))
# Training Dataset Count: 1249630
# Test Dataset Count: 311356

Random Forest Classifier

# Model configuration
rf = RandomForestClassifier(labelCol='label', featuresCol='features', numTrees=100, maxBins=32)
# Train the model
# Fit the data to the model
rfModel = rf.fit(train)
# Use transform() to make predictions on the test set
predictions = rfModel.transform(test)
# Select some fields of the predictions for inspection
predictions.select( 'label', 'rawPrediction', 'prediction', 'probability').orderBy('probability', ascending=False).show(n=10, truncate=30) 

+-----+------------------------------+----------+------------------------------+
|label| rawPrediction|prediction| probability|
+-----+------------------------------+----------+------------------------------+
| 0|[79.15890827146472,20.84109...| 0.0|[0.7915890827146475,0.20841...|
| 0|[79.10923525773862,20.89076...| 0.0|[0.7910923525773864,0.20890...|
| 0|[78.98945518105177,21.01054...| 0.0|[0.7898945518105179,0.21010...|
| 0|[78.9282993850366,21.071700...| 0.0|[0.7892829938503662,0.21071...|
| 0|[78.91212774787148,21.08787...| 0.0|[0.7891212774787151,0.21087...|
| 0|[78.89054837885494,21.10945...| 0.0|[0.7889054837885496,0.21109...|
| 0|[78.89054837885494,21.10945...| 0.0|[0.7889054837885496,0.21109...|
| 0|[78.89054837885494,21.10945...| 0.0|[0.7889054837885496,0.21109...|
| 0|[78.89054837885494,21.10945...| 0.0|[0.7889054837885496,0.21109...|
| 0|[78.89054837885494,21.10945...| 0.0|[0.7889054837885496,0.21109...|
+-----+------------------------------+----------+------------------------------+

# Evaluate the model
evaluator = BinaryClassificationEvaluator()
print("Test Area Under ROC: " + str(evaluator.evaluate(predictions, {evaluator.metricName: "areaUnderROC"})))
# Test Area Under ROC: 0.6160155402990332
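
The same evaluator also supports area under the precision-recall curve, which is often more informative than ROC AUC when the positive class is rare; this extra line is an addition, not part of the original run:

# Sketch: also report area under the precision-recall curve
print("Test Area Under PR: " + str(evaluator.evaluate(predictions, {evaluator.metricName: "areaUnderPR"})))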

Save the model

# import sys, os
# os.getcwd() 
rfModel.write().overwrite().save('Model test/rfModel') 

Load the model

from pyspark.ml.classification import RandomForestClassificationModel 
model_1 = RandomForestClassificationModel.load('Model test/rfModel') 
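
The loaded model can then score new data that has been pushed through the same fitted pipeline; a minimal sketch, where 'new_features_df' is a hypothetical DataFrame that already contains the assembled 'features' column:

# Sketch: score new data with the loaded model ('new_features_df' is hypothetical)
new_predictions = model_1.transform(new_features_df)
new_predictions.select('prediction', 'probability').show(5)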

Gradient-Boosted Tree Classifier

# Model configuration: train a GBT classifier
gbt = GBTClassifier(maxIter=10)
# Train the model
# Fit the data to the model
gbtModel = gbt.fit(train)
# Use transform() to make predictions on the test set
predictions = gbtModel.transform(test)
# Select some fields of the predictions for inspection
predictions.select( 'label', 'rawPrediction', 'prediction', 'probability').show(10)

+-----+--------------------+----------+--------------------+
|label| rawPrediction|prediction| probability|
+-----+--------------------+----------+--------------------+
| 0|[-0.0582178194283...| 1.0|[0.47092393217850...|
| 0|[-0.0667980984304...| 1.0|[0.46665053764714...|
| 0|[-0.0560469563372...| 1.0|[0.47200582803120...|
| 0|[0.04211971652931...| 0.0|[0.52104741320470...|
| 0|[0.08544882017875...| 0.0|[0.54262072878469...|
| 0|[-0.0728647167488...| 1.0|[0.46363198136231...|
| 0|[-0.0142166646760...| 1.0|[0.49289214652005...|
| 0|[0.08754857661758...| 0.0|[0.54366279043135...|
| 0|[-0.0676538770780...| 1.0|[0.46622457631215...|
| 0|[-0.0713656699888...| 1.0|[0.46437762010753...|
+-----+--------------------+----------+--------------------+

# Evaluate the model
evaluator = BinaryClassificationEvaluator()
print("Test Area Under ROC: " + str(evaluator.evaluate(predictions, {evaluator.metricName: "areaUnderROC"})))

# Save the Gradient-Boosted Trees model

gbtModel.write().overwrite().save('Model test/gbtModel')
