diff --git a/.gitignore b/.gitignore index f5d6d0983..03319a058 100644 --- a/.gitignore +++ b/.gitignore @@ -40,3 +40,11 @@ # Graphics *.plt *.png + +# archives and compressed files +*.gz +*.bz2 +*.tar +*.tbz2 +*.tgz +*.zip diff --git a/demo/app/train-cloud-microphysics.F90 b/demo/app/train-cloud-microphysics.F90 index f22f4b343..e4c026ea6 100644 --- a/demo/app/train-cloud-microphysics.F90 +++ b/demo/app/train-cloud-microphysics.F90 @@ -1,6 +1,7 @@ ! Copyright (c), The Regents of the University of California ! Terms of use are as specified in LICENSE.txt +#include "fiats-language-support.F90" #include "julienne-assert-macros.h" program train_cloud_microphysics @@ -28,15 +29,13 @@ program train_cloud_microphysics character(len=*), parameter :: usage = new_line('') // new_line('') // & 'Usage: ' // new_line('') // new_line('') // & './build/run-fpm.sh run train-cloud-microphysics -- \' // new_line('') // & - ' --base --epochs \' // new_line('') // & - ' [--start ] [--end ] [--stride ] [--bins ] [--report ] [--tolerance ]'// & + ' --base --epochs [--bins ] [--report ] [--tolerance ] ' // new_line('') // & new_line('') // new_line('') // & 'where angular brackets denote user-provided values and square brackets denote optional arguments.' // new_line('') // & 'The presence of a file named "stop" halts execution gracefully.' 
// new_line('') type command_line_arguments_t - integer num_epochs, start_step, stride, num_bins, report_step - integer, allocatable :: end_step + integer num_epochs, num_bins, report_step character(len=:), allocatable :: base_name real cost_tolerance end type @@ -49,16 +48,15 @@ program train_cloud_microphysics integer(int64) t_start, t_finish, clock_rate call system_clock(t_start, clock_rate) - associate( & training_configuration => training_configuration_t(file_t("training_configuration.json")) & ,training_data_files => training_data_files_t(file_t("training_data_files.json")) & ) -#if defined(MULTI_IMAGE_SUPPORT) +#if defined(FIATS_MULTI_IMAGE_SUPPORT) if (this_image()==1) then #endif call read_train_write(training_configuration, training_data_files, get_command_line_arguments(), create_or_append_to("cost.plt")) -#if defined(MULTI_IMAGE_SUPPORT) +#if defined(FIATS_MULTI_IMAGE_SUPPORT) else call read_train_write(training_configuration, training_data_files, get_command_line_arguments()) end if @@ -110,16 +108,12 @@ function get_command_line_arguments() result(command_line_arguments) type(command_line_arguments_t) command_line_arguments type(command_line_t) command_line character(len=:), allocatable :: & - base_name, epochs_string, start_string, end_string, stride_string, bins_string, report_string, tolerance_string + base_name, epochs_string, bins_string, report_string, tolerance_string real cost_tolerance - integer, allocatable :: end_step - integer num_epochs, num_bins, start_step, stride, report_step + integer num_epochs, num_bins, report_step base_name = command_line%flag_value("--base") epochs_string = command_line%flag_value("--epochs") - start_string = command_line%flag_value("--start") - end_string = command_line%flag_value("--end") - stride_string = command_line%flag_value("--stride") bins_string = command_line%flag_value("--bins") report_string = command_line%flag_value("--report") tolerance_string = command_line%flag_value("--tolerance") @@ -130,27 
+124,12 @@ function get_command_line_arguments() result(command_line_arguments) read(epochs_string,*) num_epochs - stride = default_or_internal_read(1, stride_string) - start_step = default_or_internal_read(1, start_string) report_step = default_or_internal_read(1, report_string) num_bins = default_or_internal_read(3, bins_string) cost_tolerance = default_or_internal_read(5E-8, tolerance_string) - if (len(end_string)/=0) then - allocate(end_step) - read(end_string,*) end_step - end if - - if (allocated(end_step)) then - command_line_arguments = command_line_arguments_t( & - num_epochs, start_step, stride, num_bins, report_step, end_step, base_name, cost_tolerance & - ) - else - command_line_arguments = command_line_arguments_t( & - num_epochs, start_step, stride, num_bins, report_step, null(), base_name, cost_tolerance & - ) - end if - + command_line_arguments = command_line_arguments_t(num_epochs, num_bins, report_step, base_name, cost_tolerance) + end function get_command_line_arguments subroutine read_train_write(training_configuration, training_data_files, args, plot_file) @@ -158,9 +137,10 @@ subroutine read_train_write(training_configuration, training_data_files, args, p type(training_data_files_t), intent(in) :: training_data_files type(command_line_arguments_t), intent(in) :: args type(plot_file_t), intent(in), optional :: plot_file - type(NetCDF_variable_t), allocatable :: input_variable(:), output_variable(:) - type(time_derivative_t), allocatable :: derivative(:) + type(time_derivative_t), allocatable, dimension(:,:) :: derivative + type(NetCDF_variable_t), allocatable, dimension(:,:) :: input_variable, output_variable type(NetCDF_variable_t) input_time, output_time + type(NetCDF_file_t) , allocatable, dimension(:) :: NetCDF_input_file, NetCDF_output_file ! 
local variables: type(trainable_network_t) trainable_network @@ -169,107 +149,106 @@ subroutine read_train_write(training_configuration, training_data_files, args, p type(input_output_pair_t), allocatable :: input_output_pairs(:) type(tensor_t), allocatable, dimension(:) :: input_tensors, output_tensors real, allocatable :: cost(:) - integer i, network_unit, io_status, epoch, end_step, t, b, t_end, v + integer f, v, network_unit, io_status, epoch, t, b, t_end integer(int64) start_training, finish_training logical stop_requested - input_names: & - associate(input_names => training_configuration%input_variable_names()) - - allocate(input_variable(size(input_names))) - - input_file_name: & - associate(input_file_name => args%base_name // "_input.nc") - - print *,"Reading physics-based model inputs from " // input_file_name - - input_file: & - associate(input_file => netCDF_file_t(input_file_name)) - - do v=1, size(input_variable) - print *,"- reading ", input_names(v)%string() - call input_variable(v)%input(input_names(v), input_file, rank=4) - end do - - do v = 2, size(input_variable) - call_julienne_assert(input_variable(v)%conformable_with(input_variable(1))) - end do - - print *,"- reading time" - call input_time%input("time", input_file, rank=1) - - end associate input_file - end associate input_file_name - end associate input_names - - output_names: & - associate(output_names => training_configuration%output_variable_names()) - - allocate(output_variable(size(output_names))) - - output_file_name: & - associate(output_file_name => args%base_name // "_output.nc") - - print *,"Reading physics-based model outputs from " // output_file_name - - output_file: & - associate(output_file => netCDF_file_t(output_file_name)) - - do v=1, size(output_variable) - print *,"- reading ", output_names(v)%string() - call output_variable(v)%input(output_names(v), output_file, rank=4) - end do - - do v = 1, size(output_variable) - 
call_julienne_assert(output_variable(v)%conformable_with(input_variable(1))) - end do - - print *,"- reading time" - call output_time%input("time", output_file, rank=1) - - call_julienne_assert(output_time%conformable_with(input_time)) - - end associate output_file - end associate output_file_name - - print *,"Calculating desired neural-network model outputs" - - allocate(derivative(size(output_variable))) - - print '(a)',"- reading time from JSON file" - associate(time_data => time_data_t(file_t(training_data_files%fully_qualified_time_file()))) - do v = 1, size(derivative) - derivative_name: & - associate(derivative_name => "d" // output_names(v)%string() // "/dt") - print *,"- " // derivative_name - derivative(v) = time_derivative_t(old = input_variable(v), new = output_variable(v), dt=time_data%dt()) - call_julienne_assert(.not. derivative(v)%any_nan()) - end associate derivative_name - end do - end associate - end associate output_names + input_variable_files: & + associate( & + input_tensor_file_names => training_data_files%fully_qualified_inputs_files() & + ,input_component_names => training_configuration%input_variable_names() & + ) + allocate(NetCDF_input_file(size(input_tensor_file_names))) + allocate(input_variable(size(input_component_names), size(NetCDF_input_file))) + + count_files_and_variables: & + associate(num_input_files => size(NetCDF_input_file), num_variables => size(input_variable,1)) + + read_input_files: & + do f = 1, num_input_files + + print '(a)',"Reading physics-based model inputs from " // input_tensor_file_names(f)%string() + NetCDF_input_file(f) = netCDF_file_t(input_tensor_file_names(f)) + + read_variables: & + do v = 1, num_variables + print '(a)',"- reading " // input_component_names(v)%string() // " from " // input_tensor_file_names(f)%string() + call input_variable(v,f)%input(input_component_names(v), NetCDF_input_file(f), rank=4) + call_julienne_assert(input_variable(v,f)%conformable_with(input_variable(1,f))) + end do 
read_variables + + end do read_input_files + + end associate count_files_and_variables + end associate input_variable_files + + output_variable_and_time_files: & + associate( & + output_tensor_file_names => training_data_files%fully_qualified_outputs_files() & + ,output_component_names => training_configuration%output_variable_names() & + ,time_data_file_name => training_data_files%fully_qualified_time_file() & + ) + allocate(NetCDF_output_file(size(output_tensor_file_names))) + allocate(output_variable(size(output_component_names), size(NetCDF_output_file))) + + output_file_and_variable_count: & + associate(num_output_files => size(NetCDF_output_file), num_output_variables => size(output_variable,1)) + + print '(a)',"- reading time from JSON file" + read_times: & + associate(time_data => time_data_t(file_t(time_data_file_name))) + + print '(a)',"Calculating the desired neural-network model outputs: time derivatives of the outputs" + allocate(derivative(num_output_variables, num_output_files)) + + read_files: & + do f = 1, num_output_files + + print '(a)',"Reading physics-based model outputs from " // output_tensor_file_names(f)%string() + NetCDF_output_file(f) = netCDF_file_t(output_tensor_file_names(f)) + + read_variables: & + do v = 1, num_output_variables + + print '(a)',"- reading " // output_component_names(v)%string() // " from " // output_tensor_file_names(f)%string() + call output_variable(v,f)%input(output_component_names(v), NetCDF_output_file(f), rank=4) + call_julienne_assert(output_variable(v,f)%conformable_with(output_variable(1,f))) + + derivative_name: & + associate(derivative_name => "d" // output_component_names(v)%string() // "_dt") + print '(a)',"- calculating " // derivative_name + derivative(v,f) = time_derivative_t(old = input_variable(v,1), new = output_variable(v,1), dt=time_data%dt()) + call_julienne_assert(.not. 
derivative(v,f)%any_nan()) + end associate derivative_name + end do read_variables + end do read_files + + end associate read_times + end associate output_file_and_variable_count + end associate output_variable_and_time_files + + associate(num_steps => sum( (input_variable(1,:)%end_step()+1) - input_variable(1,:)%start_step())) + print *,"Defining input tensors for ", num_steps, "time steps" + end associate - if (allocated(args%end_step)) then - end_step = args%end_step - else - end_step = input_variable(1)%end_step() - end if + input_tensors = tensors(input_variable) - print *,"Defining input tensors for time step", args%start_step, "through", end_step, "with strides of", args%stride - input_tensors = tensors(input_variable, step_start = args%start_step, step_end = end_step, step_stride = args%stride) + associate(num_steps => sum( (derivative(1,:)%end_step()+1) - derivative(1,:)%start_step())) + print *,"Defining output tensors for ", num_steps, "time steps" + end associate - print *,"Defining output tensors for time step", args%start_step, "through", end_step, "with strides of", args%stride - output_tensors = tensors(derivative, step_start = args%start_step, step_end = end_step, step_stride = args%stride) + output_tensors = tensors(derivative) output_map_and_network_file: & associate( & output_map => tensor_map_t( & layer = "outputs" & - ,minima = [( derivative(v)%minimum(), v=1, size(derivative) )] & - ,maxima = [( derivative(v)%maximum(), v=1, size(derivative) )] & + ,minima = [( [( derivative(v,f)%minimum(), v=1, size(derivative,1) )], f = 1, size(derivative,2) )] & + ,maxima = [( [( derivative(v,f)%maximum(), v=1, size(derivative,1) )], f = 1, size(derivative,2) )] & ), & network_file => args%base_name // "_network.json" & ) + check_for_network_file: & block logical preexisting_network_file @@ -304,10 +283,10 @@ subroutine read_train_write(training_configuration, training_data_files, args, p ,activation%function_name( ) & ,string_t(trim(merge("true ", 
"false", training_configuration%skip_connections()))) & ] & - ,input_map = tensor_map_t( & - layer = "inputs" & - ,minima = [( input_variable(v)%minimum(), v=1, size( input_variable) )] & - ,maxima = [( input_variable(v)%maximum(), v=1, size( input_variable) )] & + ,input_map = tensor_map_t( & + layer = "inputs" & + ,minima = [( [( input_variable(v,f)%minimum(), v = 1, size(input_variable,1) )], f = 1, size(input_variable,2) )] & + ,maxima = [( [( input_variable(v,f)%maximum(), v = 1, size(input_variable,1) )], f = 1, size(input_variable,2) )] & ) & ,output_map = output_map & ) @@ -315,7 +294,6 @@ subroutine read_train_write(training_configuration, training_data_files, args, p end block initialize_network end if read_or_initialize_network - end block check_for_network_file print *, "Conditionally sampling for a flat distribution of output values" @@ -324,25 +302,32 @@ subroutine read_train_write(training_configuration, training_data_files, args, p block integer i logical occupied(args%num_bins, args%num_bins) - logical keepers(size(output_tensors)) type(phase_space_bin_t), allocatable :: bin(:) type(occupancy_t) occupancy +#if !defined(__flang__) + logical keepers(size(output_tensors)) + keepers = .false. +#else + logical, allocatable :: keepers(:) + allocate(keepers(size(output_tensors)), source = .false.) +#endif + print *, "Determine the phase-space bin that holds each output tensor" ! Determine the phase-space bin that holds each output tensor associate(output_minima => output_map%minima(), output_maxima => output_map%maxima()) bin = [(phase_space_bin_t(output_tensors(i), output_minima, output_maxima, args%num_bins), i = 1, size(output_tensors))] end associate - call occupancy%vacate( dims = [( args%num_bins, i = 1, size(output_variable))] ) - - keepers = .false. 
+ call occupancy%vacate( dims = [( args%num_bins, i = 1, size(derivative,1))] ) + print *, "Populate bins" do i = 1, size(output_tensors) if (occupancy%occupied(bin(i)%loc)) cycle call occupancy%occupy(bin(i)%loc) keepers(i) = .true. end do + print *, "Pack remaining input/output tensor pairs" input_output_pairs = input_output_pair_t(pack(input_tensors, keepers), pack(output_tensors, keepers)) print '(*(a,i))' & @@ -370,18 +355,18 @@ subroutine read_train_write(training_configuration, training_data_files, args, p print *, " Epoch Cost (avg)" call system_clock(start_training) - + train_write_and_maybe_exit: & block integer first_epoch integer me -#if defined(MULTI_IMAGE_SUPPORT) +#if defined(FIATS_MULTI_IMAGE_SUPPORT) me = this_image() #else me = 1 #endif if (me==1) first_epoch = plot_file%previous_epoch + 1 -#if defined(MULTI_IMAGE_SUPPORT) +#if defined(FIATS_MULTI_IMAGE_SUPPORT) call co_broadcast(first_epoch, source_image=1) #endif last_epoch: & @@ -402,7 +387,7 @@ subroutine read_train_write(training_configuration, training_data_files, args, p image_1_maybe_writes: & if (me==1 .and. 
any([converged, epoch==[first_epoch,last_epoch], mod(epoch,args%report_step)==0])) then - !print '(*(g0,4x))', epoch, average_cost + print '(*(g0,4x))', epoch, average_cost write(plot_file%plot_unit,'(*(g0,4x))') epoch, average_cost associate(json_file => trainable_network%to_json()) @@ -411,7 +396,7 @@ subroutine read_train_write(training_configuration, training_data_files, args, p end if image_1_maybe_writes - signal_convergence: & + signal_convergence: & if (converged) then block integer unit @@ -439,6 +424,7 @@ subroutine read_train_write(training_configuration, training_data_files, args, p end associate output_map_and_network_file call system_clock(finish_training) + print *,"Training time: ", real(finish_training - start_training, real64)/real(clock_rate, real64),"for", & args%num_epochs,"epochs" close(plot_file%plot_unit) diff --git a/demo/fpm.toml b/demo/fpm.toml index 7095b0469..10a9fd64d 100644 --- a/demo/fpm.toml +++ b/demo/fpm.toml @@ -1,6 +1,6 @@ name = "Fiats-Demonstration-Applications" [dependencies] -julienne = {git = "https://github.com/berkeleylab/julienne", tag = "3.2.1"} +julienne = {git = "https://github.com/berkeleylab/julienne", tag = "3.6.1"} fiats = {path = "../"} netcdf-interfaces = {git = "https://github.com/berkeleylab/netcdf-interfaces.git", rev = "d2bbb71ac52b4e346b62572b1ca1620134481096"} diff --git a/demo/src/NetCDF_variable_m.f90 b/demo/src/NetCDF_variable_m.f90 index f3cf4d022..79ef22003 100644 --- a/demo/src/NetCDF_variable_m.f90 +++ b/demo/src/NetCDF_variable_m.f90 @@ -26,6 +26,8 @@ module NetCDF_variable_m procedure, private, non_overridable :: default_real_conformable_with, double_precision_conformable_with generic :: rank => default_real_rank , double_precision_rank procedure, private, non_overridable :: default_real_rank , double_precision_rank + generic :: start_step => default_real_start_step , double_precision_start_step + procedure, private, non_overridable :: default_real_start_step , double_precision_start_step generic 
:: end_step => default_real_end_step , double_precision_end_step procedure, private, non_overridable :: default_real_end_step , double_precision_end_step generic :: any_nan => default_real_any_nan , double_precision_any_nan @@ -145,6 +147,18 @@ elemental module function double_precision_rank(self) result(my_rank) integer my_rank end function + elemental module function default_real_start_step(self) result(start_step) + implicit none + class(NetCDF_variable_t), intent(in) :: self + integer start_step + end function + + elemental module function double_precision_start_step(self) result(start_step) + implicit none + class(NetCDF_variable_t(double_precision)), intent(in) :: self + integer start_step + end function + elemental module function default_real_end_step(self) result(end_step) implicit none class(NetCDF_variable_t), intent(in) :: self @@ -205,11 +219,10 @@ elemental module function double_precision_maximum(self) result(maximum) real maximum end function - module function tensors(NetCDF_variables, step_start, step_end, step_stride) + module function tensors(NetCDF_variables) implicit none - class(NetCDF_variable_t), intent(in) :: NetCDF_variables(:) + class(NetCDF_variable_t), intent(in) :: NetCDF_variables(:,:) type(tensor_t), allocatable :: tensors(:) - integer, optional :: step_start, step_end, step_stride end function elemental module function default_real_end_time(self) result(end_time) diff --git a/demo/src/NetCDF_variable_s.F90 b/demo/src/NetCDF_variable_s.F90 index 760286a8b..71624ebfa 100644 --- a/demo/src/NetCDF_variable_s.F90 +++ b/demo/src/NetCDF_variable_s.F90 @@ -213,6 +213,36 @@ pure function double_precision_components_allocated(NetCDF_variable) result(allo end associate end procedure + module procedure default_real_start_step + select case(self%rank()) + case(1) + start_step = lbound(self%values_1D_,1) + case(2) + start_step = lbound(self%values_2D_,2) + case(3) + start_step = lbound(self%values_3D_,3) + case(4) + start_step = 
lbound(self%values_4D_,4) + case default + error stop "NetCDF_variable_s(default_real_start_step): unsupported rank" + end select + end procedure + + module procedure double_precision_start_step + select case(self%rank()) + case(1) + start_step = lbound(self%values_1D_,1) + case(2) + start_step = lbound(self%values_2D_,2) + case(3) + start_step = lbound(self%values_3D_,3) + case(4) + start_step = lbound(self%values_4D_,4) + case default + error stop "NetCDF_variable_s(double_precision_start_step): unsupported rank" + end select + end procedure + module procedure default_real_end_step select case(self%rank()) case(1) @@ -399,30 +429,34 @@ pure function double_precision_upper_bounds(NetCDF_variable) result(ubounds) module procedure tensors - integer t_start, t_end, t_stride - - select case(NetCDF_variables(1)%rank()) - case(4) + integer v, f, lon, lat, lev, time - t_start = default_or_present_value(1, step_start ) - t_stride = default_or_present_value(1, step_stride) - t_end = default_or_present_value(size(NetCDF_variables(1)%values_4D_,4), step_end) + associate(component_rank => NetCDF_variables(1,1)%rank()) - associate( longitudes => size(NetCDF_variables(1)%values_4D_,1) & - ,latitudes => size(NetCDF_variables(1)%values_4D_,2) & - ,levels => size(NetCDF_variables(1)%values_4D_,3) & - ) - block - integer v, lon, lat, lev, time + call_julienne_assert(.all. (NetCDF_variables(:,:)%rank() .equalsExpected. 
component_rank)) - tensors = [( [( [( [( tensor_t( [( NetCDF_variables(v)%values_4D_(lon,lat,lev,time), v=1,size(NetCDF_variables) )] ), & - lon = 1, longitudes)], lat = 1, latitudes)], lev = 1, levels)], time = t_start, t_end, t_stride)] - end block - end associate + select case(component_rank) + case(4) + associate( longitudes => size(NetCDF_variables(1,1)%values_4D_,1) & + ,latitudes => size(NetCDF_variables(1,1)%values_4D_,2) & + ,levels => size(NetCDF_variables(1,1)%values_4D_,3) & + ,t_end => size(NetCDF_variables(1,1)%values_4D_,4) & + ,variables => size(NetCDF_variables,1) & + ,files => size(NetCDF_variables,2) & + ) + call_julienne_assert(.all. ([( [( size(NetCDF_variables(v,f)%values_4D_,1), v = 1, variables)], f = 1, files)] .equalsExpected. longitudes)) + call_julienne_assert(.all. ([( [( size(NetCDF_variables(v,f)%values_4D_,2), v = 1, variables)], f = 1, files)] .equalsExpected. latitudes)) + call_julienne_assert(.all. ([( [( size(NetCDF_variables(v,f)%values_4D_,3), v = 1, variables)], f = 1, files)] .equalsExpected. levels)) + call_julienne_assert(.all. ([( [( size(NetCDF_variables(v,f)%values_4D_,4), v = 1, variables)], f = 1, files)] .equalsExpected. t_end)) + + tensors = [( [( [( [( [( tensor_t( [( NetCDF_variables(v,f)%values_4D_(lon,lat,lev,time), v=1,size(NetCDF_variables,1) )] ), & + lon = 1, longitudes)], lat = 1, latitudes)], lev = 1, levels)], time = 1, t_end )], f = 1, files )] + end associate + case default + error stop "NetCDF_variable_s(tensors): unsupported rank" + end select - case default - error stop "NetCDF_variable_s(tensors): unsupported rank)" - end select + end associate end procedure diff --git a/demo/train.sh b/demo/train.sh index 3dffe24cf..c28df20cd 100755 --- a/demo/train.sh +++ b/demo/train.sh @@ -1,6 +1,16 @@ #!/bin/bash -min_bins=$1 -max_bins=$2 + +set -e # exit if any simple command returns a non-zero exit code + +min_bins=${1:-3} +max_bins=${2:-4} +executable=${3:-"train-cloud-microphysics"} + +if [ !
-x $executable ]; then + printf "\n $executable not found or not executable. Search ./build and, if you find $executable, create a soft link to it in this directory.\n\n" + exit 1 +fi + let subfloor=$min_bins-1 j=subfloor while (( j++ < max_bins )); do @@ -19,7 +29,7 @@ while (( j++ < max_bins )); do echo "" echo "---------> Run $i <---------" - ./train-cloud-microphysics --base training --epochs 1000000 --bins $j --report 1000 --start 360 --stride 10 --tolerance "5.0E-08" + ./"$executable" --base fiats-training-data/training --epochs 1000000 --bins $j --report 1000 --tolerance "5.0E-08" if [ -f converged ]; then echo "" diff --git a/demo/training_data_files.json b/demo/training_data_files.json index baff97882..d88ffee0b 100644 --- a/demo/training_data_files.json +++ b/demo/training_data_files.json @@ -3,6 +3,6 @@ "path" : "fiats-training-data", "inputs prefix" : "training_input-image-_", "outputs prefix" : "training_output-image-_", - "infixes" : ["0", "50"] + "infixes" : ["450", "500"] } } diff --git a/example/supporting-modules/saturated_mixing_ratio_m.f90 b/example/supporting-modules/saturated_mixing_ratio_m.F90 similarity index 95% rename from example/supporting-modules/saturated_mixing_ratio_m.f90 rename to example/supporting-modules/saturated_mixing_ratio_m.F90 index a0ac2e90a..3468ee8dd 100644 --- a/example/supporting-modules/saturated_mixing_ratio_m.f90 +++ b/example/supporting-modules/saturated_mixing_ratio_m.F90 @@ -31,7 +31,7 @@ module saturated_mixing_ratio_m !! in the Intermediate Complexity Atmospheric Research (ICAR) model file src/physics/mp_simple.f90. !! ICAR is distributed under the above MIT license. See https://github.com/ncar/icar. use fiats_m, only : tensor_t - use julienne_m, only : call_julienne_assert_, operator(.equalsExpected.) 
+ use julienne_m, only : call_julienne_assert_, operator(.also.), operator(.equalsExpected.), operator(//) implicit none private @@ -75,7 +75,7 @@ elemental function y(x_in) result(a) type(tensor_t), intent(in) :: x_in type(tensor_t) a associate(x => x_in%values()) - call_julienne_assert((lbound(x,1) .equalsExpected. 1) .also. (ubound(x,1) .equalsExpected. 2,"y(x) :: sufficient input")) + call_julienne_assert((lbound(x,1) .equalsExpected. 1) .also. ((ubound(x,1) .equalsExpected. 2) // "y(x) :: sufficient input")) a = tensor_t([saturated_mixing_ratio(x(1),x(2))]) end associate end function diff --git a/fpm.toml b/fpm.toml index e70424e5b..920a17431 100644 --- a/fpm.toml +++ b/fpm.toml @@ -1,3 +1,3 @@ name = "fiats" [dependencies] -julienne = {git = "https://github.com/berkeleylab/julienne", tag = "3.2.1"} +julienne = {git = "https://github.com/berkeleylab/julienne", tag = "3.6.1"} diff --git a/include/language-support.F90 b/include/fiats-language-support.F90 similarity index 87% rename from include/language-support.F90 rename to include/fiats-language-support.F90 index d64f0096f..8aef911f7 100644 --- a/include/language-support.F90 +++ b/include/fiats-language-support.F90 @@ -1,6 +1,9 @@ ! Copyright (c), The Regents of the University of California ! Terms of use are as specified in LICENSE.txt +#ifndef FIATS_LANGUAGE_SUPPORT +#define FIATS_LANGUAGE_SUPPORT + #ifndef F2023_LOCALITY #if defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 202400) # define F2023_LOCALITY 1 @@ -18,3 +21,5 @@ # define MULTI_IMAGE_SUPPORT 1 #endif #endif + +#endif